810-iommu-support-layerscape.patch

  1. From 89a1f0d7826df69d8e02268b97bc3da02e07203f Mon Sep 17 00:00:00 2001
  2. From: Yangbo Lu <[email protected]>
  3. Date: Thu, 5 Jul 2018 17:35:15 +0800
  4. Subject: [PATCH 22/32] iommu: support layerscape
  5. This is an integrated patch for layerscape smmu support.
  6. Signed-off-by: Eric Auger <[email protected]>
  7. Signed-off-by: Robin Murphy <[email protected]>
  8. Signed-off-by: Nipun Gupta <[email protected]>
  9. Signed-off-by: Sunil Goutham <[email protected]>
  10. Signed-off-by: Yangbo Lu <[email protected]>
  11. ---
  12. drivers/iommu/amd_iommu.c | 56 +++++---
  13. drivers/iommu/arm-smmu-v3.c | 111 +++++++++++-----
  14. drivers/iommu/arm-smmu.c | 100 ++++++++++++---
  15. drivers/iommu/dma-iommu.c | 242 +++++++++++++++++++++++++++++------
  16. drivers/iommu/intel-iommu.c | 92 ++++++++++---
  17. drivers/iommu/iommu.c | 240 ++++++++++++++++++++++++++++++++--
  18. drivers/iommu/mtk_iommu.c | 2 +
  19. drivers/iommu/mtk_iommu_v1.c | 2 +
  20. include/linux/dma-iommu.h | 11 ++
  21. include/linux/iommu.h | 57 +++++++--
  22. 10 files changed, 762 insertions(+), 151 deletions(-)
  23. --- a/drivers/iommu/amd_iommu.c
  24. +++ b/drivers/iommu/amd_iommu.c
  25. @@ -373,6 +373,8 @@ static struct iommu_group *acpihid_devic
  26. if (!entry->group)
  27. entry->group = generic_device_group(dev);
  28. + else
  29. + iommu_group_ref_get(entry->group);
  30. return entry->group;
  31. }
  32. @@ -3160,9 +3162,10 @@ static bool amd_iommu_capable(enum iommu
  33. return false;
  34. }
  35. -static void amd_iommu_get_dm_regions(struct device *dev,
  36. - struct list_head *head)
  37. +static void amd_iommu_get_resv_regions(struct device *dev,
  38. + struct list_head *head)
  39. {
  40. + struct iommu_resv_region *region;
  41. struct unity_map_entry *entry;
  42. int devid;
  43. @@ -3171,41 +3174,56 @@ static void amd_iommu_get_dm_regions(str
  44. return;
  45. list_for_each_entry(entry, &amd_iommu_unity_map, list) {
  46. - struct iommu_dm_region *region;
  47. + size_t length;
  48. + int prot = 0;
  49. if (devid < entry->devid_start || devid > entry->devid_end)
  50. continue;
  51. - region = kzalloc(sizeof(*region), GFP_KERNEL);
  52. + length = entry->address_end - entry->address_start;
  53. + if (entry->prot & IOMMU_PROT_IR)
  54. + prot |= IOMMU_READ;
  55. + if (entry->prot & IOMMU_PROT_IW)
  56. + prot |= IOMMU_WRITE;
  57. +
  58. + region = iommu_alloc_resv_region(entry->address_start,
  59. + length, prot,
  60. + IOMMU_RESV_DIRECT);
  61. if (!region) {
  62. pr_err("Out of memory allocating dm-regions for %s\n",
  63. dev_name(dev));
  64. return;
  65. }
  66. -
  67. - region->start = entry->address_start;
  68. - region->length = entry->address_end - entry->address_start;
  69. - if (entry->prot & IOMMU_PROT_IR)
  70. - region->prot |= IOMMU_READ;
  71. - if (entry->prot & IOMMU_PROT_IW)
  72. - region->prot |= IOMMU_WRITE;
  73. -
  74. list_add_tail(&region->list, head);
  75. }
  76. +
  77. + region = iommu_alloc_resv_region(MSI_RANGE_START,
  78. + MSI_RANGE_END - MSI_RANGE_START + 1,
  79. + 0, IOMMU_RESV_MSI);
  80. + if (!region)
  81. + return;
  82. + list_add_tail(&region->list, head);
  83. +
  84. + region = iommu_alloc_resv_region(HT_RANGE_START,
  85. + HT_RANGE_END - HT_RANGE_START + 1,
  86. + 0, IOMMU_RESV_RESERVED);
  87. + if (!region)
  88. + return;
  89. + list_add_tail(&region->list, head);
  90. }
  91. -static void amd_iommu_put_dm_regions(struct device *dev,
  92. +static void amd_iommu_put_resv_regions(struct device *dev,
  93. struct list_head *head)
  94. {
  95. - struct iommu_dm_region *entry, *next;
  96. + struct iommu_resv_region *entry, *next;
  97. list_for_each_entry_safe(entry, next, head, list)
  98. kfree(entry);
  99. }
  100. -static void amd_iommu_apply_dm_region(struct device *dev,
  101. +static void amd_iommu_apply_resv_region(struct device *dev,
  102. struct iommu_domain *domain,
  103. - struct iommu_dm_region *region)
  104. + struct iommu_resv_region *region)
  105. {
  106. struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
  107. unsigned long start, end;
  108. @@ -3229,9 +3247,9 @@ static const struct iommu_ops amd_iommu_
  109. .add_device = amd_iommu_add_device,
  110. .remove_device = amd_iommu_remove_device,
  111. .device_group = amd_iommu_device_group,
  112. - .get_dm_regions = amd_iommu_get_dm_regions,
  113. - .put_dm_regions = amd_iommu_put_dm_regions,
  114. - .apply_dm_region = amd_iommu_apply_dm_region,
  115. + .get_resv_regions = amd_iommu_get_resv_regions,
  116. + .put_resv_regions = amd_iommu_put_resv_regions,
  117. + .apply_resv_region = amd_iommu_apply_resv_region,
  118. .pgsize_bitmap = AMD_IOMMU_PGSIZES,
  119. };
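
For reference, the amd_iommu.c hunks above replace the driver-private get_dm_regions/put_dm_regions callbacks with the generic reserved-region API: each unity-map entry becomes an iommu_resv_region built with iommu_alloc_resv_region(), with the AMD protection bits translated to the generic IOMMU_READ/IOMMU_WRITE flags and the length derived from the range bounds. The following stand-alone user-space sketch models just that translation step; the struct and flag values are simplified stand-ins, not the driver's real definitions.

#include <stdio.h>
#include <stdint.h>

/* simplified stand-ins for the kernel flags used in the hunk above */
#define IOMMU_PROT_IR 0x1   /* AMD unity-map: readable */
#define IOMMU_PROT_IW 0x2   /* AMD unity-map: writable */
#define IOMMU_READ    0x1   /* generic IOMMU prot bits */
#define IOMMU_WRITE   0x2

struct unity_map_entry {            /* reduced model of the AMD entry */
	uint64_t address_start;
	uint64_t address_end;
	int prot;
};

/* mirrors the translation done in amd_iommu_get_resv_regions() */
static void entry_to_resv(const struct unity_map_entry *e,
			  uint64_t *start, uint64_t *length, int *prot)
{
	*start = e->address_start;
	*length = e->address_end - e->address_start;
	*prot = 0;
	if (e->prot & IOMMU_PROT_IR)
		*prot |= IOMMU_READ;
	if (e->prot & IOMMU_PROT_IW)
		*prot |= IOMMU_WRITE;
}

int main(void)
{
	struct unity_map_entry e = { 0xfee00000, 0xfee01000,
				     IOMMU_PROT_IR | IOMMU_PROT_IW };
	uint64_t start, length;
	int prot;

	entry_to_resv(&e, &start, &length, &prot);
	printf("direct region: start=0x%llx length=0x%llx prot=0x%x\n",
	       (unsigned long long)start, (unsigned long long)length, prot);
	return 0;
}
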
  120. --- a/drivers/iommu/arm-smmu-v3.c
  121. +++ b/drivers/iommu/arm-smmu-v3.c
  122. @@ -410,6 +410,9 @@
  123. /* High-level queue structures */
  124. #define ARM_SMMU_POLL_TIMEOUT_US 100
  125. +#define MSI_IOVA_BASE 0x8000000
  126. +#define MSI_IOVA_LENGTH 0x100000
  127. +
  128. static bool disable_bypass;
  129. module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
  130. MODULE_PARM_DESC(disable_bypass,
  131. @@ -552,9 +555,14 @@ struct arm_smmu_s2_cfg {
  132. };
  133. struct arm_smmu_strtab_ent {
  134. - bool valid;
  135. -
  136. - bool bypass; /* Overrides s1/s2 config */
  137. + /*
  138. + * An STE is "assigned" if the master emitting the corresponding SID
  139. + * is attached to a domain. The behaviour of an unassigned STE is
  140. + * determined by the disable_bypass parameter, whereas an assigned
  141. + * STE behaves according to s1_cfg/s2_cfg, which themselves are
  142. + * configured according to the domain type.
  143. + */
  144. + bool assigned;
  145. struct arm_smmu_s1_cfg *s1_cfg;
  146. struct arm_smmu_s2_cfg *s2_cfg;
  147. };
  148. @@ -627,6 +635,7 @@ enum arm_smmu_domain_stage {
  149. ARM_SMMU_DOMAIN_S1 = 0,
  150. ARM_SMMU_DOMAIN_S2,
  151. ARM_SMMU_DOMAIN_NESTED,
  152. + ARM_SMMU_DOMAIN_BYPASS,
  153. };
  154. struct arm_smmu_domain {
  155. @@ -1000,9 +1009,9 @@ static void arm_smmu_write_strtab_ent(st
  156. * This is hideously complicated, but we only really care about
  157. * three cases at the moment:
  158. *
  159. - * 1. Invalid (all zero) -> bypass (init)
  160. - * 2. Bypass -> translation (attach)
  161. - * 3. Translation -> bypass (detach)
  162. + * 1. Invalid (all zero) -> bypass/fault (init)
  163. + * 2. Bypass/fault -> translation/bypass (attach)
  164. + * 3. Translation/bypass -> bypass/fault (detach)
  165. *
  166. * Given that we can't update the STE atomically and the SMMU
  167. * doesn't read the thing in a defined order, that leaves us
  168. @@ -1041,11 +1050,15 @@ static void arm_smmu_write_strtab_ent(st
  169. }
  170. /* Nuke the existing STE_0 value, as we're going to rewrite it */
  171. - val = ste->valid ? STRTAB_STE_0_V : 0;
  172. + val = STRTAB_STE_0_V;
  173. +
  174. + /* Bypass/fault */
  175. + if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
  176. + if (!ste->assigned && disable_bypass)
  177. + val |= STRTAB_STE_0_CFG_ABORT;
  178. + else
  179. + val |= STRTAB_STE_0_CFG_BYPASS;
  180. - if (ste->bypass) {
  181. - val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
  182. - : STRTAB_STE_0_CFG_BYPASS;
  183. dst[0] = cpu_to_le64(val);
  184. dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
  185. << STRTAB_STE_1_SHCFG_SHIFT);
  186. @@ -1108,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(st
  187. static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
  188. {
  189. unsigned int i;
  190. - struct arm_smmu_strtab_ent ste = {
  191. - .valid = true,
  192. - .bypass = true,
  193. - };
  194. + struct arm_smmu_strtab_ent ste = { .assigned = false };
  195. for (i = 0; i < nent; ++i) {
  196. arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
  197. @@ -1364,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_
  198. switch (cap) {
  199. case IOMMU_CAP_CACHE_COHERENCY:
  200. return true;
  201. - case IOMMU_CAP_INTR_REMAP:
  202. - return true; /* MSIs are just memory writes */
  203. case IOMMU_CAP_NOEXEC:
  204. return true;
  205. default:
  206. @@ -1377,7 +1385,9 @@ static struct iommu_domain *arm_smmu_dom
  207. {
  208. struct arm_smmu_domain *smmu_domain;
  209. - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
  210. + if (type != IOMMU_DOMAIN_UNMANAGED &&
  211. + type != IOMMU_DOMAIN_DMA &&
  212. + type != IOMMU_DOMAIN_IDENTITY)
  213. return NULL;
  214. /*
  215. @@ -1508,6 +1518,11 @@ static int arm_smmu_domain_finalise(stru
  216. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  217. struct arm_smmu_device *smmu = smmu_domain->smmu;
  218. + if (domain->type == IOMMU_DOMAIN_IDENTITY) {
  219. + smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
  220. + return 0;
  221. + }
  222. +
  223. /* Restrict the stage to what we can actually support */
  224. if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
  225. smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
  226. @@ -1580,7 +1595,7 @@ static __le64 *arm_smmu_get_step_for_sid
  227. return step;
  228. }
  229. -static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
  230. +static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
  231. {
  232. int i, j;
  233. struct arm_smmu_master_data *master = fwspec->iommu_priv;
  234. @@ -1599,17 +1614,14 @@ static int arm_smmu_install_ste_for_dev(
  235. arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
  236. }
  237. -
  238. - return 0;
  239. }
  240. static void arm_smmu_detach_dev(struct device *dev)
  241. {
  242. struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
  243. - master->ste.bypass = true;
  244. - if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
  245. - dev_warn(dev, "failed to install bypass STE\n");
  246. + master->ste.assigned = false;
  247. + arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
  248. }
  249. static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
  250. @@ -1628,7 +1640,7 @@ static int arm_smmu_attach_dev(struct io
  251. ste = &master->ste;
  252. /* Already attached to a different domain? */
  253. - if (!ste->bypass)
  254. + if (ste->assigned)
  255. arm_smmu_detach_dev(dev);
  256. mutex_lock(&smmu_domain->init_mutex);
  257. @@ -1649,10 +1661,12 @@ static int arm_smmu_attach_dev(struct io
  258. goto out_unlock;
  259. }
  260. - ste->bypass = false;
  261. - ste->valid = true;
  262. + ste->assigned = true;
  263. - if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
  264. + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
  265. + ste->s1_cfg = NULL;
  266. + ste->s2_cfg = NULL;
  267. + } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
  268. ste->s1_cfg = &smmu_domain->s1_cfg;
  269. ste->s2_cfg = NULL;
  270. arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
  271. @@ -1661,10 +1675,7 @@ static int arm_smmu_attach_dev(struct io
  272. ste->s2_cfg = &smmu_domain->s2_cfg;
  273. }
  274. - ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
  275. - if (ret < 0)
  276. - ste->valid = false;
  277. -
  278. + arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
  279. out_unlock:
  280. mutex_unlock(&smmu_domain->init_mutex);
  281. return ret;
  282. @@ -1695,6 +1706,9 @@ arm_smmu_unmap(struct iommu_domain *doma
  283. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  284. struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
  285. + if (domain->type == IOMMU_DOMAIN_IDENTITY)
  286. + return iova;
  287. +
  288. if (!ops)
  289. return 0;
  290. @@ -1810,7 +1824,7 @@ static void arm_smmu_remove_device(struc
  291. return;
  292. master = fwspec->iommu_priv;
  293. - if (master && master->ste.valid)
  294. + if (master && master->ste.assigned)
  295. arm_smmu_detach_dev(dev);
  296. iommu_group_remove_device(dev);
  297. kfree(master);
  298. @@ -1839,6 +1853,9 @@ static int arm_smmu_domain_get_attr(stru
  299. {
  300. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  301. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  302. + return -EINVAL;
  303. +
  304. switch (attr) {
  305. case DOMAIN_ATTR_NESTING:
  306. *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
  307. @@ -1854,6 +1871,9 @@ static int arm_smmu_domain_set_attr(stru
  308. int ret = 0;
  309. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  310. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  311. + return -EINVAL;
  312. +
  313. mutex_lock(&smmu_domain->init_mutex);
  314. switch (attr) {
  315. @@ -1883,6 +1903,31 @@ static int arm_smmu_of_xlate(struct devi
  316. return iommu_fwspec_add_ids(dev, args->args, 1);
  317. }
  318. +static void arm_smmu_get_resv_regions(struct device *dev,
  319. + struct list_head *head)
  320. +{
  321. + struct iommu_resv_region *region;
  322. + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
  323. +
  324. + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
  325. + prot, IOMMU_RESV_SW_MSI);
  326. + if (!region)
  327. + return;
  328. +
  329. + list_add_tail(&region->list, head);
  330. +
  331. + iommu_dma_get_resv_regions(dev, head);
  332. +}
  333. +
  334. +static void arm_smmu_put_resv_regions(struct device *dev,
  335. + struct list_head *head)
  336. +{
  337. + struct iommu_resv_region *entry, *next;
  338. +
  339. + list_for_each_entry_safe(entry, next, head, list)
  340. + kfree(entry);
  341. +}
  342. +
  343. static struct iommu_ops arm_smmu_ops = {
  344. .capable = arm_smmu_capable,
  345. .domain_alloc = arm_smmu_domain_alloc,
  346. @@ -1898,6 +1943,8 @@ static struct iommu_ops arm_smmu_ops = {
  347. .domain_get_attr = arm_smmu_domain_get_attr,
  348. .domain_set_attr = arm_smmu_domain_set_attr,
  349. .of_xlate = arm_smmu_of_xlate,
  350. + .get_resv_regions = arm_smmu_get_resv_regions,
  351. + .put_resv_regions = arm_smmu_put_resv_regions,
  352. .pgsize_bitmap = -1UL, /* Restricted during device attach */
  353. };
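
The new "assigned" flag and the IOMMU_DOMAIN_IDENTITY support above reduce the STE configuration to a small decision: an unassigned STE follows the disable_bypass parameter (abort or bypass), an assigned STE with neither an s1 nor an s2 config (an identity-domain attach) bypasses, and anything else translates. A stand-alone model of that decision; the enum and function names are made up for illustration.

#include <stdio.h>
#include <stdbool.h>

/* illustrative outcomes; the real driver encodes these in STE config fields */
enum ste_cfg { STE_ABORT, STE_BYPASS, STE_TRANSLATE };

/* mirrors the bypass/fault logic in arm_smmu_write_strtab_ent() above */
static enum ste_cfg ste_config(bool assigned, bool has_cfg, bool disable_bypass)
{
	if (!assigned)
		return disable_bypass ? STE_ABORT : STE_BYPASS;
	if (!has_cfg)            /* assigned to an identity domain */
		return STE_BYPASS;
	return STE_TRANSLATE;    /* assigned with an s1 or s2 config */
}

int main(void)
{
	static const char *name[] = { "abort", "bypass", "translate" };
	bool disable_bypass = true;

	printf("unassigned             -> %s\n", name[ste_config(false, false, disable_bypass)]);
	printf("identity domain attach -> %s\n", name[ste_config(true, false, disable_bypass)]);
	printf("translating attach     -> %s\n", name[ste_config(true, true, disable_bypass)]);
	return 0;
}
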
  354. --- a/drivers/iommu/arm-smmu.c
  355. +++ b/drivers/iommu/arm-smmu.c
  356. @@ -49,6 +49,7 @@
  357. #include <linux/spinlock.h>
  358. #include <linux/amba/bus.h>
  359. +#include <linux/fsl/mc.h>
  360. #include "io-pgtable.h"
  361. @@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg {
  362. #define ARM_MMU500_ACTLR_CPRE (1 << 1)
  363. #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
  364. +#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
  365. #define CB_PAR_F (1 << 0)
  366. @@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg {
  367. #define FSYNR0_WNR (1 << 4)
  368. +#define MSI_IOVA_BASE 0x8000000
  369. +#define MSI_IOVA_LENGTH 0x100000
  370. +
  371. static int force_stage;
  372. module_param(force_stage, int, S_IRUGO);
  373. MODULE_PARM_DESC(force_stage,
  374. @@ -401,6 +406,7 @@ enum arm_smmu_domain_stage {
  375. ARM_SMMU_DOMAIN_S1 = 0,
  376. ARM_SMMU_DOMAIN_S2,
  377. ARM_SMMU_DOMAIN_NESTED,
  378. + ARM_SMMU_DOMAIN_BYPASS,
  379. };
  380. struct arm_smmu_domain {
  381. @@ -821,6 +827,12 @@ static int arm_smmu_init_domain_context(
  382. if (smmu_domain->smmu)
  383. goto out_unlock;
  384. + if (domain->type == IOMMU_DOMAIN_IDENTITY) {
  385. + smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
  386. + smmu_domain->smmu = smmu;
  387. + goto out_unlock;
  388. + }
  389. +
  390. /*
  391. * Mapping the requested stage onto what we support is surprisingly
  392. * complicated, mainly because the spec allows S1+S2 SMMUs without
  393. @@ -981,7 +993,7 @@ static void arm_smmu_destroy_domain_cont
  394. void __iomem *cb_base;
  395. int irq;
  396. - if (!smmu)
  397. + if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
  398. return;
  399. /*
  400. @@ -1004,7 +1016,9 @@ static struct iommu_domain *arm_smmu_dom
  401. {
  402. struct arm_smmu_domain *smmu_domain;
  403. - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
  404. + if (type != IOMMU_DOMAIN_UNMANAGED &&
  405. + type != IOMMU_DOMAIN_DMA &&
  406. + type != IOMMU_DOMAIN_IDENTITY)
  407. return NULL;
  408. /*
  409. * Allocate the domain and initialise some of its data structures.
  410. @@ -1202,10 +1216,15 @@ static int arm_smmu_domain_add_master(st
  411. {
  412. struct arm_smmu_device *smmu = smmu_domain->smmu;
  413. struct arm_smmu_s2cr *s2cr = smmu->s2crs;
  414. - enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
  415. u8 cbndx = smmu_domain->cfg.cbndx;
  416. + enum arm_smmu_s2cr_type type;
  417. int i, idx;
  418. + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
  419. + type = S2CR_TYPE_BYPASS;
  420. + else
  421. + type = S2CR_TYPE_TRANS;
  422. +
  423. for_each_cfg_sme(fwspec, i, idx) {
  424. if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
  425. continue;
  426. @@ -1343,6 +1362,9 @@ static phys_addr_t arm_smmu_iova_to_phys
  427. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
428. struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
  429. + if (domain->type == IOMMU_DOMAIN_IDENTITY)
  430. + return iova;
  431. +
  432. if (!ops)
  433. return 0;
  434. @@ -1368,8 +1390,6 @@ static bool arm_smmu_capable(enum iommu_
  435. * requests.
  436. */
  437. return true;
  438. - case IOMMU_CAP_INTR_REMAP:
  439. - return true; /* MSIs are just memory writes */
  440. case IOMMU_CAP_NOEXEC:
  441. return true;
  442. default:
  443. @@ -1478,10 +1498,12 @@ static struct iommu_group *arm_smmu_devi
  444. }
  445. if (group)
  446. - return group;
  447. + return iommu_group_ref_get(group);
  448. if (dev_is_pci(dev))
  449. group = pci_device_group(dev);
  450. + else if (dev_is_fsl_mc(dev))
  451. + group = fsl_mc_device_group(dev);
  452. else
  453. group = generic_device_group(dev);
  454. @@ -1493,6 +1515,9 @@ static int arm_smmu_domain_get_attr(stru
  455. {
  456. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  457. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  458. + return -EINVAL;
  459. +
  460. switch (attr) {
  461. case DOMAIN_ATTR_NESTING:
  462. *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
  463. @@ -1508,6 +1533,9 @@ static int arm_smmu_domain_set_attr(stru
  464. int ret = 0;
  465. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  466. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  467. + return -EINVAL;
  468. +
  469. mutex_lock(&smmu_domain->init_mutex);
  470. switch (attr) {
  471. @@ -1534,17 +1562,44 @@ out_unlock:
  472. static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
  473. {
  474. - u32 fwid = 0;
  475. + u32 mask, fwid = 0;
  476. if (args->args_count > 0)
  477. fwid |= (u16)args->args[0];
  478. if (args->args_count > 1)
  479. fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
  480. + else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
  481. + fwid |= (u16)mask << SMR_MASK_SHIFT;
  482. return iommu_fwspec_add_ids(dev, &fwid, 1);
  483. }
  484. +static void arm_smmu_get_resv_regions(struct device *dev,
  485. + struct list_head *head)
  486. +{
  487. + struct iommu_resv_region *region;
  488. + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
  489. +
  490. + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
  491. + prot, IOMMU_RESV_SW_MSI);
  492. + if (!region)
  493. + return;
  494. +
  495. + list_add_tail(&region->list, head);
  496. +
  497. + iommu_dma_get_resv_regions(dev, head);
  498. +}
  499. +
  500. +static void arm_smmu_put_resv_regions(struct device *dev,
  501. + struct list_head *head)
  502. +{
  503. + struct iommu_resv_region *entry, *next;
  504. +
  505. + list_for_each_entry_safe(entry, next, head, list)
  506. + kfree(entry);
  507. +}
  508. +
  509. static struct iommu_ops arm_smmu_ops = {
  510. .capable = arm_smmu_capable,
  511. .domain_alloc = arm_smmu_domain_alloc,
  512. @@ -1560,6 +1615,8 @@ static struct iommu_ops arm_smmu_ops = {
  513. .domain_get_attr = arm_smmu_domain_get_attr,
  514. .domain_set_attr = arm_smmu_domain_set_attr,
  515. .of_xlate = arm_smmu_of_xlate,
  516. + .get_resv_regions = arm_smmu_get_resv_regions,
  517. + .put_resv_regions = arm_smmu_put_resv_regions,
  518. .pgsize_bitmap = -1UL, /* Restricted during device attach */
  519. };
  520. @@ -1581,16 +1638,22 @@ static void arm_smmu_device_reset(struct
  521. for (i = 0; i < smmu->num_mapping_groups; ++i)
  522. arm_smmu_write_sme(smmu, i);
  523. - /*
  524. - * Before clearing ARM_MMU500_ACTLR_CPRE, need to
  525. - * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
  526. - * bit is only present in MMU-500r2 onwards.
  527. - */
  528. - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
  529. - major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
  530. - if ((smmu->model == ARM_MMU500) && (major >= 2)) {
  531. + if (smmu->model == ARM_MMU500) {
  532. + /*
  533. + * Before clearing ARM_MMU500_ACTLR_CPRE, need to
  534. + * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
  535. + * bit is only present in MMU-500r2 onwards.
  536. + */
  537. + reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
  538. + major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
  539. reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
  540. - reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
  541. + if (major >= 2)
  542. + reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
  543. + /*
  544. + * Allow unmatched Stream IDs to allocate bypass
  545. + * TLB entries for reduced latency.
  546. + */
  547. + reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
  548. writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
  549. }
  550. @@ -2024,6 +2087,11 @@ static int arm_smmu_device_dt_probe(stru
  551. bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
  552. }
  553. #endif
  554. +#ifdef CONFIG_FSL_MC_BUS
  555. + if (!iommu_present(&fsl_mc_bus_type))
  556. + bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
  557. +#endif
  558. +
  559. return 0;
  560. }
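
The of_xlate change above lets a firmware-provided "stream-match-mask" property supply the SMR mask when the DT cells only carry a stream ID: the 16-bit ID occupies the low half of the fwid and the mask is shifted up by SMR_MASK_SHIFT. A stand-alone model of that packing, assuming the SMR_MASK_SHIFT value of 16 from arm-smmu.c; the ID and mask values are examples only.

#include <stdio.h>
#include <stdint.h>

#define SMR_MASK_SHIFT 16   /* arm-smmu.c: SMR mask lives in bits 31:16 */

/* mirrors the fwid packing in arm_smmu_of_xlate() above */
static uint32_t pack_fwid(uint16_t stream_id, uint16_t stream_match_mask)
{
	return (uint32_t)stream_id |
	       ((uint32_t)stream_match_mask << SMR_MASK_SHIFT);
}

int main(void)
{
	/* example values only; real IDs/masks come from the device tree */
	uint32_t fwid = pack_fwid(0x0017, 0x7c00);

	printf("fwid = 0x%08x (id=0x%04x mask=0x%04x)\n",
	       fwid, fwid & 0xffff, fwid >> SMR_MASK_SHIFT);
	return 0;
}
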
  561. --- a/drivers/iommu/dma-iommu.c
  562. +++ b/drivers/iommu/dma-iommu.c
  563. @@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
  564. phys_addr_t phys;
  565. };
  566. +enum iommu_dma_cookie_type {
  567. + IOMMU_DMA_IOVA_COOKIE,
  568. + IOMMU_DMA_MSI_COOKIE,
  569. +};
  570. +
  571. struct iommu_dma_cookie {
  572. - struct iova_domain iovad;
  573. - struct list_head msi_page_list;
  574. - spinlock_t msi_lock;
  575. + enum iommu_dma_cookie_type type;
  576. + union {
  577. + /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
  578. + struct iova_domain iovad;
  579. + /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
  580. + dma_addr_t msi_iova;
  581. + };
  582. + struct list_head msi_page_list;
  583. + spinlock_t msi_lock;
  584. };
  585. +static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
  586. +{
  587. + if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
  588. + return cookie->iovad.granule;
  589. + return PAGE_SIZE;
  590. +}
  591. +
  592. static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
  593. {
  594. - return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
  595. + struct iommu_dma_cookie *cookie = domain->iova_cookie;
  596. +
  597. + if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
  598. + return &cookie->iovad;
  599. + return NULL;
  600. +}
  601. +
  602. +static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
  603. +{
  604. + struct iommu_dma_cookie *cookie;
  605. +
  606. + cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
  607. + if (cookie) {
  608. + spin_lock_init(&cookie->msi_lock);
  609. + INIT_LIST_HEAD(&cookie->msi_page_list);
  610. + cookie->type = type;
  611. + }
  612. + return cookie;
  613. }
  614. int iommu_dma_init(void)
  615. @@ -62,25 +97,53 @@ int iommu_dma_init(void)
  616. */
  617. int iommu_get_dma_cookie(struct iommu_domain *domain)
  618. {
  619. + if (domain->iova_cookie)
  620. + return -EEXIST;
  621. +
  622. + domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
  623. + if (!domain->iova_cookie)
  624. + return -ENOMEM;
  625. +
  626. + return 0;
  627. +}
  628. +EXPORT_SYMBOL(iommu_get_dma_cookie);
  629. +
  630. +/**
  631. + * iommu_get_msi_cookie - Acquire just MSI remapping resources
  632. + * @domain: IOMMU domain to prepare
  633. + * @base: Start address of IOVA region for MSI mappings
  634. + *
  635. + * Users who manage their own IOVA allocation and do not want DMA API support,
  636. + * but would still like to take advantage of automatic MSI remapping, can use
  637. + * this to initialise their own domain appropriately. Users should reserve a
  638. + * contiguous IOVA region, starting at @base, large enough to accommodate the
  639. + * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
  640. + * used by the devices attached to @domain.
  641. + */
  642. +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
  643. +{
  644. struct iommu_dma_cookie *cookie;
  645. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  646. + return -EINVAL;
  647. +
  648. if (domain->iova_cookie)
  649. return -EEXIST;
  650. - cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
  651. + cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
  652. if (!cookie)
  653. return -ENOMEM;
  654. - spin_lock_init(&cookie->msi_lock);
  655. - INIT_LIST_HEAD(&cookie->msi_page_list);
  656. + cookie->msi_iova = base;
  657. domain->iova_cookie = cookie;
  658. return 0;
  659. }
  660. -EXPORT_SYMBOL(iommu_get_dma_cookie);
  661. +EXPORT_SYMBOL(iommu_get_msi_cookie);
  662. /**
  663. * iommu_put_dma_cookie - Release a domain's DMA mapping resources
  664. - * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  665. + * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
  666. + * iommu_get_msi_cookie()
  667. *
  668. * IOMMU drivers should normally call this from their domain_free callback.
  669. */
  670. @@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_d
  671. if (!cookie)
  672. return;
  673. - if (cookie->iovad.granule)
  674. + if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
  675. put_iova_domain(&cookie->iovad);
  676. list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
  677. @@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_d
  678. }
  679. EXPORT_SYMBOL(iommu_put_dma_cookie);
  680. -static void iova_reserve_pci_windows(struct pci_dev *dev,
  681. - struct iova_domain *iovad)
  682. +/**
  683. + * iommu_dma_get_resv_regions - Reserved region driver helper
  684. + * @dev: Device from iommu_get_resv_regions()
  685. + * @list: Reserved region list from iommu_get_resv_regions()
  686. + *
  687. + * IOMMU drivers can use this to implement their .get_resv_regions callback
  688. + * for general non-IOMMU-specific reservations. Currently, this covers host
  689. + * bridge windows for PCI devices.
  690. + */
  691. +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
  692. {
  693. - struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
  694. + struct pci_host_bridge *bridge;
  695. struct resource_entry *window;
  696. - unsigned long lo, hi;
  697. + if (!dev_is_pci(dev))
  698. + return;
  699. +
  700. + bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
  701. resource_list_for_each_entry(window, &bridge->windows) {
  702. + struct iommu_resv_region *region;
  703. + phys_addr_t start;
  704. + size_t length;
  705. +
  706. if (resource_type(window->res) != IORESOURCE_MEM)
  707. continue;
  708. - lo = iova_pfn(iovad, window->res->start - window->offset);
  709. - hi = iova_pfn(iovad, window->res->end - window->offset);
  710. + start = window->res->start - window->offset;
  711. + length = window->res->end - window->res->start + 1;
  712. + region = iommu_alloc_resv_region(start, length, 0,
  713. + IOMMU_RESV_RESERVED);
  714. + if (!region)
  715. + return;
  716. +
  717. + list_add_tail(&region->list, list);
  718. + }
  719. +}
  720. +EXPORT_SYMBOL(iommu_dma_get_resv_regions);
  721. +
  722. +static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
  723. + phys_addr_t start, phys_addr_t end)
  724. +{
  725. + struct iova_domain *iovad = &cookie->iovad;
  726. + struct iommu_dma_msi_page *msi_page;
  727. + int i, num_pages;
  728. +
  729. + start -= iova_offset(iovad, start);
  730. + num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
  731. +
  732. + msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
  733. + if (!msi_page)
  734. + return -ENOMEM;
  735. +
  736. + for (i = 0; i < num_pages; i++) {
  737. + msi_page[i].phys = start;
  738. + msi_page[i].iova = start;
  739. + INIT_LIST_HEAD(&msi_page[i].list);
  740. + list_add(&msi_page[i].list, &cookie->msi_page_list);
  741. + start += iovad->granule;
  742. + }
  743. +
  744. + return 0;
  745. +}
  746. +
  747. +static int iova_reserve_iommu_regions(struct device *dev,
  748. + struct iommu_domain *domain)
  749. +{
  750. + struct iommu_dma_cookie *cookie = domain->iova_cookie;
  751. + struct iova_domain *iovad = &cookie->iovad;
  752. + struct iommu_resv_region *region;
  753. + LIST_HEAD(resv_regions);
  754. + int ret = 0;
  755. +
  756. + iommu_get_resv_regions(dev, &resv_regions);
  757. + list_for_each_entry(region, &resv_regions, list) {
  758. + unsigned long lo, hi;
  759. +
  760. + /* We ARE the software that manages these! */
  761. + if (region->type == IOMMU_RESV_SW_MSI)
  762. + continue;
  763. +
  764. + lo = iova_pfn(iovad, region->start);
  765. + hi = iova_pfn(iovad, region->start + region->length - 1);
  766. reserve_iova(iovad, lo, hi);
  767. +
  768. + if (region->type == IOMMU_RESV_MSI)
  769. + ret = cookie_init_hw_msi_region(cookie, region->start,
  770. + region->start + region->length);
  771. + if (ret)
  772. + break;
  773. }
  774. + iommu_put_resv_regions(dev, &resv_regions);
  775. +
  776. + return ret;
  777. }
  778. /**
  779. @@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(str
  780. int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
  781. u64 size, struct device *dev)
  782. {
  783. - struct iova_domain *iovad = cookie_iovad(domain);
  784. + struct iommu_dma_cookie *cookie = domain->iova_cookie;
  785. + struct iova_domain *iovad = &cookie->iovad;
  786. unsigned long order, base_pfn, end_pfn;
  787. - if (!iovad)
  788. - return -ENODEV;
  789. + if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
  790. + return -EINVAL;
  791. /* Use the smallest supported page size for IOVA granularity */
  792. order = __ffs(domain->pgsize_bitmap);
  793. @@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_d
  794. end_pfn = min_t(unsigned long, end_pfn,
  795. domain->geometry.aperture_end >> order);
  796. }
  797. + /*
  798. + * PCI devices may have larger DMA masks, but still prefer allocating
  799. + * within a 32-bit mask to avoid DAC addressing. Such limitations don't
  800. + * apply to the typical platform device, so for those we may as well
  801. + * leave the cache limit at the top of their range to save an rb_last()
  802. + * traversal on every allocation.
  803. + */
  804. + if (dev && dev_is_pci(dev))
  805. + end_pfn &= DMA_BIT_MASK(32) >> order;
  806. - /* All we can safely do with an existing domain is enlarge it */
  807. + /* start_pfn is always nonzero for an already-initialised domain */
  808. if (iovad->start_pfn) {
  809. if (1UL << order != iovad->granule ||
  810. - base_pfn != iovad->start_pfn ||
  811. - end_pfn < iovad->dma_32bit_pfn) {
  812. + base_pfn != iovad->start_pfn) {
  813. pr_warn("Incompatible range for DMA domain\n");
  814. return -EFAULT;
  815. }
  816. - iovad->dma_32bit_pfn = end_pfn;
  817. - } else {
  818. - init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
  819. - if (dev && dev_is_pci(dev))
  820. - iova_reserve_pci_windows(to_pci_dev(dev), iovad);
  821. + /*
  822. + * If we have devices with different DMA masks, move the free
  823. + * area cache limit down for the benefit of the smaller one.
  824. + */
  825. + iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
  826. +
  827. + return 0;
  828. }
  829. - return 0;
  830. +
  831. + init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
  832. + if (!dev)
  833. + return 0;
  834. +
  835. + return iova_reserve_iommu_regions(dev, domain);
  836. }
  837. EXPORT_SYMBOL(iommu_dma_init_domain);
  838. @@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_
  839. {
  840. struct iommu_dma_cookie *cookie = domain->iova_cookie;
  841. struct iommu_dma_msi_page *msi_page;
  842. - struct iova_domain *iovad = &cookie->iovad;
  843. + struct iova_domain *iovad = cookie_iovad(domain);
  844. struct iova *iova;
  845. int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
  846. + size_t size = cookie_msi_granule(cookie);
  847. - msi_addr &= ~(phys_addr_t)iova_mask(iovad);
  848. + msi_addr &= ~(phys_addr_t)(size - 1);
  849. list_for_each_entry(msi_page, &cookie->msi_page_list, list)
  850. if (msi_page->phys == msi_addr)
  851. return msi_page;
  852. @@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_
  853. if (!msi_page)
  854. return NULL;
  855. - iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
  856. - if (!iova)
  857. - goto out_free_page;
  858. -
  859. msi_page->phys = msi_addr;
  860. - msi_page->iova = iova_dma_addr(iovad, iova);
  861. - if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
  862. + if (iovad) {
  863. + iova = __alloc_iova(domain, size, dma_get_mask(dev));
  864. + if (!iova)
  865. + goto out_free_page;
  866. + msi_page->iova = iova_dma_addr(iovad, iova);
  867. + } else {
  868. + msi_page->iova = cookie->msi_iova;
  869. + cookie->msi_iova += size;
  870. + }
  871. +
  872. + if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
  873. goto out_free_iova;
  874. INIT_LIST_HEAD(&msi_page->list);
  875. @@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_
  876. return msi_page;
  877. out_free_iova:
  878. - __free_iova(iovad, iova);
  879. + if (iovad)
  880. + __free_iova(iovad, iova);
  881. + else
  882. + cookie->msi_iova -= size;
  883. out_free_page:
  884. kfree(msi_page);
  885. return NULL;
  886. @@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, stru
  887. msg->data = ~0U;
  888. } else {
  889. msg->address_hi = upper_32_bits(msi_page->iova);
  890. - msg->address_lo &= iova_mask(&cookie->iovad);
  891. + msg->address_lo &= cookie_msi_granule(cookie) - 1;
  892. msg->address_lo += lower_32_bits(msi_page->iova);
  893. }
  894. }
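
With the two cookie flavours above, iommu_dma_get_msi_page() rounds the doorbell address down to the cookie's granule (the IOVA granule for a full DMA cookie, PAGE_SIZE for an MSI-only cookie) and, for the MSI cookie, hands out IOVA space with a trivial bump allocator starting at the base passed to iommu_get_msi_cookie(). A stand-alone model of those two steps, assuming a 4 KiB granule and using the MSI_IOVA_BASE value defined earlier in this patch only as an example base.

#include <stdio.h>
#include <stdint.h>

/* assumed granule: PAGE_SIZE for an MSI-only cookie on a 4K kernel */
#define GRANULE 0x1000ULL

/* trivial linear allocator, as kept in cookie->msi_iova above */
static uint64_t msi_iova_next = 0x8000000ULL;   /* e.g. MSI_IOVA_BASE */

/* mirrors the rounding in iommu_dma_get_msi_page(): msi_addr &= ~(size - 1) */
static uint64_t doorbell_page(uint64_t msi_addr)
{
	return msi_addr & ~(GRANULE - 1);
}

/* mirrors the MSI-cookie branch: take the next granule-sized IOVA slot */
static uint64_t alloc_msi_iova(void)
{
	uint64_t iova = msi_iova_next;

	msi_iova_next += GRANULE;
	return iova;
}

int main(void)
{
	uint64_t doorbell = 0x6020004ULL;            /* example doorbell PA */
	uint64_t phys = doorbell_page(doorbell);
	uint64_t iova = alloc_msi_iova();

	printf("map IOVA 0x%llx -> PA 0x%llx, address_lo offset 0x%llx\n",
	       (unsigned long long)iova, (unsigned long long)phys,
	       (unsigned long long)(doorbell & (GRANULE - 1)));
	return 0;
}
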
  895. --- a/drivers/iommu/intel-iommu.c
  896. +++ b/drivers/iommu/intel-iommu.c
  897. @@ -441,6 +441,7 @@ struct dmar_rmrr_unit {
  898. u64 end_address; /* reserved end address */
  899. struct dmar_dev_scope *devices; /* target devices */
  900. int devices_cnt; /* target device count */
  901. + struct iommu_resv_region *resv; /* reserved region handle */
  902. };
  903. struct dmar_atsr_unit {
  904. @@ -4267,27 +4268,40 @@ static inline void init_iommu_pm_ops(voi
  905. int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
  906. {
  907. struct acpi_dmar_reserved_memory *rmrr;
  908. + int prot = DMA_PTE_READ|DMA_PTE_WRITE;
  909. struct dmar_rmrr_unit *rmrru;
  910. + size_t length;
  911. rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
  912. if (!rmrru)
  913. - return -ENOMEM;
  914. + goto out;
  915. rmrru->hdr = header;
  916. rmrr = (struct acpi_dmar_reserved_memory *)header;
  917. rmrru->base_address = rmrr->base_address;
  918. rmrru->end_address = rmrr->end_address;
  919. +
  920. + length = rmrr->end_address - rmrr->base_address + 1;
  921. + rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
  922. + IOMMU_RESV_DIRECT);
  923. + if (!rmrru->resv)
  924. + goto free_rmrru;
  925. +
  926. rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
  927. ((void *)rmrr) + rmrr->header.length,
  928. &rmrru->devices_cnt);
  929. - if (rmrru->devices_cnt && rmrru->devices == NULL) {
  930. - kfree(rmrru);
  931. - return -ENOMEM;
  932. - }
  933. + if (rmrru->devices_cnt && rmrru->devices == NULL)
  934. + goto free_all;
  935. list_add(&rmrru->list, &dmar_rmrr_units);
  936. return 0;
  937. +free_all:
  938. + kfree(rmrru->resv);
  939. +free_rmrru:
  940. + kfree(rmrru);
  941. +out:
  942. + return -ENOMEM;
  943. }
  944. static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
  945. @@ -4501,6 +4515,7 @@ static void intel_iommu_free_dmars(void)
  946. list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
  947. list_del(&rmrru->list);
  948. dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
  949. + kfree(rmrru->resv);
  950. kfree(rmrru);
  951. }
  952. @@ -5236,6 +5251,45 @@ static void intel_iommu_remove_device(st
  953. iommu_device_unlink(iommu->iommu_dev, dev);
  954. }
  955. +static void intel_iommu_get_resv_regions(struct device *device,
  956. + struct list_head *head)
  957. +{
  958. + struct iommu_resv_region *reg;
  959. + struct dmar_rmrr_unit *rmrr;
  960. + struct device *i_dev;
  961. + int i;
  962. +
  963. + rcu_read_lock();
  964. + for_each_rmrr_units(rmrr) {
  965. + for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
  966. + i, i_dev) {
  967. + if (i_dev != device)
  968. + continue;
  969. +
  970. + list_add_tail(&rmrr->resv->list, head);
  971. + }
  972. + }
  973. + rcu_read_unlock();
  974. +
  975. + reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
  976. + IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
  977. + 0, IOMMU_RESV_MSI);
  978. + if (!reg)
  979. + return;
  980. + list_add_tail(&reg->list, head);
  981. +}
  982. +
  983. +static void intel_iommu_put_resv_regions(struct device *dev,
  984. + struct list_head *head)
  985. +{
  986. + struct iommu_resv_region *entry, *next;
  987. +
  988. + list_for_each_entry_safe(entry, next, head, list) {
  989. + if (entry->type == IOMMU_RESV_RESERVED)
  990. + kfree(entry);
  991. + }
  992. +}
  993. +
  994. #ifdef CONFIG_INTEL_IOMMU_SVM
  995. #define MAX_NR_PASID_BITS (20)
  996. static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
  997. @@ -5366,19 +5420,21 @@ struct intel_iommu *intel_svm_device_to_
  998. #endif /* CONFIG_INTEL_IOMMU_SVM */
  999. static const struct iommu_ops intel_iommu_ops = {
  1000. - .capable = intel_iommu_capable,
  1001. - .domain_alloc = intel_iommu_domain_alloc,
  1002. - .domain_free = intel_iommu_domain_free,
  1003. - .attach_dev = intel_iommu_attach_device,
  1004. - .detach_dev = intel_iommu_detach_device,
  1005. - .map = intel_iommu_map,
  1006. - .unmap = intel_iommu_unmap,
  1007. - .map_sg = default_iommu_map_sg,
  1008. - .iova_to_phys = intel_iommu_iova_to_phys,
  1009. - .add_device = intel_iommu_add_device,
  1010. - .remove_device = intel_iommu_remove_device,
  1011. - .device_group = pci_device_group,
  1012. - .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
  1013. + .capable = intel_iommu_capable,
  1014. + .domain_alloc = intel_iommu_domain_alloc,
  1015. + .domain_free = intel_iommu_domain_free,
  1016. + .attach_dev = intel_iommu_attach_device,
  1017. + .detach_dev = intel_iommu_detach_device,
  1018. + .map = intel_iommu_map,
  1019. + .unmap = intel_iommu_unmap,
  1020. + .map_sg = default_iommu_map_sg,
  1021. + .iova_to_phys = intel_iommu_iova_to_phys,
  1022. + .add_device = intel_iommu_add_device,
  1023. + .remove_device = intel_iommu_remove_device,
  1024. + .get_resv_regions = intel_iommu_get_resv_regions,
  1025. + .put_resv_regions = intel_iommu_put_resv_regions,
  1026. + .device_group = pci_device_group,
  1027. + .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
  1028. };
  1029. static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
  1030. --- a/drivers/iommu/iommu.c
  1031. +++ b/drivers/iommu/iommu.c
  1032. @@ -33,9 +33,11 @@
  1033. #include <linux/bitops.h>
  1034. #include <linux/property.h>
  1035. #include <trace/events/iommu.h>
  1036. +#include <linux/fsl/mc.h>
  1037. static struct kset *iommu_group_kset;
  1038. static DEFINE_IDA(iommu_group_ida);
  1039. +static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
  1040. struct iommu_callback_data {
  1041. const struct iommu_ops *ops;
  1042. @@ -68,6 +70,13 @@ struct iommu_group_attribute {
  1043. const char *buf, size_t count);
  1044. };
  1045. +static const char * const iommu_group_resv_type_string[] = {
  1046. + [IOMMU_RESV_DIRECT] = "direct",
  1047. + [IOMMU_RESV_RESERVED] = "reserved",
  1048. + [IOMMU_RESV_MSI] = "msi",
  1049. + [IOMMU_RESV_SW_MSI] = "msi",
  1050. +};
  1051. +
  1052. #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
  1053. struct iommu_group_attribute iommu_group_attr_##_name = \
  1054. __ATTR(_name, _mode, _show, _store)
  1055. @@ -86,6 +95,18 @@ static int __iommu_attach_group(struct i
  1056. static void __iommu_detach_group(struct iommu_domain *domain,
  1057. struct iommu_group *group);
  1058. +static int __init iommu_set_def_domain_type(char *str)
  1059. +{
  1060. + bool pt;
  1061. +
  1062. + if (!str || strtobool(str, &pt))
  1063. + return -EINVAL;
  1064. +
  1065. + iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
  1066. + return 0;
  1067. +}
  1068. +early_param("iommu.passthrough", iommu_set_def_domain_type);
  1069. +
  1070. static ssize_t iommu_group_attr_show(struct kobject *kobj,
  1071. struct attribute *__attr, char *buf)
  1072. {
  1073. @@ -133,8 +154,131 @@ static ssize_t iommu_group_show_name(str
  1074. return sprintf(buf, "%s\n", group->name);
  1075. }
  1076. +/**
  1077. + * iommu_insert_resv_region - Insert a new region in the
  1078. + * list of reserved regions.
  1079. + * @new: new region to insert
  1080. + * @regions: list of regions
  1081. + *
  1082. + * The new element is sorted by address with respect to the other
  1083. + * regions of the same type. In case it overlaps with another
  1084. + * region of the same type, regions are merged. In case it
  1085. + * overlaps with another region of different type, regions are
  1086. + * not merged.
  1087. + */
  1088. +static int iommu_insert_resv_region(struct iommu_resv_region *new,
  1089. + struct list_head *regions)
  1090. +{
  1091. + struct iommu_resv_region *region;
  1092. + phys_addr_t start = new->start;
  1093. + phys_addr_t end = new->start + new->length - 1;
  1094. + struct list_head *pos = regions->next;
  1095. +
  1096. + while (pos != regions) {
  1097. + struct iommu_resv_region *entry =
  1098. + list_entry(pos, struct iommu_resv_region, list);
  1099. + phys_addr_t a = entry->start;
  1100. + phys_addr_t b = entry->start + entry->length - 1;
  1101. + int type = entry->type;
  1102. +
  1103. + if (end < a) {
  1104. + goto insert;
  1105. + } else if (start > b) {
  1106. + pos = pos->next;
  1107. + } else if ((start >= a) && (end <= b)) {
  1108. + if (new->type == type)
  1109. + goto done;
  1110. + else
  1111. + pos = pos->next;
  1112. + } else {
  1113. + if (new->type == type) {
  1114. + phys_addr_t new_start = min(a, start);
  1115. + phys_addr_t new_end = max(b, end);
  1116. +
  1117. + list_del(&entry->list);
  1118. + entry->start = new_start;
  1119. + entry->length = new_end - new_start + 1;
  1120. + iommu_insert_resv_region(entry, regions);
  1121. + } else {
  1122. + pos = pos->next;
  1123. + }
  1124. + }
  1125. + }
  1126. +insert:
  1127. + region = iommu_alloc_resv_region(new->start, new->length,
  1128. + new->prot, new->type);
  1129. + if (!region)
  1130. + return -ENOMEM;
  1131. +
  1132. + list_add_tail(&region->list, pos);
  1133. +done:
  1134. + return 0;
  1135. +}
  1136. +
  1137. +static int
  1138. +iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
  1139. + struct list_head *group_resv_regions)
  1140. +{
  1141. + struct iommu_resv_region *entry;
1142. + int ret = 0;
  1143. +
  1144. + list_for_each_entry(entry, dev_resv_regions, list) {
  1145. + ret = iommu_insert_resv_region(entry, group_resv_regions);
  1146. + if (ret)
  1147. + break;
  1148. + }
  1149. + return ret;
  1150. +}
  1151. +
  1152. +int iommu_get_group_resv_regions(struct iommu_group *group,
  1153. + struct list_head *head)
  1154. +{
  1155. + struct iommu_device *device;
  1156. + int ret = 0;
  1157. +
  1158. + mutex_lock(&group->mutex);
  1159. + list_for_each_entry(device, &group->devices, list) {
  1160. + struct list_head dev_resv_regions;
  1161. +
  1162. + INIT_LIST_HEAD(&dev_resv_regions);
  1163. + iommu_get_resv_regions(device->dev, &dev_resv_regions);
  1164. + ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
  1165. + iommu_put_resv_regions(device->dev, &dev_resv_regions);
  1166. + if (ret)
  1167. + break;
  1168. + }
  1169. + mutex_unlock(&group->mutex);
  1170. + return ret;
  1171. +}
  1172. +EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
  1173. +
  1174. +static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
  1175. + char *buf)
  1176. +{
  1177. + struct iommu_resv_region *region, *next;
  1178. + struct list_head group_resv_regions;
  1179. + char *str = buf;
  1180. +
  1181. + INIT_LIST_HEAD(&group_resv_regions);
  1182. + iommu_get_group_resv_regions(group, &group_resv_regions);
  1183. +
  1184. + list_for_each_entry_safe(region, next, &group_resv_regions, list) {
  1185. + str += sprintf(str, "0x%016llx 0x%016llx %s\n",
  1186. + (long long int)region->start,
  1187. + (long long int)(region->start +
  1188. + region->length - 1),
  1189. + iommu_group_resv_type_string[region->type]);
  1190. + kfree(region);
  1191. + }
  1192. +
  1193. + return (str - buf);
  1194. +}
  1195. +
  1196. static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
  1197. +static IOMMU_GROUP_ATTR(reserved_regions, 0444,
  1198. + iommu_group_show_resv_regions, NULL);
  1199. +
  1200. static void iommu_group_release(struct kobject *kobj)
  1201. {
  1202. struct iommu_group *group = to_iommu_group(kobj);
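
The iommu_insert_resv_region() helper added above keeps the group's reserved list sorted and collapses overlapping regions of the same type into one entry spanning min/max of the bounds, while overlapping regions of different types are kept separate. A stand-alone model of the merge test for one pair of regions; the struct is a reduced stand-in and the values are examples.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct resv { uint64_t start, length; int type; };   /* reduced model */

/*
 * Mirrors the overlap handling in iommu_insert_resv_region(): two regions
 * of the same type that overlap collapse into [min, max].
 */
static bool try_merge(const struct resv *a, const struct resv *b,
		      struct resv *out)
{
	uint64_t a_end = a->start + a->length - 1;
	uint64_t b_end = b->start + b->length - 1;

	if (a->type != b->type)
		return false;
	if (b_end < a->start || b->start > a_end)   /* disjoint */
		return false;

	out->start = a->start < b->start ? a->start : b->start;
	out->length = (a_end > b_end ? a_end : b_end) - out->start + 1;
	out->type = a->type;
	return true;
}

int main(void)
{
	struct resv a = { 0x8000000, 0x100000, 0 };   /* e.g. an MSI window   */
	struct resv b = { 0x8080000, 0x100000, 0 };   /* overlap, same type   */
	struct resv m;

	if (try_merge(&a, &b, &m))
		printf("merged: [0x%llx, 0x%llx]\n",
		       (unsigned long long)m.start,
		       (unsigned long long)(m.start + m.length - 1));
	return 0;
}
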
  1203. @@ -212,6 +356,11 @@ struct iommu_group *iommu_group_alloc(vo
  1204. */
  1205. kobject_put(&group->kobj);
  1206. + ret = iommu_group_create_file(group,
  1207. + &iommu_group_attr_reserved_regions);
  1208. + if (ret)
  1209. + return ERR_PTR(ret);
  1210. +
  1211. pr_debug("Allocated group %d\n", group->id);
  1212. return group;
  1213. @@ -318,7 +467,7 @@ static int iommu_group_create_direct_map
  1214. struct device *dev)
  1215. {
  1216. struct iommu_domain *domain = group->default_domain;
  1217. - struct iommu_dm_region *entry;
  1218. + struct iommu_resv_region *entry;
  1219. struct list_head mappings;
  1220. unsigned long pg_size;
  1221. int ret = 0;
  1222. @@ -331,18 +480,21 @@ static int iommu_group_create_direct_map
  1223. pg_size = 1UL << __ffs(domain->pgsize_bitmap);
  1224. INIT_LIST_HEAD(&mappings);
  1225. - iommu_get_dm_regions(dev, &mappings);
  1226. + iommu_get_resv_regions(dev, &mappings);
  1227. /* We need to consider overlapping regions for different devices */
  1228. list_for_each_entry(entry, &mappings, list) {
  1229. dma_addr_t start, end, addr;
  1230. - if (domain->ops->apply_dm_region)
  1231. - domain->ops->apply_dm_region(dev, domain, entry);
  1232. + if (domain->ops->apply_resv_region)
  1233. + domain->ops->apply_resv_region(dev, domain, entry);
  1234. start = ALIGN(entry->start, pg_size);
  1235. end = ALIGN(entry->start + entry->length, pg_size);
  1236. + if (entry->type != IOMMU_RESV_DIRECT)
  1237. + continue;
  1238. +
  1239. for (addr = start; addr < end; addr += pg_size) {
  1240. phys_addr_t phys_addr;
  1241. @@ -358,7 +510,7 @@ static int iommu_group_create_direct_map
  1242. }
  1243. out:
  1244. - iommu_put_dm_regions(dev, &mappings);
  1245. + iommu_put_resv_regions(dev, &mappings);
  1246. return ret;
  1247. }
  1248. @@ -563,6 +715,19 @@ struct iommu_group *iommu_group_get(stru
  1249. EXPORT_SYMBOL_GPL(iommu_group_get);
  1250. /**
  1251. + * iommu_group_ref_get - Increment reference on a group
  1252. + * @group: the group to use, must not be NULL
  1253. + *
  1254. + * This function is called by iommu drivers to take additional references on an
  1255. + * existing group. Returns the given group for convenience.
  1256. + */
  1257. +struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
  1258. +{
  1259. + kobject_get(group->devices_kobj);
  1260. + return group;
  1261. +}
  1262. +
  1263. +/**
  1264. * iommu_group_put - Decrement group reference
  1265. * @group: the group to use
  1266. *
  1267. @@ -812,6 +977,26 @@ struct iommu_group *pci_device_group(str
  1268. return group;
  1269. }
  1270. +/* Get the IOMMU group for device on fsl-mc bus */
  1271. +struct iommu_group *fsl_mc_device_group(struct device *dev)
  1272. +{
  1273. + struct device *cont_dev = fsl_mc_cont_dev(dev);
  1274. + struct iommu_group *group;
  1275. +
  1276. + /* Container device is responsible for creating the iommu group */
  1277. + if (fsl_mc_is_cont_dev(dev)) {
  1278. + group = iommu_group_alloc();
  1279. + if (IS_ERR(group))
  1280. + return NULL;
  1281. + } else {
  1282. + get_device(cont_dev);
  1283. + group = iommu_group_get(cont_dev);
  1284. + put_device(cont_dev);
  1285. + }
  1286. +
  1287. + return group;
  1288. +}
  1289. +
  1290. /**
  1291. * iommu_group_get_for_dev - Find or create the IOMMU group for a device
  1292. * @dev: target device
@@ -845,10 +1030,19 @@ struct iommu_group *iommu_group_get_for_
* IOMMU driver.
*/
if (!group->default_domain) {
- group->default_domain = __iommu_domain_alloc(dev->bus,
- IOMMU_DOMAIN_DMA);
+ struct iommu_domain *dom;
+
+ dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
+ if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
+ dev_warn(dev,
+ "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+ iommu_def_domain_type);
+ dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+ }
+
+ group->default_domain = dom;
if (!group->domain)
- group->domain = group->default_domain;
+ group->domain = dom;
}
ret = iommu_group_add_device(group, dev);
@@ -1557,20 +1751,38 @@ int iommu_domain_set_attr(struct iommu_d
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- if (ops && ops->get_dm_regions)
- ops->get_dm_regions(dev, list);
+ if (ops && ops->get_resv_regions)
+ ops->get_resv_regions(dev, list);
}
-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- if (ops && ops->put_dm_regions)
- ops->put_dm_regions(dev, list);
+ if (ops && ops->put_resv_regions)
+ ops->put_resv_regions(dev, list);
+}
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+ size_t length, int prot,
+ enum iommu_resv_type type)
+{
+ struct iommu_resv_region *region;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return NULL;
+
+ INIT_LIST_HEAD(&region->list);
+ region->start = start;
+ region->length = length;
+ region->prot = prot;
+ region->type = type;
+ return region;
}
/* Request that a device is direct mapped by the IOMMU */
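To make the renamed interface concrete, a driver-side pair of callbacks might look roughly like the following minimal sketch; MY_MSI_BASE/MY_MSI_SIZE and the my_* names are illustrative assumptions, and the prot value simply uses the existing IOMMU_READ/IOMMU_WRITE bits from include/linux/iommu.h:

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/slab.h>

#define MY_MSI_BASE	0x08000000UL	/* hypothetical MSI doorbell window */
#define MY_MSI_SIZE	0x00100000UL

static void my_iommu_get_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region;

	/* Report a software-managed MSI window for this device. */
	region = iommu_alloc_resv_region(MY_MSI_BASE, MY_MSI_SIZE,
					 IOMMU_READ | IOMMU_WRITE,
					 IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}

static void my_iommu_put_resv_regions(struct device *dev,
				      struct list_head *head)
{
	struct iommu_resv_region *region, *next;

	/* Free everything the get_resv_regions callback allocated. */
	list_for_each_entry_safe(region, next, head, list)
		kfree(region);
}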
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_dev
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ } else {
+ iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_dev
data->m4u_group = iommu_group_alloc();
if (IS_ERR(data->m4u_group))
dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+ } else {
+ iommu_group_ref_get(data->m4u_group);
}
return data->m4u_group;
}
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -28,6 +28,7 @@ int iommu_dma_init(void);
/* Domain management interface for IOMMU drivers */
int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
void iommu_put_dma_cookie(struct iommu_domain *domain);
/* Setup call for arch DMA mapping code */
@@ -67,6 +68,7 @@ int iommu_dma_mapping_error(struct devic
/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
#else
@@ -83,6 +85,11 @@ static inline int iommu_get_dma_cookie(s
return -ENODEV;
}
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+ return -ENODEV;
+}
+
static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
{
}
@@ -91,6 +98,10 @@ static inline void iommu_dma_map_msi_msg
{
}
+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
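iommu_get_msi_cookie() is only declared here; as a rough, hypothetical sketch of the consumer side, code that manages an externally owned (VFIO-style) domain could give it an MSI cookie based on a software MSI region discovered for the device. The my_setup_msi_window name is an assumption for illustration:

#include <linux/dma-iommu.h>
#include <linux/iommu.h>
#include <linux/list.h>

/*
 * Find an IOMMU_RESV_SW_MSI region for @dev and hand its base to the
 * DMA-IOMMU layer so iommu_dma_map_msi_msg() can remap MSI doorbells
 * into that window for @domain. Returns 0 or a negative errno.
 */
static int my_setup_msi_window(struct iommu_domain *domain,
			       struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	dma_addr_t msi_base = 0;
	bool found = false;

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		if (region->type == IOMMU_RESV_SW_MSI) {
			msi_base = region->start;
			found = true;
			break;
		}
	}
	iommu_put_resv_regions(dev, &resv_regions);

	if (!found)
		return -ENODEV;

	return iommu_get_msi_cookie(domain, msi_base);
}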
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -117,18 +117,32 @@ enum iommu_attr {
DOMAIN_ATTR_MAX,
};
+/* These are the possible reserved region types */
+enum iommu_resv_type {
+ /* Memory regions which must be mapped 1:1 at all times */
+ IOMMU_RESV_DIRECT,
+ /* Arbitrary "never map this or give it to a device" address ranges */
+ IOMMU_RESV_RESERVED,
+ /* Hardware MSI region (untranslated) */
+ IOMMU_RESV_MSI,
+ /* Software-managed MSI translation window */
+ IOMMU_RESV_SW_MSI,
+};
+
/**
- * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * struct iommu_resv_region - descriptor for a reserved memory region
* @list: Linked list pointers
* @start: System physical start address of the region
* @length: Length of the region in bytes
* @prot: IOMMU Protection flags (READ/WRITE/...)
+ * @type: Type of the reserved region
*/
-struct iommu_dm_region {
+struct iommu_resv_region {
struct list_head list;
phys_addr_t start;
size_t length;
int prot;
+ enum iommu_resv_type type;
};
#ifdef CONFIG_IOMMU_API
@@ -150,9 +164,9 @@ struct iommu_dm_region {
* @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
- * @get_dm_regions: Request list of direct mapping requirements for a device
- * @put_dm_regions: Free list of direct mapping requirements for a device
- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
+ * @get_resv_regions: Request list of reserved regions for a device
+ * @put_resv_regions: Free list of reserved regions for a device
+ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @domain_window_enable: Configure and enable a particular window for a domain
* @domain_window_disable: Disable a particular window for a domain
* @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +198,12 @@ struct iommu_ops {
int (*domain_set_attr)(struct iommu_domain *domain,
enum iommu_attr attr, void *data);
- /* Request/Free a list of direct mapping requirements for a device */
- void (*get_dm_regions)(struct device *dev, struct list_head *list);
- void (*put_dm_regions)(struct device *dev, struct list_head *list);
- void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
- struct iommu_dm_region *region);
+ /* Request/Free a list of reserved regions for a device */
+ void (*get_resv_regions)(struct device *dev, struct list_head *list);
+ void (*put_resv_regions)(struct device *dev, struct list_head *list);
+ void (*apply_resv_region)(struct device *dev,
+ struct iommu_domain *domain,
+ struct iommu_resv_region *region);
/* Window handling functions */
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
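Putting the renamed callbacks together, a driver converted to this interface would wire them up in its iommu_ops roughly as below; the my_* functions are the hypothetical callbacks sketched earlier in this file, not functions added by the patch:

static struct iommu_ops my_iommu_ops = {
	/* ... map/unmap, attach/detach and the remaining callbacks ... */
	.device_group		= my_smmu_device_group,
	.get_resv_regions	= my_iommu_get_resv_regions,
	.put_resv_regions	= my_iommu_put_resv_regions,
};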
@@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(st
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
+ enum iommu_resv_type type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head);
extern int iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
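Only the declaration of iommu_get_group_resv_regions() appears in this hunk. As a hedged sketch of a consumer, code that wants to inspect a group's reserved regions (for example to expose them or to carve them out of an IOVA allocator) could do something like the following; the function name and the pr_info() format are illustrative, and freeing the returned entries with kfree() is assumed to be the caller's responsibility:

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/slab.h>

static void my_dump_group_resv_regions(struct iommu_group *group)
{
	struct iommu_resv_region *region, *next;
	LIST_HEAD(regions);

	if (iommu_get_group_resv_regions(group, &regions))
		return;

	list_for_each_entry(region, &regions, list)
		pr_info("resv region: %pap + 0x%zx, prot 0x%x, type %d\n",
			&region->start, region->length,
			region->prot, region->type);

	/* Assumed: the caller owns the returned entries and frees them. */
	list_for_each_entry_safe(region, next, &regions, list)
		kfree(region);
}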
@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(st
extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *));
extern struct iommu_group *iommu_group_get(struct device *dev);
+extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
extern void iommu_group_put(struct iommu_group *group);
extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
@@ -330,6 +351,8 @@ static inline size_t iommu_map_sg(struct
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
extern struct iommu_group *generic_device_group(struct device *dev);
+/* FSL-MC device grouping function */
+struct iommu_group *fsl_mc_device_group(struct device *dev);
/**
* struct iommu_fwspec - per-device IOMMU instance data
@@ -439,16 +462,22 @@ static inline void iommu_set_fault_handl
{
}
-static inline void iommu_get_dm_regions(struct device *dev,
+static inline void iommu_get_resv_regions(struct device *dev,
struct list_head *list)
{
}
-static inline void iommu_put_dm_regions(struct device *dev,
+static inline void iommu_put_resv_regions(struct device *dev,
struct list_head *list)
{
}
+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head)
+{
+ return -ENODEV;
+}
+
static inline int iommu_request_dm_for_dev(struct device *dev)
{
return -ENODEV;