810-iommu-support-layerscape.patch 49 KB

  1. From 0a6c701f92e1aa368c44632fa0985e92703354ed Mon Sep 17 00:00:00 2001
  2. From: Yangbo Lu <[email protected]>
  3. Date: Wed, 17 Jan 2018 15:35:48 +0800
  4. Subject: [PATCH 22/30] iommu: support layerscape
  5. This is an integrated patch for Layerscape SMMU support.
  6. Signed-off-by: Eric Auger <[email protected]>
  7. Signed-off-by: Robin Murphy <[email protected]>
  8. Signed-off-by: Nipun Gupta <[email protected]>
  9. Signed-off-by: Sunil Goutham <[email protected]>
  10. Signed-off-by: Yangbo Lu <[email protected]>
  11. ---
  12. drivers/iommu/amd_iommu.c | 56 ++++++----
  13. drivers/iommu/arm-smmu-v3.c | 111 ++++++++++++++------
  14. drivers/iommu/arm-smmu.c | 100 +++++++++++++++---
  15. drivers/iommu/dma-iommu.c | 242 ++++++++++++++++++++++++++++++++++++-------
  16. drivers/iommu/intel-iommu.c | 92 ++++++++++++----
  17. drivers/iommu/iommu.c | 219 ++++++++++++++++++++++++++++++++++++---
  18. drivers/iommu/mtk_iommu.c | 2 +
  19. drivers/iommu/mtk_iommu_v1.c | 2 +
  20. include/linux/dma-iommu.h | 11 ++
  21. include/linux/iommu.h | 55 +++++++---
  22. 10 files changed, 739 insertions(+), 151 deletions(-)
  23. --- a/drivers/iommu/amd_iommu.c
  24. +++ b/drivers/iommu/amd_iommu.c
  25. @@ -373,6 +373,8 @@ static struct iommu_group *acpihid_devic
  26. if (!entry->group)
  27. entry->group = generic_device_group(dev);
  28. + else
  29. + iommu_group_ref_get(entry->group);
  30. return entry->group;
  31. }
  32. @@ -3160,9 +3162,10 @@ static bool amd_iommu_capable(enum iommu
  33. return false;
  34. }
  35. -static void amd_iommu_get_dm_regions(struct device *dev,
  36. - struct list_head *head)
  37. +static void amd_iommu_get_resv_regions(struct device *dev,
  38. + struct list_head *head)
  39. {
  40. + struct iommu_resv_region *region;
  41. struct unity_map_entry *entry;
  42. int devid;
  43. @@ -3171,41 +3174,56 @@ static void amd_iommu_get_dm_regions(str
  44. return;
  45. list_for_each_entry(entry, &amd_iommu_unity_map, list) {
  46. - struct iommu_dm_region *region;
  47. + size_t length;
  48. + int prot = 0;
  49. if (devid < entry->devid_start || devid > entry->devid_end)
  50. continue;
  51. - region = kzalloc(sizeof(*region), GFP_KERNEL);
  52. + length = entry->address_end - entry->address_start;
  53. + if (entry->prot & IOMMU_PROT_IR)
  54. + prot |= IOMMU_READ;
  55. + if (entry->prot & IOMMU_PROT_IW)
  56. + prot |= IOMMU_WRITE;
  57. +
  58. + region = iommu_alloc_resv_region(entry->address_start,
  59. + length, prot,
  60. + IOMMU_RESV_DIRECT);
  61. if (!region) {
  62. pr_err("Out of memory allocating dm-regions for %s\n",
  63. dev_name(dev));
  64. return;
  65. }
  66. -
  67. - region->start = entry->address_start;
  68. - region->length = entry->address_end - entry->address_start;
  69. - if (entry->prot & IOMMU_PROT_IR)
  70. - region->prot |= IOMMU_READ;
  71. - if (entry->prot & IOMMU_PROT_IW)
  72. - region->prot |= IOMMU_WRITE;
  73. -
  74. list_add_tail(&region->list, head);
  75. }
  76. +
  77. + region = iommu_alloc_resv_region(MSI_RANGE_START,
  78. + MSI_RANGE_END - MSI_RANGE_START + 1,
  79. + 0, IOMMU_RESV_MSI);
  80. + if (!region)
  81. + return;
  82. + list_add_tail(&region->list, head);
  83. +
  84. + region = iommu_alloc_resv_region(HT_RANGE_START,
  85. + HT_RANGE_END - HT_RANGE_START + 1,
  86. + 0, IOMMU_RESV_RESERVED);
  87. + if (!region)
  88. + return;
  89. + list_add_tail(&region->list, head);
  90. }
  91. -static void amd_iommu_put_dm_regions(struct device *dev,
  92. +static void amd_iommu_put_resv_regions(struct device *dev,
  93. struct list_head *head)
  94. {
  95. - struct iommu_dm_region *entry, *next;
  96. + struct iommu_resv_region *entry, *next;
  97. list_for_each_entry_safe(entry, next, head, list)
  98. kfree(entry);
  99. }
  100. -static void amd_iommu_apply_dm_region(struct device *dev,
  101. +static void amd_iommu_apply_resv_region(struct device *dev,
  102. struct iommu_domain *domain,
  103. - struct iommu_dm_region *region)
  104. + struct iommu_resv_region *region)
  105. {
  106. struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
  107. unsigned long start, end;
  108. @@ -3229,9 +3247,9 @@ static const struct iommu_ops amd_iommu_
  109. .add_device = amd_iommu_add_device,
  110. .remove_device = amd_iommu_remove_device,
  111. .device_group = amd_iommu_device_group,
  112. - .get_dm_regions = amd_iommu_get_dm_regions,
  113. - .put_dm_regions = amd_iommu_put_dm_regions,
  114. - .apply_dm_region = amd_iommu_apply_dm_region,
  115. + .get_resv_regions = amd_iommu_get_resv_regions,
  116. + .put_resv_regions = amd_iommu_put_resv_regions,
  117. + .apply_resv_region = amd_iommu_apply_resv_region,
  118. .pgsize_bitmap = AMD_IOMMU_PGSIZES,
  119. };
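
Editor's note: the amd_iommu hunk above is the template every driver in this patch follows when moving from the old dm_region callbacks to the generic reserved-region API — build struct iommu_resv_region entries with iommu_alloc_resv_region() in .get_resv_regions and free them again in .put_resv_regions. A minimal sketch of that pattern, assuming illustrative example_ names and a made-up MSI window (not any driver's real values):

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/slab.h>

#define EXAMPLE_MSI_IOVA_BASE	0x8000000
#define EXAMPLE_MSI_IOVA_LENGTH	0x100000

static void example_get_resv_regions(struct device *dev,
				     struct list_head *head)
{
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	struct iommu_resv_region *region;

	/* One software-managed MSI window; real drivers add their own ranges. */
	region = iommu_alloc_resv_region(EXAMPLE_MSI_IOVA_BASE,
					 EXAMPLE_MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;
	list_add_tail(&region->list, head);
}

static void example_put_resv_regions(struct device *dev,
				     struct list_head *head)
{
	struct iommu_resv_region *entry, *next;

	/* The driver allocated the entries, so the driver frees them. */
	list_for_each_entry_safe(entry, next, head, list)
		kfree(entry);
}
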
  120. --- a/drivers/iommu/arm-smmu-v3.c
  121. +++ b/drivers/iommu/arm-smmu-v3.c
  122. @@ -410,6 +410,9 @@
  123. /* High-level queue structures */
  124. #define ARM_SMMU_POLL_TIMEOUT_US 100
  125. +#define MSI_IOVA_BASE 0x8000000
  126. +#define MSI_IOVA_LENGTH 0x100000
  127. +
  128. static bool disable_bypass;
  129. module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
  130. MODULE_PARM_DESC(disable_bypass,
  131. @@ -552,9 +555,14 @@ struct arm_smmu_s2_cfg {
  132. };
  133. struct arm_smmu_strtab_ent {
  134. - bool valid;
  135. -
  136. - bool bypass; /* Overrides s1/s2 config */
  137. + /*
  138. + * An STE is "assigned" if the master emitting the corresponding SID
  139. + * is attached to a domain. The behaviour of an unassigned STE is
  140. + * determined by the disable_bypass parameter, whereas an assigned
  141. + * STE behaves according to s1_cfg/s2_cfg, which themselves are
  142. + * configured according to the domain type.
  143. + */
  144. + bool assigned;
  145. struct arm_smmu_s1_cfg *s1_cfg;
  146. struct arm_smmu_s2_cfg *s2_cfg;
  147. };
  148. @@ -627,6 +635,7 @@ enum arm_smmu_domain_stage {
  149. ARM_SMMU_DOMAIN_S1 = 0,
  150. ARM_SMMU_DOMAIN_S2,
  151. ARM_SMMU_DOMAIN_NESTED,
  152. + ARM_SMMU_DOMAIN_BYPASS,
  153. };
  154. struct arm_smmu_domain {
  155. @@ -1000,9 +1009,9 @@ static void arm_smmu_write_strtab_ent(st
  156. * This is hideously complicated, but we only really care about
  157. * three cases at the moment:
  158. *
  159. - * 1. Invalid (all zero) -> bypass (init)
  160. - * 2. Bypass -> translation (attach)
  161. - * 3. Translation -> bypass (detach)
  162. + * 1. Invalid (all zero) -> bypass/fault (init)
  163. + * 2. Bypass/fault -> translation/bypass (attach)
  164. + * 3. Translation/bypass -> bypass/fault (detach)
  165. *
  166. * Given that we can't update the STE atomically and the SMMU
  167. * doesn't read the thing in a defined order, that leaves us
  168. @@ -1041,11 +1050,15 @@ static void arm_smmu_write_strtab_ent(st
  169. }
  170. /* Nuke the existing STE_0 value, as we're going to rewrite it */
  171. - val = ste->valid ? STRTAB_STE_0_V : 0;
  172. + val = STRTAB_STE_0_V;
  173. +
  174. + /* Bypass/fault */
  175. + if (!ste->assigned || !(ste->s1_cfg || ste->s2_cfg)) {
  176. + if (!ste->assigned && disable_bypass)
  177. + val |= STRTAB_STE_0_CFG_ABORT;
  178. + else
  179. + val |= STRTAB_STE_0_CFG_BYPASS;
  180. - if (ste->bypass) {
  181. - val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
  182. - : STRTAB_STE_0_CFG_BYPASS;
  183. dst[0] = cpu_to_le64(val);
  184. dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
  185. << STRTAB_STE_1_SHCFG_SHIFT);
  186. @@ -1108,10 +1121,7 @@ static void arm_smmu_write_strtab_ent(st
  187. static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
  188. {
  189. unsigned int i;
  190. - struct arm_smmu_strtab_ent ste = {
  191. - .valid = true,
  192. - .bypass = true,
  193. - };
  194. + struct arm_smmu_strtab_ent ste = { .assigned = false };
  195. for (i = 0; i < nent; ++i) {
  196. arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
  197. @@ -1364,8 +1374,6 @@ static bool arm_smmu_capable(enum iommu_
  198. switch (cap) {
  199. case IOMMU_CAP_CACHE_COHERENCY:
  200. return true;
  201. - case IOMMU_CAP_INTR_REMAP:
  202. - return true; /* MSIs are just memory writes */
  203. case IOMMU_CAP_NOEXEC:
  204. return true;
  205. default:
  206. @@ -1377,7 +1385,9 @@ static struct iommu_domain *arm_smmu_dom
  207. {
  208. struct arm_smmu_domain *smmu_domain;
  209. - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
  210. + if (type != IOMMU_DOMAIN_UNMANAGED &&
  211. + type != IOMMU_DOMAIN_DMA &&
  212. + type != IOMMU_DOMAIN_IDENTITY)
  213. return NULL;
  214. /*
  215. @@ -1508,6 +1518,11 @@ static int arm_smmu_domain_finalise(stru
  216. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  217. struct arm_smmu_device *smmu = smmu_domain->smmu;
  218. + if (domain->type == IOMMU_DOMAIN_IDENTITY) {
  219. + smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
  220. + return 0;
  221. + }
  222. +
  223. /* Restrict the stage to what we can actually support */
  224. if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
  225. smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
  226. @@ -1580,7 +1595,7 @@ static __le64 *arm_smmu_get_step_for_sid
  227. return step;
  228. }
  229. -static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
  230. +static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
  231. {
  232. int i, j;
  233. struct arm_smmu_master_data *master = fwspec->iommu_priv;
  234. @@ -1599,17 +1614,14 @@ static int arm_smmu_install_ste_for_dev(
  235. arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
  236. }
  237. -
  238. - return 0;
  239. }
  240. static void arm_smmu_detach_dev(struct device *dev)
  241. {
  242. struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
  243. - master->ste.bypass = true;
  244. - if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
  245. - dev_warn(dev, "failed to install bypass STE\n");
  246. + master->ste.assigned = false;
  247. + arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
  248. }
  249. static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
  250. @@ -1628,7 +1640,7 @@ static int arm_smmu_attach_dev(struct io
  251. ste = &master->ste;
  252. /* Already attached to a different domain? */
  253. - if (!ste->bypass)
  254. + if (ste->assigned)
  255. arm_smmu_detach_dev(dev);
  256. mutex_lock(&smmu_domain->init_mutex);
  257. @@ -1649,10 +1661,12 @@ static int arm_smmu_attach_dev(struct io
  258. goto out_unlock;
  259. }
  260. - ste->bypass = false;
  261. - ste->valid = true;
  262. + ste->assigned = true;
  263. - if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
  264. + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) {
  265. + ste->s1_cfg = NULL;
  266. + ste->s2_cfg = NULL;
  267. + } else if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
  268. ste->s1_cfg = &smmu_domain->s1_cfg;
  269. ste->s2_cfg = NULL;
  270. arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
  271. @@ -1661,10 +1675,7 @@ static int arm_smmu_attach_dev(struct io
  272. ste->s2_cfg = &smmu_domain->s2_cfg;
  273. }
  274. - ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
  275. - if (ret < 0)
  276. - ste->valid = false;
  277. -
  278. + arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
  279. out_unlock:
  280. mutex_unlock(&smmu_domain->init_mutex);
  281. return ret;
  282. @@ -1712,6 +1723,9 @@ arm_smmu_iova_to_phys(struct iommu_domai
  283. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  284. struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
  285. + if (domain->type == IOMMU_DOMAIN_IDENTITY)
  286. + return iova;
  287. +
  288. if (!ops)
  289. return 0;
  290. @@ -1810,7 +1824,7 @@ static void arm_smmu_remove_device(struc
  291. return;
  292. master = fwspec->iommu_priv;
  293. - if (master && master->ste.valid)
  294. + if (master && master->ste.assigned)
  295. arm_smmu_detach_dev(dev);
  296. iommu_group_remove_device(dev);
  297. kfree(master);
  298. @@ -1839,6 +1853,9 @@ static int arm_smmu_domain_get_attr(stru
  299. {
  300. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  301. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  302. + return -EINVAL;
  303. +
  304. switch (attr) {
  305. case DOMAIN_ATTR_NESTING:
  306. *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
  307. @@ -1854,6 +1871,9 @@ static int arm_smmu_domain_set_attr(stru
  308. int ret = 0;
  309. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  310. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  311. + return -EINVAL;
  312. +
  313. mutex_lock(&smmu_domain->init_mutex);
  314. switch (attr) {
  315. @@ -1883,6 +1903,31 @@ static int arm_smmu_of_xlate(struct devi
  316. return iommu_fwspec_add_ids(dev, args->args, 1);
  317. }
  318. +static void arm_smmu_get_resv_regions(struct device *dev,
  319. + struct list_head *head)
  320. +{
  321. + struct iommu_resv_region *region;
  322. + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
  323. +
  324. + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
  325. + prot, IOMMU_RESV_SW_MSI);
  326. + if (!region)
  327. + return;
  328. +
  329. + list_add_tail(&region->list, head);
  330. +
  331. + iommu_dma_get_resv_regions(dev, head);
  332. +}
  333. +
  334. +static void arm_smmu_put_resv_regions(struct device *dev,
  335. + struct list_head *head)
  336. +{
  337. + struct iommu_resv_region *entry, *next;
  338. +
  339. + list_for_each_entry_safe(entry, next, head, list)
  340. + kfree(entry);
  341. +}
  342. +
  343. static struct iommu_ops arm_smmu_ops = {
  344. .capable = arm_smmu_capable,
  345. .domain_alloc = arm_smmu_domain_alloc,
  346. @@ -1898,6 +1943,8 @@ static struct iommu_ops arm_smmu_ops = {
  347. .domain_get_attr = arm_smmu_domain_get_attr,
  348. .domain_set_attr = arm_smmu_domain_set_attr,
  349. .of_xlate = arm_smmu_of_xlate,
  350. + .get_resv_regions = arm_smmu_get_resv_regions,
  351. + .put_resv_regions = arm_smmu_put_resv_regions,
  352. .pgsize_bitmap = -1UL, /* Restricted during device attach */
  353. };
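
Editor's note: the net effect of the SMMUv3 STE rework above is easiest to see as a decision table. An unassigned STE (no domain attached) follows the disable_bypass module parameter; an assigned STE follows whichever of s1_cfg/s2_cfg the domain type installed, and an identity domain installs neither, so it bypasses. A standalone sketch of that decision, with illustrative enum and function names (not the driver's own):

#include <linux/types.h>

enum example_ste_cfg {
	EXAMPLE_STE_ABORT,	/* fault all transactions */
	EXAMPLE_STE_BYPASS,	/* pass through untranslated */
	EXAMPLE_STE_S1_TRANS,	/* stage-1 translation */
	EXAMPLE_STE_S2_TRANS,	/* stage-2 translation */
};

static enum example_ste_cfg
example_ste_cfg(bool assigned, bool have_s1, bool have_s2,
		bool disable_bypass)
{
	/* Unassigned STE: behaviour chosen by the disable_bypass parameter. */
	if (!assigned)
		return disable_bypass ? EXAMPLE_STE_ABORT : EXAMPLE_STE_BYPASS;
	if (have_s1)
		return EXAMPLE_STE_S1_TRANS;
	if (have_s2)
		return EXAMPLE_STE_S2_TRANS;
	/* Assigned to an identity domain: both cfgs NULL, so bypass. */
	return EXAMPLE_STE_BYPASS;
}
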
  354. --- a/drivers/iommu/arm-smmu.c
  355. +++ b/drivers/iommu/arm-smmu.c
  356. @@ -49,6 +49,7 @@
  357. #include <linux/spinlock.h>
  358. #include <linux/amba/bus.h>
  359. +#include "../staging/fsl-mc/include/mc-bus.h"
  360. #include "io-pgtable.h"
  361. @@ -247,6 +248,7 @@ enum arm_smmu_s2cr_privcfg {
  362. #define ARM_MMU500_ACTLR_CPRE (1 << 1)
  363. #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
  364. +#define ARM_MMU500_ACR_SMTNMB_TLBEN (1 << 8)
  365. #define CB_PAR_F (1 << 0)
  366. @@ -278,6 +280,9 @@ enum arm_smmu_s2cr_privcfg {
  367. #define FSYNR0_WNR (1 << 4)
  368. +#define MSI_IOVA_BASE 0x8000000
  369. +#define MSI_IOVA_LENGTH 0x100000
  370. +
  371. static int force_stage;
  372. module_param(force_stage, int, S_IRUGO);
  373. MODULE_PARM_DESC(force_stage,
  374. @@ -401,6 +406,7 @@ enum arm_smmu_domain_stage {
  375. ARM_SMMU_DOMAIN_S1 = 0,
  376. ARM_SMMU_DOMAIN_S2,
  377. ARM_SMMU_DOMAIN_NESTED,
  378. + ARM_SMMU_DOMAIN_BYPASS,
  379. };
  380. struct arm_smmu_domain {
  381. @@ -821,6 +827,12 @@ static int arm_smmu_init_domain_context(
  382. if (smmu_domain->smmu)
  383. goto out_unlock;
  384. + if (domain->type == IOMMU_DOMAIN_IDENTITY) {
  385. + smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
  386. + smmu_domain->smmu = smmu;
  387. + goto out_unlock;
  388. + }
  389. +
  390. /*
  391. * Mapping the requested stage onto what we support is surprisingly
  392. * complicated, mainly because the spec allows S1+S2 SMMUs without
  393. @@ -981,7 +993,7 @@ static void arm_smmu_destroy_domain_cont
  394. void __iomem *cb_base;
  395. int irq;
  396. - if (!smmu)
  397. + if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
  398. return;
  399. /*
  400. @@ -1004,7 +1016,9 @@ static struct iommu_domain *arm_smmu_dom
  401. {
  402. struct arm_smmu_domain *smmu_domain;
  403. - if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
  404. + if (type != IOMMU_DOMAIN_UNMANAGED &&
  405. + type != IOMMU_DOMAIN_DMA &&
  406. + type != IOMMU_DOMAIN_IDENTITY)
  407. return NULL;
  408. /*
  409. * Allocate the domain and initialise some of its data structures.
  410. @@ -1202,10 +1216,15 @@ static int arm_smmu_domain_add_master(st
  411. {
  412. struct arm_smmu_device *smmu = smmu_domain->smmu;
  413. struct arm_smmu_s2cr *s2cr = smmu->s2crs;
  414. - enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
  415. u8 cbndx = smmu_domain->cfg.cbndx;
  416. + enum arm_smmu_s2cr_type type;
  417. int i, idx;
  418. + if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
  419. + type = S2CR_TYPE_BYPASS;
  420. + else
  421. + type = S2CR_TYPE_TRANS;
  422. +
  423. for_each_cfg_sme(fwspec, i, idx) {
  424. if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
  425. continue;
  426. @@ -1343,6 +1362,9 @@ static phys_addr_t arm_smmu_iova_to_phys
  427. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  428. struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
  429. + if (domain->type == IOMMU_DOMAIN_IDENTITY)
  430. + return iova;
  431. +
  432. if (!ops)
  433. return 0;
  434. @@ -1368,8 +1390,6 @@ static bool arm_smmu_capable(enum iommu_
  435. * requests.
  436. */
  437. return true;
  438. - case IOMMU_CAP_INTR_REMAP:
  439. - return true; /* MSIs are just memory writes */
  440. case IOMMU_CAP_NOEXEC:
  441. return true;
  442. default:
  443. @@ -1478,10 +1498,12 @@ static struct iommu_group *arm_smmu_devi
  444. }
  445. if (group)
  446. - return group;
  447. + return iommu_group_ref_get(group);
  448. if (dev_is_pci(dev))
  449. group = pci_device_group(dev);
  450. + else if (dev_is_fsl_mc(dev))
  451. + group = fsl_mc_device_group(dev);
  452. else
  453. group = generic_device_group(dev);
  454. @@ -1493,6 +1515,9 @@ static int arm_smmu_domain_get_attr(stru
  455. {
  456. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  457. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  458. + return -EINVAL;
  459. +
  460. switch (attr) {
  461. case DOMAIN_ATTR_NESTING:
  462. *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
  463. @@ -1508,6 +1533,9 @@ static int arm_smmu_domain_set_attr(stru
  464. int ret = 0;
  465. struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  466. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  467. + return -EINVAL;
  468. +
  469. mutex_lock(&smmu_domain->init_mutex);
  470. switch (attr) {
  471. @@ -1534,17 +1562,44 @@ out_unlock:
  472. static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
  473. {
  474. - u32 fwid = 0;
  475. + u32 mask, fwid = 0;
  476. if (args->args_count > 0)
  477. fwid |= (u16)args->args[0];
  478. if (args->args_count > 1)
  479. fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
  480. + else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
  481. + fwid |= (u16)mask << SMR_MASK_SHIFT;
  482. return iommu_fwspec_add_ids(dev, &fwid, 1);
  483. }
  484. +static void arm_smmu_get_resv_regions(struct device *dev,
  485. + struct list_head *head)
  486. +{
  487. + struct iommu_resv_region *region;
  488. + int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
  489. +
  490. + region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
  491. + prot, IOMMU_RESV_SW_MSI);
  492. + if (!region)
  493. + return;
  494. +
  495. + list_add_tail(&region->list, head);
  496. +
  497. + iommu_dma_get_resv_regions(dev, head);
  498. +}
  499. +
  500. +static void arm_smmu_put_resv_regions(struct device *dev,
  501. + struct list_head *head)
  502. +{
  503. + struct iommu_resv_region *entry, *next;
  504. +
  505. + list_for_each_entry_safe(entry, next, head, list)
  506. + kfree(entry);
  507. +}
  508. +
  509. static struct iommu_ops arm_smmu_ops = {
  510. .capable = arm_smmu_capable,
  511. .domain_alloc = arm_smmu_domain_alloc,
  512. @@ -1560,6 +1615,8 @@ static struct iommu_ops arm_smmu_ops = {
  513. .domain_get_attr = arm_smmu_domain_get_attr,
  514. .domain_set_attr = arm_smmu_domain_set_attr,
  515. .of_xlate = arm_smmu_of_xlate,
  516. + .get_resv_regions = arm_smmu_get_resv_regions,
  517. + .put_resv_regions = arm_smmu_put_resv_regions,
  518. .pgsize_bitmap = -1UL, /* Restricted during device attach */
  519. };
  520. @@ -1581,16 +1638,22 @@ static void arm_smmu_device_reset(struct
  521. for (i = 0; i < smmu->num_mapping_groups; ++i)
  522. arm_smmu_write_sme(smmu, i);
  523. - /*
  524. - * Before clearing ARM_MMU500_ACTLR_CPRE, need to
  525. - * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
  526. - * bit is only present in MMU-500r2 onwards.
  527. - */
  528. - reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
  529. - major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
  530. - if ((smmu->model == ARM_MMU500) && (major >= 2)) {
  531. + if (smmu->model == ARM_MMU500) {
  532. + /*
  533. + * Before clearing ARM_MMU500_ACTLR_CPRE, need to
  534. + * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
  535. + * bit is only present in MMU-500r2 onwards.
  536. + */
  537. + reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
  538. + major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
  539. reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
  540. - reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
  541. + if (major >= 2)
  542. + reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
  543. + /*
  544. + * Allow unmatched Stream IDs to allocate bypass
  545. + * TLB entries for reduced latency.
  546. + */
  547. + reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
  548. writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
  549. }
  550. @@ -2024,6 +2087,11 @@ static int arm_smmu_device_dt_probe(stru
  551. bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
  552. }
  553. #endif
  554. +#ifdef CONFIG_FSL_MC_BUS
  555. + if (!iommu_present(&fsl_mc_bus_type))
  556. + bus_set_iommu(&fsl_mc_bus_type, &arm_smmu_ops);
  557. +#endif
  558. +
  559. return 0;
  560. }
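
Editor's note: part of the arm-smmu change above concerns how a master's firmware ID is formed. A second "iommus" cell, or failing that the new "stream-match-mask" property, supplies an SMR mask that is packed above the stream ID before iommu_fwspec_add_ids() is called. A small sketch of that packing, assuming SMR_MASK_SHIFT is 16 as in the mainline driver (the helper name is illustrative):

#include <linux/types.h>

/*
 * fwid layout used by the updated arm_smmu_of_xlate(): bits [15:0] hold
 * the stream ID, bits [31:16] hold the SMR mask taken either from a
 * second "iommus" cell or from the "stream-match-mask" property.
 */
#define EXAMPLE_SMR_MASK_SHIFT	16

static u32 example_pack_fwid(u16 sid, u16 smr_mask)
{
	return (u32)sid | ((u32)smr_mask << EXAMPLE_SMR_MASK_SHIFT);
}
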
  561. --- a/drivers/iommu/dma-iommu.c
  562. +++ b/drivers/iommu/dma-iommu.c
  563. @@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
  564. phys_addr_t phys;
  565. };
  566. +enum iommu_dma_cookie_type {
  567. + IOMMU_DMA_IOVA_COOKIE,
  568. + IOMMU_DMA_MSI_COOKIE,
  569. +};
  570. +
  571. struct iommu_dma_cookie {
  572. - struct iova_domain iovad;
  573. - struct list_head msi_page_list;
  574. - spinlock_t msi_lock;
  575. + enum iommu_dma_cookie_type type;
  576. + union {
  577. + /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
  578. + struct iova_domain iovad;
  579. + /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
  580. + dma_addr_t msi_iova;
  581. + };
  582. + struct list_head msi_page_list;
  583. + spinlock_t msi_lock;
  584. };
  585. +static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
  586. +{
  587. + if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
  588. + return cookie->iovad.granule;
  589. + return PAGE_SIZE;
  590. +}
  591. +
  592. static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
  593. {
  594. - return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
  595. + struct iommu_dma_cookie *cookie = domain->iova_cookie;
  596. +
  597. + if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
  598. + return &cookie->iovad;
  599. + return NULL;
  600. +}
  601. +
  602. +static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
  603. +{
  604. + struct iommu_dma_cookie *cookie;
  605. +
  606. + cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
  607. + if (cookie) {
  608. + spin_lock_init(&cookie->msi_lock);
  609. + INIT_LIST_HEAD(&cookie->msi_page_list);
  610. + cookie->type = type;
  611. + }
  612. + return cookie;
  613. }
  614. int iommu_dma_init(void)
  615. @@ -62,25 +97,53 @@ int iommu_dma_init(void)
  616. */
  617. int iommu_get_dma_cookie(struct iommu_domain *domain)
  618. {
  619. + if (domain->iova_cookie)
  620. + return -EEXIST;
  621. +
  622. + domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
  623. + if (!domain->iova_cookie)
  624. + return -ENOMEM;
  625. +
  626. + return 0;
  627. +}
  628. +EXPORT_SYMBOL(iommu_get_dma_cookie);
  629. +
  630. +/**
  631. + * iommu_get_msi_cookie - Acquire just MSI remapping resources
  632. + * @domain: IOMMU domain to prepare
  633. + * @base: Start address of IOVA region for MSI mappings
  634. + *
  635. + * Users who manage their own IOVA allocation and do not want DMA API support,
  636. + * but would still like to take advantage of automatic MSI remapping, can use
  637. + * this to initialise their own domain appropriately. Users should reserve a
  638. + * contiguous IOVA region, starting at @base, large enough to accommodate the
  639. + * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
  640. + * used by the devices attached to @domain.
  641. + */
  642. +int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
  643. +{
  644. struct iommu_dma_cookie *cookie;
  645. + if (domain->type != IOMMU_DOMAIN_UNMANAGED)
  646. + return -EINVAL;
  647. +
  648. if (domain->iova_cookie)
  649. return -EEXIST;
  650. - cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
  651. + cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
  652. if (!cookie)
  653. return -ENOMEM;
  654. - spin_lock_init(&cookie->msi_lock);
  655. - INIT_LIST_HEAD(&cookie->msi_page_list);
  656. + cookie->msi_iova = base;
  657. domain->iova_cookie = cookie;
  658. return 0;
  659. }
  660. -EXPORT_SYMBOL(iommu_get_dma_cookie);
  661. +EXPORT_SYMBOL(iommu_get_msi_cookie);
  662. /**
  663. * iommu_put_dma_cookie - Release a domain's DMA mapping resources
  664. - * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
  665. + * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
  666. + * iommu_get_msi_cookie()
  667. *
  668. * IOMMU drivers should normally call this from their domain_free callback.
  669. */
  670. @@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_d
  671. if (!cookie)
  672. return;
  673. - if (cookie->iovad.granule)
  674. + if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
  675. put_iova_domain(&cookie->iovad);
  676. list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
  677. @@ -104,21 +167,99 @@ void iommu_put_dma_cookie(struct iommu_d
  678. }
  679. EXPORT_SYMBOL(iommu_put_dma_cookie);
  680. -static void iova_reserve_pci_windows(struct pci_dev *dev,
  681. - struct iova_domain *iovad)
  682. +/**
  683. + * iommu_dma_get_resv_regions - Reserved region driver helper
  684. + * @dev: Device from iommu_get_resv_regions()
  685. + * @list: Reserved region list from iommu_get_resv_regions()
  686. + *
  687. + * IOMMU drivers can use this to implement their .get_resv_regions callback
  688. + * for general non-IOMMU-specific reservations. Currently, this covers host
  689. + * bridge windows for PCI devices.
  690. + */
  691. +void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
  692. {
  693. - struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
  694. + struct pci_host_bridge *bridge;
  695. struct resource_entry *window;
  696. - unsigned long lo, hi;
  697. + if (!dev_is_pci(dev))
  698. + return;
  699. +
  700. + bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
  701. resource_list_for_each_entry(window, &bridge->windows) {
  702. + struct iommu_resv_region *region;
  703. + phys_addr_t start;
  704. + size_t length;
  705. +
  706. if (resource_type(window->res) != IORESOURCE_MEM)
  707. continue;
  708. - lo = iova_pfn(iovad, window->res->start - window->offset);
  709. - hi = iova_pfn(iovad, window->res->end - window->offset);
  710. + start = window->res->start - window->offset;
  711. + length = window->res->end - window->res->start + 1;
  712. + region = iommu_alloc_resv_region(start, length, 0,
  713. + IOMMU_RESV_RESERVED);
  714. + if (!region)
  715. + return;
  716. +
  717. + list_add_tail(&region->list, list);
  718. + }
  719. +}
  720. +EXPORT_SYMBOL(iommu_dma_get_resv_regions);
  721. +
  722. +static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
  723. + phys_addr_t start, phys_addr_t end)
  724. +{
  725. + struct iova_domain *iovad = &cookie->iovad;
  726. + struct iommu_dma_msi_page *msi_page;
  727. + int i, num_pages;
  728. +
  729. + start -= iova_offset(iovad, start);
  730. + num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
  731. +
  732. + msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
  733. + if (!msi_page)
  734. + return -ENOMEM;
  735. +
  736. + for (i = 0; i < num_pages; i++) {
  737. + msi_page[i].phys = start;
  738. + msi_page[i].iova = start;
  739. + INIT_LIST_HEAD(&msi_page[i].list);
  740. + list_add(&msi_page[i].list, &cookie->msi_page_list);
  741. + start += iovad->granule;
  742. + }
  743. +
  744. + return 0;
  745. +}
  746. +
  747. +static int iova_reserve_iommu_regions(struct device *dev,
  748. + struct iommu_domain *domain)
  749. +{
  750. + struct iommu_dma_cookie *cookie = domain->iova_cookie;
  751. + struct iova_domain *iovad = &cookie->iovad;
  752. + struct iommu_resv_region *region;
  753. + LIST_HEAD(resv_regions);
  754. + int ret = 0;
  755. +
  756. + iommu_get_resv_regions(dev, &resv_regions);
  757. + list_for_each_entry(region, &resv_regions, list) {
  758. + unsigned long lo, hi;
  759. +
  760. + /* We ARE the software that manages these! */
  761. + if (region->type == IOMMU_RESV_SW_MSI)
  762. + continue;
  763. +
  764. + lo = iova_pfn(iovad, region->start);
  765. + hi = iova_pfn(iovad, region->start + region->length - 1);
  766. reserve_iova(iovad, lo, hi);
  767. +
  768. + if (region->type == IOMMU_RESV_MSI)
  769. + ret = cookie_init_hw_msi_region(cookie, region->start,
  770. + region->start + region->length);
  771. + if (ret)
  772. + break;
  773. }
  774. + iommu_put_resv_regions(dev, &resv_regions);
  775. +
  776. + return ret;
  777. }
  778. /**
  779. @@ -136,11 +277,12 @@ static void iova_reserve_pci_windows(str
  780. int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
  781. u64 size, struct device *dev)
  782. {
  783. - struct iova_domain *iovad = cookie_iovad(domain);
  784. + struct iommu_dma_cookie *cookie = domain->iova_cookie;
  785. + struct iova_domain *iovad = &cookie->iovad;
  786. unsigned long order, base_pfn, end_pfn;
  787. - if (!iovad)
  788. - return -ENODEV;
  789. + if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
  790. + return -EINVAL;
  791. /* Use the smallest supported page size for IOVA granularity */
  792. order = __ffs(domain->pgsize_bitmap);
  793. @@ -160,22 +302,37 @@ int iommu_dma_init_domain(struct iommu_d
  794. end_pfn = min_t(unsigned long, end_pfn,
  795. domain->geometry.aperture_end >> order);
  796. }
  797. + /*
  798. + * PCI devices may have larger DMA masks, but still prefer allocating
  799. + * within a 32-bit mask to avoid DAC addressing. Such limitations don't
  800. + * apply to the typical platform device, so for those we may as well
  801. + * leave the cache limit at the top of their range to save an rb_last()
  802. + * traversal on every allocation.
  803. + */
  804. + if (dev && dev_is_pci(dev))
  805. + end_pfn &= DMA_BIT_MASK(32) >> order;
  806. - /* All we can safely do with an existing domain is enlarge it */
  807. + /* start_pfn is always nonzero for an already-initialised domain */
  808. if (iovad->start_pfn) {
  809. if (1UL << order != iovad->granule ||
  810. - base_pfn != iovad->start_pfn ||
  811. - end_pfn < iovad->dma_32bit_pfn) {
  812. + base_pfn != iovad->start_pfn) {
  813. pr_warn("Incompatible range for DMA domain\n");
  814. return -EFAULT;
  815. }
  816. - iovad->dma_32bit_pfn = end_pfn;
  817. - } else {
  818. - init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
  819. - if (dev && dev_is_pci(dev))
  820. - iova_reserve_pci_windows(to_pci_dev(dev), iovad);
  821. + /*
  822. + * If we have devices with different DMA masks, move the free
  823. + * area cache limit down for the benefit of the smaller one.
  824. + */
  825. + iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
  826. +
  827. + return 0;
  828. }
  829. - return 0;
  830. +
  831. + init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
  832. + if (!dev)
  833. + return 0;
  834. +
  835. + return iova_reserve_iommu_regions(dev, domain);
  836. }
  837. EXPORT_SYMBOL(iommu_dma_init_domain);
  838. @@ -643,11 +800,12 @@ static struct iommu_dma_msi_page *iommu_
  839. {
  840. struct iommu_dma_cookie *cookie = domain->iova_cookie;
  841. struct iommu_dma_msi_page *msi_page;
  842. - struct iova_domain *iovad = &cookie->iovad;
  843. + struct iova_domain *iovad = cookie_iovad(domain);
  844. struct iova *iova;
  845. int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
  846. + size_t size = cookie_msi_granule(cookie);
  847. - msi_addr &= ~(phys_addr_t)iova_mask(iovad);
  848. + msi_addr &= ~(phys_addr_t)(size - 1);
  849. list_for_each_entry(msi_page, &cookie->msi_page_list, list)
  850. if (msi_page->phys == msi_addr)
  851. return msi_page;
  852. @@ -656,13 +814,18 @@ static struct iommu_dma_msi_page *iommu_
  853. if (!msi_page)
  854. return NULL;
  855. - iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
  856. - if (!iova)
  857. - goto out_free_page;
  858. -
  859. msi_page->phys = msi_addr;
  860. - msi_page->iova = iova_dma_addr(iovad, iova);
  861. - if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
  862. + if (iovad) {
  863. + iova = __alloc_iova(domain, size, dma_get_mask(dev));
  864. + if (!iova)
  865. + goto out_free_page;
  866. + msi_page->iova = iova_dma_addr(iovad, iova);
  867. + } else {
  868. + msi_page->iova = cookie->msi_iova;
  869. + cookie->msi_iova += size;
  870. + }
  871. +
  872. + if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
  873. goto out_free_iova;
  874. INIT_LIST_HEAD(&msi_page->list);
  875. @@ -670,7 +833,10 @@ static struct iommu_dma_msi_page *iommu_
  876. return msi_page;
  877. out_free_iova:
  878. - __free_iova(iovad, iova);
  879. + if (iovad)
  880. + __free_iova(iovad, iova);
  881. + else
  882. + cookie->msi_iova -= size;
  883. out_free_page:
  884. kfree(msi_page);
  885. return NULL;
  886. @@ -711,7 +877,7 @@ void iommu_dma_map_msi_msg(int irq, stru
  887. msg->data = ~0U;
  888. } else {
  889. msg->address_hi = upper_32_bits(msi_page->iova);
  890. - msg->address_lo &= iova_mask(&cookie->iovad);
  891. + msg->address_lo &= cookie_msi_granule(cookie) - 1;
  892. msg->address_lo += lower_32_bits(msi_page->iova);
  893. }
  894. }
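
Editor's note: the MSI-cookie path added above is meant for callers that manage their own IOVA space (VFIO-style users). They allocate an unmanaged domain, pick an unused IOVA window for MSI doorbells, and hand its base to iommu_get_msi_cookie() so that iommu_dma_map_msi_msg() can later rewrite MSI messages through the trivial linear allocator. A minimal sketch of such a caller, with an illustrative base address and wrapper name:

#include <linux/dma-iommu.h>
#include <linux/iommu.h>

static struct iommu_domain *example_msi_domain_alloc(struct bus_type *bus,
						     dma_addr_t msi_base)
{
	struct iommu_domain *domain;

	/* iommu_domain_alloc() returns an IOMMU_DOMAIN_UNMANAGED domain. */
	domain = iommu_domain_alloc(bus);
	if (!domain)
		return NULL;

	/* msi_base must start a caller-reserved, otherwise-unused IOVA window. */
	if (iommu_get_msi_cookie(domain, msi_base)) {
		iommu_domain_free(domain);
		return NULL;
	}

	return domain;
}
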
  895. --- a/drivers/iommu/intel-iommu.c
  896. +++ b/drivers/iommu/intel-iommu.c
  897. @@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
  898. u64 end_address; /* reserved end address */
  899. struct dmar_dev_scope *devices; /* target devices */
  900. int devices_cnt; /* target device count */
  901. + struct iommu_resv_region *resv; /* reserved region handle */
  902. };
  903. struct dmar_atsr_unit {
  904. @@ -4251,27 +4252,40 @@ static inline void init_iommu_pm_ops(voi
  905. int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
  906. {
  907. struct acpi_dmar_reserved_memory *rmrr;
  908. + int prot = DMA_PTE_READ|DMA_PTE_WRITE;
  909. struct dmar_rmrr_unit *rmrru;
  910. + size_t length;
  911. rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
  912. if (!rmrru)
  913. - return -ENOMEM;
  914. + goto out;
  915. rmrru->hdr = header;
  916. rmrr = (struct acpi_dmar_reserved_memory *)header;
  917. rmrru->base_address = rmrr->base_address;
  918. rmrru->end_address = rmrr->end_address;
  919. +
  920. + length = rmrr->end_address - rmrr->base_address + 1;
  921. + rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
  922. + IOMMU_RESV_DIRECT);
  923. + if (!rmrru->resv)
  924. + goto free_rmrru;
  925. +
  926. rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
  927. ((void *)rmrr) + rmrr->header.length,
  928. &rmrru->devices_cnt);
  929. - if (rmrru->devices_cnt && rmrru->devices == NULL) {
  930. - kfree(rmrru);
  931. - return -ENOMEM;
  932. - }
  933. + if (rmrru->devices_cnt && rmrru->devices == NULL)
  934. + goto free_all;
  935. list_add(&rmrru->list, &dmar_rmrr_units);
  936. return 0;
  937. +free_all:
  938. + kfree(rmrru->resv);
  939. +free_rmrru:
  940. + kfree(rmrru);
  941. +out:
  942. + return -ENOMEM;
  943. }
  944. static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
  945. @@ -4485,6 +4499,7 @@ static void intel_iommu_free_dmars(void)
  946. list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
  947. list_del(&rmrru->list);
  948. dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
  949. + kfree(rmrru->resv);
  950. kfree(rmrru);
  951. }
  952. @@ -5220,6 +5235,45 @@ static void intel_iommu_remove_device(st
  953. iommu_device_unlink(iommu->iommu_dev, dev);
  954. }
  955. +static void intel_iommu_get_resv_regions(struct device *device,
  956. + struct list_head *head)
  957. +{
  958. + struct iommu_resv_region *reg;
  959. + struct dmar_rmrr_unit *rmrr;
  960. + struct device *i_dev;
  961. + int i;
  962. +
  963. + rcu_read_lock();
  964. + for_each_rmrr_units(rmrr) {
  965. + for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
  966. + i, i_dev) {
  967. + if (i_dev != device)
  968. + continue;
  969. +
  970. + list_add_tail(&rmrr->resv->list, head);
  971. + }
  972. + }
  973. + rcu_read_unlock();
  974. +
  975. + reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
  976. + IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
  977. + 0, IOMMU_RESV_MSI);
  978. + if (!reg)
  979. + return;
  980. + list_add_tail(&reg->list, head);
  981. +}
  982. +
  983. +static void intel_iommu_put_resv_regions(struct device *dev,
  984. + struct list_head *head)
  985. +{
  986. + struct iommu_resv_region *entry, *next;
  987. +
  988. + list_for_each_entry_safe(entry, next, head, list) {
  989. + if (entry->type == IOMMU_RESV_RESERVED)
  990. + kfree(entry);
  991. + }
  992. +}
  993. +
  994. #ifdef CONFIG_INTEL_IOMMU_SVM
  995. #define MAX_NR_PASID_BITS (20)
  996. static inline unsigned long intel_iommu_get_pts(struct intel_iommu *iommu)
  997. @@ -5350,19 +5404,21 @@ struct intel_iommu *intel_svm_device_to_
  998. #endif /* CONFIG_INTEL_IOMMU_SVM */
  999. static const struct iommu_ops intel_iommu_ops = {
  1000. - .capable = intel_iommu_capable,
  1001. - .domain_alloc = intel_iommu_domain_alloc,
  1002. - .domain_free = intel_iommu_domain_free,
  1003. - .attach_dev = intel_iommu_attach_device,
  1004. - .detach_dev = intel_iommu_detach_device,
  1005. - .map = intel_iommu_map,
  1006. - .unmap = intel_iommu_unmap,
  1007. - .map_sg = default_iommu_map_sg,
  1008. - .iova_to_phys = intel_iommu_iova_to_phys,
  1009. - .add_device = intel_iommu_add_device,
  1010. - .remove_device = intel_iommu_remove_device,
  1011. - .device_group = pci_device_group,
  1012. - .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
  1013. + .capable = intel_iommu_capable,
  1014. + .domain_alloc = intel_iommu_domain_alloc,
  1015. + .domain_free = intel_iommu_domain_free,
  1016. + .attach_dev = intel_iommu_attach_device,
  1017. + .detach_dev = intel_iommu_detach_device,
  1018. + .map = intel_iommu_map,
  1019. + .unmap = intel_iommu_unmap,
  1020. + .map_sg = default_iommu_map_sg,
  1021. + .iova_to_phys = intel_iommu_iova_to_phys,
  1022. + .add_device = intel_iommu_add_device,
  1023. + .remove_device = intel_iommu_remove_device,
  1024. + .get_resv_regions = intel_iommu_get_resv_regions,
  1025. + .put_resv_regions = intel_iommu_put_resv_regions,
  1026. + .device_group = pci_device_group,
  1027. + .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
  1028. };
  1029. static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
  1030. --- a/drivers/iommu/iommu.c
  1031. +++ b/drivers/iommu/iommu.c
  1032. @@ -36,6 +36,7 @@
  1033. static struct kset *iommu_group_kset;
  1034. static DEFINE_IDA(iommu_group_ida);
  1035. +static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
  1036. struct iommu_callback_data {
  1037. const struct iommu_ops *ops;
  1038. @@ -68,6 +69,13 @@ struct iommu_group_attribute {
  1039. const char *buf, size_t count);
  1040. };
  1041. +static const char * const iommu_group_resv_type_string[] = {
  1042. + [IOMMU_RESV_DIRECT] = "direct",
  1043. + [IOMMU_RESV_RESERVED] = "reserved",
  1044. + [IOMMU_RESV_MSI] = "msi",
  1045. + [IOMMU_RESV_SW_MSI] = "msi",
  1046. +};
  1047. +
  1048. #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
  1049. struct iommu_group_attribute iommu_group_attr_##_name = \
  1050. __ATTR(_name, _mode, _show, _store)
  1051. @@ -86,6 +94,18 @@ static int __iommu_attach_group(struct i
  1052. static void __iommu_detach_group(struct iommu_domain *domain,
  1053. struct iommu_group *group);
  1054. +static int __init iommu_set_def_domain_type(char *str)
  1055. +{
  1056. + bool pt;
  1057. +
  1058. + if (!str || strtobool(str, &pt))
  1059. + return -EINVAL;
  1060. +
  1061. + iommu_def_domain_type = pt ? IOMMU_DOMAIN_IDENTITY : IOMMU_DOMAIN_DMA;
  1062. + return 0;
  1063. +}
  1064. +early_param("iommu.passthrough", iommu_set_def_domain_type);
  1065. +
  1066. static ssize_t iommu_group_attr_show(struct kobject *kobj,
  1067. struct attribute *__attr, char *buf)
  1068. {
  1069. @@ -133,8 +153,131 @@ static ssize_t iommu_group_show_name(str
  1070. return sprintf(buf, "%s\n", group->name);
  1071. }
  1072. +/**
  1073. + * iommu_insert_resv_region - Insert a new region in the
  1074. + * list of reserved regions.
  1075. + * @new: new region to insert
  1076. + * @regions: list of regions
  1077. + *
  1078. + * The new element is sorted by address with respect to the other
  1079. + * regions of the same type. In case it overlaps with another
  1080. + * region of the same type, regions are merged. In case it
  1081. + * overlaps with another region of different type, regions are
  1082. + * not merged.
  1083. + */
  1084. +static int iommu_insert_resv_region(struct iommu_resv_region *new,
  1085. + struct list_head *regions)
  1086. +{
  1087. + struct iommu_resv_region *region;
  1088. + phys_addr_t start = new->start;
  1089. + phys_addr_t end = new->start + new->length - 1;
  1090. + struct list_head *pos = regions->next;
  1091. +
  1092. + while (pos != regions) {
  1093. + struct iommu_resv_region *entry =
  1094. + list_entry(pos, struct iommu_resv_region, list);
  1095. + phys_addr_t a = entry->start;
  1096. + phys_addr_t b = entry->start + entry->length - 1;
  1097. + int type = entry->type;
  1098. +
  1099. + if (end < a) {
  1100. + goto insert;
  1101. + } else if (start > b) {
  1102. + pos = pos->next;
  1103. + } else if ((start >= a) && (end <= b)) {
  1104. + if (new->type == type)
  1105. + goto done;
  1106. + else
  1107. + pos = pos->next;
  1108. + } else {
  1109. + if (new->type == type) {
  1110. + phys_addr_t new_start = min(a, start);
  1111. + phys_addr_t new_end = max(b, end);
  1112. +
  1113. + list_del(&entry->list);
  1114. + entry->start = new_start;
  1115. + entry->length = new_end - new_start + 1;
  1116. + iommu_insert_resv_region(entry, regions);
  1117. + } else {
  1118. + pos = pos->next;
  1119. + }
  1120. + }
  1121. + }
  1122. +insert:
  1123. + region = iommu_alloc_resv_region(new->start, new->length,
  1124. + new->prot, new->type);
  1125. + if (!region)
  1126. + return -ENOMEM;
  1127. +
  1128. + list_add_tail(&region->list, pos);
  1129. +done:
  1130. + return 0;
  1131. +}
  1132. +
  1133. +static int
  1134. +iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
  1135. + struct list_head *group_resv_regions)
  1136. +{
  1137. + struct iommu_resv_region *entry;
  1138. + int ret;
  1139. +
  1140. + list_for_each_entry(entry, dev_resv_regions, list) {
  1141. + ret = iommu_insert_resv_region(entry, group_resv_regions);
  1142. + if (ret)
  1143. + break;
  1144. + }
  1145. + return ret;
  1146. +}
  1147. +
  1148. +int iommu_get_group_resv_regions(struct iommu_group *group,
  1149. + struct list_head *head)
  1150. +{
  1151. + struct iommu_device *device;
  1152. + int ret = 0;
  1153. +
  1154. + mutex_lock(&group->mutex);
  1155. + list_for_each_entry(device, &group->devices, list) {
  1156. + struct list_head dev_resv_regions;
  1157. +
  1158. + INIT_LIST_HEAD(&dev_resv_regions);
  1159. + iommu_get_resv_regions(device->dev, &dev_resv_regions);
  1160. + ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
  1161. + iommu_put_resv_regions(device->dev, &dev_resv_regions);
  1162. + if (ret)
  1163. + break;
  1164. + }
  1165. + mutex_unlock(&group->mutex);
  1166. + return ret;
  1167. +}
  1168. +EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
  1169. +
  1170. +static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
  1171. + char *buf)
  1172. +{
  1173. + struct iommu_resv_region *region, *next;
  1174. + struct list_head group_resv_regions;
  1175. + char *str = buf;
  1176. +
  1177. + INIT_LIST_HEAD(&group_resv_regions);
  1178. + iommu_get_group_resv_regions(group, &group_resv_regions);
  1179. +
  1180. + list_for_each_entry_safe(region, next, &group_resv_regions, list) {
  1181. + str += sprintf(str, "0x%016llx 0x%016llx %s\n",
  1182. + (long long int)region->start,
  1183. + (long long int)(region->start +
  1184. + region->length - 1),
  1185. + iommu_group_resv_type_string[region->type]);
  1186. + kfree(region);
  1187. + }
  1188. +
  1189. + return (str - buf);
  1190. +}
  1191. +
  1192. static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
  1193. +static IOMMU_GROUP_ATTR(reserved_regions, 0444,
  1194. + iommu_group_show_resv_regions, NULL);
  1195. +
  1196. static void iommu_group_release(struct kobject *kobj)
  1197. {
  1198. struct iommu_group *group = to_iommu_group(kobj);
  1199. @@ -212,6 +355,11 @@ struct iommu_group *iommu_group_alloc(vo
  1200. */
  1201. kobject_put(&group->kobj);
  1202. + ret = iommu_group_create_file(group,
  1203. + &iommu_group_attr_reserved_regions);
  1204. + if (ret)
  1205. + return ERR_PTR(ret);
  1206. +
  1207. pr_debug("Allocated group %d\n", group->id);
  1208. return group;
  1209. @@ -318,7 +466,7 @@ static int iommu_group_create_direct_map
  1210. struct device *dev)
  1211. {
  1212. struct iommu_domain *domain = group->default_domain;
  1213. - struct iommu_dm_region *entry;
  1214. + struct iommu_resv_region *entry;
  1215. struct list_head mappings;
  1216. unsigned long pg_size;
  1217. int ret = 0;
  1218. @@ -331,18 +479,21 @@ static int iommu_group_create_direct_map
  1219. pg_size = 1UL << __ffs(domain->pgsize_bitmap);
  1220. INIT_LIST_HEAD(&mappings);
  1221. - iommu_get_dm_regions(dev, &mappings);
  1222. + iommu_get_resv_regions(dev, &mappings);
  1223. /* We need to consider overlapping regions for different devices */
  1224. list_for_each_entry(entry, &mappings, list) {
  1225. dma_addr_t start, end, addr;
  1226. - if (domain->ops->apply_dm_region)
  1227. - domain->ops->apply_dm_region(dev, domain, entry);
  1228. + if (domain->ops->apply_resv_region)
  1229. + domain->ops->apply_resv_region(dev, domain, entry);
  1230. start = ALIGN(entry->start, pg_size);
  1231. end = ALIGN(entry->start + entry->length, pg_size);
  1232. + if (entry->type != IOMMU_RESV_DIRECT)
  1233. + continue;
  1234. +
  1235. for (addr = start; addr < end; addr += pg_size) {
  1236. phys_addr_t phys_addr;
  1237. @@ -358,7 +509,7 @@ static int iommu_group_create_direct_map
  1238. }
  1239. out:
  1240. - iommu_put_dm_regions(dev, &mappings);
  1241. + iommu_put_resv_regions(dev, &mappings);
  1242. return ret;
  1243. }
  1244. @@ -563,6 +714,19 @@ struct iommu_group *iommu_group_get(stru
  1245. EXPORT_SYMBOL_GPL(iommu_group_get);
  1246. /**
  1247. + * iommu_group_ref_get - Increment reference on a group
  1248. + * @group: the group to use, must not be NULL
  1249. + *
  1250. + * This function is called by iommu drivers to take additional references on an
  1251. + * existing group. Returns the given group for convenience.
  1252. + */
  1253. +struct iommu_group *iommu_group_ref_get(struct iommu_group *group)
  1254. +{
  1255. + kobject_get(group->devices_kobj);
  1256. + return group;
  1257. +}
  1258. +
  1259. +/**
  1260. * iommu_group_put - Decrement group reference
  1261. * @group: the group to use
  1262. *
  1263. @@ -845,10 +1009,19 @@ struct iommu_group *iommu_group_get_for_
  1264. * IOMMU driver.
  1265. */
  1266. if (!group->default_domain) {
  1267. - group->default_domain = __iommu_domain_alloc(dev->bus,
  1268. - IOMMU_DOMAIN_DMA);
  1269. + struct iommu_domain *dom;
  1270. +
  1271. + dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
  1272. + if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
  1273. + dev_warn(dev,
  1274. + "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
  1275. + iommu_def_domain_type);
  1276. + dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
  1277. + }
  1278. +
  1279. + group->default_domain = dom;
  1280. if (!group->domain)
  1281. - group->domain = group->default_domain;
  1282. + group->domain = dom;
  1283. }
  1284. ret = iommu_group_add_device(group, dev);
  1285. @@ -1557,20 +1730,38 @@ int iommu_domain_set_attr(struct iommu_d
  1286. }
  1287. EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
  1288. -void iommu_get_dm_regions(struct device *dev, struct list_head *list)
  1289. +void iommu_get_resv_regions(struct device *dev, struct list_head *list)
  1290. {
  1291. const struct iommu_ops *ops = dev->bus->iommu_ops;
  1292. - if (ops && ops->get_dm_regions)
  1293. - ops->get_dm_regions(dev, list);
  1294. + if (ops && ops->get_resv_regions)
  1295. + ops->get_resv_regions(dev, list);
  1296. }
  1297. -void iommu_put_dm_regions(struct device *dev, struct list_head *list)
  1298. +void iommu_put_resv_regions(struct device *dev, struct list_head *list)
  1299. {
  1300. const struct iommu_ops *ops = dev->bus->iommu_ops;
  1301. - if (ops && ops->put_dm_regions)
  1302. - ops->put_dm_regions(dev, list);
  1303. + if (ops && ops->put_resv_regions)
  1304. + ops->put_resv_regions(dev, list);
  1305. +}
  1306. +
  1307. +struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
  1308. + size_t length, int prot,
  1309. + enum iommu_resv_type type)
  1310. +{
  1311. + struct iommu_resv_region *region;
  1312. +
  1313. + region = kzalloc(sizeof(*region), GFP_KERNEL);
  1314. + if (!region)
  1315. + return NULL;
  1316. +
  1317. + INIT_LIST_HEAD(&region->list);
  1318. + region->start = start;
  1319. + region->length = length;
  1320. + region->prot = prot;
  1321. + region->type = type;
  1322. + return region;
  1323. }
  1324. /* Request that a device is direct mapped by the IOMMU */
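
Editor's note: the group-level helper above merges the per-device reserved regions by type and hands the caller its own copies, which is exactly what the new reserved_regions sysfs attribute prints. A sketch of another consumer of iommu_get_group_resv_regions(), freeing the merged copies just as the sysfs handler does (only the example_ wrapper is illustrative):

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>

static void example_dump_group_resv(struct iommu_group *group)
{
	struct iommu_resv_region *region, *next;
	LIST_HEAD(regions);

	iommu_get_group_resv_regions(group, &regions);

	list_for_each_entry_safe(region, next, &regions, list) {
		pr_info("resv: 0x%016llx-0x%016llx type %d\n",
			(unsigned long long)region->start,
			(unsigned long long)(region->start +
					     region->length - 1),
			region->type);
		/* The caller owns the merged copies and must free them. */
		kfree(region);
	}
}
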
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -410,6 +410,8 @@ static struct iommu_group *mtk_iommu_dev
 		data->m4u_group = iommu_group_alloc();
 		if (IS_ERR(data->m4u_group))
 			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+	} else {
+		iommu_group_ref_get(data->m4u_group);
 	}
 	return data->m4u_group;
 }
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -502,6 +502,8 @@ static struct iommu_group *mtk_iommu_dev
 		data->m4u_group = iommu_group_alloc();
 		if (IS_ERR(data->m4u_group))
 			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
+	} else {
+		iommu_group_ref_get(data->m4u_group);
 	}
 	return data->m4u_group;
 }
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -27,6 +27,7 @@ int iommu_dma_init(void);
 /* Domain management interface for IOMMU drivers */
 int iommu_get_dma_cookie(struct iommu_domain *domain);
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base);
 void iommu_put_dma_cookie(struct iommu_domain *domain);
 /* Setup call for arch DMA mapping code */
@@ -66,6 +67,7 @@ int iommu_dma_mapping_error(struct devic
 /* The DMA API isn't _quite_ the whole story, though... */
 void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 #else
@@ -82,6 +84,11 @@ static inline int iommu_get_dma_cookie(s
 	return -ENODEV;
 }
+static inline int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
+	return -ENODEV;
+}
+
 static inline void iommu_put_dma_cookie(struct iommu_domain *domain)
 {
 }
@@ -90,6 +97,10 @@ static inline void iommu_dma_map_msi_msg
 {
 }
+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
 #endif /* CONFIG_IOMMU_DMA */
 #endif /* __KERNEL__ */
 #endif /* __DMA_IOMMU_H */
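iommu_get_msi_cookie() is the counterpart of iommu_get_dma_cookie() for unmanaged domains: the domain's owner (VFIO, for instance) picks an IOVA window and hands it to the DMA layer so MSI doorbells can be remapped there. A minimal sketch with a hypothetical caller and an illustrative base address:

#include <linux/dma-iommu.h>
#include <linux/iommu.h>

static int my_setup_msi_window(struct iommu_domain *domain)
{
	/* Hypothetical IOVA range the domain owner has set aside for MSIs. */
	dma_addr_t msi_base = 0x8000000;

	/* MSI cookies are only meaningful for unmanaged domains. */
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	return iommu_get_msi_cookie(domain, msi_base);
}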
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -117,18 +117,32 @@ enum iommu_attr {
 	DOMAIN_ATTR_MAX,
 };
+/* These are the possible reserved region types */
+enum iommu_resv_type {
+	/* Memory regions which must be mapped 1:1 at all times */
+	IOMMU_RESV_DIRECT,
+	/* Arbitrary "never map this or give it to a device" address ranges */
+	IOMMU_RESV_RESERVED,
+	/* Hardware MSI region (untranslated) */
+	IOMMU_RESV_MSI,
+	/* Software-managed MSI translation window */
+	IOMMU_RESV_SW_MSI,
+};
+
 /**
- * struct iommu_dm_region - descriptor for a direct mapped memory region
+ * struct iommu_resv_region - descriptor for a reserved memory region
  * @list: Linked list pointers
  * @start: System physical start address of the region
  * @length: Length of the region in bytes
  * @prot: IOMMU Protection flags (READ/WRITE/...)
+ * @type: Type of the reserved region
  */
-struct iommu_dm_region {
+struct iommu_resv_region {
 	struct list_head list;
 	phys_addr_t start;
 	size_t length;
 	int prot;
+	enum iommu_resv_type type;
 };
 #ifdef CONFIG_IOMMU_API
@@ -150,9 +164,9 @@ struct iommu_dm_region {
  * @device_group: find iommu group for a particular device
  * @domain_get_attr: Query domain attributes
  * @domain_set_attr: Change domain attributes
- * @get_dm_regions: Request list of direct mapping requirements for a device
- * @put_dm_regions: Free list of direct mapping requirements for a device
- * @apply_dm_region: Temporary helper call-back for iova reserved ranges
+ * @get_resv_regions: Request list of reserved regions for a device
+ * @put_resv_regions: Free list of reserved regions for a device
+ * @apply_resv_region: Temporary helper call-back for iova reserved ranges
 * @domain_window_enable: Configure and enable a particular window for a domain
 * @domain_window_disable: Disable a particular window for a domain
 * @domain_set_windows: Set the number of windows for a domain
@@ -184,11 +198,12 @@ struct iommu_ops {
 	int (*domain_set_attr)(struct iommu_domain *domain,
 			       enum iommu_attr attr, void *data);
-	/* Request/Free a list of direct mapping requirements for a device */
-	void (*get_dm_regions)(struct device *dev, struct list_head *list);
-	void (*put_dm_regions)(struct device *dev, struct list_head *list);
-	void (*apply_dm_region)(struct device *dev, struct iommu_domain *domain,
-				struct iommu_dm_region *region);
+	/* Request/Free a list of reserved regions for a device */
+	void (*get_resv_regions)(struct device *dev, struct list_head *list);
+	void (*put_resv_regions)(struct device *dev, struct list_head *list);
+	void (*apply_resv_region)(struct device *dev,
+				  struct iommu_domain *domain,
+				  struct iommu_resv_region *region);
 	/* Window handling functions */
 	int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
@@ -233,9 +248,14 @@ extern phys_addr_t iommu_iova_to_phys(st
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
-extern void iommu_get_dm_regions(struct device *dev, struct list_head *list);
-extern void iommu_put_dm_regions(struct device *dev, struct list_head *list);
+extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
+extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
 extern int iommu_request_dm_for_dev(struct device *dev);
+extern struct iommu_resv_region *
+iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot,
+			enum iommu_resv_type type);
+extern int iommu_get_group_resv_regions(struct iommu_group *group,
+					struct list_head *head);
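iommu_get_group_resv_regions() aggregates the per-device reserved-region lists into one snapshot for the whole group, and the entries it returns are presumably separately allocated and owned by the caller, which must free them. A minimal sketch of a consumer (the function name is hypothetical):

#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>

static void my_dump_group_resv_regions(struct iommu_group *group)
{
	struct iommu_resv_region *region, *next;
	LIST_HEAD(head);

	if (iommu_get_group_resv_regions(group, &head))
		return;

	list_for_each_entry(region, &head, list)
		pr_info("resv region: start %pa length %zu type %d\n",
			&region->start, region->length, region->type);

	/* The snapshot entries belong to us; free them when done. */
	list_for_each_entry_safe(region, next, &head, list)
		kfree(region);
}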
 extern int iommu_attach_group(struct iommu_domain *domain,
 			      struct iommu_group *group);
@@ -253,6 +273,7 @@ extern void iommu_group_remove_device(st
 extern int iommu_group_for_each_dev(struct iommu_group *group, void *data,
 				    int (*fn)(struct device *, void *));
 extern struct iommu_group *iommu_group_get(struct device *dev);
+extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
 extern void iommu_group_put(struct iommu_group *group);
 extern int iommu_group_register_notifier(struct iommu_group *group,
 					 struct notifier_block *nb);
@@ -439,16 +460,22 @@ static inline void iommu_set_fault_handl
 {
 }
-static inline void iommu_get_dm_regions(struct device *dev,
+static inline void iommu_get_resv_regions(struct device *dev,
 					struct list_head *list)
 {
 }
-static inline void iommu_put_dm_regions(struct device *dev,
+static inline void iommu_put_resv_regions(struct device *dev,
 					struct list_head *list)
 {
 }
+static inline int iommu_get_group_resv_regions(struct iommu_group *group,
+					       struct list_head *head)
+{
+	return -ENODEV;
+}
+
 static inline int iommu_request_dm_for_dev(struct device *dev)
 {
 	return -ENODEV;