818-vfio-support-layerscape.patch 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166
  1. From 8d82d92ea697145c32bb36d9f39afd5bb0927bc2 Mon Sep 17 00:00:00 2001
  2. From: Yangbo Lu <[email protected]>
  3. Date: Wed, 27 Sep 2017 10:34:46 +0800
  4. Subject: [PATCH] vfio: support layerscape
  5. This is an integrated patch for layerscape vfio support.
  6. Signed-off-by: Bharat Bhushan <[email protected]>
  7. Signed-off-by: Eric Auger <[email protected]>
  8. Signed-off-by: Robin Murphy <[email protected]>
  9. Signed-off-by: Wei Yongjun <[email protected]>
  10. Signed-off-by: Yangbo Lu <[email protected]>
  11. ---
  12. drivers/vfio/Kconfig | 1 +
  13. drivers/vfio/Makefile | 1 +
  14. drivers/vfio/fsl-mc/Kconfig | 9 +
  15. drivers/vfio/fsl-mc/Makefile | 2 +
  16. drivers/vfio/fsl-mc/vfio_fsl_mc.c | 753 ++++++++++++++++++++++++++++++
  17. drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 199 ++++++++
  18. drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 55 +++
  19. drivers/vfio/vfio_iommu_type1.c | 39 +-
  20. include/uapi/linux/vfio.h | 1 +
  21. 9 files changed, 1058 insertions(+), 2 deletions(-)
  22. create mode 100644 drivers/vfio/fsl-mc/Kconfig
  23. create mode 100644 drivers/vfio/fsl-mc/Makefile
  24. create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c
  25. create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
  26. create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
  27. --- a/drivers/vfio/Kconfig
  28. +++ b/drivers/vfio/Kconfig
  29. @@ -48,4 +48,5 @@ menuconfig VFIO_NOIOMMU
  30. source "drivers/vfio/pci/Kconfig"
  31. source "drivers/vfio/platform/Kconfig"
  32. +source "drivers/vfio/fsl-mc/Kconfig"
  33. source "virt/lib/Kconfig"
  34. --- a/drivers/vfio/Makefile
  35. +++ b/drivers/vfio/Makefile
  36. @@ -7,3 +7,4 @@ obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vf
  37. obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o
  38. obj-$(CONFIG_VFIO_PCI) += pci/
  39. obj-$(CONFIG_VFIO_PLATFORM) += platform/
  40. +obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/
  41. --- /dev/null
  42. +++ b/drivers/vfio/fsl-mc/Kconfig
  43. @@ -0,0 +1,9 @@
  44. +config VFIO_FSL_MC
  45. + tristate "VFIO support for QorIQ DPAA2 fsl-mc bus devices"
  46. + depends on VFIO && FSL_MC_BUS && EVENTFD
  47. + help
  48. + Driver to enable support for the VFIO QorIQ DPAA2 fsl-mc
  49. + (Management Complex) devices. This is required to passthrough
  50. + fsl-mc bus devices using the VFIO framework.
  51. +
  52. + If you don't know what to do here, say N.
  53. --- /dev/null
  54. +++ b/drivers/vfio/fsl-mc/Makefile
  55. @@ -0,0 +1,2 @@
  56. +vfio-fsl_mc-y := vfio_fsl_mc.o
  57. +obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o
  58. --- /dev/null
  59. +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
  60. @@ -0,0 +1,753 @@
  61. +/*
  62. + * Freescale Management Complex (MC) device passthrough using VFIO
  63. + *
  64. + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
  65. + * Copyright 2016-2017 NXP
  66. + * Author: Bharat Bhushan <[email protected]>
  67. + *
  68. + * This file is licensed under the terms of the GNU General Public
  69. + * License version 2. This program is licensed "as is" without any
  70. + * warranty of any kind, whether express or implied.
  71. + */
  72. +
  73. +#include <linux/device.h>
  74. +#include <linux/iommu.h>
  75. +#include <linux/module.h>
  76. +#include <linux/mutex.h>
  77. +#include <linux/slab.h>
  78. +#include <linux/types.h>
  79. +#include <linux/vfio.h>
  80. +#include <linux/delay.h>
  81. +
  82. +#include "../../staging/fsl-mc/include/mc.h"
  83. +#include "../../staging/fsl-mc/include/mc-bus.h"
  84. +#include "../../staging/fsl-mc/include/mc-sys.h"
  85. +#include "../../staging/fsl-mc/bus/dprc-cmd.h"
  86. +
  87. +#include "vfio_fsl_mc_private.h"
  88. +
  89. +#define DRIVER_VERSION "0.10"
  90. +#define DRIVER_AUTHOR "Bharat Bhushan <[email protected]>"
  91. +#define DRIVER_DESC "VFIO for FSL-MC devices - User Level meta-driver"
  92. +
  93. +static DEFINE_MUTEX(driver_lock);
  94. +
  95. +/* FSL-MC device regions (address and size) are aligned to 64K.
  96. + * While MC firmware reports size less than 64K for some objects (it actually
  97. + * reports size which does not include reserved space beyond valid bytes).
  98. + * Align the size to PAGE_SIZE for userspace to mmap.
  99. + */
  100. +static size_t aligned_region_size(struct fsl_mc_device *mc_dev, int index)
  101. +{
  102. + size_t size;
  103. +
  104. + size = resource_size(&mc_dev->regions[index]);
  105. + return PAGE_ALIGN(size);
  106. +}
  107. +
  108. +static int vfio_fsl_mc_regions_init(struct vfio_fsl_mc_device *vdev)
  109. +{
  110. + struct fsl_mc_device *mc_dev = vdev->mc_dev;
  111. + int count = mc_dev->obj_desc.region_count;
  112. + int i;
  113. +
  114. + vdev->regions = kcalloc(count, sizeof(struct vfio_fsl_mc_region),
  115. + GFP_KERNEL);
  116. + if (!vdev->regions)
  117. + return -ENOMEM;
  118. +
  119. + for (i = 0; i < mc_dev->obj_desc.region_count; i++) {
  120. + vdev->regions[i].addr = mc_dev->regions[i].start;
  121. + vdev->regions[i].size = aligned_region_size(mc_dev, i);
  122. + vdev->regions[i].type = VFIO_FSL_MC_REGION_TYPE_MMIO;
  123. + if (mc_dev->regions[i].flags & IORESOURCE_CACHEABLE)
  124. + vdev->regions[i].type |=
  125. + VFIO_FSL_MC_REGION_TYPE_CACHEABLE;
  126. + vdev->regions[i].flags = VFIO_REGION_INFO_FLAG_MMAP;
  127. + vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
  128. + if (!(mc_dev->regions[i].flags & IORESOURCE_READONLY))
  129. + vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
  130. + }
  131. +
  132. + vdev->num_regions = mc_dev->obj_desc.region_count;
  133. + return 0;
  134. +}
  135. +
  136. +static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
  137. +{
  138. + int i;
  139. +
  140. + for (i = 0; i < vdev->num_regions; i++)
  141. + iounmap(vdev->regions[i].ioaddr);
  142. +
  143. + vdev->num_regions = 0;
  144. + kfree(vdev->regions);
  145. +}
  146. +
  147. +static int vfio_fsl_mc_open(void *device_data)
  148. +{
  149. + struct vfio_fsl_mc_device *vdev = device_data;
  150. + int ret;
  151. +
  152. + if (!try_module_get(THIS_MODULE))
  153. + return -ENODEV;
  154. +
  155. + mutex_lock(&driver_lock);
  156. + if (!vdev->refcnt) {
  157. + ret = vfio_fsl_mc_regions_init(vdev);
  158. + if (ret)
  159. + goto error_region_init;
  160. +
  161. + ret = vfio_fsl_mc_irqs_init(vdev);
  162. + if (ret)
  163. + goto error_irq_init;
  164. + }
  165. +
  166. + vdev->refcnt++;
  167. + mutex_unlock(&driver_lock);
  168. + return 0;
  169. +
  170. +error_irq_init:
  171. + vfio_fsl_mc_regions_cleanup(vdev);
  172. +error_region_init:
  173. + mutex_unlock(&driver_lock);
  174. + if (ret)
  175. + module_put(THIS_MODULE);
  176. +
  177. + return ret;
  178. +}
  179. +
  180. +static void vfio_fsl_mc_release(void *device_data)
  181. +{
  182. + struct vfio_fsl_mc_device *vdev = device_data;
  183. + struct fsl_mc_device *mc_dev = vdev->mc_dev;
  184. +
  185. + mutex_lock(&driver_lock);
  186. +
  187. + if (!(--vdev->refcnt)) {
  188. + vfio_fsl_mc_regions_cleanup(vdev);
  189. + vfio_fsl_mc_irqs_cleanup(vdev);
  190. + }
  191. +
  192. + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
  193. + dprc_reset_container(mc_dev->mc_io, 0, mc_dev->mc_handle,
  194. + mc_dev->obj_desc.id);
  195. +
  196. + mutex_unlock(&driver_lock);
  197. +
  198. + module_put(THIS_MODULE);
  199. +}
  200. +
  201. +static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd,
  202. + unsigned long arg)
  203. +{
  204. + struct vfio_fsl_mc_device *vdev = device_data;
  205. + struct fsl_mc_device *mc_dev = vdev->mc_dev;
  206. + unsigned long minsz;
  207. +
  208. + if (WARN_ON(!mc_dev))
  209. + return -ENODEV;
  210. +
  211. + switch (cmd) {
  212. + case VFIO_DEVICE_GET_INFO:
  213. + {
  214. + struct vfio_device_info info;
  215. +
  216. + minsz = offsetofend(struct vfio_device_info, num_irqs);
  217. +
  218. + if (copy_from_user(&info, (void __user *)arg, minsz))
  219. + return -EFAULT;
  220. +
  221. + if (info.argsz < minsz)
  222. + return -EINVAL;
  223. +
  224. + info.flags = VFIO_DEVICE_FLAGS_FSL_MC;
  225. + info.num_regions = mc_dev->obj_desc.region_count;
  226. + info.num_irqs = mc_dev->obj_desc.irq_count;
  227. +
  228. + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
  229. + }
  230. + case VFIO_DEVICE_GET_REGION_INFO:
  231. + {
  232. + struct vfio_region_info info;
  233. +
  234. + minsz = offsetofend(struct vfio_region_info, offset);
  235. +
  236. + if (copy_from_user(&info, (void __user *)arg, minsz))
  237. + return -EFAULT;
  238. +
  239. + if (info.argsz < minsz)
  240. + return -EINVAL;
  241. +
  242. + if (info.index >= vdev->num_regions)
  243. + return -EINVAL;
  244. +
  245. + /* map offset to the physical address */
  246. + info.offset = VFIO_FSL_MC_INDEX_TO_OFFSET(info.index);
  247. + info.size = vdev->regions[info.index].size;
  248. + info.flags = vdev->regions[info.index].flags;
  249. +
  250. + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
  251. + }
  252. + case VFIO_DEVICE_GET_IRQ_INFO:
  253. + {
  254. + struct vfio_irq_info info;
  255. +
  256. + minsz = offsetofend(struct vfio_irq_info, count);
  257. + if (copy_from_user(&info, (void __user *)arg, minsz))
  258. + return -EFAULT;
  259. +
  260. + if (info.argsz < minsz)
  261. + return -EINVAL;
  262. +
  263. + if (info.index >= mc_dev->obj_desc.irq_count)
  264. + return -EINVAL;
  265. +
  266. + if (vdev->mc_irqs != NULL) {
  267. + info.flags = vdev->mc_irqs[info.index].flags;
  268. + info.count = vdev->mc_irqs[info.index].count;
  269. + } else {
  270. + /*
  271. + * If IRQs are not initialized then these can not
  272. + * be configured and used by user-space.
  273. + */
  274. + info.flags = 0;
  275. + info.count = 0;
  276. + }
  277. +
  278. + return copy_to_user((void __user *)arg, &info, minsz) ? -EFAULT : 0;
  279. + }
  280. + case VFIO_DEVICE_SET_IRQS:
  281. + {
  282. + struct vfio_irq_set hdr;
  283. + u8 *data = NULL;
  284. + int ret = 0;
  285. +
  286. + minsz = offsetofend(struct vfio_irq_set, count);
  287. +
  288. + if (copy_from_user(&hdr, (void __user *)arg, minsz))
  289. + return -EFAULT;
  290. +
  291. + if (hdr.argsz < minsz)
  292. + return -EINVAL;
  293. +
  294. + if (hdr.index >= mc_dev->obj_desc.irq_count)
  295. + return -EINVAL;
  296. +
  297. + if (hdr.start != 0 || hdr.count > 1)
  298. + return -EINVAL;
  299. +
  300. + if (hdr.count == 0 &&
  301. + (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE) ||
  302. + !(hdr.flags & VFIO_IRQ_SET_ACTION_TRIGGER)))
  303. + return -EINVAL;
  304. +
  305. + if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
  306. + VFIO_IRQ_SET_ACTION_TYPE_MASK))
  307. + return -EINVAL;
  308. +
  309. + if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
  310. + size_t size;
  311. +
  312. + if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
  313. + size = sizeof(uint8_t);
  314. + else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
  315. + size = sizeof(int32_t);
  316. + else
  317. + return -EINVAL;
  318. +
  319. + if (hdr.argsz - minsz < hdr.count * size)
  320. + return -EINVAL;
  321. +
  322. + data = memdup_user((void __user *)(arg + minsz),
  323. + hdr.count * size);
  324. + if (IS_ERR(data))
  325. + return PTR_ERR(data);
  326. + }
  327. +
  328. + ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags,
  329. + hdr.index, hdr.start,
  330. + hdr.count, data);
  331. + kfree(data); return ret;
  332. + }
  333. + case VFIO_DEVICE_RESET:
  334. + {
  335. + return -EINVAL;
  336. + }
  337. + default:
  338. + return -EINVAL;
  339. + }
  340. +}
  341. +
  342. +static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf,
  343. + size_t count, loff_t *ppos)
  344. +{
  345. + struct vfio_fsl_mc_device *vdev = device_data;
  346. + unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
  347. + loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
  348. + struct vfio_fsl_mc_region *region;
  349. + uint64_t data[8];
  350. + int i;
  351. +
  352. + /* Read ioctl supported only for DPRC device */
  353. + if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
  354. + return -EINVAL;
  355. +
  356. + if (index >= vdev->num_regions)
  357. + return -EINVAL;
  358. +
  359. + region = &vdev->regions[index];
  360. +
  361. + if (!(region->flags & VFIO_REGION_INFO_FLAG_READ))
  362. + return -EINVAL;
  363. +
  364. + if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
  365. + return -EINVAL;
  366. +
  367. + if (!region->ioaddr) {
  368. + region->ioaddr = ioremap_nocache(region->addr, region->size);
  369. + if (!region->ioaddr)
  370. + return -ENOMEM;
  371. + }
  372. +
  373. + if (count != 64 || off != 0)
  374. + return -EINVAL;
  375. +
  376. + for (i = 7; i >= 0; i--)
  377. + data[i] = readq(region->ioaddr + i * sizeof(uint64_t));
  378. +
  379. + if (copy_to_user(buf, data, 64))
  380. + return -EFAULT;
  381. +
  382. + return count;
  383. +}
  384. +
  385. +#define MC_CMD_COMPLETION_TIMEOUT_MS 5000
  386. +#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500
  387. +
  388. +static int vfio_fsl_mc_dprc_wait_for_response(void __iomem *ioaddr)
  389. +{
  390. + enum mc_cmd_status status;
  391. + unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000;
  392. +
  393. + for (;;) {
  394. + u64 header;
  395. + struct mc_cmd_header *resp_hdr;
  396. +
  397. + __iormb();
  398. + header = readq(ioaddr);
  399. + __iormb();
  400. +
  401. + resp_hdr = (struct mc_cmd_header *)&header;
  402. + status = (enum mc_cmd_status)resp_hdr->status;
  403. + if (status != MC_CMD_STATUS_READY)
  404. + break;
  405. +
  406. + udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS);
  407. + timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS;
  408. + if (timeout_usecs == 0)
  409. + return -ETIMEDOUT;
  410. + }
  411. +
  412. + return 0;
  413. +}
  414. +
  415. +static int vfio_fsl_mc_send_command(void __iomem *ioaddr, uint64_t *cmd_data)
  416. +{
  417. + int i;
  418. +
  419. + /* Write at command header in the end */
  420. + for (i = 7; i >= 0; i--)
  421. + writeq(cmd_data[i], ioaddr + i * sizeof(uint64_t));
  422. +
  423. + /* Wait for response before returning to user-space
  424. + * This can be optimized in future to even prepare response
  425. + * before returning to user-space and avoid read ioctl.
  426. + */
  427. + return vfio_fsl_mc_dprc_wait_for_response(ioaddr);
  428. +}
  429. +
  430. +static int vfio_handle_dprc_commands(void __iomem *ioaddr, uint64_t *cmd_data)
  431. +{
  432. + uint64_t cmd_hdr = cmd_data[0];
  433. + int cmd = (cmd_hdr >> 52) & 0xfff;
  434. +
  435. + switch (cmd) {
  436. + case DPRC_CMDID_OPEN:
  437. + default:
  438. + return vfio_fsl_mc_send_command(ioaddr, cmd_data);
  439. + }
  440. +
  441. + return 0;
  442. +}
  443. +
  444. +static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf,
  445. + size_t count, loff_t *ppos)
  446. +{
  447. + struct vfio_fsl_mc_device *vdev = device_data;
  448. + unsigned int index = VFIO_FSL_MC_OFFSET_TO_INDEX(*ppos);
  449. + loff_t off = *ppos & VFIO_FSL_MC_OFFSET_MASK;
  450. + struct vfio_fsl_mc_region *region;
  451. + uint64_t data[8];
  452. + int ret;
  453. +
  454. + /* Write ioctl supported only for DPRC device */
  455. + if (strcmp(vdev->mc_dev->obj_desc.type, "dprc"))
  456. + return -EINVAL;
  457. +
  458. + if (index >= vdev->num_regions)
  459. + return -EINVAL;
  460. +
  461. + region = &vdev->regions[index];
  462. +
  463. + if (!(region->flags & VFIO_REGION_INFO_FLAG_WRITE))
  464. + return -EINVAL;
  465. +
  466. + if (!(region->type & VFIO_FSL_MC_REGION_TYPE_MMIO))
  467. + return -EINVAL;
  468. +
  469. + if (!region->ioaddr) {
  470. + region->ioaddr = ioremap_nocache(region->addr, region->size);
  471. + if (!region->ioaddr)
  472. + return -ENOMEM;
  473. + }
  474. +
  475. + if (count != 64 || off != 0)
  476. + return -EINVAL;
  477. +
  478. + if (copy_from_user(&data, buf, 64))
  479. + return -EFAULT;
  480. +
  481. + ret = vfio_handle_dprc_commands(region->ioaddr, data);
  482. + if (ret)
  483. + return ret;
  484. +
  485. + return count;
  486. +}
  487. +
  488. +static int vfio_fsl_mc_mmap_mmio(struct vfio_fsl_mc_region region,
  489. + struct vm_area_struct *vma)
  490. +{
  491. + u64 size = vma->vm_end - vma->vm_start;
  492. + u64 pgoff, base;
  493. +
  494. + pgoff = vma->vm_pgoff &
  495. + ((1U << (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
  496. + base = pgoff << PAGE_SHIFT;
  497. +
  498. + if (region.size < PAGE_SIZE || base + size > region.size)
  499. + return -EINVAL;
  500. + /*
  501. + * Set the REGION_TYPE_CACHEABLE (QBman CENA regs) to be the
  502. + * cache inhibited area of the portal to avoid coherency issues
  503. + * if a user migrates to another core.
  504. + */
  505. + if (region.type & VFIO_FSL_MC_REGION_TYPE_CACHEABLE)
  506. + vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
  507. + else
  508. + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  509. +
  510. + vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
  511. +
  512. + return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
  513. + size, vma->vm_page_prot);
  514. +}
  515. +
  516. +/* Allows mmaping fsl_mc device regions in assigned DPRC */
  517. +static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma)
  518. +{
  519. + struct vfio_fsl_mc_device *vdev = device_data;
  520. + struct fsl_mc_device *mc_dev = vdev->mc_dev;
  521. + unsigned long size, addr;
  522. + int index;
  523. +
  524. + index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT);
  525. +
  526. + if (vma->vm_end < vma->vm_start)
  527. + return -EINVAL;
  528. + if (vma->vm_start & ~PAGE_MASK)
  529. + return -EINVAL;
  530. + if (vma->vm_end & ~PAGE_MASK)
  531. + return -EINVAL;
  532. + if (!(vma->vm_flags & VM_SHARED))
  533. + return -EINVAL;
  534. + if (index >= vdev->num_regions)
  535. + return -EINVAL;
  536. +
  537. + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
  538. + return -EINVAL;
  539. +
  540. + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
  541. + && (vma->vm_flags & VM_READ))
  542. + return -EINVAL;
  543. +
  544. + if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
  545. + && (vma->vm_flags & VM_WRITE))
  546. + return -EINVAL;
  547. +
  548. + addr = vdev->regions[index].addr;
  549. + size = vdev->regions[index].size;
  550. +
  551. + vma->vm_private_data = mc_dev;
  552. +
  553. + if (vdev->regions[index].type & VFIO_FSL_MC_REGION_TYPE_MMIO)
  554. + return vfio_fsl_mc_mmap_mmio(vdev->regions[index], vma);
  555. +
  556. + return -EFAULT;
  557. +}
  558. +
  559. +static const struct vfio_device_ops vfio_fsl_mc_ops = {
  560. + .name = "vfio-fsl-mc",
  561. + .open = vfio_fsl_mc_open,
  562. + .release = vfio_fsl_mc_release,
  563. + .ioctl = vfio_fsl_mc_ioctl,
  564. + .read = vfio_fsl_mc_read,
  565. + .write = vfio_fsl_mc_write,
  566. + .mmap = vfio_fsl_mc_mmap,
  567. +};
  568. +
  569. +static int vfio_fsl_mc_initialize_dprc(struct vfio_fsl_mc_device *vdev)
  570. +{
  571. + struct device *root_dprc_dev;
  572. + struct fsl_mc_device *mc_dev = vdev->mc_dev;
  573. + struct device *dev = &mc_dev->dev;
  574. + struct fsl_mc_bus *mc_bus;
  575. + struct irq_domain *mc_msi_domain;
  576. + unsigned int irq_count;
  577. + int ret;
  578. +
  579. + /* device must be DPRC */
  580. + if (strcmp(mc_dev->obj_desc.type, "dprc"))
  581. + return -EINVAL;
  582. +
  583. + /* mc_io must be un-initialized */
  584. + WARN_ON(mc_dev->mc_io);
  585. +
  586. + /* allocate a portal from the root DPRC for vfio use */
  587. + fsl_mc_get_root_dprc(dev, &root_dprc_dev);
  588. + if (WARN_ON(!root_dprc_dev))
  589. + return -EINVAL;
  590. +
  591. + ret = fsl_mc_portal_allocate(to_fsl_mc_device(root_dprc_dev),
  592. + FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
  593. + &mc_dev->mc_io);
  594. + if (ret < 0)
  595. + goto clean_msi_domain;
  596. +
  597. + /* Reset MCP before move on */
  598. + ret = fsl_mc_portal_reset(mc_dev->mc_io);
  599. + if (ret < 0) {
  600. + dev_err(dev, "dprc portal reset failed: error = %d\n", ret);
  601. + goto free_mc_portal;
  602. + }
  603. +
  604. + /* MSI domain set up */
  605. + ret = fsl_mc_find_msi_domain(root_dprc_dev->parent, &mc_msi_domain);
  606. + if (ret < 0)
  607. + goto free_mc_portal;
  608. +
  609. + dev_set_msi_domain(&mc_dev->dev, mc_msi_domain);
  610. +
  611. + ret = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
  612. + &mc_dev->mc_handle);
  613. + if (ret) {
  614. + dev_err(dev, "dprc_open() failed: error = %d\n", ret);
  615. + goto free_mc_portal;
  616. + }
  617. +
  618. + /* Initialize resource pool */
  619. + fsl_mc_init_all_resource_pools(mc_dev);
  620. +
  621. + mc_bus = to_fsl_mc_bus(mc_dev);
  622. +
  623. + if (!mc_bus->irq_resources) {
  624. + irq_count = FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS;
  625. + ret = fsl_mc_populate_irq_pool(mc_bus, irq_count);
  626. + if (ret < 0) {
  627. + dev_err(dev, "%s: Failed to init irq-pool\n", __func__);
  628. + goto clean_resource_pool;
  629. + }
  630. + }
  631. +
  632. + mutex_init(&mc_bus->scan_mutex);
  633. +
  634. + mutex_lock(&mc_bus->scan_mutex);
  635. + ret = dprc_scan_objects(mc_dev, mc_dev->driver_override,
  636. + &irq_count);
  637. + mutex_unlock(&mc_bus->scan_mutex);
  638. + if (ret) {
  639. + dev_err(dev, "dprc_scan_objects() fails (%d)\n", ret);
  640. + goto clean_irq_pool;
  641. + }
  642. +
  643. + if (irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS) {
  644. + dev_warn(&mc_dev->dev,
  645. + "IRQs needed (%u) exceed IRQs preallocated (%u)\n",
  646. + irq_count, FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS);
  647. + }
  648. +
  649. + return 0;
  650. +
  651. +clean_irq_pool:
  652. + fsl_mc_cleanup_irq_pool(mc_bus);
  653. +
  654. +clean_resource_pool:
  655. + fsl_mc_cleanup_all_resource_pools(mc_dev);
  656. + dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
  657. +
  658. +free_mc_portal:
  659. + fsl_mc_portal_free(mc_dev->mc_io);
  660. +
  661. +clean_msi_domain:
  662. + dev_set_msi_domain(&mc_dev->dev, NULL);
  663. +
  664. + return ret;
  665. +}
  666. +
  667. +static int vfio_fsl_mc_device_remove(struct device *dev, void *data)
  668. +{
  669. + struct fsl_mc_device *mc_dev;
  670. +
  671. + WARN_ON(dev == NULL);
  672. +
  673. + mc_dev = to_fsl_mc_device(dev);
  674. + if (WARN_ON(mc_dev == NULL))
  675. + return -ENODEV;
  676. +
  677. + fsl_mc_device_remove(mc_dev);
  678. + return 0;
  679. +}
  680. +
  681. +static void vfio_fsl_mc_cleanup_dprc(struct vfio_fsl_mc_device *vdev)
  682. +{
  683. + struct fsl_mc_device *mc_dev = vdev->mc_dev;
  684. + struct fsl_mc_bus *mc_bus;
  685. +
  686. + /* device must be DPRC */
  687. + if (strcmp(mc_dev->obj_desc.type, "dprc"))
  688. + return;
  689. +
  690. + device_for_each_child(&mc_dev->dev, NULL, vfio_fsl_mc_device_remove);
  691. +
  692. + mc_bus = to_fsl_mc_bus(mc_dev);
  693. + if (dev_get_msi_domain(&mc_dev->dev))
  694. + fsl_mc_cleanup_irq_pool(mc_bus);
  695. +
  696. + dev_set_msi_domain(&mc_dev->dev, NULL);
  697. +
  698. + fsl_mc_cleanup_all_resource_pools(mc_dev);
  699. + dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
  700. + fsl_mc_portal_free(mc_dev->mc_io);
  701. +}
  702. +
  703. +static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
  704. +{
  705. + struct iommu_group *group;
  706. + struct vfio_fsl_mc_device *vdev;
  707. + struct device *dev = &mc_dev->dev;
  708. + int ret;
  709. +
  710. + group = vfio_iommu_group_get(dev);
  711. + if (!group) {
  712. + dev_err(dev, "%s: VFIO: No IOMMU group\n", __func__);
  713. + return -EINVAL;
  714. + }
  715. +
  716. + vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
  717. + if (!vdev) {
  718. + vfio_iommu_group_put(group, dev);
  719. + return -ENOMEM;
  720. + }
  721. +
  722. + vdev->mc_dev = mc_dev;
  723. +
  724. + ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev);
  725. + if (ret) {
  726. + dev_err(dev, "%s: Failed to add to vfio group\n", __func__);
  727. + goto free_vfio_device;
  728. + }
  729. +
  730. + /* DPRC container scanned and its children bound with vfio driver */
  731. + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) {
  732. + ret = vfio_fsl_mc_initialize_dprc(vdev);
  733. + if (ret) {
  734. + vfio_del_group_dev(dev);
  735. + goto free_vfio_device;
  736. + }
  737. + } else {
  738. + struct fsl_mc_device *mc_bus_dev;
  739. +
  740. + /* Non-dprc devices share mc_io from the parent dprc */
  741. + mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent);
  742. + if (mc_bus_dev == NULL) {
  743. + ret = -ENODEV; vfio_del_group_dev(dev);
  744. + goto free_vfio_device;
  745. + }
  746. +
  747. + mc_dev->mc_io = mc_bus_dev->mc_io;
  748. +
  749. + /* Inherit parent MSI domain */
  750. + dev_set_msi_domain(&mc_dev->dev,
  751. + dev_get_msi_domain(mc_dev->dev.parent));
  752. + }
  753. + return 0;
  754. +
  755. +free_vfio_device:
  756. + kfree(vdev);
  757. + vfio_iommu_group_put(group, dev);
  758. + return ret;
  759. +}
  760. +
  761. +static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
  762. +{
  763. + struct vfio_fsl_mc_device *vdev;
  764. + struct device *dev = &mc_dev->dev;
  765. +
  766. + vdev = vfio_del_group_dev(dev);
  767. + if (!vdev)
  768. + return -EINVAL;
  769. +
  770. + if (strcmp(mc_dev->obj_desc.type, "dprc") == 0)
  771. + vfio_fsl_mc_cleanup_dprc(vdev);
  772. + else
  773. + dev_set_msi_domain(&mc_dev->dev, NULL);
  774. +
  775. + mc_dev->mc_io = NULL;
  776. +
  777. + vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
  778. + kfree(vdev);
  779. +
  780. + return 0;
  781. +}
  782. +
  783. +/*
  784. + * vfio-fsl_mc is a meta-driver, so use driver_override interface to
  785. + * bind a fsl_mc container with this driver and match_id_table is NULL.
  786. + */
  787. +static struct fsl_mc_driver vfio_fsl_mc_driver = {
  788. + .probe = vfio_fsl_mc_probe,
  789. + .remove = vfio_fsl_mc_remove,
  790. + .match_id_table = NULL,
  791. + .driver = {
  792. + .name = "vfio-fsl-mc",
  793. + .owner = THIS_MODULE,
  794. + },
  795. +};
  796. +
  797. +static int __init vfio_fsl_mc_driver_init(void)
  798. +{
  799. + return fsl_mc_driver_register(&vfio_fsl_mc_driver);
  800. +}
  801. +
  802. +static void __exit vfio_fsl_mc_driver_exit(void)
  803. +{
  804. + fsl_mc_driver_unregister(&vfio_fsl_mc_driver);
  805. +}
  806. +
  807. +module_init(vfio_fsl_mc_driver_init);
  808. +module_exit(vfio_fsl_mc_driver_exit);
  809. +
  810. +MODULE_VERSION(DRIVER_VERSION);
  811. +MODULE_LICENSE("GPL v2");
  812. +MODULE_AUTHOR(DRIVER_AUTHOR);
  813. +MODULE_DESCRIPTION(DRIVER_DESC);
  814. --- /dev/null
  815. +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
  816. @@ -0,0 +1,199 @@
  817. +/*
  818. + * Freescale Management Complex (MC) device passthrough using VFIO
  819. + *
  820. + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
  821. + * Author: Bharat Bhushan <[email protected]>
  822. + *
  823. + * This file is licensed under the terms of the GNU General Public
  824. + * License version 2. This program is licensed "as is" without any
  825. + * warranty of any kind, whether express or implied.
  826. + */
  827. +
  828. +#include <linux/vfio.h>
  829. +#include <linux/slab.h>
  830. +#include <linux/types.h>
  831. +#include <linux/eventfd.h>
  832. +#include <linux/msi.h>
  833. +
  834. +#include "../../staging/fsl-mc/include/mc.h"
  835. +#include "vfio_fsl_mc_private.h"
  836. +
  837. +static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg)
  838. +{
  839. + struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg;
  840. +
  841. + eventfd_signal(mc_irq->trigger, 1);
  842. + return IRQ_HANDLED;
  843. +}
  844. +
  845. +static int vfio_fsl_mc_irq_mask(struct vfio_fsl_mc_device *vdev,
  846. + unsigned int index, unsigned int start,
  847. + unsigned int count, uint32_t flags,
  848. + void *data)
  849. +{
  850. + return -EINVAL;
  851. +}
  852. +
  853. +static int vfio_fsl_mc_irq_unmask(struct vfio_fsl_mc_device *vdev,
  854. + unsigned int index, unsigned int start,
  855. + unsigned int count, uint32_t flags,
  856. + void *data)
  857. +{
  858. + return -EINVAL;
  859. +}
  860. +
  861. +static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
  862. + int index, int fd)
  863. +{
  864. + struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
  865. + struct eventfd_ctx *trigger;
  866. + int hwirq;
  867. + int ret;
  868. +
  869. + hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
  870. + if (irq->trigger) {
  871. + free_irq(hwirq, irq);
  872. + kfree(irq->name);
  873. + eventfd_ctx_put(irq->trigger);
  874. + irq->trigger = NULL;
  875. + }
  876. +
  877. + if (fd < 0) /* Disable only */
  878. + return 0;
  879. +
  880. + irq->name = kasprintf(GFP_KERNEL, "vfio-irq[%d](%s)",
  881. + hwirq, dev_name(&vdev->mc_dev->dev));
  882. + if (!irq->name)
  883. + return -ENOMEM;
  884. +
  885. + trigger = eventfd_ctx_fdget(fd);
  886. + if (IS_ERR(trigger)) {
  887. + kfree(irq->name);
  888. + return PTR_ERR(trigger);
  889. + }
  890. +
  891. + irq->trigger = trigger;
  892. +
  893. + ret = request_irq(hwirq, vfio_fsl_mc_irq_handler, 0,
  894. + irq->name, irq);
  895. + if (ret) {
  896. + kfree(irq->name);
  897. + eventfd_ctx_put(trigger);
  898. + irq->trigger = NULL;
  899. + return ret;
  900. + }
  901. +
  902. + return 0;
  903. +}
  904. +
  905. +int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev)
  906. +{
  907. + struct fsl_mc_device *mc_dev = vdev->mc_dev;
  908. + struct vfio_fsl_mc_irq *mc_irq;
  909. + int irq_count;
  910. + int ret, i;
  911. +
   912. + /* Device does not support any interrupts */
  913. + if (mc_dev->obj_desc.irq_count == 0)
  914. + return 0;
  915. +
  916. + irq_count = mc_dev->obj_desc.irq_count;
  917. +
  918. + mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL);
  919. + if (mc_irq == NULL)
  920. + return -ENOMEM;
  921. +
  922. + /* Allocate IRQs */
  923. + ret = fsl_mc_allocate_irqs(mc_dev);
  924. + if (ret) {
  925. + kfree(mc_irq);
  926. + return ret;
  927. + }
  928. +
  929. + for (i = 0; i < irq_count; i++) {
  930. + mc_irq[i].count = 1;
  931. + mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD;
  932. + }
  933. +
  934. + vdev->mc_irqs = mc_irq;
  935. +
  936. + return 0;
  937. +}
  938. +
  939. +/* Free All IRQs for the given MC object */
  940. +void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev)
  941. +{
  942. + struct fsl_mc_device *mc_dev = vdev->mc_dev;
  943. + int irq_count = mc_dev->obj_desc.irq_count;
  944. + int i;
  945. +
   946. + /* Device does not support any interrupts */
  947. + if (mc_dev->obj_desc.irq_count == 0)
  948. + return;
  949. +
  950. + for (i = 0; i < irq_count; i++)
  951. + vfio_set_trigger(vdev, i, -1);
  952. +
  953. + fsl_mc_free_irqs(mc_dev);
  954. + kfree(vdev->mc_irqs);
  955. +}
  956. +
  957. +static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
  958. + unsigned int index, unsigned int start,
  959. + unsigned int count, uint32_t flags,
  960. + void *data)
  961. +{
  962. + struct vfio_fsl_mc_irq *irq = &vdev->mc_irqs[index];
  963. + int hwirq;
  964. +
  965. + if (!count && (flags & VFIO_IRQ_SET_DATA_NONE))
  966. + return vfio_set_trigger(vdev, index, -1);
  967. +
  968. + if (start != 0 || count != 1)
  969. + return -EINVAL;
  970. +
  971. + if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
  972. + int32_t fd = *(int32_t *)data;
  973. +
  974. + return vfio_set_trigger(vdev, index, fd);
  975. + }
  976. +
  977. + hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
  978. +
  979. + if (flags & VFIO_IRQ_SET_DATA_NONE) {
  980. + vfio_fsl_mc_irq_handler(hwirq, irq);
  981. +
  982. + } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
  983. + uint8_t trigger = *(uint8_t *)data;
  984. +
  985. + if (trigger)
  986. + vfio_fsl_mc_irq_handler(hwirq, irq);
  987. + }
  988. +
  989. + return 0;
  990. +}
  991. +
  992. +int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
  993. + uint32_t flags, unsigned int index,
  994. + unsigned int start, unsigned int count,
  995. + void *data)
  996. +{
  997. + int ret = -ENOTTY;
  998. +
  999. + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
  1000. + case VFIO_IRQ_SET_ACTION_MASK:
  1001. + ret = vfio_fsl_mc_irq_mask(vdev, index, start, count,
  1002. + flags, data);
  1003. + break;
  1004. + case VFIO_IRQ_SET_ACTION_UNMASK:
  1005. + ret = vfio_fsl_mc_irq_unmask(vdev, index, start, count,
  1006. + flags, data);
  1007. + break;
  1008. + case VFIO_IRQ_SET_ACTION_TRIGGER:
  1009. + ret = vfio_fsl_mc_set_irq_trigger(vdev, index, start,
  1010. + count, flags, data);
  1011. + break;
  1012. + }
  1013. +
  1014. + return ret;
  1015. +}
  1016. --- /dev/null
  1017. +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h
  1018. @@ -0,0 +1,55 @@
  1019. +/*
  1020. + * Freescale Management Complex VFIO private declarations
  1021. + *
  1022. + * Copyright (C) 2013-2016 Freescale Semiconductor, Inc.
  1023. + * Copyright 2016 NXP
  1024. + * Author: Bharat Bhushan <[email protected]>
  1025. + *
  1026. + * This file is licensed under the terms of the GNU General Public
  1027. + * License version 2. This program is licensed "as is" without any
  1028. + * warranty of any kind, whether express or implied.
  1029. + */
  1030. +
  1031. +#ifndef VFIO_FSL_MC_PRIVATE_H
  1032. +#define VFIO_FSL_MC_PRIVATE_H
  1033. +
  1034. +#define VFIO_FSL_MC_OFFSET_SHIFT 40
  1035. +#define VFIO_FSL_MC_OFFSET_MASK (((u64)(1) << VFIO_FSL_MC_OFFSET_SHIFT) - 1)
  1036. +
  1037. +#define VFIO_FSL_MC_OFFSET_TO_INDEX(off) (off >> VFIO_FSL_MC_OFFSET_SHIFT)
  1038. +
  1039. +#define VFIO_FSL_MC_INDEX_TO_OFFSET(index) \
  1040. + ((u64)(index) << VFIO_FSL_MC_OFFSET_SHIFT)
  1041. +
  1042. +struct vfio_fsl_mc_irq {
  1043. + u32 flags;
  1044. + u32 count;
  1045. + struct eventfd_ctx *trigger;
  1046. + char *name;
  1047. +};
  1048. +
  1049. +struct vfio_fsl_mc_region {
  1050. + u32 flags;
  1051. +#define VFIO_FSL_MC_REGION_TYPE_MMIO 1
  1052. +#define VFIO_FSL_MC_REGION_TYPE_CACHEABLE 2
  1053. + u32 type;
  1054. + u64 addr;
  1055. + resource_size_t size;
  1056. + void __iomem *ioaddr;
  1057. +};
  1058. +
  1059. +struct vfio_fsl_mc_device {
  1060. + struct fsl_mc_device *mc_dev;
  1061. + int refcnt;
  1062. + u32 num_regions;
  1063. + struct vfio_fsl_mc_region *regions;
  1064. + struct vfio_fsl_mc_irq *mc_irqs;
  1065. +};
  1066. +
  1067. +int vfio_fsl_mc_irqs_init(struct vfio_fsl_mc_device *vdev);
  1068. +void vfio_fsl_mc_irqs_cleanup(struct vfio_fsl_mc_device *vdev);
  1069. +int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev,
  1070. + uint32_t flags, unsigned int index,
  1071. + unsigned int start, unsigned int count,
  1072. + void *data);
   1073. +#endif /* VFIO_FSL_MC_PRIVATE_H */
  1074. --- a/drivers/vfio/vfio_iommu_type1.c
  1075. +++ b/drivers/vfio/vfio_iommu_type1.c
  1076. @@ -36,6 +36,8 @@
  1077. #include <linux/uaccess.h>
  1078. #include <linux/vfio.h>
  1079. #include <linux/workqueue.h>
  1080. +#include <linux/dma-iommu.h>
  1081. +#include <linux/irqdomain.h>
  1082. #define DRIVER_VERSION "0.2"
  1083. #define DRIVER_AUTHOR "Alex Williamson <[email protected]>"
  1084. @@ -720,6 +722,27 @@ static void vfio_test_domain_fgsp(struct
  1085. __free_pages(pages, order);
  1086. }
  1087. +static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base)
  1088. +{
  1089. + struct list_head group_resv_regions;
  1090. + struct iommu_resv_region *region, *next;
  1091. + bool ret = false;
  1092. +
  1093. + INIT_LIST_HEAD(&group_resv_regions);
  1094. + iommu_get_group_resv_regions(group, &group_resv_regions);
  1095. + list_for_each_entry(region, &group_resv_regions, list) {
  1096. + if (region->type == IOMMU_RESV_SW_MSI) {
  1097. + *base = region->start;
  1098. + ret = true;
  1099. + goto out;
  1100. + }
  1101. + }
  1102. +out:
  1103. + list_for_each_entry_safe(region, next, &group_resv_regions, list)
  1104. + kfree(region);
  1105. + return ret;
  1106. +}
  1107. +
  1108. static int vfio_iommu_type1_attach_group(void *iommu_data,
  1109. struct iommu_group *iommu_group)
  1110. {
  1111. @@ -728,6 +751,8 @@ static int vfio_iommu_type1_attach_group
  1112. struct vfio_domain *domain, *d;
  1113. struct bus_type *bus = NULL;
  1114. int ret;
  1115. + bool resv_msi, msi_remap;
  1116. + phys_addr_t resv_msi_base;
  1117. mutex_lock(&iommu->lock);
  1118. @@ -774,11 +799,15 @@ static int vfio_iommu_type1_attach_group
  1119. if (ret)
  1120. goto out_domain;
  1121. + resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base);
  1122. +
  1123. INIT_LIST_HEAD(&domain->group_list);
  1124. list_add(&group->next, &domain->group_list);
  1125. - if (!allow_unsafe_interrupts &&
  1126. - !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
  1127. + msi_remap = resv_msi ? irq_domain_check_msi_remap() :
  1128. + iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
  1129. +
  1130. + if (!allow_unsafe_interrupts && !msi_remap) {
  1131. pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
  1132. __func__);
  1133. ret = -EPERM;
  1134. @@ -820,6 +849,12 @@ static int vfio_iommu_type1_attach_group
  1135. if (ret)
  1136. goto out_detach;
  1137. + if (resv_msi) {
  1138. + ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
  1139. + if (ret)
  1140. + goto out_detach;
  1141. + }
  1142. +
  1143. list_add(&domain->next, &iommu->domain_list);
  1144. mutex_unlock(&iommu->lock);
  1145. --- a/include/uapi/linux/vfio.h
  1146. +++ b/include/uapi/linux/vfio.h
  1147. @@ -198,6 +198,7 @@ struct vfio_device_info {
  1148. #define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
  1149. #define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
  1150. #define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
  1151. +#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 5) /* vfio-fsl-mc device */
  1152. __u32 num_regions; /* Max region index + 1 */
  1153. __u32 num_irqs; /* Max IRQ index + 1 */
  1154. };