702-pci-support-layerscape.patch

From b2ee6e29bad31facbbf5ac1ce98235ac163d9fa9 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <[email protected]>
Date: Thu, 5 Jul 2018 16:26:47 +0800
Subject: [PATCH 08/32] pci: support layerscape
This is an integrated patch for layerscape pcie support.
Signed-off-by: Po Liu <[email protected]>
Signed-off-by: Liu Gang <[email protected]>
Signed-off-by: Minghuan Lian <[email protected]>
Signed-off-by: hongbo.wang <[email protected]>
Signed-off-by: Bjorn Helgaas <[email protected]>
Signed-off-by: Hou Zhiqiang <[email protected]>
Signed-off-by: Mingkai Hu <[email protected]>
Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
---
drivers/irqchip/irq-ls-scfg-msi.c | 257 ++++++-
drivers/pci/host/Makefile | 2 +-
drivers/pci/host/pci-layerscape-ep-debugfs.c | 758 +++++++++++++++++++
drivers/pci/host/pci-layerscape-ep.c | 309 ++++++++
drivers/pci/host/pci-layerscape-ep.h | 115 +++
drivers/pci/host/pci-layerscape.c | 48 +-
drivers/pci/host/pcie-designware.c | 6 +
drivers/pci/host/pcie-designware.h | 1 +
drivers/pci/pci.c | 2 +-
drivers/pci/pcie/portdrv_core.c | 181 ++---
drivers/pci/quirks.c | 15 +
include/linux/pci.h | 1 +
12 files changed, 1546 insertions(+), 149 deletions(-)
create mode 100644 drivers/pci/host/pci-layerscape-ep-debugfs.c
create mode 100644 drivers/pci/host/pci-layerscape-ep.c
create mode 100644 drivers/pci/host/pci-layerscape-ep.h
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -17,13 +17,32 @@
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>
-#define MSI_MAX_IRQS 32
-#define MSI_IBS_SHIFT 3
-#define MSIR 4
+#define MSI_IRQS_PER_MSIR 32
+#define MSI_MSIR_OFFSET 4
+
+#define MSI_LS1043V1_1_IRQS_PER_MSIR 8
+#define MSI_LS1043V1_1_MSIR_OFFSET 0x10
+
+struct ls_scfg_msi_cfg {
+ u32 ibs_shift; /* Shift of interrupt bit select */
+ u32 msir_irqs; /* The irq number per MSIR */
+ u32 msir_base; /* The base address of MSIR */
+};
+
+struct ls_scfg_msir {
+ struct ls_scfg_msi *msi_data;
+ unsigned int index;
+ unsigned int gic_irq;
+ unsigned int bit_start;
+ unsigned int bit_end;
+ unsigned int srs; /* Shared interrupt register select */
+ void __iomem *reg;
+};
struct ls_scfg_msi {
spinlock_t lock;
@@ -32,8 +51,11 @@ struct ls_scfg_msi {
struct irq_domain *msi_domain;
void __iomem *regs;
phys_addr_t msiir_addr;
- int irq;
- DECLARE_BITMAP(used, MSI_MAX_IRQS);
+ struct ls_scfg_msi_cfg *cfg;
+ u32 msir_num;
+ struct ls_scfg_msir *msir;
+ u32 irqs_num;
+ unsigned long *used;
};
static struct irq_chip ls_scfg_msi_irq_chip = {
@@ -49,19 +71,56 @@ static struct msi_domain_info ls_scfg_ms
.chip = &ls_scfg_msi_irq_chip,
};
+static int msi_affinity_flag = 1;
+
+static int __init early_parse_ls_scfg_msi(char *p)
+{
+ if (p && strncmp(p, "no-affinity", 11) == 0)
+ msi_affinity_flag = 0;
+ else
+ msi_affinity_flag = 1;
+
+ return 0;
+}
+early_param("lsmsi", early_parse_ls_scfg_msi);
+
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);
msg->address_hi = upper_32_bits(msi_data->msiir_addr);
msg->address_lo = lower_32_bits(msi_data->msiir_addr);
- msg->data = data->hwirq << MSI_IBS_SHIFT;
+ msg->data = data->hwirq;
+
+ if (msi_affinity_flag)
+ msg->data |= cpumask_first(data->common->affinity);
}
static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
const struct cpumask *mask, bool force)
{
- return -EINVAL;
+ struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
+ u32 cpu;
+
+ if (!msi_affinity_flag)
+ return -EINVAL;
+
+ if (!force)
+ cpu = cpumask_any_and(mask, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask);
+
+ if (cpu >= msi_data->msir_num)
+ return -EINVAL;
+
+ if (msi_data->msir[cpu].gic_irq <= 0) {
+ pr_warn("cannot bind the irq to cpu%d\n", cpu);
+ return -EINVAL;
+ }
+
+ cpumask_copy(irq_data->common->affinity, mask);
+
+ return IRQ_SET_MASK_OK;
}
static struct irq_chip ls_scfg_msi_parent_chip = {
@@ -81,8 +140,8 @@ static int ls_scfg_msi_domain_irq_alloc(
WARN_ON(nr_irqs != 1);
spin_lock(&msi_data->lock);
- pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
- if (pos < MSI_MAX_IRQS)
+ pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
+ if (pos < msi_data->irqs_num)
__set_bit(pos, msi_data->used);
else
err = -ENOSPC;
@@ -106,7 +165,7 @@ static void ls_scfg_msi_domain_irq_free(
int pos;
pos = d->hwirq;
- if (pos < 0 || pos >= MSI_MAX_IRQS) {
+ if (pos < 0 || pos >= msi_data->irqs_num) {
pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
return;
}
@@ -123,15 +182,22 @@ static const struct irq_domain_ops ls_sc
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
- struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
+ struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
+ struct ls_scfg_msi *msi_data = msir->msi_data;
unsigned long val;
- int pos, virq;
+ int pos, size, virq, hwirq;
chained_irq_enter(irq_desc_get_chip(desc), desc);
- val = ioread32be(msi_data->regs + MSIR);
- for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
- virq = irq_find_mapping(msi_data->parent, (31 - pos));
+ val = ioread32be(msir->reg);
+
+ pos = msir->bit_start;
+ size = msir->bit_end + 1;
+
+ for_each_set_bit_from(pos, &val, size) {
+ hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
+ msir->srs;
+ virq = irq_find_mapping(msi_data->parent, hwirq);
if (virq)
generic_handle_irq(virq);
}
@@ -143,7 +209,7 @@ static int ls_scfg_msi_domains_init(stru
{
/* Initialize MSI domain parent */
msi_data->parent = irq_domain_add_linear(NULL,
- MSI_MAX_IRQS,
+ msi_data->irqs_num,
&ls_scfg_msi_domain_ops,
msi_data);
if (!msi_data->parent) {
@@ -164,16 +230,118 @@ static int ls_scfg_msi_domains_init(stru
return 0;
}
+static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
+{
+ struct ls_scfg_msir *msir;
+ int virq, i, hwirq;
+
+ virq = platform_get_irq(msi_data->pdev, index);
+ if (virq <= 0)
+ return -ENODEV;
+
+ msir = &msi_data->msir[index];
+ msir->index = index;
+ msir->msi_data = msi_data;
+ msir->gic_irq = virq;
+ msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;
+
+ if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
+ msir->bit_start = 32 - ((msir->index + 1) *
+ MSI_LS1043V1_1_IRQS_PER_MSIR);
+ msir->bit_end = msir->bit_start +
+ MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
+ } else {
+ msir->bit_start = 0;
+ msir->bit_end = msi_data->cfg->msir_irqs - 1;
+ }
+
+ irq_set_chained_handler_and_data(msir->gic_irq,
+ ls_scfg_msi_irq_handler,
+ msir);
+
+ if (msi_affinity_flag) {
+ /* Associate MSIR interrupt to the cpu */
+ irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
+ msir->srs = 0; /* This value is determined by the CPU */
+ } else
+ msir->srs = index;
+
+ /* Release the hwirqs corresponding to this MSIR */
+ if (!msi_affinity_flag || msir->index == 0) {
+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
+ hwirq = i << msi_data->cfg->ibs_shift | msir->index;
+ bitmap_clear(msi_data->used, hwirq, 1);
+ }
+ }
+
+ return 0;
+}
+
+static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
+{
+ struct ls_scfg_msi *msi_data = msir->msi_data;
+ int i, hwirq;
+
+ if (msir->gic_irq > 0)
+ irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);
+
+ for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
+ hwirq = i << msi_data->cfg->ibs_shift | msir->index;
+ bitmap_set(msi_data->used, hwirq, 1);
+ }
+
+ return 0;
+}
+
+static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
+ .ibs_shift = 3,
+ .msir_irqs = MSI_IRQS_PER_MSIR,
+ .msir_base = MSI_MSIR_OFFSET,
+};
+
+static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
+ .ibs_shift = 2,
+ .msir_irqs = MSI_IRQS_PER_MSIR,
+ .msir_base = MSI_MSIR_OFFSET,
+};
+
+static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
+ .ibs_shift = 2,
+ .msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
+ .msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
+};
+
+static const struct of_device_id ls_scfg_msi_id[] = {
+ /* The following two misspelled compatibles are obsolete */
+ { .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
+ { .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},
+
+ { .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
+ { .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
+ { .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
+ { .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
+ { .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);
+
static int ls_scfg_msi_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct ls_scfg_msi *msi_data;
struct resource *res;
- int ret;
+ int i, ret;
+
+ match = of_match_device(ls_scfg_msi_id, &pdev->dev);
+ if (!match)
+ return -ENODEV;
msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
if (!msi_data)
return -ENOMEM;
+ msi_data->cfg = (struct ls_scfg_msi_cfg *) match->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(msi_data->regs)) {
@@ -182,23 +350,48 @@ static int ls_scfg_msi_probe(struct plat
}
msi_data->msiir_addr = res->start;
- msi_data->irq = platform_get_irq(pdev, 0);
- if (msi_data->irq <= 0) {
- dev_err(&pdev->dev, "failed to get MSI irq\n");
- return -ENODEV;
- }
-
msi_data->pdev = pdev;
spin_lock_init(&msi_data->lock);
+ msi_data->irqs_num = MSI_IRQS_PER_MSIR *
+ (1 << msi_data->cfg->ibs_shift);
+ msi_data->used = devm_kcalloc(&pdev->dev,
+ BITS_TO_LONGS(msi_data->irqs_num),
+ sizeof(*msi_data->used),
+ GFP_KERNEL);
+ if (!msi_data->used)
+ return -ENOMEM;
+ /*
+ * Reserve all the hwirqs
+ * The available hwirqs will be released in ls1_msi_setup_hwirq()
+ */
+ bitmap_set(msi_data->used, 0, msi_data->irqs_num);
+
+ msi_data->msir_num = of_irq_count(pdev->dev.of_node);
+
+ if (msi_affinity_flag) {
+ u32 cpu_num;
+
+ cpu_num = num_possible_cpus();
+ if (msi_data->msir_num >= cpu_num)
+ msi_data->msir_num = cpu_num;
+ else
+ msi_affinity_flag = 0;
+ }
+
+ msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
+ sizeof(*msi_data->msir),
+ GFP_KERNEL);
+ if (!msi_data->msir)
+ return -ENOMEM;
+
+ for (i = 0; i < msi_data->msir_num; i++)
+ ls_scfg_msi_setup_hwirq(msi_data, i);
+
ret = ls_scfg_msi_domains_init(msi_data);
if (ret)
return ret;
- irq_set_chained_handler_and_data(msi_data->irq,
- ls_scfg_msi_irq_handler,
- msi_data);
-
platform_set_drvdata(pdev, msi_data);
return 0;
@@ -207,8 +400,10 @@ static int ls_scfg_msi_probe(struct plat
static int ls_scfg_msi_remove(struct platform_device *pdev)
{
struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
+ int i;
- irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);
+ for (i = 0; i < msi_data->msir_num; i++)
+ ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);
irq_domain_remove(msi_data->msi_domain);
irq_domain_remove(msi_data->parent);
@@ -218,12 +413,6 @@ static int ls_scfg_msi_remove(struct pla
return 0;
}
-static const struct of_device_id ls_scfg_msi_id[] = {
- { .compatible = "fsl,1s1021a-msi", },
- { .compatible = "fsl,1s1043a-msi", },
- {},
-};
-
static struct platform_driver ls_scfg_msi_driver = {
.driver = {
.name = "ls-scfg-msi",
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx
obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o
obj-$(CONFIG_PCI_XGENE) += pci-xgene.o
obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o
-obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o
+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o pci-layerscape-ep.o pci-layerscape-ep-debugfs.o
obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o
obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o
obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o
--- /dev/null
+++ b/drivers/pci/host/pci-layerscape-ep-debugfs.c
@@ -0,0 +1,758 @@
+/*
+ * PCIe Endpoint driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2015 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/freezer.h>
+
+#include <linux/completion.h>
+
+#include "pci-layerscape-ep.h"
+
+#define PCIE_ATU_INDEX3 (0x3 << 0)
+#define PCIE_ATU_INDEX2 (0x2 << 0)
+#define PCIE_ATU_INDEX1 (0x1 << 0)
+#define PCIE_ATU_INDEX0 (0x0 << 0)
+
+#define PCIE_BAR0_SIZE (4 * 1024) /* 4K */
+#define PCIE_BAR1_SIZE (8 * 1024) /* 8K for MSIX */
+#define PCIE_BAR2_SIZE (4 * 1024) /* 4K */
+#define PCIE_BAR4_SIZE (1 * 1024 * 1024) /* 1M */
+#define PCIE_MSI_OB_SIZE (4 * 1024) /* 4K */
+
+#define PCIE_MSI_MSG_ADDR_OFF 0x54
+#define PCIE_MSI_MSG_DATA_OFF 0x5c
+
+enum test_type {
+ TEST_TYPE_DMA,
+ TEST_TYPE_MEMCPY
+};
+
+enum test_dirt {
+ TEST_DIRT_READ,
+ TEST_DIRT_WRITE
+};
+
+enum test_status {
+ TEST_IDLE,
+ TEST_BUSY
+};
+
+struct ls_ep_test {
+ struct ls_ep_dev *ep;
+ void __iomem *cfg;
+ void __iomem *buf;
+ void __iomem *out;
+ void __iomem *msi;
+ dma_addr_t cfg_addr;
+ dma_addr_t buf_addr;
+ dma_addr_t out_addr;
+ dma_addr_t bus_addr;
+ dma_addr_t msi_addr;
+ u64 msi_msg_addr;
+ u16 msi_msg_data;
+ struct task_struct *thread;
+ spinlock_t lock;
+ struct completion done;
+ u32 len;
+ int loop;
+ char data;
+ enum test_dirt dirt;
+ enum test_type type;
+ enum test_status status;
+ u64 result; /* Mbps */
+ char cmd[256];
+};
+
+static int ls_pcie_ep_trigger_msi(struct ls_ep_test *test)
+{
+ if (!test->msi)
+ return -EINVAL;
+
+ iowrite32(test->msi_msg_data, test->msi);
+
+ return 0;
+}
+
+static int ls_pcie_ep_test_try_run(struct ls_ep_test *test)
+{
+ int ret;
+
+ spin_lock(&test->lock);
+ if (test->status == TEST_IDLE) {
+ test->status = TEST_BUSY;
+ ret = 0;
+ } else
+ ret = -EBUSY;
+ spin_unlock(&test->lock);
+
+ return ret;
+}
+
+static void ls_pcie_ep_test_done(struct ls_ep_test *test)
+{
+ spin_lock(&test->lock);
+ test->status = TEST_IDLE;
+ spin_unlock(&test->lock);
+}
+
+static void ls_pcie_ep_test_dma_cb(void *arg)
+{
+ struct ls_ep_test *test = arg;
+
+ complete(&test->done);
+}
+
+static int ls_pcie_ep_test_dma(struct ls_ep_test *test)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct dma_device *dma_dev;
+ dma_addr_t src, dst;
+ enum dma_data_direction direction;
+ enum dma_ctrl_flags dma_flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+ struct timespec start, end, period;
+ int i = 0;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_MEMCPY, mask);
+
+ chan = dma_request_channel(mask, NULL, test);
+ if (!chan) {
+ pr_err("failed to request dma channel\n");
+ return -EINVAL;
+ }
+
+ memset(test->buf, test->data, test->len);
+
+ if (test->dirt == TEST_DIRT_WRITE) {
+ src = test->buf_addr;
+ dst = test->out_addr;
+ direction = DMA_TO_DEVICE;
+ } else {
+ src = test->out_addr;
+ dst = test->buf_addr;
+ direction = DMA_FROM_DEVICE;
+ }
+
+ dma_dev = chan->device;
+ dma_flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+ dma_sync_single_for_device(&test->ep->dev, test->buf_addr,
+ test->len, direction);
+
+ set_freezable();
+
+ getrawmonotonic(&start);
+ while (!kthread_should_stop() && (i < test->loop)) {
+ struct dma_async_tx_descriptor *dma_desc;
+ dma_cookie_t dma_cookie = {0};
+ unsigned long tmo;
+ int status;
+
+ init_completion(&test->done);
+
+ dma_desc = dma_dev->device_prep_dma_memcpy(chan,
+ dst, src,
+ test->len,
+ dma_flags);
+ if (!dma_desc) {
+ pr_err("DMA desc constr failed...\n");
+ goto _err;
+ }
+
+ dma_desc->callback = ls_pcie_ep_test_dma_cb;
+ dma_desc->callback_param = test;
+ dma_cookie = dmaengine_submit(dma_desc);
+
+ if (dma_submit_error(dma_cookie)) {
+ pr_err("DMA submit error....\n");
+ goto _err;
+ }
+
+ /* Trigger the transaction */
+ dma_async_issue_pending(chan);
+
+ tmo = wait_for_completion_timeout(&test->done,
+ msecs_to_jiffies(5 * test->len));
+ if (tmo == 0) {
+ pr_err("Self-test copy timed out, disabling\n");
+ goto _err;
+ }
+
+ status = dma_async_is_tx_complete(chan, dma_cookie,
+ NULL, NULL);
+ if (status != DMA_COMPLETE) {
+ pr_err("got completion callback, but status is %s\n",
+ status == DMA_ERROR ? "error" : "in progress");
+ goto _err;
+ }
+
+ i++;
+ }
+
+ getrawmonotonic(&end);
+ period = timespec_sub(end, start);
+ test->result = test->len * 8ULL * i * 1000;
+ do_div(test->result, period.tv_sec * 1000 * 1000 * 1000 + period.tv_nsec);
+ dma_release_channel(chan);
+
+ return 0;
+
+_err:
+ dma_release_channel(chan);
+ test->result = 0;
+ return -EINVAL;
+}
+
+static int ls_pcie_ep_test_cpy(struct ls_ep_test *test)
+{
+ void *dst, *src;
+ struct timespec start, end, period;
+ int i = 0;
+
+ memset(test->buf, test->data, test->len);
+
+ if (test->dirt == TEST_DIRT_WRITE) {
+ dst = test->out;
+ src = test->buf;
+ } else {
+ dst = test->buf;
+ src = test->out;
+ }
+
+ getrawmonotonic(&start);
+ while (!kthread_should_stop() && i < test->loop) {
+ memcpy(dst, src, test->len);
+ i++;
+ }
+ getrawmonotonic(&end);
+
+ period = timespec_sub(end, start);
+ test->result = test->len * 8ULL * i * 1000;
+ do_div(test->result, period.tv_sec * 1000 * 1000 * 1000 + period.tv_nsec);
+
+ return 0;
+}
+
+int ls_pcie_ep_test_thread(void *arg)
+{
+ int ret;
+
+ struct ls_ep_test *test = arg;
+
+ if (test->type == TEST_TYPE_DMA)
+ ret = ls_pcie_ep_test_dma(test);
+ else
+ ret = ls_pcie_ep_test_cpy(test);
+
+ if (ret) {
+ pr_err("\n%s \ttest failed\n",
+ test->cmd);
+ test->result = 0;
+ } else
+ pr_err("\n%s \tthroughput:%lluMbps\n",
+ test->cmd, test->result);
+
+ ls_pcie_ep_test_done(test);
+
+ ls_pcie_ep_trigger_msi(test);
+
+ do_exit(0);
+}
+
+static int ls_pcie_ep_free_test(struct ls_ep_dev *ep)
+{
+ struct ls_ep_test *test = ep->driver_data;
+
+ if (!test)
+ return 0;
+
+ if (test->status == TEST_BUSY) {
+ kthread_stop(test->thread);
+ dev_info(&ep->dev,
  685. + "test is running please wait and run again\n");
+ return -EBUSY;
+ }
+
+ if (test->buf)
+ free_pages((unsigned long)test->buf,
+ get_order(PCIE_BAR4_SIZE));
+
+ if (test->cfg)
+ free_pages((unsigned long)test->cfg,
+ get_order(PCIE_BAR2_SIZE));
+
+ if (test->out)
+ iounmap(test->out);
+
+ kfree(test);
+ ep->driver_data = NULL;
+
+ return 0;
+}
+
+static int ls_pcie_ep_init_test(struct ls_ep_dev *ep, u64 bus_addr)
+{
+ struct ls_pcie *pcie = ep->pcie;
+ struct ls_ep_test *test = ep->driver_data;
+ int err;
+
+ if (test) {
+ dev_info(&ep->dev,
  714. + "Please use 'free' to remove the exiting test\n");
+ return -EBUSY;
+ }
+
+ test = kzalloc(sizeof(*test), GFP_KERNEL);
+ if (!test)
+ return -ENOMEM;
+ ep->driver_data = test;
+ test->ep = ep;
+ spin_lock_init(&test->lock);
+ test->status = TEST_IDLE;
+
+ test->buf = dma_alloc_coherent(pcie->dev, get_order(PCIE_BAR4_SIZE),
+ &test->buf_addr,
+ GFP_KERNEL);
+ if (!test->buf) {
+ dev_info(&ep->dev, "failed to get mem for bar4\n");
+ err = -ENOMEM;
+ goto _err;
+ }
+
+ test->cfg = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(PCIE_BAR2_SIZE));
+ if (!test->cfg) {
+ dev_info(&ep->dev, "failed to get mem for bar2\n");
+ err = -ENOMEM;
+ goto _err;
+ }
+ test->cfg_addr = virt_to_phys(test->cfg);
+
+ test->out_addr = pcie->out_base;
+ test->out = ioremap(test->out_addr, PCIE_BAR4_SIZE);
+ if (!test->out) {
+ dev_info(&ep->dev, "failed to map out\n");
+ err = -ENOMEM;
+ goto _err;
+ }
+
+ test->bus_addr = bus_addr;
+
+ test->msi_addr = test->out_addr + PCIE_BAR4_SIZE;
+ test->msi = ioremap(test->msi_addr, PCIE_MSI_OB_SIZE);
+ if (!test->msi)
+ dev_info(&ep->dev, "failed to map MSI outbound region\n");
+
+ test->msi_msg_addr = ioread32(pcie->dbi + PCIE_MSI_MSG_ADDR_OFF) |
+ (((u64)ioread32(pcie->dbi + PCIE_MSI_MSG_ADDR_OFF + 4)) << 32);
+ test->msi_msg_data = ioread16(pcie->dbi + PCIE_MSI_MSG_DATA_OFF);
+
+ ls_pcie_ep_dev_cfg_enable(ep);
+
+ /* outbound iATU for memory */
+ ls_pcie_iatu_outbound_set(pcie, 0, PCIE_ATU_TYPE_MEM,
+ test->out_addr, bus_addr, PCIE_BAR4_SIZE);
+ /* outbound iATU for MSI */
+ ls_pcie_iatu_outbound_set(pcie, 1, PCIE_ATU_TYPE_MEM,
+ test->msi_addr, test->msi_msg_addr,
+ PCIE_MSI_OB_SIZE);
+
+ /* ATU 0 : INBOUND : map BAR0 */
+ ls_pcie_iatu_inbound_set(pcie, 0, 0, test->cfg_addr);
+ /* ATU 2 : INBOUND : map BAR2 */
+ ls_pcie_iatu_inbound_set(pcie, 2, 2, test->cfg_addr);
+ /* ATU 3 : INBOUND : map BAR4 */
+ ls_pcie_iatu_inbound_set(pcie, 3, 4, test->buf_addr);
+
+ return 0;
+
+_err:
+ ls_pcie_ep_free_test(ep);
+ return err;
+}
+
+static int ls_pcie_ep_start_test(struct ls_ep_dev *ep, char *cmd)
+{
+ struct ls_ep_test *test = ep->driver_data;
+ enum test_type type;
+ enum test_dirt dirt;
+ u32 cnt, len, loop;
+ unsigned int data;
+ char dirt_str[2];
+ int ret = 0;
+
+ if (strncmp(cmd, "dma", 3) == 0)
+ type = TEST_TYPE_DMA;
+ else
+ type = TEST_TYPE_MEMCPY;
+
+ cnt = sscanf(&cmd[4], "%1s %u %u %x", dirt_str, &len, &loop, &data);
+ if (cnt != 4) {
+ dev_info(&ep->dev, "format error %s", cmd);
+ dev_info(&ep->dev, "dma/cpy <r/w> <packet_size> <loop> <data>\n");
+ return -EINVAL;
+ }
+
+ if (strncmp(dirt_str, "r", 1) == 0)
+ dirt = TEST_DIRT_READ;
+ else
+ dirt = TEST_DIRT_WRITE;
+
+ if (len > PCIE_BAR4_SIZE) {
+ dev_err(&ep->dev, "max len is %d", PCIE_BAR4_SIZE);
+ return -EINVAL;
+ }
+
+ if (!test) {
+ dev_err(&ep->dev, "Please first run init command\n");
+ return -EINVAL;
+ }
+
+ if (ls_pcie_ep_test_try_run(test)) {
+ dev_err(&ep->dev, "There is already a test running\n");
+ return -EINVAL;
+ }
+
+ test->len = len;
+ test->loop = loop;
+ test->type = type;
+ test->data = (char)data;
+ test->dirt = dirt;
+ strcpy(test->cmd, cmd);
+ test->thread = kthread_run(ls_pcie_ep_test_thread, test,
+ "pcie ep test");
+ if (IS_ERR(test->thread)) {
+ dev_err(&ep->dev, "fork failed for pcie ep test\n");
+ ls_pcie_ep_test_done(test);
+ ret = PTR_ERR(test->thread);
+ }
+
+ return ret;
+}
+
+
+/**
+ * ls_pcie_ep_dbg_regs_read - read regs data
+ * @filp: the opened file
+ * @buffer: where to write the data for the user to read
+ * @count: the size of the user's buffer
+ * @ppos: file position offset
+ **/
+static ssize_t ls_pcie_ep_dbg_regs_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ls_ep_dev *ep = filp->private_data;
+ struct ls_pcie *pcie = ep->pcie;
+ char *buf;
+ int desc = 0, i, len;
+
+ buf = kmalloc(4 * 1024, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ls_pcie_ep_dev_cfg_enable(ep);
+
+ desc += sprintf(buf + desc, "%s", "reg info:");
+ for (i = 0; i < 0x200; i += 4) {
+ if (i % 16 == 0)
+ desc += sprintf(buf + desc, "\n%08x:", i);
+ desc += sprintf(buf + desc, " %08x", readl(pcie->dbi + i));
+ }
+
+ desc += sprintf(buf + desc, "\n%s", "outbound iATU info:\n");
+ for (i = 0; i < 6; i++) {
+ writel(PCIE_ATU_REGION_OUTBOUND | i,
+ pcie->dbi + PCIE_ATU_VIEWPORT);
+ desc += sprintf(buf + desc, "iATU%d", i);
+ desc += sprintf(buf + desc, "\tLOWER PHYS 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
+ desc += sprintf(buf + desc, "\tUPPER PHYS 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
+ desc += sprintf(buf + desc, "\tLOWER BUS 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
+ desc += sprintf(buf + desc, "\tUPPER BUS 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
+ desc += sprintf(buf + desc, "\tLIMIT 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_LIMIT));
+ desc += sprintf(buf + desc, "\tCR1 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_CR1));
+ desc += sprintf(buf + desc, "\tCR2 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_CR2));
+ }
+
+ desc += sprintf(buf + desc, "\n%s", "inbound iATU info:\n");
+ for (i = 0; i < 6; i++) {
+ writel(PCIE_ATU_REGION_INBOUND | i,
+ pcie->dbi + PCIE_ATU_VIEWPORT);
+ desc += sprintf(buf + desc, "iATU%d", i);
+ desc += sprintf(buf + desc, "\tLOWER BUS 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
+ desc += sprintf(buf + desc, "\tUPPER BUS 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
+ desc += sprintf(buf + desc, "\tLOWER PHYS 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
+ desc += sprintf(buf + desc, "\tUPPER PHYS 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
+ desc += sprintf(buf + desc, "\tLIMIT 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_LIMIT));
+ desc += sprintf(buf + desc, "\tCR1 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_CR1));
+ desc += sprintf(buf + desc, "\tCR2 0x%08x\n",
+ readl(pcie->dbi + PCIE_ATU_CR2));
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, desc);
+ kfree(buf);
+
+ return len;
+}
+
+/**
+ * ls_pcie_ep_dbg_regs_write - write into regs data
+ * @filp: the opened file
+ * @buffer: where to find the user's data
+ * @count: the length of the user's data
+ * @ppos: file position offset
+ **/
+static ssize_t ls_pcie_ep_dbg_regs_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ls_ep_dev *ep = filp->private_data;
+ struct ls_pcie *pcie = ep->pcie;
+ char buf[256];
+
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
+ memset(buf, 0, sizeof(buf));
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ ls_pcie_ep_dev_cfg_enable(ep);
+
+ if (strncmp(buf, "reg", 3) == 0) {
+ u32 reg, value;
+ int cnt;
+
+ cnt = sscanf(&buf[3], "%x %x", &reg, &value);
+ if (cnt == 2) {
+ writel(value, pcie->dbi + reg);
+ value = readl(pcie->dbi + reg);
+ dev_info(&ep->dev, "reg 0x%08x: 0x%08x\n",
+ reg, value);
+ } else {
+ dev_info(&ep->dev, "reg <reg> <value>\n");
+ }
+ } else if (strncmp(buf, "atu", 3) == 0) {
+ /* to do */
  963. + dev_info(&ep->dev, " Not support atu command\n");
+ } else {
+ dev_info(&ep->dev, "Unknown command %s\n", buf);
+ dev_info(&ep->dev, "Available commands:\n");
+ dev_info(&ep->dev, " reg <reg> <value>\n");
+ }
+
+ return count;
+}
+
+static const struct file_operations ls_pcie_ep_dbg_regs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = ls_pcie_ep_dbg_regs_read,
+ .write = ls_pcie_ep_dbg_regs_write,
+};
+
+static ssize_t ls_pcie_ep_dbg_test_read(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ls_ep_dev *ep = filp->private_data;
+ struct ls_ep_test *test = ep->driver_data;
+ char buf[512];
+ int desc = 0, len;
+
+ if (!test) {
+ dev_info(&ep->dev, " there is NO test\n");
+ return 0;
+ }
+
+ if (test->status != TEST_IDLE) {
+ dev_info(&ep->dev, "test %s is running\n", test->cmd);
+ return 0;
+ }
+
+ desc = sprintf(buf, "MSI ADDR:0x%llx MSI DATA:0x%x\n",
+ test->msi_msg_addr, test->msi_msg_data);
+
+ desc += sprintf(buf + desc, "%s throughput:%lluMbps\n",
+ test->cmd, test->result);
+
+ len = simple_read_from_buffer(buffer, count, ppos,
+ buf, desc);
+
+ return len;
+}
+
+static ssize_t ls_pcie_ep_dbg_test_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct ls_ep_dev *ep = filp->private_data;
+ char buf[256];
+
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
+ memset(buf, 0, sizeof(buf));
+
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+
+ if (strncmp(buf, "init", 4) == 0) {
+ int i = 4;
+ u64 bus_addr;
+
+ while (buf[i] == ' ')
+ i++;
+
+ if (kstrtou64(&buf[i], 0, &bus_addr))
+ dev_info(&ep->dev, "command: init <bus_addr>\n");
+ else {
+ if (ls_pcie_ep_init_test(ep, bus_addr))
+ dev_info(&ep->dev, "failed to init test\n");
+ }
+ } else if (strncmp(buf, "free", 4) == 0)
+ ls_pcie_ep_free_test(ep);
+ else if (strncmp(buf, "dma", 3) == 0 ||
+ strncmp(buf, "cpy", 3) == 0)
+ ls_pcie_ep_start_test(ep, buf);
+ else {
+ dev_info(&ep->dev, "Unknown command: %s\n", buf);
+ dev_info(&ep->dev, "Available commands:\n");
+ dev_info(&ep->dev, "\tinit <bus_addr>\n");
  1048. + dev_info(&ep->dev, "\t<dma/cpy> <r/w> <packet_size> <loop>\n");
  1049. + dev_info(&ep->dev, "\tfree\n");
  1050. + }
  1051. +
  1052. + return count;
  1053. +}
  1054. +
  1055. +static const struct file_operations ls_pcie_ep_dbg_test_fops = {
  1056. + .owner = THIS_MODULE,
  1057. + .open = simple_open,
  1058. + .read = ls_pcie_ep_dbg_test_read,
  1059. + .write = ls_pcie_ep_dbg_test_write,
  1060. +};
  1061. +
  1062. +static ssize_t ls_pcie_ep_dbg_dump_read(struct file *filp,
  1063. + char __user *buffer,
  1064. + size_t count, loff_t *ppos)
  1065. +{
  1066. + struct ls_ep_dev *ep = filp->private_data;
  1067. + struct ls_ep_test *test = ep->driver_data;
  1068. + char *buf;
  1069. + int desc = 0, i, len;
  1070. +
  1071. + buf = kmalloc(4 * 1024, GFP_KERNEL);
  1072. + if (!buf)
  1073. + return -ENOMEM;
  1074. +
  1075. + if (!test) {
  1076. + dev_info(&ep->dev, " there is NO test\n");
  1077. + kfree(buf);
  1078. + return 0;
  1079. + }
  1080. +
  1081. + desc += sprintf(buf + desc, "%s", "dump info:");
  1082. + for (i = 0; i < 256; i += 4) {
  1083. + if (i % 16 == 0)
  1084. + desc += sprintf(buf + desc, "\n%08x:", i);
  1085. + desc += sprintf(buf + desc, " %08x", readl(test->buf + i));
  1086. + }
  1087. +
  1088. + desc += sprintf(buf + desc, "\n");
  1089. + len = simple_read_from_buffer(buffer, count, ppos, buf, desc);
  1090. +
  1091. + kfree(buf);
  1092. +
  1093. + return len;
  1094. +}
  1095. +
  1096. +static const struct file_operations ls_pcie_ep_dbg_dump_fops = {
  1097. + .owner = THIS_MODULE,
  1098. + .open = simple_open,
  1099. + .read = ls_pcie_ep_dbg_dump_read,
  1100. +};
  1101. +
  1102. +static int ls_pcie_ep_dev_dbgfs_init(struct ls_ep_dev *ep)
  1103. +{
  1104. + struct ls_pcie *pcie = ep->pcie;
  1105. + struct dentry *pfile;
  1106. +
  1107. + ls_pcie_ep_dev_cfg_enable(ep);
  1108. +
  1109. + ep->dir = debugfs_create_dir(dev_name(&ep->dev), pcie->dir);
  1110. + if (!ep->dir)
  1111. + return -ENOMEM;
  1112. +
  1113. + pfile = debugfs_create_file("regs", 0600, ep->dir, ep,
  1114. + &ls_pcie_ep_dbg_regs_fops);
  1115. + if (!pfile)
+ dev_info(&ep->dev, "failed to create debugfs regs\n");
+
+ pfile = debugfs_create_file("test", 0600, ep->dir, ep,
+ &ls_pcie_ep_dbg_test_fops);
+ if (!pfile)
+ dev_info(&ep->dev, "failed to create debugfs test\n");
+
+ pfile = debugfs_create_file("dump", 0600, ep->dir, ep,
+ &ls_pcie_ep_dbg_dump_fops);
+ if (!pfile)
+ dev_info(&ep->dev, "failed to create debugfs dump\n");
+
+ return 0;
+}
+
+int ls_pcie_ep_dbgfs_init(struct ls_pcie *pcie)
+{
+ struct ls_ep_dev *ep;
+
+ pcie->dir = debugfs_create_dir(dev_name(pcie->dev), NULL);
+ if (!pcie->dir)
+ return -ENOMEM;
+
+ list_for_each_entry(ep, &pcie->ep_list, node)
+ ls_pcie_ep_dev_dbgfs_init(ep);
+
+ return 0;
+}
+
+int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie)
+{
+ debugfs_remove_recursive(pcie->dir);
+ return 0;
+}
+
+MODULE_AUTHOR("Minghuan Lian <[email protected]>");
+MODULE_DESCRIPTION("Freescale Layerscape PCIe EP controller driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+++ b/drivers/pci/host/pci-layerscape-ep.c
@@ -0,0 +1,309 @@
+/*
+ * PCIe Endpoint driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2015 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/debugfs.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+
+#include "pci-layerscape-ep.h"
+
+struct ls_ep_dev *
+ls_pci_ep_find(struct ls_pcie *pcie, int dev_id)
+{
+ struct ls_ep_dev *ep;
+
+ list_for_each_entry(ep, &pcie->ep_list, node) {
+ if (ep->dev_id == dev_id)
+ return ep;
+ }
+
+ return NULL;
+}
+
+static void ls_pcie_try_cfg2(struct ls_pcie *pcie, int pf, int vf)
+{
+ if (pcie->sriov)
+ writel(PCIE_LCTRL0_VAL(pf, vf),
+ pcie->dbi + PCIE_LUT_BASE + PCIE_LUT_LCTRL0);
+}
+
+static bool ls_pcie_is_bridge(struct ls_pcie *pcie)
+{
+ u32 header_type = 0;
+
+ header_type = readl(pcie->dbi + (PCI_HEADER_TYPE & ~0x3));
+ header_type = (header_type >> 16) & 0x7f;
+
+ return header_type == PCI_HEADER_TYPE_BRIDGE;
+}
+
+void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type,
+ u64 cpu_addr, u64 pci_addr, u32 size)
+{
+ writel(PCIE_ATU_REGION_OUTBOUND | idx,
+ pcie->dbi + PCIE_ATU_VIEWPORT);
+ writel(lower_32_bits(cpu_addr),
+ pcie->dbi + PCIE_ATU_LOWER_BASE);
+ writel(upper_32_bits(cpu_addr),
+ pcie->dbi + PCIE_ATU_UPPER_BASE);
+ writel(lower_32_bits(cpu_addr + size - 1),
+ pcie->dbi + PCIE_ATU_LIMIT);
+ writel(lower_32_bits(pci_addr),
+ pcie->dbi + PCIE_ATU_LOWER_TARGET);
+ writel(upper_32_bits(pci_addr),
+ pcie->dbi + PCIE_ATU_UPPER_TARGET);
+ writel(type, pcie->dbi + PCIE_ATU_CR1);
+ writel(PCIE_ATU_ENABLE, pcie->dbi + PCIE_ATU_CR2);
+}
+
+/* Use bar match mode and MEM type as default */
+void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx,
+ int bar, u64 phys)
+{
+ writel(PCIE_ATU_REGION_INBOUND | idx, pcie->dbi + PCIE_ATU_VIEWPORT);
+ writel((u32)phys, pcie->dbi + PCIE_ATU_LOWER_TARGET);
+ writel(phys >> 32, pcie->dbi + PCIE_ATU_UPPER_TARGET);
+ writel(PCIE_ATU_TYPE_MEM, pcie->dbi + PCIE_ATU_CR1);
+ writel(PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
+ PCIE_ATU_BAR_NUM(bar), pcie->dbi + PCIE_ATU_CR2);
+}
+
+void ls_pcie_ep_dev_cfg_enable(struct ls_ep_dev *ep)
+{
+ ls_pcie_try_cfg2(ep->pcie, ep->pf_idx, ep->vf_idx);
+}
+
+void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
+{
+ if (size < 4 * 1024)
+ return;
+
+ switch (bar) {
+ case 0:
+ writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
+ break;
+ case 1:
+ writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
+ break;
+ case 2:
+ writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
+ writel(0, bar_base + PCI_BASE_ADDRESS_3);
+ break;
+ case 4:
+ writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
+ writel(0, bar_base + PCI_BASE_ADDRESS_5);
+ break;
+ default:
+ break;
+ }
+}
+
+void ls_pcie_ep_dev_setup_bar(struct ls_ep_dev *ep, int bar, u32 size)
+{
+ struct ls_pcie *pcie = ep->pcie;
+ void *bar_base;
+
+ if (size < 4 * 1024)
+ return;
+
+ if (pcie->sriov)
+ bar_base = pcie->dbi;
+ else
+ bar_base = pcie->dbi + PCIE_NO_SRIOV_BAR_BASE;
+
+ ls_pcie_ep_dev_cfg_enable(ep);
+ ls_pcie_ep_setup_bar(bar_base, bar, size);
+}
+
+static int ls_pcie_ep_dev_init(struct ls_pcie *pcie, int pf_idx, int vf_idx)
+{
+ struct ls_ep_dev *ep;
+
+ ep = devm_kzalloc(pcie->dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->pcie = pcie;
+ ep->pf_idx = pf_idx;
+ ep->vf_idx = vf_idx;
+ if (vf_idx)
+ ep->dev_id = pf_idx + 4 + 4 * (vf_idx - 1);
+ else
+ ep->dev_id = pf_idx;
+
+ if (ep->vf_idx)
+ dev_set_name(&ep->dev, "pf%d-vf%d",
+ ep->pf_idx,
+ ep->vf_idx);
+ else
+ dev_set_name(&ep->dev, "pf%d",
+ ep->pf_idx);
+
+ list_add_tail(&ep->node, &pcie->ep_list);
+
+ return 0;
+}
+
+static int ls_pcie_ep_init(struct ls_pcie *pcie)
+{
+ u32 sriov_header;
+ int pf, vf, i, j;
+
+ sriov_header = readl(pcie->dbi + PCIE_SRIOV_POS);
+
+ if (PCI_EXT_CAP_ID(sriov_header) == PCI_EXT_CAP_ID_SRIOV) {
+ pcie->sriov = PCIE_SRIOV_POS;
+ pf = PCIE_PF_NUM;
+ vf = PCIE_VF_NUM;
+ } else {
+ pcie->sriov = 0;
+ pf = 1;
+ vf = 0;
+ }
+
+ for (i = 0; i < pf; i++) {
+ for (j = 0; j <= vf; j++)
+ ls_pcie_ep_dev_init(pcie, i, j);
+ }
+
+ return 0;
+}
+
+static struct ls_pcie_ep_drvdata ls1043_drvdata = {
+ .lut_offset = 0x10000,
+ .ltssm_shift = 24,
+ .lut_dbg = 0x7fc,
+};
+
+static struct ls_pcie_ep_drvdata ls1046_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 24,
+ .lut_dbg = 0x407fc,
+};
+
+static struct ls_pcie_ep_drvdata ls2080_drvdata = {
+ .lut_offset = 0x80000,
+ .ltssm_shift = 0,
+ .lut_dbg = 0x7fc,
+};
+
+static const struct of_device_id ls_pcie_ep_of_match[] = {
+ { .compatible = "fsl,ls1021a-pcie", },
+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+ { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
+ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
+ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
+ { },
+};
+MODULE_DEVICE_TABLE(of, ls_pcie_ep_of_match);
+
+static int ls_pcie_ep_probe(struct platform_device *pdev)
+{
+ struct ls_pcie *pcie;
+ struct resource *dbi_base, *cfg_res;
+ const struct of_device_id *match;
+ int ret;
+
+ match = of_match_device(ls_pcie_ep_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
+ pcie->dev = &pdev->dev;
+ INIT_LIST_HEAD(&pcie->ep_list);
+
+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base);
+ if (IS_ERR(pcie->dbi)) {
+ dev_err(&pdev->dev, "missing *regs* space\n");
+ return PTR_ERR(pcie->dbi);
+ }
+
+ pcie->drvdata = match->data;
+ pcie->lut = pcie->dbi + pcie->drvdata->lut_offset;
+
+ if (ls_pcie_is_bridge(pcie))
+ return -ENODEV;
+
+ dev_info(pcie->dev, "in EP mode\n");
+
+ cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
+ if (cfg_res)
+ pcie->out_base = cfg_res->start;
+ else {
+ dev_err(&pdev->dev, "missing *config* space\n");
+ return -ENODEV;
+ }
+
+ ret = ls_pcie_ep_init(pcie);
+ if (ret)
+ return ret;
+
+ ls_pcie_ep_dbgfs_init(pcie);
+
+ platform_set_drvdata(pdev, pcie);
+
+ return 0;
+}
+
+static int ls_pcie_ep_dev_remove(struct ls_ep_dev *ep)
+{
+ list_del(&ep->node);
+
+ return 0;
+}
+
+static int ls_pcie_ep_remove(struct platform_device *pdev)
+{
+ struct ls_pcie *pcie = platform_get_drvdata(pdev);
+ struct ls_ep_dev *ep, *tmp;
+
+ if (!pcie)
+ return 0;
+
+ ls_pcie_ep_dbgfs_remove(pcie);
+
+ list_for_each_entry_safe(ep, tmp, &pcie->ep_list, node)
+ ls_pcie_ep_dev_remove(ep);
+
+ return 0;
+}
+
+static struct platform_driver ls_pcie_ep_driver = {
+ .driver = {
+ .name = "ls-pcie-ep",
+ .owner = THIS_MODULE,
+ .of_match_table = ls_pcie_ep_of_match,
+ },
+ .probe = ls_pcie_ep_probe,
+ .remove = ls_pcie_ep_remove,
+};
+
+module_platform_driver(ls_pcie_ep_driver);
+
+MODULE_AUTHOR("Minghuan Lian <[email protected]>");
+MODULE_DESCRIPTION("Freescale Layerscape PCIe EP driver");
+MODULE_LICENSE("GPL v2");
--- /dev/null
+++ b/drivers/pci/host/pci-layerscape-ep.h
@@ -0,0 +1,115 @@
+/*
+ * PCIe Endpoint driver for Freescale Layerscape SoCs
+ *
+ * Copyright (C) 2015 Freescale Semiconductor.
+ *
+ * Author: Minghuan Lian <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+
+#ifndef _PCIE_LAYERSCAPE_EP_H
+#define _PCIE_LAYERSCAPE_EP_H
+
+#include <linux/device.h>
+
+/* Synopsys-specific PCIe configuration registers */
  1488. +#define PCIE_ATU_VIEWPORT 0x900
  1489. +#define PCIE_ATU_REGION_INBOUND (0x1 << 31)
  1490. +#define PCIE_ATU_REGION_OUTBOUND (0x0 << 31)
  1491. +#define PCIE_ATU_REGION_INDEX3 (0x3 << 0)
  1492. +#define PCIE_ATU_REGION_INDEX2 (0x2 << 0)
  1493. +#define PCIE_ATU_REGION_INDEX1 (0x1 << 0)
  1494. +#define PCIE_ATU_REGION_INDEX0 (0x0 << 0)
  1495. +#define PCIE_ATU_CR1 0x904
  1496. +#define PCIE_ATU_TYPE_MEM (0x0 << 0)
  1497. +#define PCIE_ATU_TYPE_IO (0x2 << 0)
  1498. +#define PCIE_ATU_TYPE_CFG0 (0x4 << 0)
  1499. +#define PCIE_ATU_TYPE_CFG1 (0x5 << 0)
  1500. +#define PCIE_ATU_CR2 0x908
  1501. +#define PCIE_ATU_ENABLE (0x1 << 31)
  1502. +#define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30)
  1503. +#define PCIE_ATU_LOWER_BASE 0x90C
  1504. +#define PCIE_ATU_UPPER_BASE 0x910
  1505. +#define PCIE_ATU_LIMIT 0x914
  1506. +#define PCIE_ATU_LOWER_TARGET 0x918
  1507. +#define PCIE_ATU_BUS(x) (((x) & 0xff) << 24)
  1508. +#define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19)
  1509. +#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
  1510. +#define PCIE_ATU_UPPER_TARGET 0x91C
  1511. +
  1512. +/* PEX internal configuration registers */
  1513. +#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */
  1514. +
  1515. +/* PEX LUT registers */
  1516. +#define PCIE_LUT_BASE 0x80000
  1517. +#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug register */
  1518. +
  1519. +#define PCIE_LUT_LCTRL0 0x7F8
  1520. +
  1521. +#define PCIE_ATU_BAR_NUM(bar) ((bar) << 8)
  1522. +#define PCIE_LCTRL0_CFG2_ENABLE (1 << 31)
  1523. +#define PCIE_LCTRL0_VF(vf) ((vf) << 22)
  1524. +#define PCIE_LCTRL0_PF(pf) ((pf) << 16)
  1525. +#define PCIE_LCTRL0_VF_ACTIVE (1 << 21)
  1526. +#define PCIE_LCTRL0_VAL(pf, vf) (PCIE_LCTRL0_PF(pf) | \
  1527. + PCIE_LCTRL0_VF(vf) | \
  1528. + ((vf) == 0 ? 0 : PCIE_LCTRL0_VF_ACTIVE) | \
  1529. + PCIE_LCTRL0_CFG2_ENABLE)
  1530. +
  1531. +#define PCIE_NO_SRIOV_BAR_BASE 0x1000
  1532. +
  1533. +#define PCIE_SRIOV_POS 0x178
  1534. +#define PCIE_PF_NUM 2
  1535. +#define PCIE_VF_NUM 64
  1536. +
  1537. +struct ls_pcie_ep_drvdata {
  1538. + u32 lut_offset;
  1539. + u32 ltssm_shift;
  1540. + u32 lut_dbg;
  1541. +};
  1542. +
  1543. +struct ls_pcie {
  1544. + struct list_head ep_list;
  1545. + struct device *dev;
  1546. + struct dentry *dir;
  1547. + const struct ls_pcie_ep_drvdata *drvdata;
  1548. + void __iomem *dbi;
  1549. + void __iomem *lut;
  1550. + phys_addr_t out_base;
  1551. + int sriov;
  1552. + int index;
  1553. +};
  1554. +
  1555. +struct ls_ep_dev {
  1556. + struct list_head node;
  1557. + struct ls_pcie *pcie;
  1558. + struct device dev;
  1559. + struct dentry *dir;
  1560. + int pf_idx;
  1561. + int vf_idx;
  1562. + int dev_id;
  1563. + void *driver_data;
  1564. +};
  1565. +
  1566. +struct ls_ep_dev *ls_pci_ep_find(struct ls_pcie *pcie, int dev_id);
  1567. +
  1568. +void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type,
  1569. + u64 cpu_addr, u64 pci_addr, u32 size);
  1570. +
  1571. +/* Use bar match mode and MEM type as default */
  1572. +void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx,
  1573. + int bar, u64 phys);
  1574. +
  1575. +void ls_pcie_ep_dev_setup_bar(struct ls_ep_dev *ep, int bar, u32 size);
  1576. +
  1577. +
  1578. +void ls_pcie_ep_dev_cfg_enable(struct ls_ep_dev *ep);
  1579. +
  1580. +int ls_pcie_ep_dbgfs_init(struct ls_pcie *pcie);
  1581. +int ls_pcie_ep_dbgfs_remove(struct ls_pcie *pcie);
  1582. +
  1583. +#endif /* _PCIE_LAYERSCAPE_EP_H */
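
Taken together, the header sketches a small endpoint-side API: ls_pci_ep_find() looks up a function by device id, ls_pcie_ep_dev_cfg_enable() presumably selects that PF/VF for DBI config access via the LUT control register (PCIE_LCTRL0_VAL above), and the iATU helpers map BARs and outbound windows. A minimal, hypothetical consumer might look like the sketch below; the device id, BAR index/size, and window geometry are illustrative, not taken from the patch:

/* Hypothetical consumer of the EP interface above; all constants
 * (device id, BAR index/size, window geometry) are illustrative. */
static int ls_ep_example_setup(struct ls_pcie *pcie)
{
	struct ls_ep_dev *ep;

	ep = ls_pci_ep_find(pcie, 0);	/* function with dev_id 0 */
	if (!ep)
		return -ENODEV;

	/* select this PF/VF for subsequent DBI config accesses */
	ls_pcie_ep_dev_cfg_enable(ep);

	/* expose a 64 KiB BAR0 to the remote root complex */
	ls_pcie_ep_dev_setup_bar(ep, 0, 0x10000);

	/* map one outbound window: iATU index 0, MEM type,
	 * 64 KiB at out_base targeting PCI address 0 */
	ls_pcie_iatu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_MEM, pcie->out_base,
				  0, 0x10000);
	return 0;
}
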
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -33,14 +33,18 @@
 /* PEX Internal Configuration Registers */
 #define PCIE_STRFMR1		0x71c /* Symbol Timer & Filter Mask Register1 */
+#define PCIE_ABSERR		0x8d0 /* Bridge Slave Error Response Register */
+#define PCIE_ABSERR_SETTING	0x9401 /* Forward error of non-posted request */
 #define PCIE_DBI_RO_WR_EN	0x8bc /* DBI Read-Only Write Enable Register */
-/* PEX LUT registers */
-#define PCIE_LUT_DBG		0x7FC /* PEX LUT Debug Register */
+#define PCIE_IATU_NUM		6
+
+static void ls_pcie_host_init(struct pcie_port *pp);
 struct ls_pcie_drvdata {
 	u32 lut_offset;
 	u32 ltssm_shift;
+	u32 lut_dbg;
 	struct pcie_host_ops *ops;
 };
@@ -86,6 +90,14 @@ static void ls_pcie_drop_msg_tlp(struct
 	iowrite32(val, pcie->pp.dbi_base + PCIE_STRFMR1);
 }
+static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie)
+{
+	int i;
+
+	for (i = 0; i < PCIE_IATU_NUM; i++)
+		dw_pcie_disable_outbound_atu(&pcie->pp, i);
+}
+
 static int ls1021_pcie_link_up(struct pcie_port *pp)
 {
 	u32 state;
@@ -134,7 +146,7 @@ static int ls_pcie_link_up(struct pcie_p
 	struct ls_pcie *pcie = to_ls_pcie(pp);
 	u32 state;
-	state = (ioread32(pcie->lut + PCIE_LUT_DBG) >>
+	state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
 		 pcie->drvdata->ltssm_shift) &
 		 LTSSM_STATE_MASK;
@@ -144,6 +156,12 @@ static int ls_pcie_link_up(struct pcie_p
 	return 1;
 }
+/* Forward error response of outbound non-posted requests */
+static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
+{
+	iowrite32(PCIE_ABSERR_SETTING, pcie->pp.dbi_base + PCIE_ABSERR);
+}
+
 static void ls_pcie_host_init(struct pcie_port *pp)
 {
 	struct ls_pcie *pcie = to_ls_pcie(pp);
@@ -153,6 +171,10 @@ static void ls_pcie_host_init(struct pci
 	ls_pcie_clear_multifunction(pcie);
 	ls_pcie_drop_msg_tlp(pcie);
 	iowrite32(0, pcie->pp.dbi_base + PCIE_DBI_RO_WR_EN);
+
+	ls_pcie_disable_outbound_atus(pcie);
+	ls_pcie_fix_error_response(pcie);
+	dw_pcie_setup_rc(pp);
 }
 static int ls_pcie_msi_host_init(struct pcie_port *pp,
@@ -196,20 +218,40 @@ static struct ls_pcie_drvdata ls1021_drv
 static struct ls_pcie_drvdata ls1043_drvdata = {
 	.lut_offset = 0x10000,
 	.ltssm_shift = 24,
+	.lut_dbg = 0x7fc,
+	.ops = &ls_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls1046_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 24,
+	.lut_dbg = 0x407fc,
 	.ops = &ls_pcie_host_ops,
 };
 static struct ls_pcie_drvdata ls2080_drvdata = {
 	.lut_offset = 0x80000,
 	.ltssm_shift = 0,
+	.lut_dbg = 0x7fc,
+	.ops = &ls_pcie_host_ops,
+};
+
+static struct ls_pcie_drvdata ls2088_drvdata = {
+	.lut_offset = 0x80000,
+	.ltssm_shift = 0,
+	.lut_dbg = 0x407fc,
 	.ops = &ls_pcie_host_ops,
 };
 static const struct of_device_id ls_pcie_of_match[] = {
+	{ .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
 	{ .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
 	{ .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
+	{ .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
 	{ .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
 	{ .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
+	{ .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
+	{ .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
 	{ },
 };
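
The host-side rework is essentially per-SoC factoring: the LTSSM state now comes from a LUT debug register whose offset is part of the drvdata, so parts that relocated the register (0x407fc on LS1012A/LS1046A/LS1088A/LS2088A versus 0x7fc elsewhere) only need a new table entry. As a sketch, a hypothetical future SoC would be wired up like this (the compatible string and offsets below are illustrative):

/* Sketch only: ls_pcie_link_up() computes
 * (ioread32(lut + lut_dbg) >> ltssm_shift) & LTSSM_STATE_MASK,
 * so a new SoC just supplies its LUT geometry. */
static struct ls_pcie_drvdata ls_newsoc_drvdata = {
	.lut_offset = 0x80000,	/* LUT block base, relative to dbi */
	.ltssm_shift = 24,	/* LTSSM field position in the debug reg */
	.lut_dbg = 0x7fc,	/* debug register offset within the LUT */
	.ops = &ls_pcie_host_ops,
};
/* ...plus one match-table line, e.g.
 * { .compatible = "fsl,ls-newsoc-pcie", .data = &ls_newsoc_drvdata },
 */
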
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -478,6 +478,12 @@ int dw_pcie_wait_for_link(struct pcie_po
 	return -ETIMEDOUT;
 }
+void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index)
+{
+	dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_OUTBOUND | index);
+	dw_pcie_writel_rc(pp, PCIE_ATU_CR2, 0);
+}
+
 int dw_pcie_link_up(struct pcie_port *pp)
 {
 	u32 val;
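
dw_pcie_disable_outbound_atu() follows the usual DesignWare viewport sequence: select the outbound region in PCIE_ATU_VIEWPORT, then clear the enable bit in PCIE_ATU_CR2. For contrast, the enable path is the symmetric write; the sketch below only marks where the address registers would go (the existing dw_pcie_prog_outbound_atu() helper in this file does the complete job):

/* Sketch of the inverse operation, under the same viewport model;
 * not code from this patch. */
static void example_enable_outbound_atu(struct pcie_port *pp, int index)
{
	dw_pcie_writel_rc(pp, PCIE_ATU_VIEWPORT,
			  PCIE_ATU_REGION_OUTBOUND | index);
	/* ...base/limit/target registers would be programmed here... */
	dw_pcie_writel_rc(pp, PCIE_ATU_CR2, PCIE_ATU_ENABLE);
}
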
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -82,5 +82,6 @@ int dw_pcie_wait_for_link(struct pcie_po
 int dw_pcie_link_up(struct pcie_port *pp);
 void dw_pcie_setup_rc(struct pcie_port *pp);
 int dw_pcie_host_init(struct pcie_port *pp);
+void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index);
 #endif /* _PCIE_DESIGNWARE_H */
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -454,7 +454,7 @@ struct resource *pci_find_parent_resourc
 	pci_bus_for_each_resource(bus, r, i) {
 		if (!r)
 			continue;
-		if (res->start && resource_contains(r, res)) {
+		if (resource_contains(r, res)) {
 			/*
 			 * If the window is prefetchable but the BAR is
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -44,52 +44,30 @@ static void release_pcie_device(struct d
 }
 /**
- * pcie_port_msix_add_entry - add entry to given array of MSI-X entries
- * @entries: Array of MSI-X entries
- * @new_entry: Index of the entry to add to the array
- * @nr_entries: Number of entries already in the array
+ * pcibios_check_service_irqs - check irqs in the device tree
+ * @dev: PCI Express port to handle
+ * @irqs: Array of irqs to populate
+ * @mask: Bitmask of port capabilities returned by get_port_device_capability()
+ *
+ * Return value: 0 means no service irqs in the device tree
  *
- * Return value: Position of the added entry in the array
  */
-static int pcie_port_msix_add_entry(
-	struct msix_entry *entries, int new_entry, int nr_entries)
+int __weak pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
 {
-	int j;
-
-	for (j = 0; j < nr_entries; j++)
-		if (entries[j].entry == new_entry)
-			return j;
-
-	entries[j].entry = new_entry;
-	return j;
+	return 0;
 }
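
/*
 * (Sketch, not part of the patch.) The __weak stub above returns 0,
 * meaning "no dedicated service interrupts described by firmware".
 * A platform that does wire AER/PME to separate pins could override
 * it roughly like this; the irq number is purely illustrative:
 */
int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
	if (!(mask & PCIE_PORT_SERVICE_AER))
		return 0;

	irqs[PCIE_PORT_SERVICE_AER_SHIFT] = 42;	/* dedicated AER wire */
	return 1;	/* non-zero: irqs[] was filled by the platform */
}
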
 /**
  * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port
  * @dev: PCI Express port to handle
- * @vectors: Array of interrupt vectors to populate
+ * @irqs: Array of interrupt vectors to populate
  * @mask: Bitmask of port capabilities returned by get_port_device_capability()
  *
  * Return value: 0 on success, error code on failure
  */
-static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask)
+static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask)
 {
-	struct msix_entry *msix_entries;
-	int idx[PCIE_PORT_DEVICE_MAXSERVICES];
-	int nr_entries, status, pos, i, nvec;
-	u16 reg16;
-	u32 reg32;
-
-	nr_entries = pci_msix_vec_count(dev);
-	if (nr_entries < 0)
-		return nr_entries;
-	BUG_ON(!nr_entries);
-	if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES)
-		nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES;
-
-	msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL);
-	if (!msix_entries)
-		return -ENOMEM;
+	int nr_entries, entry, nvec = 0;
 	/*
 	 * Allocate as many entries as the port wants, so that we can check
@@ -97,20 +75,13 @@ static int pcie_port_enable_msix(struct
 	 * equal to the number of entries this port actually uses, we'll happily
 	 * go through without any tricks.
 	 */
-	for (i = 0; i < nr_entries; i++)
-		msix_entries[i].entry = i;
-
-	status = pci_enable_msix_exact(dev, msix_entries, nr_entries);
-	if (status)
-		goto Exit;
-
-	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
-		idx[i] = -1;
-	status = -EIO;
-	nvec = 0;
+	nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES,
+			PCI_IRQ_MSIX);
+	if (nr_entries < 0)
+		return nr_entries;
 	if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) {
-		int entry;
+		u16 reg16;
 		/*
 		 * The code below follows the PCI Express Base Specification 2.0
@@ -125,18 +96,16 @@ static int pcie_port_enable_msix(struct
 		pcie_capability_read_word(dev, PCI_EXP_FLAGS, &reg16);
 		entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9;
 		if (entry >= nr_entries)
-			goto Error;
+			goto out_free_irqs;
-		i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
-		if (i == nvec)
-			nvec++;
+		irqs[PCIE_PORT_SERVICE_PME_SHIFT] = pci_irq_vector(dev, entry);
+		irqs[PCIE_PORT_SERVICE_HP_SHIFT] = pci_irq_vector(dev, entry);
-		idx[PCIE_PORT_SERVICE_PME_SHIFT] = i;
-		idx[PCIE_PORT_SERVICE_HP_SHIFT] = i;
+		nvec = max(nvec, entry + 1);
 	}
 	if (mask & PCIE_PORT_SERVICE_AER) {
-		int entry;
+		u32 reg32, pos;
 		/*
 		 * The code below follows Section 7.10.10 of the PCI Express
@@ -151,13 +120,11 @@ static int pcie_port_enable_msix(struct
 		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32);
 		entry = reg32 >> 27;
 		if (entry >= nr_entries)
-			goto Error;
+			goto out_free_irqs;
-		i = pcie_port_msix_add_entry(msix_entries, entry, nvec);
-		if (i == nvec)
-			nvec++;
+		irqs[PCIE_PORT_SERVICE_AER_SHIFT] = pci_irq_vector(dev, entry);
-		idx[PCIE_PORT_SERVICE_AER_SHIFT] = i;
+		nvec = max(nvec, entry + 1);
 	}
 	/*
@@ -165,41 +132,54 @@ static int pcie_port_enable_msix(struct
 	 * what we have. Otherwise, the port has some extra entries not for the
 	 * services we know and we need to work around that.
 	 */
-	if (nvec == nr_entries) {
-		status = 0;
-	} else {
+	if (nvec != nr_entries) {
 		/* Drop the temporary MSI-X setup */
-		pci_disable_msix(dev);
+		pci_free_irq_vectors(dev);
 		/* Now allocate the MSI-X vectors for real */
-		status = pci_enable_msix_exact(dev, msix_entries, nvec);
-		if (status)
-			goto Exit;
+		nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
+				PCI_IRQ_MSIX);
+		if (nr_entries < 0)
+			return nr_entries;
 	}
-	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
-		vectors[i] = idx[i] >= 0 ? msix_entries[idx[i]].vector : -1;
-
- Exit:
-	kfree(msix_entries);
-	return status;
+	return 0;
- Error:
-	pci_disable_msix(dev);
-	goto Exit;
+out_free_irqs:
+	pci_free_irq_vectors(dev);
+	return -EIO;
 }
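
/*
 * (Sketch, not part of the patch.) The rewrite above replaces the
 * open-coded msix_entry bookkeeping with the generic vector API.
 * The core idiom in isolation, with illustrative bounds:
 */
static int example_alloc_msix(struct pci_dev *dev)
{
	int nvec, irq;

	/* ask for anywhere between 1 and 8 MSI-X vectors */
	nvec = pci_alloc_irq_vectors(dev, 1, 8, PCI_IRQ_MSIX);
	if (nvec < 0)
		return nvec;		/* allocation failed */

	irq = pci_irq_vector(dev, 0);	/* Linux irq for HW vector 0 */

	pci_free_irq_vectors(dev);	/* single teardown call */
	return irq;
}
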
 /**
- * init_service_irqs - initialize irqs for PCI Express port services
+ * pcie_init_service_irqs - initialize irqs for PCI Express port services
  * @dev: PCI Express port to handle
  * @irqs: Array of irqs to populate
  * @mask: Bitmask of port capabilities returned by get_port_device_capability()
  *
  * Return value: Interrupt mode associated with the port
  */
-static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
 {
-	int i, irq = -1;
+	unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI;
+	int ret, i;
+	int irq = -1;
+
+	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+		irqs[i] = -1;
+
+	/*
+	 * Some platforms have dedicated interrupt pins for AER/PME etc.
+	 * and describe them in the device tree; give the platform a
+	 * chance to claim those interrupts first.
+	 */
+	ret = pcibios_check_service_irqs(dev, irqs, mask);
+	if (ret) {
+		if (dev->irq)
+			irq = dev->irq;
+		for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+			if (irqs[i] == -1 && i != PCIE_PORT_SERVICE_VC_SHIFT)
+				irqs[i] = irq;
+		return 0;
+	}
 	/*
 	 * If MSI cannot be used for PCIe PME or hotplug, we have to use
@@ -207,41 +187,25 @@ static int init_service_irqs(struct pci_
 	 */
 	if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) ||
 	    ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) {
-		if (dev->irq)
-			irq = dev->irq;
-		goto no_msi;
+		flags &= ~PCI_IRQ_MSI;
+	} else {
+		/* Try to use MSI-X if supported */
+		if (!pcie_port_enable_msix(dev, irqs, mask))
+			return 0;
 	}
-	/* Try to use MSI-X if supported */
-	if (!pcie_port_enable_msix(dev, irqs, mask))
-		return 0;
-
-	/*
-	 * We're not going to use MSI-X, so try MSI and fall back to INTx.
-	 * If neither MSI/MSI-X nor INTx available, try other interrupt. On
-	 * some platforms, root port doesn't support MSI/MSI-X/INTx in RC mode.
-	 */
-	if (!pci_enable_msi(dev) || dev->irq)
-		irq = dev->irq;
+	ret = pci_alloc_irq_vectors(dev, 1, 1, flags);
+	if (ret < 0)
+		return -ENODEV;
- no_msi:
-	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
-		irqs[i] = irq;
-	irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1;
+	for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
+		if (i != PCIE_PORT_SERVICE_VC_SHIFT)
+			irqs[i] = pci_irq_vector(dev, 0);
+	}
-	if (irq < 0)
-		return -ENODEV;
 	return 0;
 }
-static void cleanup_service_irqs(struct pci_dev *dev)
-{
-	if (dev->msix_enabled)
-		pci_disable_msix(dev);
-	else if (dev->msi_enabled)
-		pci_disable_msi(dev);
-}
-
 /**
  * get_port_device_capability - discover capabilities of a PCI Express port
  * @dev: PCI Express port to examine
@@ -378,7 +342,7 @@ int pcie_port_device_register(struct pci
 	 * that can be used in the absence of irqs. Allow them to determine
 	 * if that is to be used.
 	 */
-	status = init_service_irqs(dev, irqs, capabilities);
+	status = pcie_init_service_irqs(dev, irqs, capabilities);
 	if (status) {
 		capabilities &= PCIE_PORT_SERVICE_VC | PCIE_PORT_SERVICE_HP;
 		if (!capabilities)
@@ -401,7 +365,7 @@ int pcie_port_device_register(struct pci
 	return 0;
 error_cleanup_irqs:
-	cleanup_service_irqs(dev);
+	pci_free_irq_vectors(dev);
 error_disable:
 	pci_disable_device(dev);
 	return status;
@@ -469,7 +433,7 @@ static int remove_iter(struct device *de
 void pcie_port_device_remove(struct pci_dev *dev)
 {
 	device_for_each_child(&dev->dev, NULL, remove_iter);
-	cleanup_service_irqs(dev);
+	pci_free_irq_vectors(dev);
 	pci_disable_device(dev);
 }
@@ -499,7 +463,6 @@ static int pcie_port_probe_service(struc
 	if (status)
 		return status;
-	dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name);
 	get_device(dev);
 	return 0;
 }
@@ -524,8 +487,6 @@ static int pcie_port_remove_service(stru
 	pciedev = to_pcie_device(dev);
 	driver = to_service_driver(dev->driver);
 	if (driver && driver->remove) {
-		dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n",
-			   driver->name);
 		driver->remove(pciedev);
 		put_device(dev);
 	}
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3329,6 +3329,13 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_A
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
+/*
+ * NXP (Freescale vendor ID) LS1088 chips do not behave correctly after
+ * a bus reset: the device's link never comes back up, so its config
+ * space is never accessible again.
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, 0x80c0, quirk_no_bus_reset);
+
 static void quirk_no_pm_reset(struct pci_dev *dev)
 {
 	/*
@@ -4679,3 +4686,11 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IN
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2031, quirk_no_aersid);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2032, quirk_no_aersid);
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2033, quirk_no_aersid);
+
+/* Freescale PCIe doesn't support MSI in RC mode */
+static void quirk_fsl_no_msi(struct pci_dev *pdev)
+{
+	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+		pdev->no_msi = 1;
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
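
The final-stage quirk only sets pdev->no_msi on root ports, but that is enough to steer every later allocation: with the flag set, MSI requests against the port fail and callers drop back to legacy INTx. An illustration of the effect (not code from the patch):

/* Sketch: with pdev->no_msi set by quirk_fsl_no_msi above, the MSI
 * request fails and a caller falls back to a legacy interrupt. */
static int example_request_port_irq(struct pci_dev *pdev)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (nvec < 0)	/* no_msi makes this path fail */
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);

	return nvec < 0 ? nvec : pci_irq_vector(pdev, 0);
}
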
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1825,6 +1825,7 @@ void pcibios_release_device(struct pci_d
 void pcibios_penalize_isa_irq(int irq, int active);
 int pcibios_alloc_irq(struct pci_dev *dev);
 void pcibios_free_irq(struct pci_dev *dev);
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask);
 #ifdef CONFIG_HIBERNATE_CALLBACKS
 extern struct dev_pm_ops pcibios_pm_ops;