301-arch-support-layerscape.patch

From 739029f49bd9181b821298f9d27b29ce2d292967 Mon Sep 17 00:00:00 2001
From: Yangbo Lu <[email protected]>
Date: Mon, 25 Sep 2017 10:03:52 +0800
Subject: [PATCH] arch: support layerscape
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This is an integrated patch for layerscape arch support.

Signed-off-by: Madalin Bucur <[email protected]>
Signed-off-by: Nipun Gupta <[email protected]>
Signed-off-by: Zhao Qiang <[email protected]>
Signed-off-by: Camelia Groza <[email protected]>
Signed-off-by: Haiying Wang <[email protected]>
Signed-off-by: Pan Jiafei <[email protected]>
Signed-off-by: Po Liu <[email protected]>
Signed-off-by: Bharat Bhushan <[email protected]>
Signed-off-by: Jianhua Xie <[email protected]>
Signed-off-by: Horia Geantă <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
---
 arch/arm/include/asm/delay.h          | 16 +++++++++
 arch/arm/include/asm/io.h             | 31 ++++++++++++++++++
 arch/arm/include/asm/mach/map.h       |  4 +--
 arch/arm/include/asm/pgtable.h        |  7 ++++
 arch/arm/kernel/bios32.c              | 43 ++++++++++++++++++++++++
 arch/arm/mm/dma-mapping.c             |  1 +
 arch/arm/mm/ioremap.c                 |  7 ++++
 arch/arm/mm/mmu.c                     |  9 +++++
 arch/arm64/include/asm/cache.h        |  2 +-
 arch/arm64/include/asm/io.h           |  2 ++
 arch/arm64/include/asm/pci.h          |  4 +++
 arch/arm64/include/asm/pgtable-prot.h |  1 +
 arch/arm64/include/asm/pgtable.h      |  5 +++
 arch/arm64/kernel/pci.c               | 62 +++++++++++++++++++++++++++++++++++
 arch/arm64/mm/dma-mapping.c           | 23 ++++++++++---
 15 files changed, 209 insertions(+), 8 deletions(-)
--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -57,6 +57,22 @@ extern void __bad_udelay(void);
__const_udelay((n) * UDELAY_MULT)) : \
__udelay(n))
+#define spin_event_timeout(condition, timeout, delay) \
+({ \
+ typeof(condition) __ret; \
+ int i = 0; \
+ while (!(__ret = (condition)) && (i++ < timeout)) { \
+ if (delay) \
+ udelay(delay); \
+ else \
+ cpu_relax(); \
+ udelay(1); \
+ } \
+ if (!__ret) \
+ __ret = (condition); \
+ __ret; \
+})
+
/* Loop-based definitions for assembly code. */
extern void __loop_delay(unsigned long loops);
extern void __loop_udelay(unsigned long usecs);
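
[Illustration, not part of the patch: a caller of the spin_event_timeout() helper added above might poll a status register for a completion bit, bounding the wait at 1000 iterations with a 10 us delay between reads. regs, dev, EX_STATUS and EX_STATUS_DONE are invented names for this sketch.]

    u32 done;

    /* The macro returns the last value of the condition; zero means timeout. */
    done = spin_event_timeout(ioread32be(regs + EX_STATUS) & EX_STATUS_DONE,
                              1000, 10);
    if (!done)
        dev_err(dev, "device did not signal completion\n");
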
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -129,6 +129,7 @@ static inline u32 __raw_readl(const vola
#define MT_DEVICE_NONSHARED 1
#define MT_DEVICE_CACHED 2
#define MT_DEVICE_WC 3
+#define MT_MEMORY_RW_NS 4
/*
* types 4 onwards can be found in asm/mach/map.h and are undefined
* for ioremap
@@ -220,6 +221,34 @@ extern int pci_ioremap_io(unsigned int o
#endif
#endif
+/* access ports */
+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+
+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+
+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+
+/* Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write. These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits_be32(addr, clear, set) \
+ iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le32(addr, clear, set) \
+ iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_be16(addr, clear, set) \
+ iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le16(addr, clear, set) \
+ iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_8(addr, clear, set) \
+ iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+
/*
* IO port access primitives
* -------------------------
@@ -408,6 +437,8 @@ void __iomem *ioremap_wc(resource_size_t
#define ioremap_wc ioremap_wc
#define ioremap_wt ioremap_wc
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
+
void iounmap(volatile void __iomem *iomem_cookie);
#define iounmap iounmap
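
[Illustration, not part of the patch: the read-modify-write helpers added above act on memory-mapped device registers, big-endian or little-endian per the suffix. The register offset, bit masks and base pointer below are invented; base is assumed to be a void __iomem pointer obtained from ioremap().]

    #define EX_CTRL            0x00          /* invented register offset */
    #define EX_CTRL_EN         0x00000001    /* invented enable bit */
    #define EX_CTRL_MODE_MASK  0x0000000e    /* invented 3-bit mode field */
    #define EX_CTRL_MODE_FAST  0x00000004

    setbits32(base + EX_CTRL, EX_CTRL_EN);   /* set one bit */
    clrbits32(base + EX_CTRL, EX_CTRL_EN);   /* clear it again */
    /* Replace the whole mode field in a single read-modify-write. */
    clrsetbits_be32(base + EX_CTRL, EX_CTRL_MODE_MASK, EX_CTRL_MODE_FAST);
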
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -21,9 +21,9 @@ struct map_desc {
unsigned int type;
};
-/* types 0-3 are defined in asm/io.h */
+/* types 0-4 are defined in asm/io.h */
enum {
- MT_UNCACHED = 4,
+ MT_UNCACHED = 5,
MT_CACHECLEAN,
MT_MINICLEAN,
MT_LOW_VECTORS,
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -118,6 +118,13 @@ extern pgprot_t pgprot_s2_device;
#define pgprot_noncached(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
+#define pgprot_cached(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED)
+
+#define pgprot_cached_ns(prot) \
+ __pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \
+ L_PTE_MT_DEV_NONSHARED)
+
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -11,6 +11,8 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/of_irq.h>
+#include <linux/pcieport_if.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
@@ -64,6 +66,47 @@ void pcibios_report_status(u_int status_
}
/*
+ * Check device tree if the service interrupts are there
+ */
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+ int ret, count = 0;
+ struct device_node *np = NULL;
+
+ if (dev->bus->dev.of_node)
+ np = dev->bus->dev.of_node;
+
+ if (np == NULL)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return 0;
+
+ /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
+ * request irq for aer
+ */
+ if (mask & PCIE_PORT_SERVICE_AER) {
+ ret = of_irq_get_byname(np, "aer");
+ if (ret > 0) {
+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
+ count++;
+ }
+ }
+
+ if (mask & PCIE_PORT_SERVICE_PME) {
+ ret = of_irq_get_byname(np, "pme");
+ if (ret > 0) {
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
+ count++;
+ }
+ }
+
+ /* TODO: add more service interrupts if they are present in the device tree */
+
+ return count;
+}
+
+/*
* We don't use this to fix the device, but initialisation of it.
* It's not the correct use for this, but it works.
* Note that the arbiter/ISA bridge appears to be buggy, specifically in
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2392,6 +2392,7 @@ void arch_setup_dma_ops(struct device *d
set_dma_ops(dev, dma_ops);
}
+EXPORT_SYMBOL(arch_setup_dma_ops);
void arch_teardown_dma_ops(struct device *dev)
{
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t
}
EXPORT_SYMBOL(ioremap_wc);
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
+{
+ return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache_ns);
+
/*
* Remap an arbitrary physical address space into the kernel virtual
* address space as memory. Needed when the kernel wants to execute
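
[Illustration, not part of the patch: a driver could use the new ioremap_cache_ns() to map a device-owned memory region with cacheable, non-shareable attributes. res is an imaginary struct resource; the surrounding probe code is omitted.]

    void __iomem *va;

    /* Map the region described by the (hypothetical) resource. */
    va = ioremap_cache_ns(res->start, resource_size(res));
    if (!va)
        return -ENOMEM;
    /* ... access the region through va ... */
    iounmap(va);
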
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -313,6 +313,13 @@ static struct mem_type mem_types[] __ro_
.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
.domain = DOMAIN_KERNEL,
},
+ [MT_MEMORY_RW_NS] = {
+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+ L_PTE_XN,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
[MT_ROM] = {
.prot_sect = PMD_TYPE_SECT,
.domain = DOMAIN_KERNEL,
@@ -644,6 +651,7 @@ static void __init build_mem_type_table(
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;
+ mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
/*
* Set PXN for user mappings
@@ -672,6 +680,7 @@ static void __init build_mem_type_table(
mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
mem_types[MT_ROM].prot_sect |= cp->pmd;
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -18,7 +18,7 @@
#include <asm/cachetype.h>
-#define L1_CACHE_SHIFT 7
+#define L1_CACHE_SHIFT 6
#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
/*
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -171,6 +171,8 @@ extern void __iomem *ioremap_cache(phys_
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), \
+ __pgprot(PROT_NORMAL_NS))
#define iounmap __iounmap
/*
--- a/arch/arm64/include/asm/pci.h
+++ b/arch/arm64/include/asm/pci.h
@@ -31,6 +31,10 @@ static inline int pci_get_legacy_ide_irq
return -ENODEV;
}
+#define HAVE_PCI_MMAP
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state,
+ int write_combine);
static inline int pci_proc_domain(struct pci_bus *bus)
{
return 1;
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -42,6 +42,7 @@
#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -370,6 +370,11 @@ static inline int pmd_protnone(pmd_t pmd
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+#define pgprot_cached(prot) \
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
+ PTE_PXN | PTE_UXN)
+#define pgprot_cached_ns(prot) \
+ __pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED)
#define pgprot_device(prot) \
__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
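
[Illustration, not part of the patch: one plausible consumer of pgprot_cached() is a driver mmap() handler that exposes a device-owned buffer to user space with cacheable attributes. ex_mmap and ex_phys are invented names; the rest of the driver is omitted.]

    static int ex_mmap(struct file *filp, struct vm_area_struct *vma)
    {
        /* Map the (hypothetical) physical buffer cacheable into user space. */
        vma->vm_page_prot = pgprot_cached(vma->vm_page_prot);
        return remap_pfn_range(vma, vma->vm_start, ex_phys >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot);
    }
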
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -17,6 +17,8 @@
#include <linux/mm.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pcieport_if.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
@@ -53,6 +55,66 @@ int pcibios_alloc_irq(struct pci_dev *de
return 0;
}
+
+/*
+ * Check device tree if the service interrupts are there
+ */
+int pcibios_check_service_irqs(struct pci_dev *dev, int *irqs, int mask)
+{
+ int ret, count = 0;
+ struct device_node *np = NULL;
+
+ if (dev->bus->dev.of_node)
+ np = dev->bus->dev.of_node;
+
+ if (np == NULL)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_OF_IRQ))
+ return 0;
+
+ /* If root port doesn't support MSI/MSI-X/INTx in RC mode,
+ * request irq for aer
+ */
+ if (mask & PCIE_PORT_SERVICE_AER) {
+ ret = of_irq_get_byname(np, "aer");
+ if (ret > 0) {
+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret;
+ count++;
+ }
+ }
+
+ if (mask & PCIE_PORT_SERVICE_PME) {
+ ret = of_irq_get_byname(np, "pme");
+ if (ret > 0) {
+ irqs[PCIE_PORT_SERVICE_PME_SHIFT] = ret;
+ count++;
+ }
+ }
+
+ /* TODO: add more service interrupts if they are present in the device tree */
+
+ return count;
+}
+
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine)
+{
+ if (mmap_state == pci_mmap_io)
+ return -EINVAL;
+
+ /*
+ * Mark this as IO
+ */
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
/*
* raw_pci_read/write - Platform-specific PCI config space access.
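
[Illustration, not part of the patch: with HAVE_PCI_MMAP and pci_mmap_page_range() in place, user space can typically mmap() a BAR through the PCI resource files. A minimal user-space sketch follows; the device address 0000:01:00.0 is made up and error handling is omitted.]

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        /* Map BAR0 of a hypothetical device and read its first register. */
        int fd = open("/sys/bus/pci/devices/0000:01:00.0/resource0",
                      O_RDWR | O_SYNC);
        volatile uint32_t *bar = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                      MAP_SHARED, fd, 0);

        printf("BAR0[0] = 0x%08x\n", bar[0]);
        munmap((void *)bar, 4096);
        close(fd);
        return 0;
    }
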
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -30,6 +30,7 @@
#include <linux/swiotlb.h>
#include <asm/cacheflush.h>
+#include <../../../drivers/staging/fsl-mc/include/mc-bus.h>
static int swiotlb __ro_after_init;
@@ -925,6 +926,10 @@ static int __init __iommu_dma_init(void)
if (!ret)
ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
+#ifdef CONFIG_FSL_MC_BUS
+ if (!ret)
+ ret = register_iommu_dma_ops_notifier(&fsl_mc_bus_type);
+#endif
return ret;
}
arch_initcall(__iommu_dma_init);
@@ -978,3 +983,4 @@ void arch_setup_dma_ops(struct device *d
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}
+EXPORT_SYMBOL(arch_setup_dma_ops);