301-arch-support-layerscape.patch

From 6eeff55fd4756f271ad09a914078c9aa45f8359d Mon Sep 17 00:00:00 2001
From: Biwen Li <[email protected]>
Date: Fri, 16 Nov 2018 14:23:40 +0800
Subject: [PATCH 04/39] arch: support layerscape

This is an integrated patch of the arch changes for Layerscape.

Signed-off-by: Abhimanyu Saini <[email protected]>
Signed-off-by: Alison Wang <[email protected]>
Signed-off-by: Amrita Kumari <[email protected]>
Signed-off-by: Chenhui Zhao <[email protected]>
Signed-off-by: Dave Liu <[email protected]>
Signed-off-by: Guanhua <[email protected]>
Signed-off-by: Haiying Wang <[email protected]>
Signed-off-by: Horia Geantă <[email protected]>
Signed-off-by: Jerry Huang <[email protected]>
Signed-off-by: Jianhua Xie <[email protected]>
Signed-off-by: Jin Qing <[email protected]>
Signed-off-by: Laurentiu Tudor <[email protected]>
Signed-off-by: Li Yang <[email protected]>
Signed-off-by: Madalin Bucur <[email protected]>
Signed-off-by: Pan Jiafei <[email protected]>
Signed-off-by: Poonam Aggrwal <[email protected]>
Signed-off-by: Rajesh Bhagat <[email protected]>
Signed-off-by: Ramneek Mehresh <[email protected]>
Signed-off-by: Ran Wang <[email protected]>
Signed-off-by: Roy Pledge <[email protected]>
Signed-off-by: Shengzhou Liu <[email protected]>
Signed-off-by: Tang Yuantian <[email protected]>
Signed-off-by: Wang Dongsheng <[email protected]>
Signed-off-by: Xie Xiaobo <[email protected]>
Signed-off-by: Yangbo Lu <[email protected]>
Signed-off-by: Zhao Chenhui <[email protected]>
Signed-off-by: Zhao Qiang <[email protected]>
Signed-off-by: Biwen Li <[email protected]>
---
 arch/arm/include/asm/delay.h          | 16 ++++++++++++++
 arch/arm/include/asm/io.h             | 31 +++++++++++++++++++++++++++
 arch/arm/include/asm/mach/map.h       |  4 ++--
 arch/arm/include/asm/pgtable.h        |  7 ++++++
 arch/arm/kernel/time.c                |  3 +++
 arch/arm/mm/dma-mapping.c             |  1 +
 arch/arm/mm/ioremap.c                 |  7 ++++++
 arch/arm/mm/mmu.c                     |  9 ++++++++
 arch/arm64/include/asm/cache.h        |  2 +-
 arch/arm64/include/asm/io.h           |  1 +
 arch/arm64/include/asm/pgtable-prot.h |  3 +++
 arch/arm64/include/asm/pgtable.h      |  5 +++++
 arch/arm64/mm/dma-mapping.c           |  1 +
 arch/arm64/mm/init.c                  | 12 +++++++----
 14 files changed, 95 insertions(+), 7 deletions(-)

--- a/arch/arm/include/asm/delay.h
+++ b/arch/arm/include/asm/delay.h
@@ -85,6 +85,22 @@ extern void __bad_udelay(void);
 			__const_udelay((n) * UDELAY_MULT)) :	\
 	  __udelay(n))
 
+#define spin_event_timeout(condition, timeout, delay)		\
+({								\
+	typeof(condition) __ret;				\
+	int i = 0;						\
+	while (!(__ret = (condition)) && (i++ < timeout)) {	\
+		if (delay)					\
+			udelay(delay);				\
+		else						\
+			cpu_relax();				\
+		udelay(1);					\
+	}							\
+	if (!__ret)						\
+		__ret = (condition);				\
+	__ret;							\
+})
+
 /* Loop-based definitions for assembly code. */
 extern void __loop_delay(unsigned long loops);
 extern void __loop_udelay(unsigned long usecs);
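
The spin_event_timeout() helper added above polls a condition for a bounded
number of iterations, optionally sleeping between retries, and evaluates to
the final value of the condition (nonzero on success). As a quick
illustration (not from the patch: the status register, DEV_STATUS_READY bit,
and wait_for_device() are invented here), a driver could poll a ready flag
like so:

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    #define DEV_STATUS_READY 0x1	/* invented bit for the example */

    /* wait up to ~1000 iterations, sleeping 10 us between polls */
    static int wait_for_device(void __iomem *status_reg)
    {
    	if (spin_event_timeout(ioread32be(status_reg) & DEV_STATUS_READY,
    			       1000, 10))
    		return 0;
    	return -ETIMEDOUT;
    }
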
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -128,6 +128,7 @@ static inline u32 __raw_readl(const vola
 #define MT_DEVICE_NONSHARED	1
 #define MT_DEVICE_CACHED	2
 #define MT_DEVICE_WC		3
+#define MT_MEMORY_RW_NS		4
 /*
  * types 4 onwards can be found in asm/mach/map.h and are undefined
  * for ioremap
@@ -229,6 +230,34 @@ void __iomem *pci_remap_cfgspace(resourc
 #endif
 #endif
 
+/* access ports */
+#define setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
+#define clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+
+#define setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
+#define clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+
+#define setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
+#define clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+
+/* Clear and set bits in one shot. These macros can be used to clear and
+ * set multiple bits in a register using a single read-modify-write. These
+ * macros can also be used to set a multiple-bit bit pattern using a mask,
+ * by specifying the mask in the 'clear' parameter and the new bit pattern
+ * in the 'set' parameter.
+ */
+
+#define clrsetbits_be32(addr, clear, set) \
+	iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le32(addr, clear, set) \
+	iowrite32le((ioread32le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_be16(addr, clear, set) \
+	iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_le16(addr, clear, set) \
+	iowrite16le((ioread16le(addr) & ~(clear)) | (set), (addr))
+#define clrsetbits_8(addr, clear, set) \
+	iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+
 /*
  * IO port access primitives
  * -------------------------
@@ -417,6 +446,8 @@ void __iomem *ioremap_wc(resource_size_t
 #define ioremap_wc ioremap_wc
 #define ioremap_wt ioremap_wc
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size);
+
 void iounmap(volatile void __iomem *iomem_cookie);
 #define iounmap iounmap
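
The clrsetbits_*() macros above fold a field update into a single
read-modify-write, exactly as the comment in the hunk describes. A minimal
sketch of that pattern, with an invented two-bit mode field in a big-endian
control register (CTRL_MODE_* and set_fast_mode() are not from the patch):

    #include <linux/io.h>

    #define CTRL_MODE_MASK	0x30	/* invented field mask */
    #define CTRL_MODE_FAST	0x20	/* invented bit pattern */

    static void set_fast_mode(void __iomem *ctrl)
    {
    	/* clear the whole field, then set the new pattern, in one
    	 * read-modify-write
    	 */
    	clrsetbits_be32(ctrl, CTRL_MODE_MASK, CTRL_MODE_FAST);
    }
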
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -21,9 +21,9 @@ struct map_desc {
 	unsigned int type;
 };
 
-/* types 0-3 are defined in asm/io.h */
+/* types 0-4 are defined in asm/io.h */
 enum {
-	MT_UNCACHED = 4,
+	MT_UNCACHED = 5,
 	MT_CACHECLEAN,
 	MT_MINICLEAN,
 	MT_LOW_VECTORS,
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -119,6 +119,13 @@ extern pgprot_t pgprot_s2_device;
 #define pgprot_noncached(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
 
+#define pgprot_cached(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED)
+
+#define pgprot_cached_ns(prot) \
+	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_CACHED | \
+			L_PTE_MT_DEV_NONSHARED)
+
 #define pgprot_writecombine(prot) \
 	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -12,6 +12,7 @@
  *  reading the RTC at bootup, etc...
  */
 #include <linux/clk-provider.h>
+#include <linux/clockchips.h>
 #include <linux/clocksource.h>
 #include <linux/errno.h>
 #include <linux/export.h>
@@ -121,5 +122,7 @@ void __init time_init(void)
 		of_clk_init(NULL);
 #endif
 		timer_probe();
+
+		tick_setup_hrtimer_broadcast();
 	}
 }
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2416,6 +2416,7 @@ void arch_setup_dma_ops(struct device *d
 #endif
 	dev->archdata.dma_ops_setup = true;
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
 
 void arch_teardown_dma_ops(struct device *dev)
 {
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -398,6 +398,13 @@ void __iomem *ioremap_wc(resource_size_t
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+void __iomem *ioremap_cache_ns(resource_size_t res_cookie, size_t size)
+{
+	return arch_ioremap_caller(res_cookie, size, MT_MEMORY_RW_NS,
+				   __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache_ns);
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space as memory. Needed when the kernel wants to execute
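
The exported ioremap_cache_ns() gives callers a cacheable, non-shareable
kernel mapping backed by the new MT_MEMORY_RW_NS type. A hedged usage sketch
(the physical window and map_shared_sram() below are placeholders, not
values from the patch):

    #include <linux/io.h>
    #include <linux/sizes.h>

    #define SHARED_SRAM_BASE	0x83000000	/* placeholder address */
    #define SHARED_SRAM_SIZE	SZ_64K		/* placeholder size */

    static void __iomem *map_shared_sram(void)
    {
    	/* returns NULL on failure, like the other ioremap_*() helpers */
    	return ioremap_cache_ns(SHARED_SRAM_BASE, SHARED_SRAM_SIZE);
    }
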
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -315,6 +315,13 @@ static struct mem_type mem_types[] __ro_
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RW_NS] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
+			     L_PTE_XN,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
+		.domain    = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
 		.prot_sect = PMD_TYPE_SECT,
 		.domain    = DOMAIN_KERNEL,
@@ -651,6 +658,7 @@ static void __init build_mem_type_table(
 		}
 		kern_pgprot |= PTE_EXT_AF;
 		vecs_pgprot |= PTE_EXT_AF;
+		mem_types[MT_MEMORY_RW_NS].prot_pte |= PTE_EXT_AF | cp->pte;
 
 		/*
 		 * Set PXN for user mappings
@@ -679,6 +687,7 @@ static void __init build_mem_type_table(
 	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW_NS].prot_sect |= ecc_mask | cp->pmd;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -34,7 +34,7 @@
 #define ICACHE_POLICY_VIPT	2
 #define ICACHE_POLICY_PIPT	3
 
-#define L1_CACHE_SHIFT		7
+#define L1_CACHE_SHIFT		6
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
 /*
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -186,6 +186,7 @@ extern void __iomem *ioremap_cache(phys_
 #define ioremap_nocache(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
 #define ioremap_wt(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
+#define ioremap_cache_ns(addr, size)	__ioremap((addr), (size), __pgprot(PROT_NORMAL_NS))
 #define iounmap				__iounmap
 
 /*
--- a/arch/arm64/include/asm/pgtable-prot.h
+++ b/arch/arm64/include/asm/pgtable-prot.h
@@ -48,6 +48,8 @@
 #define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
 #define PROT_NORMAL_WT		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
 #define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_NORMAL_NS		(PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+
 #define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -68,6 +70,7 @@
 #define PAGE_HYP_DEVICE		__pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
 
 #define PAGE_S2			__pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_NS		__pgprot(PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDWR | PTE_TYPE_PAGE | PTE_AF)
 #define PAGE_S2_DEVICE		__pgprot(_PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
 
 #define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -377,6 +377,11 @@ static inline int pmd_protnone(pmd_t pmd
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
 #define pgprot_writecombine(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
+#define pgprot_cached(prot) \
+	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \
+			PTE_PXN | PTE_UXN)
+#define pgprot_cached_ns(prot) \
+	__pgprot(pgprot_val(pgprot_cached(prot)) ^ PTE_SHARED)
 #define pgprot_device(prot) \
 	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
 #define __HAVE_PHYS_MEM_ACCESS_PROT
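
Note that pgprot_cached_ns() derives its non-shareable variant by toggling
PTE_SHARED out of the cached protection; since arm64 page protections
normally carry PTE_SHARED, the XOR clears that bit. A minimal sketch of how
a driver mmap handler might apply it (demo_mmap() and its file are
hypothetical, not part of the patch):

    #include <linux/fs.h>
    #include <linux/mm.h>

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	/* remap the requested pfn range as cached, non-shareable */
    	vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot);
    	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
    			       vma->vm_end - vma->vm_start,
    			       vma->vm_page_prot);
    }
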
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -937,3 +937,4 @@ void arch_setup_dma_ops(struct device *d
 	}
 #endif
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -457,6 +457,14 @@ void __init arm64_memblock_init(void)
 	 * Register the kernel text, kernel data, initrd, and initial
 	 * pagetables with memblock.
 	 */
+
+	/* make this the first reservation so that there are no chances of
+	 * overlap
+	 */
+	reserve_elfcorehdr();
+
+	reserve_crashkernel();
+
 	memblock_reserve(__pa_symbol(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (initrd_start) {
@@ -476,10 +484,6 @@ void __init arm64_memblock_init(void)
 	else
 		arm64_dma_phys_limit = PHYS_MASK + 1;
 
-	reserve_crashkernel();
-
-	reserve_elfcorehdr();
-
 	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
 
 	dma_contiguous_reserve(arm64_dma_phys_limit);