150-cpu_fixes.patch
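
MIPS: BCM47xx: cache and TLB workarounds for the BCM4710 core.

Adds the BCM4710 cache workarounds for the BCM47xx target; they are
enabled at runtime only for rev A0 parts (see r4k_cache_init() below).

- r4kcache.h: BCM4710_DUMMY_RREG() performs a dummy uncached read (via
  KSEG1) of the SSB IM state register before D-cache hit/index
  operations; BCM4710_FILL_TLB() / BCM4710_PROTECTED_FILL_TLB() touch
  the target address first, apparently to ensure the TLB entry is
  present before the cache op. Also adds open-coded
  blast_dcache{,_page,_page_indexed}() helpers that issue the dummy
  read before every cache line operation.
- stackframe.h, genex.S: pad the eret path and the generic exception
  vector with two nops.
- c-r4k.c: introduce the bcm4710 flag, detect BCM4710 rev A0 and hook
  up the safe blast_dcache*() helpers; enable the I- and D-cache on
  BCM3302 through the diagnostic register; on BCM47xx, call
  coherency_setup() through its uncached KSEG1 alias.
- tlbex.c: prepend a nop to the generated TLB refill and tlbchange
  handlers.

With the workarounds active, each affected cache operation is preceded
by the uncached dummy read; flush_dcache_line(), for instance, expands
to roughly:

	__dflush_prologue
	BCM4710_DUMMY_RREG();	/* uncached read from SSB_ENUM_BASE + SSB_IMSTATE */
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue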

--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -17,6 +17,20 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsmtregs.h>
 
+#ifdef CONFIG_BCM47XX
+#include <asm/paccess.h>
+#include <linux/ssb/ssb.h>
+#define BCM4710_DUMMY_RREG() ((void) *((u8 *) KSEG1ADDR(SSB_ENUM_BASE + SSB_IMSTATE)))
+
+#define BCM4710_FILL_TLB(addr) (*(volatile unsigned long *)(addr))
+#define BCM4710_PROTECTED_FILL_TLB(addr) ({ unsigned long x; get_dbe(x, (volatile unsigned long *)(addr)); })
+#else
+#define BCM4710_DUMMY_RREG()
+
+#define BCM4710_FILL_TLB(addr)
+#define BCM4710_PROTECTED_FILL_TLB(addr)
+#endif
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations. Two issues here:
@@ -150,6 +164,7 @@ static inline void flush_icache_line_ind
 static inline void flush_dcache_line_indexed(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Index_Writeback_Inv_D, addr);
 	__dflush_epilogue
 }
@@ -169,6 +184,7 @@ static inline void flush_icache_line(uns
 static inline void flush_dcache_line(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Writeback_Inv_D, addr);
 	__dflush_epilogue
 }
@@ -176,6 +192,7 @@ static inline void flush_dcache_line(uns
 static inline void invalidate_dcache_line(unsigned long addr)
 {
 	__dflush_prologue
+	BCM4710_DUMMY_RREG();
 	cache_op(Hit_Invalidate_D, addr);
 	__dflush_epilogue
 }
@@ -208,6 +225,7 @@ static inline void flush_scache_line(uns
  */
 static inline void protected_flush_icache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	protected_cache_op(Hit_Invalidate_I, addr);
 }
 
@@ -219,6 +237,7 @@ static inline void protected_flush_icach
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
+	BCM4710_DUMMY_RREG();
 	protected_cache_op(Hit_Writeback_Inv_D, addr);
 }
 
@@ -339,8 +358,52 @@ static inline void invalidate_tcache_pag
 		: "r" (base),					\
 		  "i" (op));
 
+static inline void blast_dcache(void)
+{
+	unsigned long start = KSEG0;
+	unsigned long dcache_size = current_cpu_data.dcache.waysize * current_cpu_data.dcache.ways;
+	unsigned long end = (start + dcache_size);
+
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Index_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+
+	BCM4710_FILL_TLB(start);
+	do {
+		BCM4710_DUMMY_RREG();
+		cache_op(Hit_Writeback_Inv_D, start);
+		start += current_cpu_data.dcache.linesz;
+	} while (start < end);
+}
+
+static inline void blast_dcache_page_indexed(unsigned long page)
+{
+	unsigned long start = page;
+	unsigned long end = start + PAGE_SIZE;
+	unsigned long ws_inc = 1UL << current_cpu_data.dcache.waybit;
+	unsigned long ws_end = current_cpu_data.dcache.ways <<
+	                       current_cpu_data.dcache.waybit;
+	unsigned long ws, addr;
+	for (ws = 0; ws < ws_end; ws += ws_inc) {
+		start = page + ws;
+		for (addr = start; addr < end; addr += current_cpu_data.dcache.linesz) {
+			BCM4710_DUMMY_RREG();
+			cache_op(Index_Writeback_Inv_D, addr);
+		}
+	}
+}
+
+
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, war) \
 static inline void blast_##pfx##cache##lsize(void) \
 { \
 	unsigned long start = INDEX_BASE; \
@@ -352,6 +415,7 @@ static inline void blast_##pfx##cache##l
 	\
 	__##pfx##flush_prologue \
 	\
+	war \
 	for (ws = 0; ws < ws_end; ws += ws_inc) \
 		for (addr = start; addr < end; addr += lsize * 32) \
 			cache##lsize##_unroll32(addr|ws, indexop); \
@@ -366,6 +430,7 @@ static inline void blast_##pfx##cache##l
 	\
 	__##pfx##flush_prologue \
 	\
+	war \
 	do { \
 		cache##lsize##_unroll32(start, hitop); \
 		start += lsize * 32; \
@@ -384,6 +449,8 @@ static inline void blast_##pfx##cache##l
 			current_cpu_data.desc.waybit; \
 	unsigned long ws, addr; \
 	\
+	war \
+	\
 	__##pfx##flush_prologue \
 	\
 	for (ws = 0; ws < ws_end; ws += ws_inc) \
@@ -393,35 +460,37 @@ static inline void blast_##pfx##cache##l
 	__##pfx##flush_epilogue \
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, BCM4710_FILL_TLB(start);)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
 
 /* build blast_xxx_range, protected_blast_xxx_range */
-#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
+#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, war, war2) \
 static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
 						    unsigned long end) \
 { \
 	unsigned long lsize = cpu_##desc##_line_size(); \
 	unsigned long addr = start & ~(lsize - 1); \
 	unsigned long aend = (end - 1) & ~(lsize - 1); \
+	war \
 	\
 	__##pfx##flush_prologue \
 	\
 	while (1) { \
+		war2 \
 		prot##cache_op(hitop, addr); \
 		if (addr == aend) \
 			break; \
@@ -431,13 +500,13 @@ static inline void prot##blast_##pfx##ca
 	__##pfx##flush_epilogue \
 }
 
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
-__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, BCM4710_PROTECTED_FILL_TLB(addr); BCM4710_PROTECTED_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_,, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_,, )
+__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D,, BCM4710_FILL_TLB(addr); BCM4710_FILL_TLB(aend);, BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD,,, )
 /* blast_inv_dcache_range */
-__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
-__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, )
+__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D,,,BCM4710_DUMMY_RREG();)
+__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD,,, )
 
 #endif /* _ASM_R4KCACHE_H */
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -409,6 +409,10 @@
 	.macro RESTORE_SP_AND_RET
 	LONG_L sp, PT_R29(sp)
 	.set mips3
+#ifdef CONFIG_BCM47XX
+	nop
+	nop
+#endif
 	eret
 	.set mips0
 	.endm
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -52,6 +52,10 @@ NESTED(except_vec1_generic, 0, sp)
 NESTED(except_vec3_generic, 0, sp)
 	.set push
 	.set noat
+#ifdef CONFIG_BCM47XX
+	nop
+	nop
+#endif
 #if R5432_CP0_INTERRUPT_WAR
 	mfc0 k0, CP0_INDEX
 #endif
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -34,6 +34,9 @@
 #include <asm/cacheflush.h> /* for run_uncached() */
 
+/* For enabling BCM4710 cache workarounds */
+int bcm4710 = 0;
+
 /*
  * Special Variant of smp_call_function for use by cache functions:
  *
@@ -104,6 +107,9 @@ static void __cpuinit r4k_blast_dcache_p
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_page = blast_dcache_page;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_page = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -118,6 +124,9 @@ static void __cpuinit r4k_blast_dcache_p
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache_page_indexed = blast_dcache_page_indexed;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache_page_indexed = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -132,6 +141,9 @@ static void __cpuinit r4k_blast_dcache_s
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
 
+	if (bcm4710)
+		r4k_blast_dcache = blast_dcache;
+	else
 	if (dc_lsize == 0)
 		r4k_blast_dcache = (void *)cache_noop;
 	else if (dc_lsize == 16)
@@ -647,6 +659,8 @@ static void local_r4k_flush_cache_sigtra
 	unsigned long addr = (unsigned long) arg;
 
 	R4600_HIT_CACHEOP_WAR_IMPL;
+	BCM4710_PROTECTED_FILL_TLB(addr);
+	BCM4710_PROTECTED_FILL_TLB(addr + 4);
 	if (dc_lsize)
 		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
 	if (!cpu_icache_snoops_remote_store && scache_size)
@@ -1271,6 +1285,17 @@ static void __cpuinit coherency_setup(vo
 	 * silly idea of putting something else there ...
 	 */
 	switch (current_cpu_type()) {
+	case CPU_BCM3302:
+		{
+			u32 cm;
+			cm = read_c0_diag();
+			/* Enable icache */
+			cm |= (1 << 31);
+			/* Enable dcache */
+			cm |= (1 << 30);
+			write_c0_diag(cm);
+		}
+		break;
 	case CPU_R4000PC:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
@@ -1328,6 +1353,15 @@ void __cpuinit r4k_cache_init(void)
 		break;
 	}
 
+	/* Check if special workarounds are required */
+#ifdef CONFIG_BCM47XX
+	if (current_cpu_data.cputype == CPU_BCM4710 && (current_cpu_data.processor_id & 0xff) == 0) {
+		printk("Enabling BCM4710A0 cache workarounds.\n");
+		bcm4710 = 1;
+	} else
+#endif
+	bcm4710 = 0;
+
 	probe_pcache();
 	setup_scache();
 
@@ -1386,5 +1420,13 @@ void __cpuinit r4k_cache_init(void)
 #if !defined(CONFIG_MIPS_CMP)
 	local_r4k___flush_cache_all(NULL);
 #endif
+#ifdef CONFIG_BCM47XX
+	{
+		static void (*_coherency_setup)(void);
+		_coherency_setup = (void (*)(void)) KSEG1ADDR(coherency_setup);
+		_coherency_setup();
+	}
+#else
 	coherency_setup();
+#endif
 }
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -678,6 +678,9 @@ static void __cpuinit build_r4000_tlb_re
 		/* No need for uasm_i_nop */
 	}
 
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(&p);
+#endif
 #ifdef CONFIG_64BIT
 	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
 #else
@@ -1085,6 +1088,9 @@ build_r4000_tlbchange_handler_head(u32 *
 				   struct uasm_reloc **r, unsigned int pte,
 				   unsigned int ptr)
 {
+#ifdef CONFIG_BCM47XX
+	uasm_i_nop(p);
+#endif
 #ifdef CONFIG_64BIT
 	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
 #else