062-mcfv4e_cache_split.patch

From 940b4fea5ebfde3abe03c6469a57c01ee961497a Mon Sep 17 00:00:00 2001
From: Kurt Mahan <[email protected]>
Date: Wed, 18 Jun 2008 15:20:21 -0600
Subject: [PATCH] Split 547x/548x and 5445x cache routines into separate files.
LTIBName: mcfv4e-cache-split
Signed-off-by: Kurt Mahan <[email protected]>
---
 include/asm-m68k/cf_5445x_cacheflush.h |  447 ++++++++++++++++++++++++++++++++
 include/asm-m68k/cf_548x_cacheflush.h  |  259 ++++++++++++++++++
 include/asm-m68k/cf_cacheflush.h       |  244 +-----------------
 3 files changed, 711 insertions(+), 239 deletions(-)
 create mode 100644 include/asm-m68k/cf_5445x_cacheflush.h
 create mode 100644 include/asm-m68k/cf_548x_cacheflush.h
--- /dev/null
+++ b/include/asm-m68k/cf_5445x_cacheflush.h
@@ -0,0 +1,447 @@
+/*
+ * include/asm-m68k/cf_5445x_cacheflush.h - Coldfire 5445x Cache
+ *
+ * Based on include/asm-m68k/cacheflush.h
+ *
+ * Coldfire pieces by:
+ *   Kurt Mahan [email protected]
+ *
+ * Copyright Freescale Semiconductor, Inc. 2007, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef M68K_CF_5445x_CACHEFLUSH_H
+#define M68K_CF_5445x_CACHEFLUSH_H
+
+#include <asm/cfcache.h>
+
+/*
+ * Coldfire Cache Model
+ *
+ * The Coldfire processors use a Harvard architecture cache configured
+ * as four-way set associative. The cache does not implement bus snooping
+ * so cache coherency with other masters must be maintained in software.
+ *
+ * The cache is managed via the CPUSHL instruction in conjunction with
+ * bits set in the CACR (cache control register). Currently the code
+ * uses the CPUSHL enhancement which adds the ability to
+ * invalidate/clear/push a cacheline by physical address. This feature
+ * is designated in the Hardware Configuration Register [D1-CPES].
+ *
+ * CACR Bits:
+ *	DPI[28]		cpushl invalidate disable for d-cache
+ *	IDPI[12]	cpushl invalidate disable for i-cache
+ *	SPA[14]		cpushl search by physical address
+ *	IVO[20]		cpushl invalidate only
+ *
+ * Random Terminology:
+ *  * invalidate = reset the cache line's valid bit
+ *  * push = generate a line-sized store of the data if its contents are marked
+ *           as modified (the modified flag is cleared after the store)
+ *  * clear = push + invalidate
+ */
+
+/**
+ * flush_icache - Flush all of the instruction cache
+ */
+static inline void flush_icache(void)
+{
+	asm volatile("nop\n"
+		     "moveq%.l #0,%%d0\n"
+		     "moveq%.l #0,%%d1\n"
+		     "move%.l %%d0,%%a0\n"
+		     "1:\n"
+		     "cpushl %%ic,(%%a0)\n"
+		     "add%.l #0x0010,%%a0\n"
+		     "addq%.l #1,%%d1\n"
+		     "cmpi%.l %0,%%d1\n"
+		     "bne 1b\n"
+		     "moveq%.l #0,%%d1\n"
+		     "addq%.l #1,%%d0\n"
+		     "move%.l %%d0,%%a0\n"
+		     "cmpi%.l #4,%%d0\n"
+		     "bne 1b\n"
+		     : : "i" (CACHE_SETS)
+		     : "a0", "d0", "d1");
+}
+
+/**
+ * flush_dcache - Flush all of the data cache
+ */
+static inline void flush_dcache(void)
+{
+	asm volatile("nop\n"
+		     "moveq%.l #0,%%d0\n"
+		     "moveq%.l #0,%%d1\n"
+		     "move%.l %%d0,%%a0\n"
+		     "1:\n"
+		     "cpushl %%dc,(%%a0)\n"
+		     "add%.l #0x0010,%%a0\n"
+		     "addq%.l #1,%%d1\n"
+		     "cmpi%.l %0,%%d1\n"
+		     "bne 1b\n"
+		     "moveq%.l #0,%%d1\n"
+		     "addq%.l #1,%%d0\n"
+		     "move%.l %%d0,%%a0\n"
+		     "cmpi%.l #4,%%d0\n"
+		     "bne 1b\n"
+		     : : "i" (CACHE_SETS)
+		     : "a0", "d0", "d1");
+}
+
+/**
+ * flush_bcache - Flush all of both caches
+ */
+static inline void flush_bcache(void)
+{
+	asm volatile("nop\n"
+		     "moveq%.l #0,%%d0\n"
+		     "moveq%.l #0,%%d1\n"
+		     "move%.l %%d0,%%a0\n"
+		     "1:\n"
+		     "cpushl %%bc,(%%a0)\n"
+		     "add%.l #0x0010,%%a0\n"
+		     "addq%.l #1,%%d1\n"
+		     "cmpi%.l %0,%%d1\n"
+		     "bne 1b\n"
+		     "moveq%.l #0,%%d1\n"
+		     "addq%.l #1,%%d0\n"
+		     "move%.l %%d0,%%a0\n"
+		     "cmpi%.l #4,%%d0\n"
+		     "bne 1b\n"
+		     : : "i" (CACHE_SETS)
+		     : "a0", "d0", "d1");
+}
+
+/**
+ * cf_cache_clear - invalidate cache
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Invalidate cache lines starting at paddr for len bytes.
+ * Those lines are not pushed.
+ */
+static inline void cf_cache_clear(unsigned long paddr, int len)
+{
+	/* number of lines */
+	len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+	if (len == 0)
+		return;
+
+	/* align on set boundary */
+	paddr &= 0xfffffff0;
+
+	asm volatile("nop\n"
+		     "move%.l %2,%%d0\n"
+		     "or%.l %3,%%d0\n"
+		     "movec %%d0,%%cacr\n"
+		     "move%.l %0,%%a0\n"
+		     "move%.l %1,%%d0\n"
+		     "1:\n"
+		     "cpushl %%bc,(%%a0)\n"
+		     "lea 0x10(%%a0),%%a0\n"
+		     "subq%.l #1,%%d0\n"
+		     "bne%.b 1b\n"
+		     "movec %2,%%cacr\n"
+		     : : "a" (paddr), "r" (len),
+		         "r" (shadow_cacr),
+		         "i" (CF_CACR_SPA+CF_CACR_IVO)
+		     : "a0", "d0");
+}
+
+/**
+ * cf_cache_push - Push dirty cache out with no invalidate
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Push any dirty lines starting at paddr for len bytes.
+ * Those lines are not invalidated.
+ */
+static inline void cf_cache_push(unsigned long paddr, int len)
+{
+	/* number of lines */
+	len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+	if (len == 0)
+		return;
+
+	/* align on set boundary */
+	paddr &= 0xfffffff0;
+
+	asm volatile("nop\n"
+		     "move%.l %2,%%d0\n"
+		     "or%.l %3,%%d0\n"
+		     "movec %%d0,%%cacr\n"
+		     "move%.l %0,%%a0\n"
+		     "move%.l %1,%%d0\n"
+		     "1:\n"
+		     "cpushl %%bc,(%%a0)\n"
+		     "lea 0x10(%%a0),%%a0\n"
+		     "subq%.l #1,%%d0\n"
+		     "bne.b 1b\n"
+		     "movec %2,%%cacr\n"
+		     : : "a" (paddr), "r" (len),
+		         "r" (shadow_cacr),
+		         "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
+		     : "a0", "d0");
+}
+
+/**
+ * cf_cache_flush - Push dirty cache out and invalidate
+ * @paddr: starting physical address
+ * @len: number of bytes
+ *
+ * Push any dirty lines starting at paddr for len bytes and
+ * invalidate those lines.
+ */
+static inline void cf_cache_flush(unsigned long paddr, int len)
+{
+	/* number of lines */
+	len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
+	if (len == 0)
+		return;
+
+	/* align on set boundary */
+	paddr &= 0xfffffff0;
+
+	asm volatile("nop\n"
+		     "move%.l %2,%%d0\n"
+		     "or%.l %3,%%d0\n"
+		     "movec %%d0,%%cacr\n"
+		     "move%.l %0,%%a0\n"
+		     "move%.l %1,%%d0\n"
+		     "1:\n"
+		     "cpushl %%bc,(%%a0)\n"
+		     "lea 0x10(%%a0),%%a0\n"
+		     "subq%.l #1,%%d0\n"
+		     "bne.b 1b\n"
+		     "movec %2,%%cacr\n"
+		     : : "a" (paddr), "r" (len),
+		         "r" (shadow_cacr),
+		         "i" (CF_CACR_SPA)
+		     : "a0", "d0");
+}
+
+/**
+ * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty data/instruction lines in the given virtual address
+ * range and invalidate those lines.
+ */
+static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
+{
+	int len;
+
+	/* align on set boundary */
+	vstart &= 0xfffffff0;
+	vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
+	len = vend - vstart;
+	if (len == 0)
+		return;
+	vstart = __pa(vstart);
+	vend = vstart + len;
+
+	asm volatile("nop\n"
+		     "move%.l %2,%%d0\n"
+		     "or%.l %3,%%d0\n"
+		     "movec %%d0,%%cacr\n"
+		     "move%.l %0,%%a0\n"
+		     "move%.l %1,%%a1\n"
+		     "1:\n"
+		     "cpushl %%bc,(%%a0)\n"
+		     "lea 0x10(%%a0),%%a0\n"
+		     "cmpa%.l %%a0,%%a1\n"
+		     "bne.b 1b\n"
+		     "movec %2,%%cacr\n"
+		     : /* no return */
+		     : "a" (vstart), "a" (vend),
+		       "r" (shadow_cacr),
+		       "i" (CF_CACR_SPA)
+		     : "a0", "a1", "d0");
+}
+
+/**
+ * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty data lines in the given virtual address range and
+ * invalidate those lines.
+ */
+static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
+{
+	/* align on set boundary */
+	vstart &= 0xfffffff0;
+	vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
+
+	asm volatile("nop\n"
+		     "move%.l %2,%%d0\n"
+		     "or%.l %3,%%d0\n"
+		     "movec %%d0,%%cacr\n"
+		     "move%.l %0,%%a0\n"
+		     "move%.l %1,%%a1\n"
+		     "1:\n"
+		     "cpushl %%dc,(%%a0)\n"
+		     "lea 0x10(%%a0),%%a0\n"
+		     "cmpa%.l %%a0,%%a1\n"
+		     "bne.b 1b\n"
+		     "movec %2,%%cacr\n"
+		     : /* no return */
+		     : "a" (__pa(vstart)), "a" (__pa(vend)),
+		       "r" (shadow_cacr),
+		       "i" (CF_CACR_SPA)
+		     : "a0", "a1", "d0");
+}
+
+/**
+ * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
+ * @vstart: starting virtual address
+ * @vend: ending virtual address
+ *
+ * Push any dirty instruction lines in the given virtual address range and
+ * invalidate those lines. This should just be an invalidate, since the
+ * instruction cache should never hold dirty lines.
+ */
+static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
+{
+	/* align on set boundary */
+	vstart &= 0xfffffff0;
+	vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
+
+	asm volatile("nop\n"
+		     "move%.l %2,%%d0\n"
+		     "or%.l %3,%%d0\n"
+		     "movec %%d0,%%cacr\n"
+		     "move%.l %0,%%a0\n"
+		     "move%.l %1,%%a1\n"
+		     "1:\n"
+		     "cpushl %%ic,(%%a0)\n"
+		     "lea 0x10(%%a0),%%a0\n"
+		     "cmpa%.l %%a0,%%a1\n"
+		     "bne.b 1b\n"
+		     "movec %2,%%cacr\n"
+		     : /* no return */
+		     : "a" (__pa(vstart)), "a" (__pa(vend)),
+		       "r" (shadow_cacr),
+		       "i" (CF_CACR_SPA)
+		     : "a0", "a1", "d0");
+}
+
+/**
+ * flush_cache_mm - Flush an mm_struct
+ * @mm: mm_struct to flush
+ */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+	if (mm == current->mm)
+		flush_bcache();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+/**
+ * flush_cache_range - Flush a cache range
+ * @vma: vma struct
+ * @start: Starting address
+ * @end: Ending address
+ *
+ * flush_cache_range must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
+{
+	if (vma->vm_mm == current->mm)
+		cf_cache_flush_range(start, end);
+}
+
+/**
+ * flush_cache_page - Flush a page of the cache
+ * @vma: vma struct
+ * @vmaddr:
+ * @pfn: page number
+ *
+ * flush_cache_page must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr, unsigned long pfn)
+{
+	if (vma->vm_mm == current->mm)
+		cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
+}
+
+/**
+ * __flush_page_to_ram - Push a page out of the cache
+ * @vaddr: Virtual address at start of page
+ *
+ * Push the page at kernel virtual address *vaddr* and clear
+ * the icache.
+ */
+static inline void __flush_page_to_ram(void *vaddr)
+{
+	asm volatile("nop\n"
+		     "move%.l %2,%%d0\n"
+		     "or%.l %3,%%d0\n"
+		     "movec %%d0,%%cacr\n"
+		     "move%.l %0,%%d0\n"
+		     "and%.l #0xfffffff0,%%d0\n"
+		     "move%.l %%d0,%%a0\n"
+		     "move%.l %1,%%d0\n"
+		     "1:\n"
+		     "cpushl %%bc,(%%a0)\n"
+		     "lea 0x10(%%a0),%%a0\n"
+		     "subq%.l #1,%%d0\n"
+		     "bne.b 1b\n"
+		     "movec %2,%%cacr\n"
+		     : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
+		         "r" (shadow_cacr), "i" (CF_CACR_SPA)
+		     : "a0", "d0");
+}
+
+/*
+ * Various defines for the kernel.
+ */
+
+extern void cache_clear(unsigned long paddr, int len);
+extern void cache_push(unsigned long paddr, int len);
+extern void flush_icache_range(unsigned long address, unsigned long endaddr);
+
+#define flush_cache_all() flush_bcache()
+#define flush_cache_vmap(start, end) flush_bcache()
+#define flush_cache_vunmap(start, end) flush_bcache()
+
+#define flush_dcache_range(vstart, vend) cf_dcache_flush_range(vstart, vend)
+#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
+
+/**
+ * copy_to_user_page - Copy memory to user page
+ */
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+				     struct page *page, unsigned long vaddr,
+				     void *dst, void *src, int len)
+{
+	memcpy(dst, src, len);
+	cf_cache_flush(page_to_phys(page), PAGE_SIZE);
+}
+
+/**
+ * copy_from_user_page - Copy memory from user page
+ */
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+				       struct page *page, unsigned long vaddr,
+				       void *dst, void *src, int len)
+{
+	cf_cache_flush(page_to_phys(page), PAGE_SIZE);
+	memcpy(dst, src, len);
+}
+
+#endif /* M68K_CF_5445x_CACHEFLUSH_H */
--- /dev/null
+++ b/include/asm-m68k/cf_548x_cacheflush.h
@@ -0,0 +1,259 @@
+/*
+ * include/asm-m68k/cf_548x_cacheflush.h - Coldfire 547x/548x Cache
+ *
+ * Based on include/asm-m68k/cacheflush.h
+ *
+ * Coldfire pieces by:
+ *   Kurt Mahan [email protected]
+ *
+ * Copyright Freescale Semiconductor, Inc. 2007, 2008
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef M68K_CF_548x_CACHEFLUSH_H
+#define M68K_CF_548x_CACHEFLUSH_H
+
+#include <asm/cfcache.h>
+/*
+ * Cache handling functions
+ */
+
+#define flush_icache() \
+({ \
+	unsigned long set; \
+	unsigned long start_set; \
+	unsigned long end_set; \
+ \
+	start_set = 0; \
+	end_set = (unsigned long)LAST_DCACHE_ADDR; \
+ \
+	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
+		asm volatile("cpushl %%ic,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%ic,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%ic,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
+	} \
+})
+
+#define flush_dcache() \
+({ \
+	unsigned long set; \
+	unsigned long start_set; \
+	unsigned long end_set; \
+ \
+	start_set = 0; \
+	end_set = (unsigned long)LAST_DCACHE_ADDR; \
+ \
+	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
+		asm volatile("cpushl %%dc,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%dc,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%dc,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
+	} \
+})
+
+#define flush_bcache() \
+({ \
+	unsigned long set; \
+	unsigned long start_set; \
+	unsigned long end_set; \
+ \
+	start_set = 0; \
+	end_set = (unsigned long)LAST_DCACHE_ADDR; \
+ \
+	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
+		asm volatile("cpushl %%bc,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%bc,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%bc,(%0)\n" \
+			     "\taddq%.l #1,%0\n" \
+			     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
+	} \
+})
+
+
+/*
+ * invalidate the cache for the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_clear(unsigned long paddr, int len);
+/*
+ * push any dirty cache in the specified memory range.
+ * It starts at the physical address specified for
+ * the given number of bytes.
+ */
+extern void cache_push(unsigned long paddr, int len);
+
+/*
+ * push and invalidate pages in the specified user virtual
+ * memory range.
+ */
+extern void cache_push_v(unsigned long vaddr, int len);
+
+/* This is needed whenever the virtual mapping of the current
+   process changes. */
+
+/**
+ * flush_cache_mm - Flush an mm_struct
+ * @mm: mm_struct to flush
+ */
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+	if (mm == current->mm)
+		flush_bcache();
+}
+
+#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
+
+#define flush_cache_all() flush_bcache()
+
+/**
+ * flush_cache_range - Flush a cache range
+ * @vma: vma struct
+ * @start: Starting address
+ * @end: Ending address
+ *
+ * flush_cache_range must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start, unsigned long end)
+{
+	if (vma->vm_mm == current->mm)
+		flush_bcache();
+// cf_cache_flush_range(start, end);
+}
+
+/**
+ * flush_cache_page - Flush a page of the cache
+ * @vma: vma struct
+ * @vmaddr:
+ * @pfn: page number
+ *
+ * flush_cache_page must be a macro to avoid a dependency on
+ * linux/mm.h which includes this file.
+ */
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr, unsigned long pfn)
+{
+	if (vma->vm_mm == current->mm)
+		flush_bcache();
+// cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
+}
+
+/* Push the page at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
+extern inline void __flush_page_to_ram(void *address)
+{
+	unsigned long set;
+	unsigned long start_set;
+	unsigned long end_set;
+	unsigned long addr = (unsigned long) address;
+
+	addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
+
+	start_set = addr & _ICACHE_SET_MASK;
+	end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
+
+	if (start_set > end_set) {
+		/* from the beginning to the lowest address */
+		for (set = 0; set <= end_set; set += (0x10 - 3)) {
+			asm volatile("cpushl %%bc,(%0)\n"
+				     "\taddq%.l #1,%0\n"
+				     "\tcpushl %%bc,(%0)\n"
+				     "\taddq%.l #1,%0\n"
+				     "\tcpushl %%bc,(%0)\n"
+				     "\taddq%.l #1,%0\n"
+				     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+		}
+		/* the next loop will finish the cache, i.e. pass the hole */
+		end_set = LAST_ICACHE_ADDR;
+	}
+	for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+		asm volatile("cpushl %%bc,(%0)\n"
+			     "\taddq%.l #1,%0\n"
+			     "\tcpushl %%bc,(%0)\n"
+			     "\taddq%.l #1,%0\n"
+			     "\tcpushl %%bc,(%0)\n"
+			     "\taddq%.l #1,%0\n"
+			     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
+	}
+}
+
+/* Use __flush_page_to_ram() for flush_dcache_page; all values are the same - MW */
+#define flush_dcache_page(page) \
+	__flush_page_to_ram((void *) page_address(page))
+#define flush_icache_page(vma,pg) \
+	__flush_page_to_ram((void *) page_address(pg))
+#define flush_icache_user_range(adr,len) do { } while (0)
+/* NL */
+#define flush_icache_user_page(vma,page,addr,len) do { } while (0)
+
+/* Push n pages at kernel virtual address and clear the icache */
+/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
+extern inline void flush_icache_range(unsigned long address,
+				      unsigned long endaddr)
+{
+	unsigned long set;
+	unsigned long start_set;
+	unsigned long end_set;
+
+	start_set = address & _ICACHE_SET_MASK;
+	end_set = endaddr & _ICACHE_SET_MASK;
+
+	if (start_set > end_set) {
+		/* from the beginning to the lowest address */
+		for (set = 0; set <= end_set; set += (0x10 - 3)) {
+			asm volatile("cpushl %%ic,(%0)\n"
+				     "\taddq%.l #1,%0\n"
+				     "\tcpushl %%ic,(%0)\n"
+				     "\taddq%.l #1,%0\n"
+				     "\tcpushl %%ic,(%0)\n"
+				     "\taddq%.l #1,%0\n"
+				     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
+		}
+		/* the next loop will finish the cache, i.e. pass the hole */
+		end_set = LAST_ICACHE_ADDR;
+	}
+	for (set = start_set; set <= end_set; set += (0x10 - 3)) {
+		asm volatile("cpushl %%ic,(%0)\n"
+			     "\taddq%.l #1,%0\n"
+			     "\tcpushl %%ic,(%0)\n"
+			     "\taddq%.l #1,%0\n"
+			     "\tcpushl %%ic,(%0)\n"
+			     "\taddq%.l #1,%0\n"
+			     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
+	}
+}
+
+static inline void copy_to_user_page(struct vm_area_struct *vma,
+				     struct page *page, unsigned long vaddr,
+				     void *dst, void *src, int len)
+{
+	memcpy(dst, src, len);
+	flush_icache_user_page(vma, page, vaddr, len);
+}
+static inline void copy_from_user_page(struct vm_area_struct *vma,
+				       struct page *page, unsigned long vaddr,
+				       void *dst, void *src, int len)
+{
+	memcpy(dst, src, len);
+}
+
+#define flush_cache_vmap(start, end) flush_cache_all()
+#define flush_cache_vunmap(start, end) flush_cache_all()
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#endif /* M68K_CF_548x_CACHEFLUSH_H */
--- a/include/asm-m68k/cf_cacheflush.h
+++ b/include/asm-m68k/cf_cacheflush.h
@@ -1,244 +1,10 @@
 #ifndef M68K_CF_CACHEFLUSH_H
 #define M68K_CF_CACHEFLUSH_H
-#include <asm/cfcache.h>
-/*
- * Cache handling functions
- */
-
-#define flush_icache() \
-({ \
-	unsigned long set; \
-	unsigned long start_set; \
-	unsigned long end_set; \
- \
-	start_set = 0; \
-	end_set = (unsigned long)LAST_DCACHE_ADDR; \
- \
-	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
-		asm volatile("cpushl %%ic,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%ic,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%ic,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
-	} \
-})
-
-#define flush_dcache() \
-({ \
-	unsigned long set; \
-	unsigned long start_set; \
-	unsigned long end_set; \
- \
-	start_set = 0; \
-	end_set = (unsigned long)LAST_DCACHE_ADDR; \
- \
-	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
-		asm volatile("cpushl %%dc,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%dc,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%dc,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
-	} \
-})
-
-#define flush_bcache() \
-({ \
-	unsigned long set; \
-	unsigned long start_set; \
-	unsigned long end_set; \
- \
-	start_set = 0; \
-	end_set = (unsigned long)LAST_DCACHE_ADDR; \
- \
-	for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
-		asm volatile("cpushl %%bc,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%bc,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%bc,(%0)\n" \
-			     "\taddq%.l #1,%0\n" \
-			     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
-	} \
-})
-
-/*
- * invalidate the cache for the specified memory range.
- * It starts at the physical address specified for
- * the given number of bytes.
- */
-extern void cache_clear(unsigned long paddr, int len);
-/*
- * push any dirty cache in the specified memory range.
- * It starts at the physical address specified for
- * the given number of bytes.
- */
-extern void cache_push(unsigned long paddr, int len);
-
-/*
- * push and invalidate pages in the specified user virtual
- * memory range.
- */
-extern void cache_push_v(unsigned long vaddr, int len);
-
-/* This is needed whenever the virtual mapping of the current
-   process changes. */
-
-/**
- * flush_cache_mm - Flush an mm_struct
- * @mm: mm_struct to flush
- */
-static inline void flush_cache_mm(struct mm_struct *mm)
-{
-	if (mm == current->mm)
-		flush_bcache();
-}
-
-#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-
-#define flush_cache_all() flush_bcache()
-
-/**
- * flush_cache_range - Flush a cache range
- * @vma: vma struct
- * @start: Starting address
- * @end: Ending address
- *
- * flush_cache_range must be a macro to avoid a dependency on
- * linux/mm.h which includes this file.
- */
-static inline void flush_cache_range(struct vm_area_struct *vma,
-				     unsigned long start, unsigned long end)
-{
-	if (vma->vm_mm == current->mm)
-		flush_bcache();
-// cf_cache_flush_range(start, end);
-}
-
-/**
- * flush_cache_page - Flush a page of the cache
- * @vma: vma struct
- * @vmaddr:
- * @pfn: page numer
- *
- * flush_cache_page must be a macro to avoid a dependency on
- * linux/mm.h which includes this file.
- */
-static inline void flush_cache_page(struct vm_area_struct *vma,
-				    unsigned long vmaddr, unsigned long pfn)
-{
-	if (vma->vm_mm == current->mm)
-		flush_bcache();
-// cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
-}
-
-/* Push the page at kernel virtual address and clear the icache */
-/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
-extern inline void __flush_page_to_ram(void *address)
-{
-	unsigned long set;
-	unsigned long start_set;
-	unsigned long end_set;
-	unsigned long addr = (unsigned long) address;
-
-	addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
-
-	start_set = addr & _ICACHE_SET_MASK;
-	end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
-
-	if (start_set > end_set) {
-		/* from the begining to the lowest address */
-		for (set = 0; set <= end_set; set += (0x10 - 3)) {
-			asm volatile("cpushl %%bc,(%0)\n"
-				     "\taddq%.l #1,%0\n"
-				     "\tcpushl %%bc,(%0)\n"
-				     "\taddq%.l #1,%0\n"
-				     "\tcpushl %%bc,(%0)\n"
-				     "\taddq%.l #1,%0\n"
-				     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
-		}
-		/* next loop will finish the cache ie pass the hole */
-		end_set = LAST_ICACHE_ADDR;
-	}
-	for (set = start_set; set <= end_set; set += (0x10 - 3)) {
-		asm volatile("cpushl %%bc,(%0)\n"
-			     "\taddq%.l #1,%0\n"
-			     "\tcpushl %%bc,(%0)\n"
-			     "\taddq%.l #1,%0\n"
-			     "\tcpushl %%bc,(%0)\n"
-			     "\taddq%.l #1,%0\n"
-			     "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
-	}
-}
-
-/* Use __flush_page_to_ram() for flush_dcache_page all values are same - MW */
-#define flush_dcache_page(page) \
-	__flush_page_to_ram((void *) page_address(page))
-#define flush_icache_page(vma,pg) \
-	__flush_page_to_ram((void *) page_address(pg))
-#define flush_icache_user_range(adr,len) do { } while (0)
-/* NL */
-#define flush_icache_user_page(vma,page,addr,len) do { } while (0)
-
-/* Push n pages at kernel virtual address and clear the icache */
-/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
-extern inline void flush_icache_range (unsigned long address,
-				       unsigned long endaddr)
-{
-	unsigned long set;
-	unsigned long start_set;
-	unsigned long end_set;
-
-	start_set = address & _ICACHE_SET_MASK;
-	end_set = endaddr & _ICACHE_SET_MASK;
-
-	if (start_set > end_set) {
-		/* from the begining to the lowest address */
-		for (set = 0; set <= end_set; set += (0x10 - 3)) {
-			asm volatile("cpushl %%ic,(%0)\n"
-				     "\taddq%.l #1,%0\n"
-				     "\tcpushl %%ic,(%0)\n"
-				     "\taddq%.l #1,%0\n"
-				     "\tcpushl %%ic,(%0)\n"
-				     "\taddq%.l #1,%0\n"
-				     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
-		}
-		/* next loop will finish the cache ie pass the hole */
-		end_set = LAST_ICACHE_ADDR;
-	}
-	for (set = start_set; set <= end_set; set += (0x10 - 3)) {
-		asm volatile("cpushl %%ic,(%0)\n"
-			     "\taddq%.l #1,%0\n"
-			     "\tcpushl %%ic,(%0)\n"
-			     "\taddq%.l #1,%0\n"
-			     "\tcpushl %%ic,(%0)\n"
-			     "\taddq%.l #1,%0\n"
-			     "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
-	}
-}
-
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-				     struct page *page, unsigned long vaddr,
-				     void *dst, void *src, int len)
-{
-	memcpy(dst, src, len);
-	flush_icache_user_page(vma, page, vaddr, len);
-}
-static inline void copy_from_user_page(struct vm_area_struct *vma,
-				       struct page *page, unsigned long vaddr,
-				       void *dst, void *src, int len)
-{
-	memcpy(dst, src, len);
-}
-
-#define flush_cache_vmap(start, end) flush_cache_all()
-#define flush_cache_vunmap(start, end) flush_cache_all()
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#ifdef CONFIG_M5445X
+#include "cf_5445x_cacheflush.h"
+#else
+#include "cf_548x_cacheflush.h"
+#endif
 #endif /* M68K_CF_CACHEFLUSH_H */