| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194 |
- From 3592cd3db82e5b010df590079f1e310b5d317248 Mon Sep 17 00:00:00 2001
- From: Kurt Mahan <[email protected]>
- Date: Mon, 3 Dec 2007 23:03:07 -0700
- Subject: [PATCH] Rewrite Coldfire cache code.
- LTIBName: mcfv4e-cache-base-update
- Signed-off-by: Kurt Mahan <[email protected]>
- ---
- arch/m68k/coldfire/cache.c | 196 +-------------
- arch/m68k/coldfire/head.S | 6 +-
- arch/m68k/coldfire/signal.c | 4 +-
- arch/m68k/kernel/sys_m68k.c | 16 ++
- arch/m68k/mm/cache.c | 31 +---
- arch/m68k/mm/memory.c | 76 +-----
- include/asm-m68k/cf_cacheflush.h | 525 +++++++++++++++++++++++++++++---------
- include/asm-m68k/cfcache.h | 95 ++++----
- 8 files changed, 495 insertions(+), 454 deletions(-)
- --- a/arch/m68k/coldfire/cache.c
- +++ b/arch/m68k/coldfire/cache.c
- @@ -1,7 +1,8 @@
- /*
- - * linux/arch/m68k/coldifre/cache.c
- + * linux/arch/m68k/coldfire/cache.c
- *
- * Matt Waddel [email protected]
- + * Kurt Mahan [email protected]
- * Copyright Freescale Semiconductor, Inc. 2007
- *
- * This program is free software; you can redistribute it and/or modify
- @@ -15,191 +16,13 @@
- #include <asm/coldfire.h>
- #include <asm/system.h>
-
- -#define _DCACHE_SIZE (2*16384)
- -#define _ICACHE_SIZE (2*16384)
- -
- -#define _SET_SHIFT 4
- -
- -/*
- - * Masks for cache sizes. Programming note: because the set size is a
- - * power of two, the mask is also the last address in the set.
- - */
- -
- -#define _DCACHE_SET_MASK ((_DCACHE_SIZE/64-1)<<_SET_SHIFT)
- -#define _ICACHE_SET_MASK ((_ICACHE_SIZE/64-1)<<_SET_SHIFT)
- -#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
- -#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
- -
- -/************************************************************
- - * Routine to cleanly flush the cache, pushing all lines and
- - * invalidating them.
- - *
- - * The is the flash-resident version, used after copying the .text
- - * segment from flash to ram.
- - *************************************************************/
- -void FLASHDcacheFlushInvalidate(void)
- - __attribute__ ((section (".text_loader")));
- -
- -void FLASHDcacheFlushInvalidate()
- -{
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - start_set = 0;
- - end_set = (unsigned long)LAST_DCACHE_ADDR;
- -
- - for (set = start_set; set < end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)" : : "a" (set));
- -}
- -
- -/************************************************************
- - * Routine to cleanly flush the cache, pushing all lines and
- - * invalidating them.
- - *
- - *************************************************************/
- -void DcacheFlushInvalidate()
- -{
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - start_set = 0;
- - end_set = (unsigned long)LAST_DCACHE_ADDR;
- -
- - for (set = start_set; set < end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)" : : "a" (set));
- -}
- -
- -
- -
- -/******************************************************************************
- - * Routine to cleanly flush the a block of cache, pushing all relevant lines
- - * and invalidating them.
- - *
- - ******************************************************************************/
- -void DcacheFlushInvalidateCacheBlock(void *start, unsigned long size)
- -{
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - /* if size is bigger than the cache can store
- - * set the size to the maximum amount
- - */
- -
- - if (size > LAST_DCACHE_ADDR)
- - size = LAST_DCACHE_ADDR;
- -
- - start_set = ((unsigned long)start) & _DCACHE_SET_MASK;
- - end_set = ((unsigned long)(start+size-1)) & _DCACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)" : : "a" (set));
- -
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_DCACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%dc,(%0)" : : "a" (set));
- -}
- -
- -
- -void IcacheInvalidateCacheBlock(void *start, unsigned long size)
- -{
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - /* if size is bigger than the cache can store
- - * set the size to the maximum ammount
- - */
- -
- - if (size > LAST_ICACHE_ADDR)
- - size = LAST_ICACHE_ADDR;
- -
- - start_set = ((unsigned long)start) & _ICACHE_SET_MASK;
- - end_set = ((unsigned long)(start+size-1)) & _ICACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)" : : "a" (set));
- -
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_ICACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)" : : "a" (set));
- -}
- -
- -
- -/********************************************************************
- - * Disable the data cache completely
- - ********************************************************************/
- -void DcacheDisable(void)
- -{
- - int newValue;
- - unsigned long flags;
- -
- - local_save_flags(flags);
- - local_irq_disable();
- -
- - DcacheFlushInvalidate(); /* begin by flushing the cache */
- - newValue = CACHE_DISABLE_MODE; /* disable it */
- - cacr_set(newValue);
- - local_irq_restore(flags);
- -}
- -
- -/********************************************************************
- - * Unconditionally enable the data cache
- - ********************************************************************/
- -void DcacheEnable(void)
- -{
- - cacr_set(CACHE_INITIAL_MODE);
- -}
- -
- -
- +/* Cache Control Reg shadow reg */
- unsigned long shadow_cacr;
-
- +/**
- + * cacr_set - Set the Cache Control Register
- + * @x Value to set
- + */
- void cacr_set(unsigned long x)
- {
- shadow_cacr = x;
- @@ -209,6 +32,11 @@ void cacr_set(unsigned long x)
- : "r" (shadow_cacr));
- }
-
- +/**
- + * cacr_get - Get the current value of the Cache Control Register
- + *
- + * @return CACR value
- + */
- unsigned long cacr_get(void)
- {
- return shadow_cacr;
- --- a/arch/m68k/coldfire/head.S
- +++ b/arch/m68k/coldfire/head.S
- @@ -244,7 +244,7 @@ ENTRY(__start)
- /* Setup initial stack pointer */
- movel #0x40001000,%sp
-
- -/* Clear usp */
- +/* Setup usp */
- subl %a0,%a0
- movel %a0,%usp
-
- @@ -252,6 +252,10 @@ ENTRY(__start)
- movec %d0, %rambar1
- movew #0x2700,%sr
-
- +/* reset cache */
- + movel #(CF_CACR_ICINVA + CF_CACR_DCINVA),%d0
- + movecl %d0,%cacr
- +
- movel #(MMU_BASE+1),%d0
- movecl %d0,%mmubar
- movel #MMUOR_CA,%a0 /* Clear tlb entries */
- --- a/arch/m68k/coldfire/signal.c
- +++ b/arch/m68k/coldfire/signal.c
- @@ -37,6 +37,7 @@
- #include <asm/cf_pgtable.h>
- #include <asm/traps.h>
- #include <asm/ucontext.h>
- +#include <asm/cacheflush.h>
-
- #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
-
- @@ -605,10 +606,9 @@ static inline int rt_setup_ucontext(stru
- return err;
- }
-
- -extern void IcacheInvalidateCacheBlock(void *, unsigned long);
- static inline void push_cache(unsigned long vaddr)
- {
- - IcacheInvalidateCacheBlock((void *)vaddr, 8);
- + cf_cache_push(__pa(vaddr), 8);
- }
-
- static inline void __user *
- --- a/arch/m68k/kernel/sys_m68k.c
- +++ b/arch/m68k/kernel/sys_m68k.c
- @@ -29,6 +29,9 @@
- #include <asm/traps.h>
- #include <asm/page.h>
- #include <asm/unistd.h>
- +#ifdef CONFIG_COLDFIRE
- +#include <asm/cacheflush.h>
- +#endif
-
- /*
- * sys_pipe() is the normal C calling standard for creating
- @@ -257,6 +260,7 @@ asmlinkage int sys_ipc (uint call, int f
- return -EINVAL;
- }
-
- +#ifndef CONFIG_COLDFIRE
- /* Convert virtual (user) address VADDR to physical address PADDR */
- #define virt_to_phys_040(vaddr) \
- ({ \
- @@ -580,6 +584,7 @@ cache_flush_060 (unsigned long addr, int
- }
- return 0;
- }
- +#endif /* CONFIG_COLDFIRE */
-
- /* sys_cacheflush -- flush (part of) the processor cache. */
- asmlinkage int
- @@ -612,6 +617,7 @@ sys_cacheflush (unsigned long addr, int
- goto out;
- }
-
- +#ifndef CONFIG_COLDFIRE
- if (CPU_IS_020_OR_030) {
- if (scope == FLUSH_SCOPE_LINE && len < 256) {
- unsigned long cacr;
- @@ -656,6 +662,16 @@ sys_cacheflush (unsigned long addr, int
- ret = cache_flush_060 (addr, scope, cache, len);
- }
- }
- +#else /* CONFIG_COLDFIRE */
- + if ((cache & FLUSH_CACHE_INSN) && (cache & FLUSH_CACHE_DATA))
- + flush_bcache();
- + else if (cache & FLUSH_CACHE_INSN)
- + flush_icache();
- + else
- + flush_dcache();
- +
- + ret = 0;
- +#endif /* CONFIG_COLDFIRE */
- out:
- unlock_kernel();
- return ret;
- --- a/arch/m68k/mm/cache.c
- +++ b/arch/m68k/mm/cache.c
- @@ -81,36 +81,7 @@ static unsigned long virt_to_phys_slow(u
- void flush_icache_range(unsigned long address, unsigned long endaddr)
- {
- #ifdef CONFIG_COLDFIRE
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - start_set = address & _ICACHE_SET_MASK;
- - end_set = endaddr & _ICACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3))
- - asm volatile ("cpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)" : : "a" (set));
- -
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_ICACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3))
- - asm volatile ("cpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)" : : "a" (set));
- -
- + cf_icache_flush_range(address, endaddr);
- #else /* !CONFIG_COLDFIRE */
-
- if (CPU_IS_040_OR_060) {
- --- a/arch/m68k/mm/memory.c
- +++ b/arch/m68k/mm/memory.c
- @@ -127,6 +127,7 @@ int free_pointer_table (pmd_t *ptable)
- return 0;
- }
-
- +#ifndef CONFIG_COLDFIRE
- /* invalidate page in both caches */
- static inline void clear040(unsigned long paddr)
- {
- @@ -173,6 +174,7 @@ static inline void pushcl040(unsigned lo
- clear040(paddr);
- local_irq_restore(flags);
- }
- +#endif /* CONFIG_COLDFIRE */
-
- /*
- * 040: Hit every page containing an address in the range paddr..paddr+len-1.
- @@ -203,38 +205,10 @@ static inline void pushcl040(unsigned lo
-
- void cache_clear (unsigned long paddr, int len)
- {
- - if (CPU_IS_CFV4E) {
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - start_set = paddr & _ICACHE_SET_MASK;
- - end_set = (paddr+len-1) & _ICACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)" : : "a" (set));
- -
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_ICACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)" : : "a" (set));
- -
- - } else if (CPU_IS_040_OR_060) {
- +#ifdef CONFIG_COLDFIRE
- + cf_cache_clear(paddr, len);
- +#else
- + if (CPU_IS_040_OR_060) {
- int tmp;
-
- /*
- @@ -268,6 +242,7 @@ void cache_clear (unsigned long paddr, i
- if(mach_l2_flush)
- mach_l2_flush(0);
- #endif
- +#endif /* CONFIG_COLDFIRE */
- }
- EXPORT_SYMBOL(cache_clear);
-
- @@ -281,38 +256,10 @@ EXPORT_SYMBOL(cache_clear);
-
- void cache_push (unsigned long paddr, int len)
- {
- - if (CPU_IS_CFV4E) {
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - start_set = paddr & _ICACHE_SET_MASK;
- - end_set = (paddr+len-1) & _ICACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)" : : "a" (set));
- -
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_ICACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)" : : "a" (set));
- -
- - } else if (CPU_IS_040_OR_060) {
- +#ifdef CONFIG_COLDFIRE
- + cf_cache_push(paddr, len);
- +#else
- + if (CPU_IS_040_OR_060) {
- int tmp = PAGE_SIZE;
-
- /*
- @@ -352,6 +299,7 @@ void cache_push (unsigned long paddr, in
- if(mach_l2_flush)
- mach_l2_flush(1);
- #endif
- +#endif /* CONFIG_COLDFIRE */
- }
- EXPORT_SYMBOL(cache_push);
-
- --- a/include/asm-m68k/cf_cacheflush.h
- +++ b/include/asm-m68k/cf_cacheflush.h
- @@ -1,160 +1,439 @@
- +/*
- + * include/asm-m68k/cf_cacheflush.h - Coldfire Cache
- + *
- + * Based on include/asm-m68k/cacheflush.h
- + *
- + * Coldfire pieces by:
- + * Kurt Mahan [email protected]
- + *
- + * Copyright Freescale Semiconductor, Inc. 2007
- + *
- + * This program is free software; you can redistribute it and/or modify it
- + * under the terms of the GNU General Public License as published by the
- + * Free Software Foundation; either version 2 of the License, or (at your
- + * option) any later version.
- + */
- #ifndef M68K_CF_CACHEFLUSH_H
- #define M68K_CF_CACHEFLUSH_H
-
- #include <asm/cfcache.h>
-
- /*
- - * Cache handling functions
- + * Coldfire Cache Model
- + *
- + * The Coldfire processors use a Harvard architecture cache configured
- + * as four-way set associative. The cache does not implement bus snooping
- + * so cache coherency with other masters must be maintained in software.
- + *
- + * The cache is managed via the CPUSHL instruction in conjunction with
- + * bits set in the CACR (cache control register). Currently the code
- + * uses the CPUSHL enhancement which adds the ability to
- + * invalidate/clear/push a cacheline by physical address. This feature
- + * is designated in the Hardware Configuration Register [D1-CPES].
- + *
- + * CACR Bits:
- + * DPI[28] cpushl invalidate disable for d-cache
- + * IDPI[12] cpushl invalidate disable for i-cache
- + * SPA[14] cpushl search by physical address
- + * IVO[20] cpushl invalidate only
- + *
- + * Random Terminology:
- + * * invalidate = reset the cache line's valid bit
- + * * push = generate a line-sized store of the data if its contents are marked
- + * as modified (the modified flag is cleared after the store)
- + * * clear = push + invalidate
- */
-
- -#define flush_icache() \
- -({ \
- - unsigned long set; \
- - unsigned long start_set; \
- - unsigned long end_set; \
- - \
- - start_set = 0; \
- - end_set = (unsigned long)LAST_DCACHE_ADDR; \
- - \
- - for (set = start_set; set <= end_set; set += (0x10 - 3)) \
- - asm volatile("cpushl %%ic,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%ic,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%ic,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%ic,(%0)" : : "a" (set)); \
- -})
- +/**
- + * flush_icache - Flush all of the instruction cache
- + */
- +static inline void flush_icache(void)
- +{
- + asm volatile("nop\n"
- + "moveq%.l #0,%%d0\n"
- + "moveq%.l #0,%%d1\n"
- + "move%.l %%d0,%%a0\n"
- + "1:\n"
- + "cpushl %%ic,(%%a0)\n"
- + "add%.l #0x0010,%%a0\n"
- + "addq%.l #1,%%d1\n"
- + "cmpi%.l %0,%%d1\n"
- + "bne 1b\n"
- + "moveq%.l #0,%%d1\n"
- + "addq%.l #1,%%d0\n"
- + "move%.l %%d0,%%a0\n"
- + "cmpi%.l #4,%%d0\n"
- + "bne 1b\n"
- + : : "i" (CACHE_SETS)
- + : "a0", "d0", "d1");
- +}
-
- -/*
- - * invalidate the cache for the specified memory range.
- - * It starts at the physical address specified for
- - * the given number of bytes.
- +/**
- + * flush_dcache - Flush all of the data cache
- */
- -extern void cache_clear(unsigned long paddr, int len);
- -/*
- - * push any dirty cache in the specified memory range.
- - * It starts at the physical address specified for
- - * the given number of bytes.
- +static inline void flush_dcache(void)
- +{
- + asm volatile("nop\n"
- + "moveq%.l #0,%%d0\n"
- + "moveq%.l #0,%%d1\n"
- + "move%.l %%d0,%%a0\n"
- + "1:\n"
- + "cpushl %%dc,(%%a0)\n"
- + "add%.l #0x0010,%%a0\n"
- + "addq%.l #1,%%d1\n"
- + "cmpi%.l %0,%%d1\n"
- + "bne 1b\n"
- + "moveq%.l #0,%%d1\n"
- + "addq%.l #1,%%d0\n"
- + "move%.l %%d0,%%a0\n"
- + "cmpi%.l #4,%%d0\n"
- + "bne 1b\n"
- + : : "i" (CACHE_SETS)
- + : "a0", "d0", "d1");
- +}
- +
- +/**
- + * flush_bcache - Flush all of both caches
- */
- -extern void cache_push(unsigned long paddr, int len);
- +static inline void flush_bcache(void)
- +{
- + asm volatile("nop\n"
- + "moveq%.l #0,%%d0\n"
- + "moveq%.l #0,%%d1\n"
- + "move%.l %%d0,%%a0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "add%.l #0x0010,%%a0\n"
- + "addq%.l #1,%%d1\n"
- + "cmpi%.l %0,%%d1\n"
- + "bne 1b\n"
- + "moveq%.l #0,%%d1\n"
- + "addq%.l #1,%%d0\n"
- + "move%.l %%d0,%%a0\n"
- + "cmpi%.l #4,%%d0\n"
- + "bne 1b\n"
- + : : "i" (CACHE_SETS)
- + : "a0", "d0", "d1");
- +}
-
- -/*
- - * push and invalidate pages in the specified user virtual
- - * memory range.
- +/**
- + * cf_cache_clear - invalidate cache
- + * @paddr: starting physical address
- + * @len: number of bytes
- + *
- + * Invalidate cache lines starting at paddr for len bytes.
- + * Those lines are not pushed.
- + */
- +static inline void cf_cache_clear(unsigned long paddr, int len)
- +{
- + /* number of lines */
- + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
- +
- + /* align on set boundary */
- + paddr &= 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%d0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "subq%.l #1,%%d0\n"
- + "bne%.b 1b\n"
- + "movec %2,%%cacr\n"
- + : : "a" (paddr), "r" (len),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA+CF_CACR_IVO)
- + : "a0", "d0");
- +}
- +
- +/**
- + * cf_cache_push - Push dirty cache out with no invalidate
- + * @paddr: starting physical address
- + * @len: number of bytes
- + *
- + * Push any dirty lines starting at paddr for len bytes.
- + * Those lines are not invalidated.
- + */
- +static inline void cf_cache_push(unsigned long paddr, int len)
- +{
- + /* number of lines */
- + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
- +
- + /* align on set boundary */
- + paddr &= 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%d0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "subq%.l #1,%%d0\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : : "a" (paddr), "r" (len),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
- + : "a0", "d0");
- +}
- +
- +/**
- + * cf_cache_flush - Push dirty cache out and invalidate
- + * @paddr: starting physical address
- + * @len: number of bytes
- + *
- + * Push any dirty lines starting at paddr for len bytes and
- + * invalidate those lines.
- + */
- +static inline void cf_cache_flush(unsigned long paddr, int len)
- +{
- + /* number of lines */
- + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
- +
- + /* align on set boundary */
- + paddr &= 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%d0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "subq%.l #1,%%d0\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : : "a" (paddr), "r" (len),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA)
- + : "a0", "d0");
- +}
- +
- +/**
- + * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
- + * @vstart: starting virtual address
- + * @vend: ending virtual address
- + *
- + * Push any dirty data/instr lines starting at paddr for len bytes and
- + * invalidate those lines.
- + */
- +static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
- +{
- + int len;
- +
- + /* align on set boundary */
- + vstart &= 0xfffffff0;
- + vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
- + len = vend - vstart;
- + vstart = __pa(vstart);
- + vend = vstart + len;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%a1\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "cmpa%.l %%a0,%%a1\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : /* no return */
- + : "a" (vstart), "a" (vend),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA)
- + : "a0", "a1", "d0");
- +}
- +
- +/**
- + * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
- + * @vstart: starting virtual address
- + * @vend: ending virtual address
- + *
- + * Push any dirty data lines starting at paddr for len bytes and
- + * invalidate those lines.
- + */
- +static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
- +{
- + /* align on set boundary */
- + vstart &= 0xfffffff0;
- + vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%a1\n"
- + "1:\n"
- + "cpushl %%dc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "cmpa%.l %%a0,%%a1\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : /* no return */
- + : "a" (__pa(vstart)), "a" (__pa(vend)),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA)
- + : "a0", "a1", "d0");
- +}
- +
- +/**
- + * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
- + * @vstart: starting virtual address
- + * @vend: ending virtual address
- + *
- + * Push any dirty instr lines starting at paddr for len bytes and
- + * invalidate those lines. This should just be an invalidate since you
- + * shouldn't be able to have dirty instruction cache.
- */
- -extern void cache_push_v(unsigned long vaddr, int len);
- +static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
- +{
- + /* align on set boundary */
- + vstart &= 0xfffffff0;
- + vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%a1\n"
- + "1:\n"
- + "cpushl %%ic,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "cmpa%.l %%a0,%%a1\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : /* no return */
- + : "a" (__pa(vstart)), "a" (__pa(vend)),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA)
- + : "a0", "a1", "d0");
- +}
-
- -/* This is needed whenever the virtual mapping of the current
- - process changes. */
- +/**
- + * flush_cache_mm - Flush an mm_struct
- + * @mm: mm_struct to flush
- + */
- +static inline void flush_cache_mm(struct mm_struct *mm)
- +{
- + if (mm == current->mm)
- + flush_bcache();
- +}
-
- +#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
-
- -#define flush_cache_all() do { } while (0)
- -#define flush_cache_mm(mm) do { } while (0)
- -#define flush_cache_range(mm, a, b) do { } while (0)
- -#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
- -
- -#define flush_dcache_range(paddr, len) do { } while (0)
- -
- -/* Push the page at kernel virtual address and clear the icache */
- -/* use cpush %bc instead of cpush %dc, cinv %ic */
- -#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
- -extern inline void __flush_page_to_ram(void *address)
- -{
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- - unsigned long addr = (unsigned long) address;
- -
- - addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
- -
- - start_set = addr & _ICACHE_SET_MASK;
- - end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)" : : "a" (set));
- -
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_ICACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)" : : "a" (set));
- -}
- -
- -#define flush_dcache_page(page) do { } while (0)
- -#define flush_icache_page(vma, pg) do { } while (0)
- -#define flush_icache_user_range(adr, len) do { } while (0)
- -/* NL */
- -#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
- -
- -/* Push n pages at kernel virtual address and clear the icache */
- -/* use cpush %bc instead of cpush %dc, cinv %ic */
- -extern inline void flush_icache_range(unsigned long address,
- - unsigned long endaddr)
- -{
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - start_set = address & _ICACHE_SET_MASK;
- - end_set = endaddr & _ICACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)" : : "a" (set));
- -
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_ICACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3))
- - asm volatile("cpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)" : : "a" (set));
- +/**
- + * flush_cache_range - Flush a cache range
- + * @vma: vma struct
- + * @start: Starting address
- + * @end: Ending address
- + *
- + * Flush the cache for the given address range when the vma
- + * belongs to the current process.
- + */
- +static inline void flush_cache_range(struct vm_area_struct *vma,
- + unsigned long start, unsigned long end)
- +{
- + if (vma->vm_mm == current->mm)
- + cf_cache_flush_range(start, end);
- }
-
- +/**
- + * flush_cache_page - Flush a page of the cache
- + * @vma: vma struct
- + * @vmaddr: virtual address of the page
- + * @pfn: page number
- + *
- + * Flush the cache for the page at vmaddr when the vma
- + * belongs to the current process.
- + */
- +static inline void flush_cache_page(struct vm_area_struct *vma,
- + unsigned long vmaddr, unsigned long pfn)
- +{
- + if (vma->vm_mm == current->mm)
- + cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
- +}
- +
- +/**
- + * __flush_page_to_ram - Push a page out of the cache
- + * @vaddr: Virtual address at start of page
- + *
- + * Push the page at kernel virtual address *vaddr* and clear
- + * the icache.
- + */
- +static inline void __flush_page_to_ram(void *vaddr)
- +{
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%d0\n"
- + "and%.l #0xfffffff0,%%d0\n"
- + "move%.l %%d0,%%a0\n"
- + "move%.l %1,%%d0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "subq%.l #1,%%d0\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
- + "r" (shadow_cacr), "i" (CF_CACR_SPA)
- + : "a0", "d0");
- +}
- +
- +/*
- + * Various defines for the kernel.
- + */
- +
- +extern void cache_clear(unsigned long paddr, int len);
- +extern void cache_push(unsigned long paddr, int len);
- +extern void flush_icache_range(unsigned long address, unsigned long endaddr);
- +
- +#define flush_cache_all() flush_bcache()
- +#define flush_cache_vmap(start, end) flush_bcache()
- +#define flush_cache_vunmap(start, end) flush_bcache()
- +
- +#define flush_dcache_range(vstart, vend) cf_dcache_flush_range(vstart, vend)
- +#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
- +#define flush_dcache_mmap_lock(mapping) do { } while (0)
- +#define flush_dcache_mmap_unlock(mapping) do { } while (0)
- +
- +#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
- +
- +/**
- + * copy_to_user_page - Copy memory to user page
- + */
- static inline void copy_to_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr,
- void *dst, void *src, int len)
- {
- memcpy(dst, src, len);
- - flush_icache_user_page(vma, page, vaddr, len);
- + cf_cache_flush(page_to_phys(page), PAGE_SIZE);
- }
- +
- +/**
- + * copy_from_user_page - Copy memory from user page
- + */
- static inline void copy_from_user_page(struct vm_area_struct *vma,
- struct page *page, unsigned long vaddr,
- void *dst, void *src, int len)
- {
- + cf_cache_flush(page_to_phys(page), PAGE_SIZE);
- memcpy(dst, src, len);
- }
-
- -#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
- -#define flush_cache_vmap(start, end) flush_cache_all()
- -#define flush_cache_vunmap(start, end) flush_cache_all()
- -#define flush_dcache_mmap_lock(mapping) do { } while (0)
- -#define flush_dcache_mmap_unlock(mapping) do { } while (0)
- -
- #endif /* M68K_CF_CACHEFLUSH_H */
- --- a/include/asm-m68k/cfcache.h
- +++ b/include/asm-m68k/cfcache.h
- @@ -1,19 +1,32 @@
- /*
- - * include/asm-m68k/cfcache.h
- + * include/asm-m68k/cfcache.h - Coldfire Cache Controller
- + *
- + * Kurt Mahan [email protected]
- + *
- + * Copyright Freescale Semiconductor, Inc. 2007
- + *
- + * This program is free software; you can redistribute it and/or modify it
- + * under the terms of the GNU General Public License as published by the
- + * Free Software Foundation; either version 2 of the License, or (at your
- + * option) any later version.
- */
- #ifndef CF_CFCACHE_H
- #define CF_CFCACHE_H
-
- +/*
- + * CACR Cache Control Register
- + */
- #define CF_CACR_DEC (0x80000000) /* Data Cache Enable */
- #define CF_CACR_DW (0x40000000) /* Data default Write-protect */
- #define CF_CACR_DESB (0x20000000) /* Data Enable Store Buffer */
- -#define CF_CACR_DDPI (0x10000000) /* Data Disable CPUSHL Invalidate */
- +#define CF_CACR_DPI (0x10000000) /* Data Disable CPUSHL Invalidate */
- #define CF_CACR_DHLCK (0x08000000) /* 1/2 Data Cache Lock Mode */
- #define CF_CACR_DDCM_00 (0x00000000) /* Cacheable writethrough imprecise */
- #define CF_CACR_DDCM_01 (0x02000000) /* Cacheable copyback */
- #define CF_CACR_DDCM_10 (0x04000000) /* Noncacheable precise */
- #define CF_CACR_DDCM_11 (0x06000000) /* Noncacheable imprecise */
- #define CF_CACR_DCINVA (0x01000000) /* Data Cache Invalidate All */
- +#define CF_CACR_DDSP (0x00800000) /* Data default supervisor-protect */
- #define CF_CACR_IVO (0x00100000) /* Invalidate only */
- #define CF_CACR_BEC (0x00080000) /* Branch Cache Enable */
- #define CF_CACR_BCINVA (0x00040000) /* Branch Cache Invalidate All */
- @@ -24,61 +37,43 @@
- #define CF_CACR_IHLCK (0x00000800) /* 1/2 Instruction Cache Lock Mode */
- #define CF_CACR_IDCM (0x00000400) /* Noncacheable Instr default mode */
- #define CF_CACR_ICINVA (0x00000100) /* Instr Cache Invalidate All */
- +#define CF_CACR_IDSP (0x00000080) /* Ins default supervisor-protect */
- #define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */
-
- -#define DCACHE_LINE_SIZE 0x0010 /* bytes per line */
- -#define DCACHE_WAY_SIZE 0x2000 /* words per cache block */
- -#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+CF_CACR_BCINVA+CF_CACR_ICINVA)
- -#ifdef CONFIG_M5445X_DISABLE_CACHE
- -/* disable cache for testing rev0 silicon */
- -#define CACHE_INITIAL_MODE (CF_CACR_EUSP)
- -#else
- -#define CACHE_INITIAL_MODE (CF_CACR_DEC+CF_CACR_BEC+CF_CACR_IEC+CF_CACR_EUSP)
- -#endif
- -
- -#define _DCACHE_SIZE (2*16384)
- -#define _ICACHE_SIZE (2*16384)
- -
- -#define _SET_SHIFT 4
- -
- +#ifdef CONFIG_M54455
- /*
- - * Masks for cache sizes. Programming note: because the set size is a
- - * power of two, the mask is also the last address in the set.
- - * This may need to be #ifdef for other Coldfire processors.
- + * M5445x Cache Configuration
- + * - cache line size is 16 bytes
- + * - cache is 4-way set associative
- + * - each cache has 256 sets (16k / 16bytes / 4way)
- + * - I-Cache size is 16KB
- + * - D-Cache size is 16KB
- */
- +#define ICACHE_SIZE 0x4000 /* instruction - 16k */
- +#define DCACHE_SIZE 0x4000 /* data - 16k */
-
- -#define _DCACHE_SET_MASK ((_DCACHE_SIZE/64-1)<<_SET_SHIFT)
- -#define _ICACHE_SET_MASK ((_ICACHE_SIZE/64-1)<<_SET_SHIFT)
- -#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
- -#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
- -
- +#define CACHE_LINE_SIZE 0x0010 /* 16 bytes */
- +#define CACHE_SETS 0x0100 /* 256 sets */
- +#define CACHE_WAYS 0x0004 /* 4 way */
- +
- +#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+ \
- + CF_CACR_BCINVA+ \
- + CF_CACR_ICINVA)
- +
- +#ifndef CONFIG_M5445X_DISABLE_CACHE
- +#define CACHE_INITIAL_MODE (CF_CACR_DEC+ \
- + CF_CACR_BEC+ \
- + CF_CACR_IEC+ \
- + CF_CACR_EUSP)
- +#else
- +/* cache disabled for testing */
- +#define CACHE_INITIAL_MODE (CF_CACR_EUSP)
- +#endif /* CONFIG_M5445X_DISABLE_CACHE */
- +#endif /* CONFIG_M54455 */
-
- #ifndef __ASSEMBLY__
-
- -extern void DcacheFlushInvalidate(void);
- -
- -extern void DcacheDisable(void);
- -extern void DcacheEnable(void);
- -
- -/******************************************************************************/
- -/*** Unimplemented Cache functionality ***/
- -/******************************************************************************/
- -#define preDcacheInvalidateBlockMark()
- -#define postDcacheInvalidateBlockMark()
- -#define DcacheZeroBlock(p, l) fast_bzero((char *)(p), (long)(l))
- -#define loadDcacheInvalidateBlock() ASSERT(!"Not Implemented on V4e")
- -#define IcacheInvalidateBlock() ASSERT(!"Not Implemented on V4e")
- -
- -/******************************************************************************/
- -/*** Redundant Cache functionality on ColdFire ***/
- -/******************************************************************************/
- -#define DcacheInvalidateBlock(p, l) DcacheFlushInvalidateCacheBlock(p, l)
- -#define DcacheFlushCacheBlock(p, l) DcacheFlushInvalidateCacheBlock(p, l)
- -#define DcacheFlushBlock(p, l) DcacheFlushInvalidateCacheBlock(p, l)
- -
- -extern void DcacheFlushInvalidateCacheBlock(void *start, unsigned long size);
- -extern void FLASHDcacheFlushInvalidate(void);
- -
- +extern unsigned long shadow_cacr;
- extern void cacr_set(unsigned long x);
-
- #endif /* !__ASSEMBLY__ */
|