- From 940b4fea5ebfde3abe03c6469a57c01ee961497a Mon Sep 17 00:00:00 2001
- From: Kurt Mahan <[email protected]>
- Date: Wed, 18 Jun 2008 15:20:21 -0600
- Subject: [PATCH] Split 547x/548x and 5445x cache routines into separate files.
- LTIBName: mcfv4e-cache-split
- Signed-off-by: Kurt Mahan <[email protected]>
- ---
- include/asm-m68k/cf_5445x_cacheflush.h | 447 ++++++++++++++++++++++++++++++++
- include/asm-m68k/cf_548x_cacheflush.h | 259 ++++++++++++++++++
- include/asm-m68k/cf_cacheflush.h | 244 +-----------------
- 3 files changed, 711 insertions(+), 239 deletions(-)
- create mode 100644 include/asm-m68k/cf_5445x_cacheflush.h
- create mode 100644 include/asm-m68k/cf_548x_cacheflush.h
- --- /dev/null
- +++ b/include/asm-m68k/cf_5445x_cacheflush.h
- @@ -0,0 +1,447 @@
- +/*
- + * include/asm-m68k/cf_5445x_cacheflush.h - Coldfire 5445x Cache
- + *
- + * Based on include/asm-m68k/cacheflush.h
- + *
- + * Coldfire pieces by:
- + * Kurt Mahan [email protected]
- + *
- + * Copyright Freescale Semiconductor, Inc. 2007, 2008
- + *
- + * This program is free software; you can redistribute it and/or modify it
- + * under the terms of the GNU General Public License as published by the
- + * Free Software Foundation; either version 2 of the License, or (at your
- + * option) any later version.
- + */
- +#ifndef M68K_CF_5445x_CACHEFLUSH_H
- +#define M68K_CF_5445x_CACHEFLUSH_H
- +
- +#include <asm/cfcache.h>
- +
- +/*
- + * Coldfire Cache Model
- + *
- + * The Coldfire processors use a Harvard architecture cache configured
- + * as four-way set associative. The cache does not implement bus snooping,
- + * so cache coherency with other masters must be maintained in software.
- + *
- + * The cache is managed via the CPUSHL instruction in conjunction with
- + * bits set in the CACR (cache control register). Currently the code
- + * uses the CPUSHL enhancement, which adds the ability to
- + * invalidate/clear/push a cacheline by physical address. This feature
- + * is indicated in the Hardware Configuration Register [D1-CPES].
- + *
- + * CACR Bits:
- + * DPI[28] cpushl invalidate disable for d-cache
- + * IDPI[12] cpushl invalidate disable for i-cache
- + * SPA[14] cpushl search by physical address
- + * IVO[20] cpushl invalidate only
- + *
- + * Random Terminology:
- + * * invalidate = reset the cache line's valid bit
- + * * push = generate a line-sized store of the data if its contents are marked
- + * as modified (the modified flag is cleared after the store)
- + * * clear = push + invalidate
- + */
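- 
- The three per-range routines below (cf_cache_clear/push/flush) share one pattern: OR a mode into the CACR (mirrored in shadow_cacr), CPUSHL each line by physical address, then restore the CACR. A minimal C sketch of that pattern, assuming hypothetical write_cacr() and cpushl_bc() wrappers for the movec and CPUSHL %bc instructions (shadow_cacr, the CF_CACR_* masks, and CACHE_LINE_SIZE come from asm/cfcache.h):
- 
-     static inline void cpushl_by_pa(unsigned long paddr, int lines,
-                                     unsigned long mode)
-     {
-             /* mode: CF_CACR_SPA|CF_CACR_IVO              -> clear (invalidate only)
-              *       CF_CACR_SPA|CF_CACR_DPI|CF_CACR_IDPI -> push (no invalidate)
-              *       CF_CACR_SPA                          -> flush (push + invalidate)
-              */
-             write_cacr(shadow_cacr | mode);      /* hypothetical movec wrapper */
-             while (lines--) {
-                     cpushl_bc(paddr);            /* hypothetical CPUSHL %bc wrapper */
-                     paddr += CACHE_LINE_SIZE;
-             }
-             write_cacr(shadow_cacr);             /* restore the mirrored value */
-     }
- 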
- +
- +/**
- + * flush_icache - Flush all of the instruction cache
- + */
- +static inline void flush_icache(void)
- +{
- + asm volatile("nop\n"
- + "moveq%.l #0,%%d0\n"
- + "moveq%.l #0,%%d1\n"
- + "move%.l %%d0,%%a0\n"
- + "1:\n"
- + "cpushl %%ic,(%%a0)\n"
- + "add%.l #0x0010,%%a0\n"
- + "addq%.l #1,%%d1\n"
- + "cmpi%.l %0,%%d1\n"
- + "bne 1b\n"
- + "moveq%.l #0,%%d1\n"
- + "addq%.l #1,%%d0\n"
- + "move%.l %%d0,%%a0\n"
- + "cmpi%.l #4,%%d0\n"
- + "bne 1b\n"
- + : : "i" (CACHE_SETS)
- + : "a0", "d0", "d1");
- +}
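- 
- In C terms, the loop above walks d0 over the four ways and d1 over the sets, forming the CPUSHL operand as (set << 4) | way in a0. A sketch, with cpushl_ic() as a hypothetical wrapper for the CPUSHL %ic instruction:
- 
-     unsigned long way, set;
- 
-     for (way = 0; way < 4; way++)
-             for (set = 0; set < CACHE_SETS; set++)
-                     cpushl_ic((set << 4) | way);   /* 0x10 per set step */
- 
- flush_dcache() and flush_bcache() below are the same loop issued against %dc and %bc respectively.
- 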
- +
- +/**
- + * flush_dcache - Flush all of the data cache
- + */
- +static inline void flush_dcache(void)
- +{
- + asm volatile("nop\n"
- + "moveq%.l #0,%%d0\n"
- + "moveq%.l #0,%%d1\n"
- + "move%.l %%d0,%%a0\n"
- + "1:\n"
- + "cpushl %%dc,(%%a0)\n"
- + "add%.l #0x0010,%%a0\n"
- + "addq%.l #1,%%d1\n"
- + "cmpi%.l %0,%%d1\n"
- + "bne 1b\n"
- + "moveq%.l #0,%%d1\n"
- + "addq%.l #1,%%d0\n"
- + "move%.l %%d0,%%a0\n"
- + "cmpi%.l #4,%%d0\n"
- + "bne 1b\n"
- + : : "i" (CACHE_SETS)
- + : "a0", "d0", "d1");
- +}
- +
- +/**
- + * flush_bcache - Flush all of both caches
- + */
- +static inline void flush_bcache(void)
- +{
- + asm volatile("nop\n"
- + "moveq%.l #0,%%d0\n"
- + "moveq%.l #0,%%d1\n"
- + "move%.l %%d0,%%a0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "add%.l #0x0010,%%a0\n"
- + "addq%.l #1,%%d1\n"
- + "cmpi%.l %0,%%d1\n"
- + "bne 1b\n"
- + "moveq%.l #0,%%d1\n"
- + "addq%.l #1,%%d0\n"
- + "move%.l %%d0,%%a0\n"
- + "cmpi%.l #4,%%d0\n"
- + "bne 1b\n"
- + : : "i" (CACHE_SETS)
- + : "a0", "d0", "d1");
- +}
- +
- +/**
- + * cf_cache_clear - invalidate cache
- + * @paddr: starting physical address
- + * @len: number of bytes
- + *
- + * Invalidate cache lines starting at paddr for len bytes.
- + * Those lines are not pushed.
- + */
- +static inline void cf_cache_clear(unsigned long paddr, int len)
- +{
- + /* number of lines */
- + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
- + if (len == 0)
- + return;
- +
- + /* align on set boundary */
- + paddr &= 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%d0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "subq%.l #1,%%d0\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : : "a" (paddr), "r" (len),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA+CF_CACR_IVO)
- + : "a0", "d0");
- +}
- +
- +/**
- + * cf_cache_push - Push dirty cache out with no invalidate
- + * @paddr: starting physical address
- + * @len: number of bytes
- + *
- + * Push any dirty lines starting at paddr for len bytes.
- + * Those lines are not invalidated.
- + */
- +static inline void cf_cache_push(unsigned long paddr, int len)
- +{
- + /* number of lines */
- + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
- + if (len == 0)
- + return;
- +
- + /* align on set boundary */
- + paddr &= 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%d0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "subq%.l #1,%%d0\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : : "a" (paddr), "r" (len),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA+CF_CACR_DPI+CF_CACR_IDPI)
- + : "a0", "d0");
- +}
- +
- +/**
- + * cf_cache_flush - Push dirty cache out and invalidate
- + * @paddr: starting physical address
- + * @len: number of bytes
- + *
- + * Push any dirty lines starting at paddr for len bytes and
- + * invalidate those lines.
- + */
- +static inline void cf_cache_flush(unsigned long paddr, int len)
- +{
- + /* number of lines */
- + len = (len + (CACHE_LINE_SIZE-1)) / CACHE_LINE_SIZE;
- + if (len == 0)
- + return;
- +
- + /* align on set boundary */
- + paddr &= 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%d0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "subq%.l #1,%%d0\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : : "a" (paddr), "r" (len),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA)
- + : "a0", "d0");
- +}
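- 
- Since the cache does no bus snooping, these primitives are what keeps DMA buffers coherent. A hypothetical driver-style illustration (buf_phys, len and the start_dma_*() calls are illustrative, not from this patch):
- 
-     cf_cache_push(buf_phys, len);         /* CPU wrote buffer: push it to RAM   */
-     start_dma_to_device(buf_phys, len);   /* hypothetical: device reads RAM     */
- 
-     start_dma_from_device(buf_phys, len); /* hypothetical: device writes RAM    */
-     cf_cache_clear(buf_phys, len);        /* drop stale lines before CPU reads  */
- 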
- +
- +/**
- + * cf_cache_flush_range - Push dirty data/inst cache in range out and invalidate
- + * @vstart: starting virtual address
- + * @vend: ending virtual address
- + *
- + * Push any dirty data/instruction lines in the range vstart to vend
- + * and invalidate those lines.
- + */
- +static inline void cf_cache_flush_range(unsigned long vstart, unsigned long vend)
- +{
- + int len;
- +
- + /* align on set boundary */
- + vstart &= 0xfffffff0;
- + vend = PAGE_ALIGN((vend + (CACHE_LINE_SIZE-1))) & 0xfffffff0;
- + len = vend - vstart;
- + if (len == 0)
- + return;
- + vstart = __pa(vstart);
- + vend = vstart + len;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%a1\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "cmpa%.l %%a0,%%a1\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : /* no outputs */
- + : "a" (vstart), "a" (vend),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA)
- + : "a0", "a1", "d0");
- +}
- +
- +/**
- + * cf_dcache_flush_range - Push dirty data cache in range out and invalidate
- + * @vstart: starting virtual address
- + * @vend: ending virtual address
- + *
- + * Push any dirty data lines in the range vstart to vend and
- + * invalidate those lines.
- + */
- +static inline void cf_dcache_flush_range(unsigned long vstart, unsigned long vend)
- +{
- + /* align on set boundary */
- + vstart &= 0xfffffff0;
- + vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%a1\n"
- + "1:\n"
- + "cpushl %%dc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "cmpa%.l %%a0,%%a1\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : /* no outputs */
- + : "a" (__pa(vstart)), "a" (__pa(vend)),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA)
- + : "a0", "a1", "d0");
- +}
- +
- +/**
- + * cf_icache_flush_range - Push dirty inst cache in range out and invalidate
- + * @vstart: starting virtual address
- + * @vend: ending virtual address
- + *
- + * Push any dirty instruction lines in the range vstart to vend and
- + * invalidate those lines. In practice this is just an invalidate,
- + * since the instruction cache should never hold dirty lines.
- + */
- +static inline void cf_icache_flush_range(unsigned long vstart, unsigned long vend)
- +{
- + /* align on set boundary */
- + vstart &= 0xfffffff0;
- + vend = (vend + (CACHE_LINE_SIZE-1)) & 0xfffffff0;
- +
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%a0\n"
- + "move%.l %1,%%a1\n"
- + "1:\n"
- + "cpushl %%ic,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "cmpa%.l %%a0,%%a1\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : /* no outputs */
- + : "a" (__pa(vstart)), "a" (__pa(vend)),
- + "r" (shadow_cacr),
- + "i" (CF_CACR_SPA)
- + : "a0", "a1", "d0");
- +}
- +
- +/**
- + * flush_cache_mm - Flush an mm_struct
- + * @mm: mm_struct to flush
- + */
- +static inline void flush_cache_mm(struct mm_struct *mm)
- +{
- + if (mm == current->mm)
- + flush_bcache();
- +}
- +
- +#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
- +
- +/**
- + * flush_cache_range - Flush a cache range
- + * @vma: vma struct
- + * @start: Starting address
- + * @end: Ending address
- + *
- + * flush_cache_range is kept self-contained here to avoid a
- + * dependency on linux/mm.h, which includes this file.
- + */
- +static inline void flush_cache_range(struct vm_area_struct *vma,
- + unsigned long start, unsigned long end)
- +{
- + if (vma->vm_mm == current->mm)
- + cf_cache_flush_range(start, end);
- +}
- +
- +/**
- + * flush_cache_page - Flush a page of the cache
- + * @vma: vma struct
- + * @vmaddr: virtual address of the page
- + * @pfn: page number
- + *
- + * flush_cache_page is kept self-contained here to avoid a
- + * dependency on linux/mm.h, which includes this file.
- + */
- +static inline void flush_cache_page(struct vm_area_struct *vma,
- + unsigned long vmaddr, unsigned long pfn)
- +{
- + if (vma->vm_mm == current->mm)
- + cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
- +}
- +
- +/**
- + * __flush_page_to_ram - Push a page out of the cache
- + * @vaddr: Virtual address at start of page
- + *
- + * Push the page at kernel virtual address *vaddr* and clear
- + * the icache.
- + */
- +static inline void __flush_page_to_ram(void *vaddr)
- +{
- + asm volatile("nop\n"
- + "move%.l %2,%%d0\n"
- + "or%.l %3,%%d0\n"
- + "movec %%d0,%%cacr\n"
- + "move%.l %0,%%d0\n"
- + "and%.l #0xfffffff0,%%d0\n"
- + "move%.l %%d0,%%a0\n"
- + "move%.l %1,%%d0\n"
- + "1:\n"
- + "cpushl %%bc,(%%a0)\n"
- + "lea 0x10(%%a0),%%a0\n"
- + "subq%.l #1,%%d0\n"
- + "bne.b 1b\n"
- + "movec %2,%%cacr\n"
- + : : "a" (__pa(vaddr)), "i" (PAGE_SIZE / CACHE_LINE_SIZE),
- + "r" (shadow_cacr), "i" (CF_CACR_SPA)
- + : "a0", "d0");
- +}
- +
- +/*
- + * Various defines for the kernel.
- + */
- +
- +extern void cache_clear(unsigned long paddr, int len);
- +extern void cache_push(unsigned long paddr, int len);
- +extern void flush_icache_range(unsigned long address, unsigned long endaddr);
- +
- +#define flush_cache_all() flush_bcache()
- +#define flush_cache_vmap(start, end) flush_bcache()
- +#define flush_cache_vunmap(start, end) flush_bcache()
- +
- +#define flush_dcache_range(vstart, vend) cf_dcache_flush_range(vstart, vend)
- +#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
- +#define flush_dcache_mmap_lock(mapping) do { } while (0)
- +#define flush_dcache_mmap_unlock(mapping) do { } while (0)
- +
- +#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
- +
- +/**
- + * copy_to_user_page - Copy memory to user page
- + */
- +static inline void copy_to_user_page(struct vm_area_struct *vma,
- + struct page *page, unsigned long vaddr,
- + void *dst, void *src, int len)
- +{
- + memcpy(dst, src, len);
- + cf_cache_flush(page_to_phys(page), PAGE_SIZE);
- +}
- +
- +/**
- + * copy_from_user_page - Copy memory from user page
- + */
- +static inline void copy_from_user_page(struct vm_area_struct *vma,
- + struct page *page, unsigned long vaddr,
- + void *dst, void *src, int len)
- +{
- + cf_cache_flush(page_to_phys(page), PAGE_SIZE);
- + memcpy(dst, src, len);
- +}
- +
- +#endif /* M68K_CF_5445x_CACHEFLUSH_H */
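- 
- Rough cost of the routines above, assuming the usual 16-byte CACHE_LINE_SIZE and 4 KiB pages (both ultimately set by asm/cfcache.h and the platform configuration):
- 
-     #define LINES_PER_PAGE  (PAGE_SIZE / CACHE_LINE_SIZE)   /* 4096 / 16 = 256 */
-     /* __flush_page_to_ram() issues LINES_PER_PAGE CPUSHLs per page;
-      * flush_bcache() issues 4 * CACHE_SETS (every way of every set). */
- 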
- --- /dev/null
- +++ b/include/asm-m68k/cf_548x_cacheflush.h
- @@ -0,0 +1,259 @@
- +/*
- + * include/asm-m68k/cf_548x_cacheflush.h - Coldfire 547x/548x Cache
- + *
- + * Based on include/asm-m68k/cacheflush.h
- + *
- + * Coldfire pieces by:
- + * Kurt Mahan [email protected]
- + *
- + * Copyright Freescale Semiconductor, Inc. 2007, 2008
- + *
- + * This program is free software; you can redistribute it and/or modify it
- + * under the terms of the GNU General Public License as published by the
- + * Free Software Foundation; either version 2 of the License, or (at your
- + * option) any later version.
- + */
- +#ifndef M68K_CF_548x_CACHEFLUSH_H
- +#define M68K_CF_548x_CACHEFLUSH_H
- +
- +#include <asm/cfcache.h>
- +/*
- + * Cache handling functions
- + */
- +
- +#define flush_icache() \
- +({ \
- + unsigned long set; \
- + unsigned long start_set; \
- + unsigned long end_set; \
- + \
- + start_set = 0; \
- + end_set = (unsigned long)LAST_DCACHE_ADDR; \
- + \
- + for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
- + asm volatile("cpushl %%ic,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%ic,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%ic,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
- + } \
- +})
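- 
- On 547x/548x the CPUSHL operand encodes the way in its low two bits and the set index above them, so the macro unrolls all four ways per set: three addq #1 steps walk across the ways and the set += (0x10 - 3) stride lands back on way 0 of the next set. An equivalent C sketch, with cpushl_ic() as a hypothetical wrapper for CPUSHL %ic:
- 
-     unsigned long addr;
- 
-     for (addr = 0; addr <= LAST_DCACHE_ADDR; addr += 0x10) {
-             cpushl_ic(addr + 0);    /* way 0 */
-             cpushl_ic(addr + 1);    /* way 1 */
-             cpushl_ic(addr + 2);    /* way 2 */
-             cpushl_ic(addr + 3);    /* way 3 */
-     }
- 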
- +
- +#define flush_dcache() \
- +({ \
- + unsigned long set; \
- + unsigned long start_set; \
- + unsigned long end_set; \
- + \
- + start_set = 0; \
- + end_set = (unsigned long)LAST_DCACHE_ADDR; \
- + \
- + for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
- + asm volatile("cpushl %%dc,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%dc,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%dc,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
- + } \
- +})
- +
- +#define flush_bcache() \
- +({ \
- + unsigned long set; \
- + unsigned long start_set; \
- + unsigned long end_set; \
- + \
- + start_set = 0; \
- + end_set = (unsigned long)LAST_DCACHE_ADDR; \
- + \
- + for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
- + asm volatile("cpushl %%bc,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%bc,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%bc,(%0)\n" \
- + "\taddq%.l #1,%0\n" \
- + "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
- + } \
- +})
- +
- +/*
- + * invalidate the cache for the specified memory range.
- + * It starts at the physical address specified for
- + * the given number of bytes.
- + */
- +extern void cache_clear(unsigned long paddr, int len);
- +/*
- + * push any dirty cache in the specified memory range.
- + * It starts at the physical address specified for
- + * the given number of bytes.
- + */
- +extern void cache_push(unsigned long paddr, int len);
- +
- +/*
- + * push and invalidate pages in the specified user virtual
- + * memory range.
- + */
- +extern void cache_push_v(unsigned long vaddr, int len);
- +
- +/* This is needed whenever the virtual mapping of the current
- + process changes. */
- +
- +/**
- + * flush_cache_mm - Flush an mm_struct
- + * @mm: mm_struct to flush
- + */
- +static inline void flush_cache_mm(struct mm_struct *mm)
- +{
- + if (mm == current->mm)
- + flush_bcache();
- +}
- +
- +#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
- +
- +#define flush_cache_all() flush_bcache()
- +
- +/**
- + * flush_cache_range - Flush a cache range
- + * @vma: vma struct
- + * @start: Starting address
- + * @end: Ending address
- + *
- + * flush_cache_range is kept self-contained here to avoid a
- + * dependency on linux/mm.h, which includes this file.
- + */
- +static inline void flush_cache_range(struct vm_area_struct *vma,
- + unsigned long start, unsigned long end)
- +{
- + if (vma->vm_mm == current->mm)
- + flush_bcache();
- +// cf_cache_flush_range(start, end);
- +}
- +
- +/**
- + * flush_cache_page - Flush a page of the cache
- + * @vma: vma struct
- + * @vmaddr: virtual address of the page
- + * @pfn: page number
- + *
- + * flush_cache_page is kept self-contained here to avoid a
- + * dependency on linux/mm.h, which includes this file.
- + */
- +static inline void flush_cache_page(struct vm_area_struct *vma,
- + unsigned long vmaddr, unsigned long pfn)
- +{
- + if (vma->vm_mm == current->mm)
- + flush_bcache();
- +// cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
- +}
- +
- +/* Push the page at kernel virtual address and clear the icache */
- +/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
- +#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
- +static inline void __flush_page_to_ram(void *address)
- +{
- + unsigned long set;
- + unsigned long start_set;
- + unsigned long end_set;
- + unsigned long addr = (unsigned long) address;
- +
- + addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
- +
- + start_set = addr & _ICACHE_SET_MASK;
- + end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
- +
- + if (start_set > end_set) {
- + /* from set 0 up to the wrapped end of the range */
- + for (set = 0; set <= end_set; set += (0x10 - 3)) {
- + asm volatile("cpushl %%bc,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%bc,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%bc,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
- + }
- + /* the next loop covers the rest, up to the top of the cache */
- + end_set = LAST_ICACHE_ADDR;
- + }
- + for (set = start_set; set <= end_set; set += (0x10 - 3)) {
- + asm volatile("cpushl %%bc,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%bc,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%bc,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
- + }
- +}
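- 
- The set index comes from the low address bits, so a page whose sets straddle the top of the index space wraps around; the code above then flushes the two pieces separately. The control flow, with flush_sets() standing in for the unrolled CPUSHL loop (hypothetical helper):
- 
-     if (start_set > end_set) {                       /* range wraps      */
-             flush_sets(0, end_set);                  /* wrapped low part */
-             flush_sets(start_set, LAST_ICACHE_ADDR); /* rest of cache    */
-     } else {
-             flush_sets(start_set, end_set);
-     }
- 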
- +
- +/* Use __flush_page_to_ram() for flush_dcache_page; all values are the same - MW */
- +#define flush_dcache_page(page) \
- + __flush_page_to_ram((void *) page_address(page))
- +#define flush_icache_page(vma,pg) \
- + __flush_page_to_ram((void *) page_address(pg))
- +#define flush_icache_user_range(adr,len) do { } while (0)
- +/* NL */
- +#define flush_icache_user_page(vma,page,addr,len) do { } while (0)
- +
- +/* Push n pages at kernel virtual address and clear the icache */
- +/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
- +static inline void flush_icache_range(unsigned long address,
- + unsigned long endaddr)
- +{
- + unsigned long set;
- + unsigned long start_set;
- + unsigned long end_set;
- +
- + start_set = address & _ICACHE_SET_MASK;
- + end_set = endaddr & _ICACHE_SET_MASK;
- +
- + if (start_set > end_set) {
- + /* from set 0 up to the wrapped end of the range */
- + for (set = 0; set <= end_set; set += (0x10 - 3)) {
- + asm volatile("cpushl %%ic,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%ic,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%ic,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
- + }
- + /* the next loop covers the rest, up to the top of the cache */
- + end_set = LAST_ICACHE_ADDR;
- + }
- + for (set = start_set; set <= end_set; set += (0x10 - 3)) {
- + asm volatile("cpushl %%ic,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%ic,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%ic,(%0)\n"
- + "\taddq%.l #1,%0\n"
- + "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
- + }
- +}
- +
- +static inline void copy_to_user_page(struct vm_area_struct *vma,
- + struct page *page, unsigned long vaddr,
- + void *dst, void *src, int len)
- +{
- + memcpy(dst, src, len);
- + flush_icache_user_page(vma, page, vaddr, len);
- +}
- +static inline void copy_from_user_page(struct vm_area_struct *vma,
- + struct page *page, unsigned long vaddr,
- + void *dst, void *src, int len)
- +{
- + memcpy(dst, src, len);
- +}
- +
- +#define flush_cache_vmap(start, end) flush_cache_all()
- +#define flush_cache_vunmap(start, end) flush_cache_all()
- +#define flush_dcache_mmap_lock(mapping) do { } while (0)
- +#define flush_dcache_mmap_unlock(mapping) do { } while (0)
- +
- +#endif /* M68K_CF_548x_CACHEFLUSH_H */
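- 
- A typical caller of flush_icache_range() on this family is code that was just written through the data cache; an illustrative sketch (dst, code, len are not from this patch):
- 
-     memcpy(dst, code, len);                        /* text lands in the d-cache */
-     flush_icache_range((unsigned long)dst,
-                        (unsigned long)dst + len);  /* make i-fetch see it       */
- 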
- --- a/include/asm-m68k/cf_cacheflush.h
- +++ b/include/asm-m68k/cf_cacheflush.h
- @@ -1,244 +1,10 @@
- #ifndef M68K_CF_CACHEFLUSH_H
- #define M68K_CF_CACHEFLUSH_H
-
- -#include <asm/cfcache.h>
- -/*
- - * Cache handling functions
- - */
- -
- -#define flush_icache() \
- -({ \
- - unsigned long set; \
- - unsigned long start_set; \
- - unsigned long end_set; \
- - \
- - start_set = 0; \
- - end_set = (unsigned long)LAST_DCACHE_ADDR; \
- - \
- - for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
- - asm volatile("cpushl %%ic,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%ic,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%ic,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set)); \
- - } \
- -})
- -
- -#define flush_dcache() \
- -({ \
- - unsigned long set; \
- - unsigned long start_set; \
- - unsigned long end_set; \
- - \
- - start_set = 0; \
- - end_set = (unsigned long)LAST_DCACHE_ADDR; \
- - \
- - for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
- - asm volatile("cpushl %%dc,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%dc,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%dc,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%dc,(%0)" : "=a" (set) : "a" (set)); \
- - } \
- -})
- -
- -#define flush_bcache() \
- -({ \
- - unsigned long set; \
- - unsigned long start_set; \
- - unsigned long end_set; \
- - \
- - start_set = 0; \
- - end_set = (unsigned long)LAST_DCACHE_ADDR; \
- - \
- - for (set = start_set; set <= end_set; set += (0x10 - 3)) { \
- - asm volatile("cpushl %%bc,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%bc,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%bc,(%0)\n" \
- - "\taddq%.l #1,%0\n" \
- - "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set)); \
- - } \
- -})
- -
- -/*
- - * invalidate the cache for the specified memory range.
- - * It starts at the physical address specified for
- - * the given number of bytes.
- - */
- -extern void cache_clear(unsigned long paddr, int len);
- -/*
- - * push any dirty cache in the specified memory range.
- - * It starts at the physical address specified for
- - * the given number of bytes.
- - */
- -extern void cache_push(unsigned long paddr, int len);
- -
- -/*
- - * push and invalidate pages in the specified user virtual
- - * memory range.
- - */
- -extern void cache_push_v(unsigned long vaddr, int len);
- -
- -/* This is needed whenever the virtual mapping of the current
- - process changes. */
- -
- -/**
- - * flush_cache_mm - Flush an mm_struct
- - * @mm: mm_struct to flush
- - */
- -static inline void flush_cache_mm(struct mm_struct *mm)
- -{
- - if (mm == current->mm)
- - flush_bcache();
- -}
- -
- -#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
- -
- -#define flush_cache_all() flush_bcache()
- -
- -/**
- - * flush_cache_range - Flush a cache range
- - * @vma: vma struct
- - * @start: Starting address
- - * @end: Ending address
- - *
- - * flush_cache_range must be a macro to avoid a dependency on
- - * linux/mm.h which includes this file.
- - */
- -static inline void flush_cache_range(struct vm_area_struct *vma,
- - unsigned long start, unsigned long end)
- -{
- - if (vma->vm_mm == current->mm)
- - flush_bcache();
- -// cf_cache_flush_range(start, end);
- -}
- -
- -/**
- - * flush_cache_page - Flush a page of the cache
- - * @vma: vma struct
- - * @vmaddr:
- - * @pfn: page numer
- - *
- - * flush_cache_page must be a macro to avoid a dependency on
- - * linux/mm.h which includes this file.
- - */
- -static inline void flush_cache_page(struct vm_area_struct *vma,
- - unsigned long vmaddr, unsigned long pfn)
- -{
- - if (vma->vm_mm == current->mm)
- - flush_bcache();
- -// cf_cache_flush_range(vmaddr, vmaddr+PAGE_SIZE);
- -}
- -
- -/* Push the page at kernel virtual address and clear the icache */
- -/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
- -#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
- -extern inline void __flush_page_to_ram(void *address)
- -{
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- - unsigned long addr = (unsigned long) address;
- -
- - addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
- -
- - start_set = addr & _ICACHE_SET_MASK;
- - end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3)) {
- - asm volatile("cpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
- - }
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_ICACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3)) {
- - asm volatile("cpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%bc,(%0)" : "=a" (set) : "a" (set));
- - }
- -}
- -
- -/* Use __flush_page_to_ram() for flush_dcache_page all values are same - MW */
- -#define flush_dcache_page(page) \
- - __flush_page_to_ram((void *) page_address(page))
- -#define flush_icache_page(vma,pg) \
- - __flush_page_to_ram((void *) page_address(pg))
- -#define flush_icache_user_range(adr,len) do { } while (0)
- -/* NL */
- -#define flush_icache_user_page(vma,page,addr,len) do { } while (0)
- -
- -/* Push n pages at kernel virtual address and clear the icache */
- -/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
- -extern inline void flush_icache_range (unsigned long address,
- - unsigned long endaddr)
- -{
- - unsigned long set;
- - unsigned long start_set;
- - unsigned long end_set;
- -
- - start_set = address & _ICACHE_SET_MASK;
- - end_set = endaddr & _ICACHE_SET_MASK;
- -
- - if (start_set > end_set) {
- - /* from the begining to the lowest address */
- - for (set = 0; set <= end_set; set += (0x10 - 3)) {
- - asm volatile("cpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
- - }
- - /* next loop will finish the cache ie pass the hole */
- - end_set = LAST_ICACHE_ADDR;
- - }
- - for (set = start_set; set <= end_set; set += (0x10 - 3)) {
- - asm volatile("cpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)\n"
- - "\taddq%.l #1,%0\n"
- - "\tcpushl %%ic,(%0)" : "=a" (set) : "a" (set));
- - }
- -}
- -
- -static inline void copy_to_user_page(struct vm_area_struct *vma,
- - struct page *page, unsigned long vaddr,
- - void *dst, void *src, int len)
- -{
- - memcpy(dst, src, len);
- - flush_icache_user_page(vma, page, vaddr, len);
- -}
- -static inline void copy_from_user_page(struct vm_area_struct *vma,
- - struct page *page, unsigned long vaddr,
- - void *dst, void *src, int len)
- -{
- - memcpy(dst, src, len);
- -}
- -
- -#define flush_cache_vmap(start, end) flush_cache_all()
- -#define flush_cache_vunmap(start, end) flush_cache_all()
- -#define flush_dcache_mmap_lock(mapping) do { } while (0)
- -#define flush_dcache_mmap_unlock(mapping) do { } while (0)
- +#ifdef CONFIG_M5445X
- +#include "cf_5445x_cacheflush.h"
- +#else
- +#include "cf_548x_cacheflush.h"
- +#endif
-
- #endif /* M68K_CF_CACHEFLUSH_H */
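- 
- With this split applied, cf_cacheflush.h is only a family dispatcher; a sketch of what a caller sees, assuming CONFIG_M5445X is selected by the platform Kconfig:
- 
-     #include <asm/cf_cacheflush.h>
-     /* CONFIG_M5445X=y -> cf_5445x_cacheflush.h (CPUSHL by physical address) */
-     /* otherwise       -> cf_548x_cacheflush.h  (CPUSHL set/way loops)       */
- 
-     flush_cache_all();      /* expands to flush_bcache() on either family */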