003-mcfv4e_coldfire_headers2.patch 55 KB

  1. From 3da86cd2810e9ba4e4a9e7471a92025172c1c990 Mon Sep 17 00:00:00 2001
  2. From: Kurt Mahan <[email protected]>
  3. Date: Wed, 31 Oct 2007 16:41:41 -0600
  4. Subject: [PATCH] Add Coldfire specific header files.
  5. LTIBName: mcfv4e-coldfire-headers2
  6. Signed-off-by: Kurt Mahan <[email protected]>
  7. ---
  8. include/asm-m68k/cf_cacheflush.h | 160 ++++++++++++++++
  9. include/asm-m68k/cf_entry.h | 146 +++++++++++++++
  10. include/asm-m68k/cf_pgalloc.h | 99 ++++++++++
  11. include/asm-m68k/cf_pgtable.h | 357 ++++++++++++++++++++++++++++++++++++
  12. include/asm-m68k/cf_tlbflush.h | 59 ++++++
  13. include/asm-m68k/cf_uaccess.h | 376 ++++++++++++++++++++++++++++++++++++++
  14. include/asm-m68k/cfcache.h | 86 +++++++++
  15. include/asm-m68k/cfmmu.h | 104 +++++++++++
  16. include/asm-m68k/coldfire.h | 38 ++++
  17. include/asm-m68k/coldfire_edma.h | 39 ++++
  18. include/asm-m68k/mcfqspi.h | 50 +++++
  19. include/asm-m68k/mcfsim.h | 96 ++++++++++
  20. include/asm-m68k/mcfuart.h | 180 ++++++++++++++++++
  21. 13 files changed, 1790 insertions(+), 0 deletions(-)
  22. create mode 100644 include/asm-m68k/cf_cacheflush.h
  23. create mode 100644 include/asm-m68k/cf_entry.h
  24. create mode 100644 include/asm-m68k/cf_pgalloc.h
  25. create mode 100644 include/asm-m68k/cf_pgtable.h
  26. create mode 100644 include/asm-m68k/cf_tlbflush.h
  27. create mode 100644 include/asm-m68k/cf_uaccess.h
  28. create mode 100644 include/asm-m68k/cfcache.h
  29. create mode 100644 include/asm-m68k/cfmmu.h
  30. create mode 100644 include/asm-m68k/coldfire.h
  31. create mode 100644 include/asm-m68k/coldfire_edma.h
  32. create mode 100644 include/asm-m68k/mcfqspi.h
  33. create mode 100644 include/asm-m68k/mcfsim.h
  34. create mode 100644 include/asm-m68k/mcfuart.h
  35. --- /dev/null
  36. +++ b/include/asm-m68k/cf_cacheflush.h
  37. @@ -0,0 +1,160 @@
  38. +#ifndef M68K_CF_CACHEFLUSH_H
  39. +#define M68K_CF_CACHEFLUSH_H
  40. +
  41. +#include <asm/cfcache.h>
  42. +
  43. +/*
  44. + * Cache handling functions
  45. + */
  46. +
  47. +#define flush_icache() \
  48. +({ \
  49. + unsigned long set; \
  50. + unsigned long start_set; \
  51. + unsigned long end_set; \
  52. + \
  53. + start_set = 0; \
  54. + end_set = (unsigned long)LAST_DCACHE_ADDR; \
  55. + \
  56. + for (set = start_set; set <= end_set; set += (0x10 - 3)) \
  57. + asm volatile("cpushl %%ic,(%0)\n" \
  58. + "\taddq%.l #1,%0\n" \
  59. + "\tcpushl %%ic,(%0)\n" \
  60. + "\taddq%.l #1,%0\n" \
  61. + "\tcpushl %%ic,(%0)\n" \
  62. + "\taddq%.l #1,%0\n" \
  63. + "\tcpushl %%ic,(%0)" : : "a" (set)); \
  64. +})
  65. +
  66. +/*
  67. + * invalidate the cache for the specified memory range.
  68. + * It starts at the physical address specified for
  69. + * the given number of bytes.
  70. + */
  71. +extern void cache_clear(unsigned long paddr, int len);
  72. +/*
  73. + * push any dirty cache in the specified memory range.
  74. + * It starts at the physical address specified for
  75. + * the given number of bytes.
  76. + */
  77. +extern void cache_push(unsigned long paddr, int len);
  78. +
  79. +/*
  80. + * push and invalidate pages in the specified user virtual
  81. + * memory range.
  82. + */
  83. +extern void cache_push_v(unsigned long vaddr, int len);
  84. +
  85. +/* This is needed whenever the virtual mapping of the current
  86. + process changes. */
  87. +
  88. +
  89. +#define flush_cache_all() do { } while (0)
  90. +#define flush_cache_mm(mm) do { } while (0)
  91. +#define flush_cache_range(mm, a, b) do { } while (0)
  92. +#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
  93. +
  94. +#define flush_dcache_range(paddr, len) do { } while (0)
  95. +
  96. +/* Push the page at kernel virtual address and clear the icache */
  97. +/* use cpush %bc instead of cpush %dc, cinv %ic */
  98. +#define flush_page_to_ram(page) __flush_page_to_ram((void *) page_address(page))
  99. +extern inline void __flush_page_to_ram(void *address)
  100. +{
  101. + unsigned long set;
  102. + unsigned long start_set;
  103. + unsigned long end_set;
  104. + unsigned long addr = (unsigned long) address;
  105. +
  106. + addr &= ~(PAGE_SIZE - 1); /* round down to page start address */
  107. +
  108. + start_set = addr & _ICACHE_SET_MASK;
  109. + end_set = (addr + PAGE_SIZE-1) & _ICACHE_SET_MASK;
  110. +
  111. + if (start_set > end_set) {
  112. + /* from the beginning to the lowest address */
  113. + for (set = 0; set <= end_set; set += (0x10 - 3))
  114. + asm volatile("cpushl %%bc,(%0)\n"
  115. + "\taddq%.l #1,%0\n"
  116. + "\tcpushl %%bc,(%0)\n"
  117. + "\taddq%.l #1,%0\n"
  118. + "\tcpushl %%bc,(%0)\n"
  119. + "\taddq%.l #1,%0\n"
  120. + "\tcpushl %%bc,(%0)" : : "a" (set));
  121. +
  122. + /* next loop will finish the cache i.e. pass the hole */
  123. + end_set = LAST_ICACHE_ADDR;
  124. + }
  125. + for (set = start_set; set <= end_set; set += (0x10 - 3))
  126. + asm volatile("cpushl %%bc,(%0)\n"
  127. + "\taddq%.l #1,%0\n"
  128. + "\tcpushl %%bc,(%0)\n"
  129. + "\taddq%.l #1,%0\n"
  130. + "\tcpushl %%bc,(%0)\n"
  131. + "\taddq%.l #1,%0\n"
  132. + "\tcpushl %%bc,(%0)" : : "a" (set));
  133. +}
  134. +
  135. +#define flush_dcache_page(page) do { } while (0)
  136. +#define flush_icache_page(vma, pg) do { } while (0)
  137. +#define flush_icache_user_range(adr, len) do { } while (0)
  138. +/* NL */
  139. +#define flush_icache_user_page(vma, page, addr, len) do { } while (0)
  140. +
  141. +/* Push n pages at kernel virtual address and clear the icache */
  142. +/* use cpush %bc instead of cpush %dc, cinv %ic */
  143. +extern inline void flush_icache_range(unsigned long address,
  144. + unsigned long endaddr)
  145. +{
  146. + unsigned long set;
  147. + unsigned long start_set;
  148. + unsigned long end_set;
  149. +
  150. + start_set = address & _ICACHE_SET_MASK;
  151. + end_set = endaddr & _ICACHE_SET_MASK;
  152. +
  153. + if (start_set > end_set) {
  154. + /* from the beginning to the lowest address */
  155. + for (set = 0; set <= end_set; set += (0x10 - 3))
  156. + asm volatile("cpushl %%ic,(%0)\n"
  157. + "\taddq%.l #1,%0\n"
  158. + "\tcpushl %%ic,(%0)\n"
  159. + "\taddq%.l #1,%0\n"
  160. + "\tcpushl %%ic,(%0)\n"
  161. + "\taddq%.l #1,%0\n"
  162. + "\tcpushl %%ic,(%0)" : : "a" (set));
  163. +
  164. + /* next loop will finish the cache i.e. pass the hole */
  165. + end_set = LAST_ICACHE_ADDR;
  166. + }
  167. + for (set = start_set; set <= end_set; set += (0x10 - 3))
  168. + asm volatile("cpushl %%ic,(%0)\n"
  169. + "\taddq%.l #1,%0\n"
  170. + "\tcpushl %%ic,(%0)\n"
  171. + "\taddq%.l #1,%0\n"
  172. + "\tcpushl %%ic,(%0)\n"
  173. + "\taddq%.l #1,%0\n"
  174. + "\tcpushl %%ic,(%0)" : : "a" (set));
  175. +}
  176. +
  177. +static inline void copy_to_user_page(struct vm_area_struct *vma,
  178. + struct page *page, unsigned long vaddr,
  179. + void *dst, void *src, int len)
  180. +{
  181. + memcpy(dst, src, len);
  182. + flush_icache_user_page(vma, page, vaddr, len);
  183. +}
  184. +static inline void copy_from_user_page(struct vm_area_struct *vma,
  185. + struct page *page, unsigned long vaddr,
  186. + void *dst, void *src, int len)
  187. +{
  188. + memcpy(dst, src, len);
  189. +}
  190. +
  191. +#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
  192. +#define flush_cache_vmap(start, end) flush_cache_all()
  193. +#define flush_cache_vunmap(start, end) flush_cache_all()
  194. +#define flush_dcache_mmap_lock(mapping) do { } while (0)
  195. +#define flush_dcache_mmap_unlock(mapping) do { } while (0)
  196. +
  197. +#endif /* M68K_CF_CACHEFLUSH_H */
  198. --- /dev/null
  199. +++ b/include/asm-m68k/cf_entry.h
  200. @@ -0,0 +1,146 @@
  201. +#ifndef __CF_M68K_ENTRY_H
  202. +#define __CF_M68K_ENTRY_H
  203. +
  204. +#include <asm/setup.h>
  205. +#include <asm/page.h>
  206. +#include <asm/coldfire.h>
  207. +#include <asm/cfmmu.h>
  208. +#include <asm/asm-offsets.h>
  209. +
  210. +/*
  211. + * Stack layout in 'ret_from_exception':
  212. + *
  213. + * This allows access to the syscall arguments in registers d1-d5
  214. + *
  215. + * 0(sp) - d1
  216. + * 4(sp) - d2
  217. + * 8(sp) - d3
  218. + * C(sp) - d4
  219. + * 10(sp) - d5
  220. + * 14(sp) - a0
  221. + * 18(sp) - a1
  222. + * 1C(sp) - a2
  223. + * 20(sp) - d0
  224. + * 24(sp) - orig_d0
  225. + * 28(sp) - stack adjustment
  226. + * 2C(sp) - sr
  227. + * 2E(sp) - pc
  228. + * 32(sp) - format & vector
  229. + * 36(sp) - MMUSR
  230. + * 3A(sp) - MMUAR
  231. + */
  232. +
  233. +/*
  234. + * 97/05/14 Andreas: Register %a2 is now set to the current task throughout
  235. + * the whole kernel.
  236. + */
  237. +
  238. +/* the following macro is used when enabling interrupts */
  239. +/* portable version */
  240. +#define ALLOWINT (~0x700)
  241. +#define MAX_NOINT_IPL 0
  242. +
  243. +#ifdef __ASSEMBLY__
  244. +
  245. +#define curptr a2
  246. +
  247. +LFLUSH_I_AND_D = 0x00000808
  248. +LSIGTRAP = 5
  249. +
  250. +/* process bits for task_struct.ptrace */
  251. +PT_TRACESYS_OFF = 3
  252. +PT_TRACESYS_BIT = 1
  253. +PT_PTRACED_OFF = 3
  254. +PT_PTRACED_BIT = 0
  255. +PT_DTRACE_OFF = 3
  256. +PT_DTRACE_BIT = 2
  257. +
  258. +#define SAVE_ALL_INT save_all_int
  259. +#define SAVE_ALL_SYS save_all_sys
  260. +#define RESTORE_ALL restore_all
  261. +/*
  262. + * This defines the normal kernel pt-regs layout.
  263. + *
  264. + * regs a3-a6 and d6-d7 are preserved by C code
  265. + * the kernel doesn't mess with usp unless it needs to
  266. + */
  267. +
  268. +/*
  269. + * a -1 in the orig_d0 field signifies
  270. + * that the stack frame is NOT for syscall
  271. + */
  272. +.macro save_all_int
  273. + movel MMUSR,%sp@-
  274. + movel MMUAR,%sp@-
  275. + clrl %sp@- | stk_adj
  276. + pea -1:w | orig d0
  277. + movel %d0,%sp@- | d0
  278. + subal #(8*4), %sp
  279. + moveml %d1-%d5/%a0-%a1/%curptr,%sp@
  280. +.endm
  281. +
  282. +.macro save_all_sys
  283. + movel MMUSR,%sp@-
  284. + movel MMUAR,%sp@-
  285. + clrl %sp@- | stk_adj
  286. + movel %d0,%sp@- | orig d0
  287. + movel %d0,%sp@- | d0
  288. + subal #(8*4), %sp
  289. + moveml %d1-%d5/%a0-%a1/%curptr,%sp@
  290. +.endm
  291. +
  292. +.macro restore_all
  293. + moveml %sp@,%a0-%a1/%curptr/%d1-%d5
  294. + addal #(8*4), %sp
  295. + movel %sp@+,%d0 | d0
  296. + addql #4,%sp | orig d0
  297. + addl %sp@+,%sp | stk_adj
  298. + addql #8,%sp | MMUAR & MMUSR
  299. + rte
  300. +.endm
  301. +
  302. +#define SWITCH_STACK_SIZE (6*4+4) /* includes return address */
  303. +
  304. +#define SAVE_SWITCH_STACK save_switch_stack
  305. +#define RESTORE_SWITCH_STACK restore_switch_stack
  306. +#define GET_CURRENT(tmp) get_current tmp
  307. +
  308. +.macro save_switch_stack
  309. + subal #(6*4), %sp
  310. + moveml %a3-%a6/%d6-%d7,%sp@
  311. +.endm
  312. +
  313. +.macro restore_switch_stack
  314. + moveml %sp@,%a3-%a6/%d6-%d7
  315. + addal #(6*4), %sp
  316. +.endm
  317. +
  318. +.macro get_current reg=%d0
  319. + movel %sp,\reg
  320. + andl #-THREAD_SIZE,\reg
  321. + movel \reg,%curptr
  322. + movel %curptr@,%curptr
  323. +.endm
  324. +
  325. +#else /* C source */
  326. +
  327. +#define STR(X) STR1(X)
  328. +#define STR1(X) #X
  329. +
  330. +#define PT_OFF_ORIG_D0 0x24
  331. +#define PT_OFF_FORMATVEC 0x32
  332. +#define PT_OFF_SR 0x2C
  333. +#define SAVE_ALL_INT \
  334. + "clrl %%sp@-;" /* stk_adj */ \
  335. + "pea -1:w;" /* orig d0 = -1 */ \
  336. + "movel %%d0,%%sp@-;" /* d0 */ \
  337. + "subal #(8*4),%sp" \
  338. + "moveml %%d1-%%d5/%%a0-%%a2,%%sp@"
  339. +#define GET_CURRENT(tmp) \
  340. + "movel %%sp,"#tmp"\n\t" \
  341. + "andw #-"STR(THREAD_SIZE)","#tmp"\n\t" \
  342. + "movel "#tmp",%%a2\n\t"
  343. +
  344. +#endif
  345. +
  346. +#endif /* __CF_M68K_ENTRY_H */
  347. --- /dev/null
  348. +++ b/include/asm-m68k/cf_pgalloc.h
  349. @@ -0,0 +1,99 @@
  350. +#ifndef M68K_CF_PGALLOC_H
  351. +#define M68K_CF_PGALLOC_H
  352. +
  353. +#include <asm/coldfire.h>
  354. +#include <asm/page.h>
  355. +#include <asm/cf_tlbflush.h>
  356. +
  357. +extern inline void pte_free_kernel(pte_t *pte)
  358. +{
  359. + free_page((unsigned long) pte);
  360. +}
  361. +
  362. +extern const char bad_pmd_string[];
  363. +
  364. +extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
  365. + unsigned long address)
  366. +{
  367. + unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
  368. +
  369. + if (!page)
  370. + return NULL;
  371. +
  372. + memset((void *)page, 0, PAGE_SIZE);
  373. + return (pte_t *) (page);
  374. +}
  375. +
  376. +extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
  377. +{
  378. + return (pmd_t *) pgd;
  379. +}
  380. +
  381. +#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
  382. +#define pmd_alloc_one(mm, address) ({ BUG(); ((pmd_t *)2); })
  383. +
  384. +#define pte_alloc_one_fast(mm, addr) pte_alloc_one(mm, addr)
  385. +
  386. +#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
  387. + (unsigned long)(page_address(page)))
  388. +#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
  389. +
  390. +static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *page)
  391. +{
  392. + __free_page(page);
  393. +}
  394. +
  395. +#define __pmd_free_tlb(tlb, pmd) do { } while (0)
  396. +
  397. +static inline struct page *pte_alloc_one(struct mm_struct *mm,
  398. + unsigned long address)
  399. +{
  400. + struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
  401. + pte_t *pte;
  402. +
  403. + if (!page)
  404. + return NULL;
  405. +
  406. + pte = kmap(page);
  407. + if (pte) {
  408. + clear_page(pte);
  409. + __flush_page_to_ram(pte);
  410. + flush_tlb_kernel_page(pte);
  411. + nocache_page(pte);
  412. + }
  413. + kunmap(pte);
  414. +
  415. + return page;
  416. +}
  417. +
  418. +extern inline void pte_free(struct page *page)
  419. +{
  420. + __free_page(page);
  421. +}
  422. +
  423. +/*
  424. + * In our implementation, each pgd entry contains 1 pmd that is never allocated
  425. + * or freed. pgd_present is always 1, so this should never be called. -NL
  426. + */
  427. +#define pmd_free(pmd) BUG()
  428. +
  429. +extern inline void pgd_free(pgd_t *pgd)
  430. +{
  431. + free_page((unsigned long) pgd);
  432. +}
  433. +
  434. +extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
  435. +{
  436. + pgd_t *new_pgd;
  437. +
  438. + new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
  439. + if (!new_pgd)
  440. + return NULL;
  441. + memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
  442. + memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
  443. + return new_pgd;
  444. +}
  445. +
  446. +#define pgd_populate(mm, pmd, pte) BUG()
  447. +
  448. +#endif /* M68K_CF_PGALLOC_H */
  449. --- /dev/null
  450. +++ b/include/asm-m68k/cf_pgtable.h
  451. @@ -0,0 +1,357 @@
  452. +#ifndef _CF_PGTABLE_H
  453. +#define _CF_PGTABLE_H
  454. +
  455. +#include <asm/cfmmu.h>
  456. +#include <asm/page.h>
  457. +
  458. +#ifndef __ASSEMBLY__
  459. +#include <asm/virtconvert.h>
  460. +#include <linux/linkage.h>
  461. +
  462. +/* For virtual address to physical address conversion */
  463. +#define VTOP(addr) __pa(addr)
  464. +#define PTOV(addr) __va(addr)
  465. +
  466. +
  467. +#endif /* !__ASSEMBLY__ */
  468. +
  469. +/* Page protection values within PTE. */
  470. +
  471. +/* MMUDR bits, in proper place. */
  472. +#define CF_PAGE_LOCKED (0x00000002)
  473. +#define CF_PAGE_EXEC (0x00000004)
  474. +#define CF_PAGE_WRITABLE (0x00000008)
  475. +#define CF_PAGE_READABLE (0x00000010)
  476. +#define CF_PAGE_SYSTEM (0x00000020)
  477. +#define CF_PAGE_COPYBACK (0x00000040)
  478. +#define CF_PAGE_NOCACHE (0x00000080)
  479. +
  480. +#define CF_CACHEMASK (~0x00000040)
  481. +#define CF_PAGE_MMUDR_MASK (0x000000fe)
  482. +
  483. +#define _PAGE_NOCACHE030 (CF_PAGE_NOCACHE)
  484. +
  485. +/* MMUTR bits, need shifting down. */
  486. +#define CF_PAGE_VALID (0x00000400)
  487. +#define CF_PAGE_SHARED (0x00000800)
  488. +
  489. +#define CF_PAGE_MMUTR_MASK (0x00000c00)
  490. +#define CF_PAGE_MMUTR_SHIFT (10)
  491. +#define CF_ASID_MMU_SHIFT (2)
  492. +
  493. +/* Fake bits, not implemented in CF, will get masked out before
  494. + hitting hardware, and might go away altogether once this port is
  495. + complete. */
  496. +#if PAGE_SHIFT < 13
  497. +#error COLDFIRE Error: Pages must be at least 8k in size
  498. +#endif
  499. +#define CF_PAGE_ACCESSED (0x00001000)
  500. +#define CF_PAGE_FILE (0x00000200)
  501. +#define CF_PAGE_DIRTY (0x00000001)
  502. +
  503. +#define _PAGE_CACHE040 0x020 /* 68040 cache mode, cachable, copyback */
  504. +#define _PAGE_NOCACHE_S 0x040 /* 68040 no-cache mode, serialized */
  505. +#define _PAGE_NOCACHE 0x060 /* 68040 cache mode, non-serialized */
  506. +#define _PAGE_CACHE040W 0x000 /* 68040 cache mode, cachable, write-through */
  507. +#define _DESCTYPE_MASK 0x003
  508. +#define _CACHEMASK040 (~0x060)
  509. +#define _PAGE_GLOBAL040 0x400 /* 68040 global bit, used for kva descs */
  510. +
  511. +
  512. +/* Externally used page protection values. */
  513. +#define _PAGE_PRESENT (CF_PAGE_VALID)
  514. +#define _PAGE_ACCESSED (CF_PAGE_ACCESSED)
  515. +#define _PAGE_DIRTY (CF_PAGE_DIRTY)
  516. +#define _PAGE_READWRITE (CF_PAGE_WRITABLE \
  517. + | CF_PAGE_READABLE \
  518. + | CF_PAGE_SYSTEM \
  519. + | CF_PAGE_SHARED)
  520. +
  521. +/* Compound page protection values. */
  522. +#define PAGE_NONE __pgprot(CF_PAGE_VALID \
  523. + | CF_PAGE_ACCESSED)
  524. +
  525. +#define PAGE_SHARED __pgprot(CF_PAGE_VALID \
  526. + | CF_PAGE_ACCESSED \
  527. + | CF_PAGE_SHARED)
  528. +
  529. +#define PAGE_INIT __pgprot(CF_PAGE_VALID \
  530. + | CF_PAGE_WRITABLE \
  531. + | CF_PAGE_READABLE \
  532. + | CF_PAGE_EXEC \
  533. + | CF_PAGE_SYSTEM \
  534. + | CF_PAGE_SHARED)
  535. +
  536. +#define PAGE_KERNEL __pgprot(CF_PAGE_VALID \
  537. + | CF_PAGE_WRITABLE \
  538. + | CF_PAGE_READABLE \
  539. + | CF_PAGE_EXEC \
  540. + | CF_PAGE_SYSTEM \
  541. + | CF_PAGE_SHARED \
  542. + | CF_PAGE_ACCESSED)
  543. +
  544. +#define PAGE_COPY __pgprot(CF_PAGE_VALID \
  545. + | CF_PAGE_ACCESSED \
  546. + | CF_PAGE_READABLE \
  547. + | CF_PAGE_DIRTY)
  548. +/*
  549. + * Page protections for initialising protection_map. See mm/mmap.c
  550. + * for use. In general, the bit positions are xwr, and P-items are
  551. + * private, the S-items are shared.
  552. + */
  553. +
  554. +#define __P000 PAGE_NONE
  555. +#define __P100 __pgprot(CF_PAGE_VALID \
  556. + | CF_PAGE_ACCESSED \
  557. + | CF_PAGE_EXEC)
  558. +#define __P010 __pgprot(CF_PAGE_VALID \
  559. + | CF_PAGE_WRITABLE \
  560. + | CF_PAGE_ACCESSED)
  561. +#define __P110 __pgprot(CF_PAGE_VALID \
  562. + | CF_PAGE_ACCESSED \
  563. + | CF_PAGE_WRITABLE \
  564. + | CF_PAGE_EXEC)
  565. +#define __P001 __pgprot(CF_PAGE_VALID \
  566. + | CF_PAGE_ACCESSED \
  567. + | CF_PAGE_READABLE)
  568. +#define __P101 __pgprot(CF_PAGE_VALID \
  569. + | CF_PAGE_ACCESSED \
  570. + | CF_PAGE_READABLE \
  571. + | CF_PAGE_EXEC)
  572. +#define __P011 __pgprot(CF_PAGE_VALID \
  573. + | CF_PAGE_READABLE \
  574. + | CF_PAGE_WRITABLE \
  575. + | CF_PAGE_ACCESSED)
  576. +#define __P111 __pgprot(CF_PAGE_VALID \
  577. + | CF_PAGE_ACCESSED \
  578. + | CF_PAGE_WRITABLE \
  579. + | CF_PAGE_READABLE \
  580. + | CF_PAGE_EXEC)
  581. +
  582. +#define __S000 PAGE_NONE
  583. +#define __S100 __pgprot(CF_PAGE_VALID \
  584. + | CF_PAGE_ACCESSED \
  585. + | CF_PAGE_SHARED \
  586. + | CF_PAGE_EXEC)
  587. +#define __S010 PAGE_SHARED
  588. +#define __S110 __pgprot(CF_PAGE_VALID \
  589. + | CF_PAGE_ACCESSED \
  590. + | CF_PAGE_SHARED \
  591. + | CF_PAGE_EXEC)
  592. +#define __S001 __pgprot(CF_PAGE_VALID \
  593. + | CF_PAGE_ACCESSED \
  594. + | CF_PAGE_SHARED \
  595. + | CF_PAGE_READABLE)
  596. +#define __S101 __pgprot(CF_PAGE_VALID \
  597. + | CF_PAGE_ACCESSED \
  598. + | CF_PAGE_SHARED \
  599. + | CF_PAGE_READABLE \
  600. + | CF_PAGE_EXEC)
  601. +#define __S011 __pgprot(CF_PAGE_VALID \
  602. + | CF_PAGE_ACCESSED \
  603. + | CF_PAGE_SHARED \
  604. + | CF_PAGE_READABLE)
  605. +#define __S111 __pgprot(CF_PAGE_VALID \
  606. + | CF_PAGE_ACCESSED \
  607. + | CF_PAGE_SHARED \
  608. + | CF_PAGE_READABLE \
  609. + | CF_PAGE_EXEC)
  610. +
  611. +#define PTE_MASK PAGE_MASK
  612. +#define CF_PAGE_CHG_MASK (PTE_MASK | CF_PAGE_ACCESSED | CF_PAGE_DIRTY)
  613. +
  614. +#ifndef __ASSEMBLY__
  615. +
  616. +/*
  617. + * Conversion functions: convert a page and protection to a page entry,
  618. + * and a page entry and page directory to the page they refer to.
  619. + */
  620. +#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
  621. +
  622. +extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  623. +{
  624. + pte_val(pte) = (pte_val(pte) & CF_PAGE_CHG_MASK) | pgprot_val(newprot);
  625. + return pte;
  626. +}
  627. +
  628. +#define pmd_set(pmdp, ptep) do {} while (0)
  629. +
  630. +extern inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
  631. +{
  632. + pgd_val(*pgdp) = virt_to_phys(pmdp);
  633. +}
  634. +
  635. +#define __pte_page(pte) \
  636. + ((unsigned long) ((pte_val(pte) & CF_PAGE_PGNUM_MASK) + PAGE_OFFSET))
  637. +#define __pmd_page(pmd) ((unsigned long) (pmd_val(pmd)))
  638. +
  639. +extern inline int pte_none(pte_t pte)
  640. +{
  641. + return !pte_val(pte);
  642. +}
  643. +extern inline int pte_present(pte_t pte)
  644. +{
  645. + return pte_val(pte) & CF_PAGE_VALID;
  646. +}
  647. +extern inline void pte_clear(struct mm_struct *mm, unsigned long addr,
  648. + pte_t *ptep)
  649. +{
  650. + pte_val(*ptep) = 0;
  651. +}
  652. +
  653. +#define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
  654. +#define pte_page(pte) virt_to_page(__pte_page(pte))
  655. +
  656. +extern inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
  657. +#define pmd_none(pmd) pmd_none2(&(pmd))
  658. +extern inline int pmd_bad2(pmd_t *pmd) { return 0; }
  659. +#define pmd_bad(pmd) pmd_bad2(&(pmd))
  660. +#define pmd_present(pmd) (!pmd_none2(&(pmd)))
  661. +extern inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
  662. +
  663. +extern inline int pgd_none(pgd_t pgd) { return 0; }
  664. +extern inline int pgd_bad(pgd_t pgd) { return 0; }
  665. +extern inline int pgd_present(pgd_t pgd) { return 1; }
  666. +extern inline void pgd_clear(pgd_t *pgdp) {}
  667. +
  668. +
  669. +#define pte_ERROR(e) \
  670. + printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
  671. + __FILE__, __LINE__, pte_val(e))
  672. +#define pmd_ERROR(e) \
  673. + printk(KERN_ERR "%s:%d: bad pmd %08lx.\n", \
  674. + __FILE__, __LINE__, pmd_val(e))
  675. +#define pgd_ERROR(e) \
  676. + printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", \
  677. + __FILE__, __LINE__, pgd_val(e))
  678. +
  679. +
  680. +/*
  681. + * The following only work if pte_present() is true.
  682. + * Undefined behaviour if not...
  683. + * [we have the full set here even if they don't change from m68k]
  684. + */
  685. +extern inline int pte_read(pte_t pte) \
  686. + { return pte_val(pte) & CF_PAGE_READABLE; }
  687. +extern inline int pte_write(pte_t pte) \
  688. + { return pte_val(pte) & CF_PAGE_WRITABLE; }
  689. +extern inline int pte_exec(pte_t pte) \
  690. + { return pte_val(pte) & CF_PAGE_EXEC; }
  691. +extern inline int pte_dirty(pte_t pte) \
  692. + { return pte_val(pte) & CF_PAGE_DIRTY; }
  693. +extern inline int pte_young(pte_t pte) \
  694. + { return pte_val(pte) & CF_PAGE_ACCESSED; }
  695. +extern inline int pte_file(pte_t pte) \
  696. + { return pte_val(pte) & CF_PAGE_FILE; }
  697. +
  698. +extern inline pte_t pte_wrprotect(pte_t pte) \
  699. + { pte_val(pte) &= ~CF_PAGE_WRITABLE; return pte; }
  700. +extern inline pte_t pte_rdprotect(pte_t pte) \
  701. + { pte_val(pte) &= ~CF_PAGE_READABLE; return pte; }
  702. +extern inline pte_t pte_exprotect(pte_t pte) \
  703. + { pte_val(pte) &= ~CF_PAGE_EXEC; return pte; }
  704. +extern inline pte_t pte_mkclean(pte_t pte) \
  705. + { pte_val(pte) &= ~CF_PAGE_DIRTY; return pte; }
  706. +extern inline pte_t pte_mkold(pte_t pte) \
  707. + { pte_val(pte) &= ~CF_PAGE_ACCESSED; return pte; }
  708. +extern inline pte_t pte_mkwrite(pte_t pte) \
  709. + { pte_val(pte) |= CF_PAGE_WRITABLE; return pte; }
  710. +extern inline pte_t pte_mkread(pte_t pte) \
  711. + { pte_val(pte) |= CF_PAGE_READABLE; return pte; }
  712. +extern inline pte_t pte_mkexec(pte_t pte) \
  713. + { pte_val(pte) |= CF_PAGE_EXEC; return pte; }
  714. +extern inline pte_t pte_mkdirty(pte_t pte) \
  715. + { pte_val(pte) |= CF_PAGE_DIRTY; return pte; }
  716. +extern inline pte_t pte_mkyoung(pte_t pte) \
  717. + { pte_val(pte) |= CF_PAGE_ACCESSED; return pte; }
  718. +extern inline pte_t pte_mknocache(pte_t pte) \
  719. + { pte_val(pte) |= 0x80 | (pte_val(pte) & ~0x40); return pte; }
  720. +extern inline pte_t pte_mkcache(pte_t pte) \
  721. + { pte_val(pte) &= ~CF_PAGE_NOCACHE; return pte; }
  722. +
  723. +#define swapper_pg_dir kernel_pg_dir
  724. +extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
  725. +
  726. +/* Find an entry in a pagetable directory. */
  727. +#define pgd_index(address) ((address) >> PGDIR_SHIFT)
  728. +
  729. +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
  730. +
  731. +/* Find an entry in a kernel pagetable directory. */
  732. +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
  733. +
  734. +/* Find an entry in the second-level pagetable. */
  735. +extern inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
  736. +{
  737. + return (pmd_t *) pgd;
  738. +}
  739. +
  740. +/* Find an entry in the third-level pagetable. */
  741. +#define __pte_offset(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
  742. +#define pte_offset_kernel(dir, address) ((pte_t *) __pmd_page(*(dir)) + \
  743. + __pte_offset(address))
  744. +
  745. +/* Disable caching for page at given kernel virtual address. */
  746. +static inline void nocache_page(void *vaddr)
  747. +{
  748. + pgd_t *dir;
  749. + pmd_t *pmdp;
  750. + pte_t *ptep;
  751. + unsigned long addr = (unsigned long)vaddr;
  752. +
  753. + dir = pgd_offset_k(addr);
  754. + pmdp = pmd_offset(dir, addr);
  755. + ptep = pte_offset_kernel(pmdp, addr);
  756. + *ptep = pte_mknocache(*ptep);
  757. +}
  758. +
  759. +/* Enable caching for page at given kernel virtual address. */
  760. +static inline void cache_page(void *vaddr)
  761. +{
  762. + pgd_t *dir;
  763. + pmd_t *pmdp;
  764. + pte_t *ptep;
  765. + unsigned long addr = (unsigned long)vaddr;
  766. +
  767. + dir = pgd_offset_k(addr);
  768. + pmdp = pmd_offset(dir, addr);
  769. + ptep = pte_offset_kernel(pmdp, addr);
  770. + *ptep = pte_mkcache(*ptep);
  771. +}
  772. +
  773. +#define PTE_FILE_MAX_BITS 21
  774. +#define PTE_FILE_SHIFT 11
  775. +
  776. +static inline unsigned long pte_to_pgoff(pte_t pte)
  777. +{
  778. + return pte_val(pte) >> PTE_FILE_SHIFT;
  779. +}
  780. +
  781. +static inline pte_t pgoff_to_pte(unsigned pgoff)
  782. +{
  783. + pte_t pte = __pte((pgoff << PTE_FILE_SHIFT) + CF_PAGE_FILE);
  784. + return pte;
  785. +}
  786. +
  787. +/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
  788. +#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \
  789. + (offset << PTE_FILE_SHIFT) })
  790. +#define __swp_type(x) ((x).val & 0xFF)
  791. +#define __swp_offset(x) ((x).val >> PTE_FILE_SHIFT)
  792. +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
  793. +#define __swp_entry_to_pte(x) (__pte((x).val))
  794. +
  795. +#define pmd_page(pmd) virt_to_page(__pmd_page(pmd))
  796. +
  797. +#define pte_offset_map(pmdp, address) ((pte_t *)__pmd_page(*pmdp) + \
  798. + __pte_offset(address))
  799. +#define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address)
  800. +#define pte_unmap(pte) kunmap(pte)
  801. +#define pte_unmap_nested(pte) kunmap(pte)
  802. +
  803. +#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
  804. +#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
  805. +
  806. +
  807. +#endif /* !__ASSEMBLY__ */
  808. +#endif /* !_CF_PGTABLE_H */
  809. --- /dev/null
  810. +++ b/include/asm-m68k/cf_tlbflush.h
  811. @@ -0,0 +1,59 @@
  812. +#ifndef M68K_CF_TLBFLUSH_H
  813. +#define M68K_CF_TLBFLUSH_H
  814. +
  815. +#include <asm/coldfire.h>
  816. +
  817. +/* Flush all userspace mappings. */
  818. +static inline void flush_tlb_all(void)
  819. +{
  820. + preempt_disable();
  821. + *MMUOR = MMUOR_CNL;
  822. + preempt_enable();
  823. +}
  824. +
  825. +/* Clear user TLB entries within the context named in mm */
  826. +static inline void flush_tlb_mm(struct mm_struct *mm)
  827. +{
  828. + preempt_disable();
  829. + *MMUOR = MMUOR_CNL;
  830. + preempt_enable();
  831. +}
  832. +
  833. +/* Flush a single TLB page. */
  834. +static inline void flush_tlb_page(struct vm_area_struct *vma,
  835. + unsigned long addr)
  836. +{
  837. + preempt_disable();
  838. + *MMUOR = MMUOR_CNL;
  839. + preempt_enable();
  840. +}
  841. +/* Flush a range of pages from TLB. */
  842. +
  843. +static inline void flush_tlb_range(struct mm_struct *mm,
  844. + unsigned long start, unsigned long end)
  845. +{
  846. + preempt_disable();
  847. + *MMUOR = MMUOR_CNL;
  848. + preempt_enable();
  849. +}
  850. +
  851. +/* Flush kernel page from TLB. */
  852. +static inline void flush_tlb_kernel_page(void *addr)
  853. +{
  854. + preempt_disable();
  855. + *MMUOR = MMUOR_CNL;
  856. + preempt_enable();
  857. +}
  858. +
  859. +static inline void flush_tlb_kernel_range(unsigned long start,
  860. + unsigned long end)
  861. +{
  862. + flush_tlb_all();
  863. +}
  864. +
  865. +extern inline void flush_tlb_pgtables(struct mm_struct *mm,
  866. + unsigned long start, unsigned long end)
  867. +{
  868. +}
  869. +
  870. +#endif /* M68K_CF_TLBFLUSH_H */
  871. --- /dev/null
  872. +++ b/include/asm-m68k/cf_uaccess.h
  873. @@ -0,0 +1,376 @@
  874. +#ifndef __M68K_CF_UACCESS_H
  875. +#define __M68K_CF_UACCESS_H
  876. +
  877. +/*
  878. + * User space memory access functions
  879. + */
  880. +
  881. +/* The "moves" command is not available in the CF instruction set. */
  882. +#include <linux/compiler.h>
  883. +#include <linux/errno.h>
  884. +#include <linux/types.h>
  885. +#include <linux/sched.h>
  886. +#include <asm/segment.h>
  887. +
  888. +#define VERIFY_READ 0
  889. +#define VERIFY_WRITE 1
  890. +
  891. +/* We let the MMU do all checking */
  892. +#define access_ok(type, addr, size) 1
  893. +
  894. +/*
  895. + * The exception table consists of pairs of addresses: the first is the
  896. + * address of an instruction that is allowed to fault, and the second is
  897. + * the address at which the program should continue. No registers are
  898. + * modified, so it is entirely up to the continuation code to figure out
  899. + * what to do.
  900. + *
  901. + * All the routines below use bits of fixup code that are out of line
  902. + * with the main instruction path. This means when everything is well,
  903. + * we don't even have to jump over them. Further, they do not intrude
  904. + * on our cache or tlb entries.
  905. + */
  906. +
  907. +struct exception_table_entry
  908. +{
  909. + unsigned long insn, fixup;
  910. +};
  911. +
  912. +extern int __put_user_bad(void);
  913. +extern int __get_user_bad(void);
  914. +
  915. +#define __put_user_asm(res, x, ptr, bwl, reg, err) \
  916. +asm volatile ("\n" \
  917. + "1: move."#bwl" %2,%1\n" \
  918. + "2:\n" \
  919. + " .section .fixup,\"ax\"\n" \
  920. + " .even\n" \
  921. + "10: moveq.l %3,%0\n" \
  922. + " jra 2b\n" \
  923. + " .previous\n" \
  924. + "\n" \
  925. + " .section __ex_table,\"a\"\n" \
  926. + " .align 4\n" \
  927. + " .long 1b,10b\n" \
  928. + " .long 2b,10b\n" \
  929. + " .previous" \
  930. + : "+d" (res), "=m" (*(ptr)) \
  931. + : #reg (x), "i" (err))
  932. +
  933. +/*
  934. + * These are the main single-value transfer routines. They automatically
  935. + * use the right size if we just have the right pointer type.
  936. + */
  937. +
  938. +#define __put_user(x, ptr) \
  939. +({ \
  940. + typeof(*(ptr)) __pu_val = (x); \
  941. + int __pu_err = 0; \
  942. + __chk_user_ptr(ptr); \
  943. + switch (sizeof (*(ptr))) { \
  944. + case 1: \
  945. + __put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT); \
  946. + break; \
  947. + case 2: \
  948. + __put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT); \
  949. + break; \
  950. + case 4: \
  951. + __put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT); \
  952. + break; \
  953. + case 8: \
  954. + { \
  955. + const void __user *__pu_ptr = (ptr); \
  956. + asm volatile ("\n" \
  957. + "1: move.l %2,(%1)+\n" \
  958. + "2: move.l %R2,(%1)\n" \
  959. + "3:\n" \
  960. + " .section .fixup,\"ax\"\n" \
  961. + " .even\n" \
  962. + "10: movel %3,%0\n" \
  963. + " jra 3b\n" \
  964. + " .previous\n" \
  965. + "\n" \
  966. + " .section __ex_table,\"a\"\n" \
  967. + " .align 4\n" \
  968. + " .long 1b,10b\n" \
  969. + " .long 2b,10b\n" \
  970. + " .long 3b,10b\n" \
  971. + " .previous" \
  972. + : "+d" (__pu_err), "+a" (__pu_ptr) \
  973. + : "r" (__pu_val), "i" (-EFAULT) \
  974. + : "memory"); \
  975. + break; \
  976. + } \
  977. + default: \
  978. + __pu_err = __put_user_bad(); \
  979. + break; \
  980. + } \
  981. + __pu_err; \
  982. +})
  983. +#define put_user(x, ptr) __put_user(x, ptr)
  984. +
  985. +
  986. +#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \
  987. + type __gu_val; \
  988. + asm volatile ("\n" \
  989. + "1: move."#bwl" %2,%1\n" \
  990. + "2:\n" \
  991. + " .section .fixup,\"ax\"\n" \
  992. + " .even\n" \
  993. + "10: move.l %3,%0\n" \
  994. + " subl %1,%1\n" \
  995. + " jra 2b\n" \
  996. + " .previous\n" \
  997. + "\n" \
  998. + " .section __ex_table,\"a\"\n" \
  999. + " .align 4\n" \
  1000. + " .long 1b,10b\n" \
  1001. + " .previous" \
  1002. + : "+d" (res), "=&" #reg (__gu_val) \
  1003. + : "m" (*(ptr)), "i" (err)); \
  1004. + (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \
  1005. +})
  1006. +
  1007. +#define __get_user(x, ptr) \
  1008. +({ \
  1009. + int __gu_err = 0; \
  1010. + __chk_user_ptr(ptr); \
  1011. + switch (sizeof(*(ptr))) { \
  1012. + case 1: \
  1013. + __get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT); \
  1014. + break; \
  1015. + case 2: \
  1016. + __get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT); \
  1017. + break; \
  1018. + case 4: \
  1019. + __get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT); \
  1020. + break; \
  1021. +/* case 8: disabled because gcc-4.1 has a broken typeof \
  1022. + { \
  1023. + const void *__gu_ptr = (ptr); \
  1024. + u64 __gu_val; \
  1025. + asm volatile ("\n" \
  1026. + "1: move.l (%2)+,%1\n" \
  1027. + "2: move.l (%2),%R1\n" \
  1028. + "3:\n" \
  1029. + " .section .fixup,\"ax\"\n" \
  1030. + " .even\n" \
  1031. + "10: move.l %3,%0\n" \
  1032. + " subl %1,%1\n" \
  1033. + " subl %R1,%R1\n" \
  1034. + " jra 3b\n" \
  1035. + " .previous\n" \
  1036. + "\n" \
  1037. + " .section __ex_table,\"a\"\n" \
  1038. + " .align 4\n" \
  1039. + " .long 1b,10b\n" \
  1040. + " .long 2b,10b\n" \
  1041. + " .previous" \
  1042. + : "+d" (__gu_err), "=&r" (__gu_val), \
  1043. + "+a" (__gu_ptr) \
  1044. + : "i" (-EFAULT) \
  1045. + : "memory"); \
  1046. + (x) = (typeof(*(ptr)))__gu_val; \
  1047. + break; \
  1048. + } */ \
  1049. + default: \
  1050. + __gu_err = __get_user_bad(); \
  1051. + break; \
  1052. + } \
  1053. + __gu_err; \
  1054. +})
  1055. +#define get_user(x, ptr) __get_user(x, ptr)
  1056. +
  1057. +unsigned long __generic_copy_from_user(void *to, const void __user *from,
  1058. + unsigned long n);
  1059. +unsigned long __generic_copy_to_user(void __user *to, const void *from,
  1060. + unsigned long n);
  1061. +
  1062. +#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
  1063. + asm volatile ("\n" \
  1064. + "1: move."#s1" (%2)+,%3\n" \
  1065. + " move."#s1" %3,(%1)+\n" \
  1066. + "2: move."#s2" (%2)+,%3\n" \
  1067. + " move."#s2" %3,(%1)+\n" \
  1068. + " .ifnc \""#s3"\",\"\"\n" \
  1069. + "3: move."#s3" (%2)+,%3\n" \
  1070. + " move."#s3" %3,(%1)+\n" \
  1071. + " .endif\n" \
  1072. + "4:\n" \
  1073. + " .section __ex_table,\"a\"\n" \
  1074. + " .align 4\n" \
  1075. + " .long 1b,10f\n" \
  1076. + " .long 2b,20f\n" \
  1077. + " .ifnc \""#s3"\",\"\"\n" \
  1078. + " .long 3b,30f\n" \
  1079. + " .endif\n" \
  1080. + " .previous\n" \
  1081. + "\n" \
  1082. + " .section .fixup,\"ax\"\n" \
  1083. + " .even\n" \
  1084. + "10: clr."#s1" (%1)+\n" \
  1085. + "20: clr."#s2" (%1)+\n" \
  1086. + " .ifnc \""#s3"\",\"\"\n" \
  1087. + "30: clr."#s3" (%1)+\n" \
  1088. + " .endif\n" \
  1089. + " moveq.l #"#n",%0\n" \
  1090. + " jra 4b\n" \
  1091. + " .previous\n" \
  1092. + : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \
  1093. + : : "memory")
  1094. +
  1095. +static __always_inline unsigned long
  1096. +__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
  1097. +{
  1098. + unsigned long res = 0, tmp;
  1099. +
  1100. + switch (n) {
  1101. + case 1:
  1102. + __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
  1103. + break;
  1104. + case 2:
  1105. + __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w,
  1106. + d, 2);
  1107. + break;
  1108. + case 3:
  1109. + __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
  1110. + break;
  1111. + case 4:
  1112. + __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l,
  1113. + r, 4);
  1114. + break;
  1115. + case 5:
  1116. + __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
  1117. + break;
  1118. + case 6:
  1119. + __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
  1120. + break;
  1121. + case 7:
  1122. + __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
  1123. + break;
  1124. + case 8:
  1125. + __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
  1126. + break;
  1127. + case 9:
  1128. + __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
  1129. + break;
  1130. + case 10:
  1131. + __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
  1132. + break;
  1133. + case 12:
  1134. + __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
  1135. + break;
  1136. + default:
  1137. + /* we limit the inlined version to 3 moves */
  1138. + return __generic_copy_from_user(to, from, n);
  1139. + }
  1140. +
  1141. + return res;
  1142. +}
  1143. +
  1144. +#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3) \
  1145. + asm volatile ("\n" \
  1146. + " move."#s1" (%2)+,%3\n" \
  1147. + "11: move."#s1" %3,(%1)+\n" \
  1148. + "12: move."#s2" (%2)+,%3\n" \
  1149. + "21: move."#s2" %3,(%1)+\n" \
  1150. + "22:\n" \
  1151. + " .ifnc \""#s3"\",\"\"\n" \
  1152. + " move."#s3" (%2)+,%3\n" \
  1153. + "31: move."#s3" %3,(%1)+\n" \
  1154. + "32:\n" \
  1155. + " .endif\n" \
  1156. + "4:\n" \
  1157. + "\n" \
  1158. + " .section __ex_table,\"a\"\n" \
  1159. + " .align 4\n" \
  1160. + " .long 11b,5f\n" \
  1161. + " .long 12b,5f\n" \
  1162. + " .long 21b,5f\n" \
  1163. + " .long 22b,5f\n" \
  1164. + " .ifnc \""#s3"\",\"\"\n" \
  1165. + " .long 31b,5f\n" \
  1166. + " .long 32b,5f\n" \
  1167. + " .endif\n" \
  1168. + " .previous\n" \
  1169. + "\n" \
  1170. + " .section .fixup,\"ax\"\n" \
  1171. + " .even\n" \
  1172. + "5: moveq.l #"#n",%0\n" \
  1173. + " jra 4b\n" \
  1174. + " .previous\n" \
  1175. + : "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp) \
  1176. + : : "memory")
  1177. +
  1178. +static __always_inline unsigned long
  1179. +__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
  1180. +{
  1181. + unsigned long res = 0, tmp;
  1182. +
  1183. + switch (n) {
  1184. + case 1:
  1185. + __put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
  1186. + break;
  1187. + case 2:
  1188. + __put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
  1189. + break;
  1190. + case 3:
  1191. + __constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
  1192. + break;
  1193. + case 4:
  1194. + __put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
  1195. + break;
  1196. + case 5:
  1197. + __constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
  1198. + break;
  1199. + case 6:
  1200. + __constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
  1201. + break;
  1202. + case 7:
  1203. + __constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
  1204. + break;
  1205. + case 8:
  1206. + __constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
  1207. + break;
  1208. + case 9:
  1209. + __constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
  1210. + break;
  1211. + case 10:
  1212. + __constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
  1213. + break;
  1214. + case 12:
  1215. + __constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
  1216. + break;
  1217. + default:
  1218. + /* limit the inlined version to 3 moves */
  1219. + return __generic_copy_to_user(to, from, n);
  1220. + }
  1221. +
  1222. + return res;
  1223. +}
  1224. +
  1225. +#define __copy_from_user(to, from, n) \
  1226. +(__builtin_constant_p(n) ? \
  1227. + __constant_copy_from_user(to, from, n) : \
  1228. + __generic_copy_from_user(to, from, n))
  1229. +
  1230. +#define __copy_to_user(to, from, n) \
  1231. +(__builtin_constant_p(n) ? \
  1232. + __constant_copy_to_user(to, from, n) : \
  1233. + __generic_copy_to_user(to, from, n))
  1234. +
  1235. +#define __copy_to_user_inatomic __copy_to_user
  1236. +#define __copy_from_user_inatomic __copy_from_user
  1237. +
  1238. +#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
  1239. +#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
  1240. +
  1241. +long strncpy_from_user(char *dst, const char __user *src, long count);
  1242. +long strnlen_user(const char __user *src, long n);
  1243. +unsigned long __clear_user(void __user *to, unsigned long n);
  1244. +
  1245. +#define clear_user __clear_user
  1246. +
  1247. +#define strlen_user(str) strnlen_user(str, 32767)
  1248. +
  1249. +#endif /* __M68K_CF_UACCESS_H */
  1250. --- /dev/null
  1251. +++ b/include/asm-m68k/cfcache.h
  1252. @@ -0,0 +1,86 @@
  1253. +/*
  1254. + * include/asm-m68k/cfcache.h
  1255. + */
  1256. +#ifndef CF_CFCACHE_H
  1257. +#define CF_CFCACHE_H
  1258. +
  1259. +#define CF_CACR_DEC (0x80000000) /* Data Cache Enable */
  1260. +#define CF_CACR_DW (0x40000000) /* Data default Write-protect */
  1261. +#define CF_CACR_DESB (0x20000000) /* Data Enable Store Buffer */
  1262. +#define CF_CACR_DDPI (0x10000000) /* Data Disable CPUSHL Invalidate */
  1263. +#define CF_CACR_DHLCK (0x08000000) /* 1/2 Data Cache Lock Mode */
  1264. +#define CF_CACR_DDCM_00 (0x00000000) /* Cacheable writethrough imprecise */
  1265. +#define CF_CACR_DDCM_01 (0x02000000) /* Cacheable copyback */
  1266. +#define CF_CACR_DDCM_10 (0x04000000) /* Noncacheable precise */
  1267. +#define CF_CACR_DDCM_11 (0x06000000) /* Noncacheable imprecise */
  1268. +#define CF_CACR_DCINVA (0x01000000) /* Data Cache Invalidate All */
  1269. +#define CF_CACR_IVO (0x00100000) /* Invalidate only */
  1270. +#define CF_CACR_BEC (0x00080000) /* Branch Cache Enable */
  1271. +#define CF_CACR_BCINVA (0x00040000) /* Branch Cache Invalidate All */
  1272. +#define CF_CACR_IEC (0x00008000) /* Instruction Cache Enable */
  1273. +#define CF_CACR_SPA (0x00004000) /* Search by Physical Address */
  1274. +#define CF_CACR_DNFB (0x00002000) /* Default cache-inhibited fill buf */
  1275. +#define CF_CACR_IDPI (0x00001000) /* Instr Disable CPUSHL Invalidate */
  1276. +#define CF_CACR_IHLCK (0x00000800) /* 1/2 Instruction Cache Lock Mode */
  1277. +#define CF_CACR_IDCM (0x00000400) /* Noncacheable Instr default mode */
  1278. +#define CF_CACR_ICINVA (0x00000100) /* Instr Cache Invalidate All */
  1279. +#define CF_CACR_EUSP (0x00000020) /* Switch stacks in user mode */
  1280. +
  1281. +#define DCACHE_LINE_SIZE 0x0010 /* bytes per line */
  1282. +#define DCACHE_WAY_SIZE 0x2000 /* words per cache block */
  1283. +#define CACHE_DISABLE_MODE (CF_CACR_DCINVA+CF_CACR_BCINVA+CF_CACR_ICINVA)
  1284. +#ifdef CONFIG_M5445X_DISABLE_CACHE
  1285. +/* disable cache for testing rev0 silicon */
  1286. +#define CACHE_INITIAL_MODE (CF_CACR_EUSP)
  1287. +#else
  1288. +#define CACHE_INITIAL_MODE (CF_CACR_DEC+CF_CACR_BEC+CF_CACR_IEC+CF_CACR_EUSP)
  1289. +#endif
  1290. +
  1291. +#define _DCACHE_SIZE (2*16384)
  1292. +#define _ICACHE_SIZE (2*16384)
  1293. +
  1294. +#define _SET_SHIFT 4
  1295. +
  1296. +/*
  1297. + * Masks for cache sizes. Programming note: because the set size is a
  1298. + * power of two, the mask is also the last address in the set.
  1299. + * This may need to be #ifdef for other Coldfire processors.
  1300. + */
  1301. +
  1302. +#define _DCACHE_SET_MASK ((_DCACHE_SIZE/64-1)<<_SET_SHIFT)
  1303. +#define _ICACHE_SET_MASK ((_ICACHE_SIZE/64-1)<<_SET_SHIFT)
  1304. +#define LAST_DCACHE_ADDR _DCACHE_SET_MASK
  1305. +#define LAST_ICACHE_ADDR _ICACHE_SET_MASK
  1306. +
  1307. +
  1308. +#ifndef __ASSEMBLY__
  1309. +
  1310. +extern void DcacheFlushInvalidate(void);
  1311. +
  1312. +extern void DcacheDisable(void);
  1313. +extern void DcacheEnable(void);
  1314. +
  1315. +/******************************************************************************/
  1316. +/*** Unimplemented Cache functionality ***/
  1317. +/******************************************************************************/
  1318. +#define preDcacheInvalidateBlockMark()
  1319. +#define postDcacheInvalidateBlockMark()
  1320. +#define DcacheZeroBlock(p, l) fast_bzero((char *)(p), (long)(l))
  1321. +#define loadDcacheInvalidateBlock() ASSERT(!"Not Implemented on V4e")
  1322. +#define IcacheInvalidateBlock() ASSERT(!"Not Implemented on V4e")
  1323. +
  1324. +/******************************************************************************/
  1325. +/*** Redundant Cache functionality on ColdFire ***/
  1326. +/******************************************************************************/
  1327. +#define DcacheInvalidateBlock(p, l) DcacheFlushInvalidateCacheBlock(p, l)
  1328. +#define DcacheFlushCacheBlock(p, l) DcacheFlushInvalidateCacheBlock(p, l)
  1329. +#define DcacheFlushBlock(p, l) DcacheFlushInvalidateCacheBlock(p, l)
  1330. +
  1331. +extern void DcacheFlushInvalidateCacheBlock(void *start, unsigned long size);
  1332. +extern void FLASHDcacheFlushInvalidate(void);
  1333. +
  1334. +extern void cacr_set(unsigned long x);
  1335. +
  1336. +#endif /* !__ASSEMBLY__ */
  1337. +
  1338. +#endif /* CF_CFCACHE_H */
  1339. --- /dev/null
  1340. +++ b/include/asm-m68k/cfmmu.h
  1341. @@ -0,0 +1,104 @@
  1342. +/*
  1343. + * Definitions for Coldfire V4e MMU
  1344. + */
  1345. +#include <asm/movs.h>
  1346. +
  1347. +#ifndef __CF_MMU_H__
  1348. +#define __CF_MMU_H__
  1349. +
  1350. +
  1351. +#define MMU_BASE 0xE1000000
  1352. +
  1353. +
  1354. +#define MMUCR (MMU_BASE+0x00)
  1355. +#define MMUCR_ASMN 1
  1356. +#define MMUCR_ASM (1<<MMUCR_ASMN)
  1357. +#define MMUCR_ENN 0
  1358. +#define MMUCR_EN (1<<MMUCR_ENN)
  1359. +
  1360. +#define MMUOR REG16(MMU_BASE+0x04+0x02)
  1361. +#define MMUOR_AAN 16
  1362. +#define MMUOR_AA (0xffff<<MMUOR_AAN)
  1363. +#define MMUOR_STLBN 8
  1364. +#define MMUOR_STLB (1<<MMUOR_STLBN)
  1365. +#define MMUOR_CAN 7
  1366. +#define MMUOR_CA (1<<MMUOR_CAN)
  1367. +#define MMUOR_CNLN 6
  1368. +#define MMUOR_CNL (1<<MMUOR_CNLN)
  1369. +#define MMUOR_CASN 5
  1370. +#define MMUOR_CAS (1<<MMUOR_CASN)
  1371. +#define MMUOR_ITLBN 4
  1372. +#define MMUOR_ITLB (1<<MMUOR_ITLBN)
  1373. +#define MMUOR_ADRN 3
  1374. +#define MMUOR_ADR (1<<MMUOR_ADRN)
  1375. +#define MMUOR_RWN 2
  1376. +#define MMUOR_RW (1<<MMUOR_RWN)
  1377. +#define MMUOR_ACCN 1
  1378. +#define MMUOR_ACC (1<<MMUOR_ACCN)
  1379. +#define MMUOR_UAAN 0
  1380. +#define MMUOR_UAA (1<<MMUOR_UAAN)
  1381. +
  1382. +#define MMUSR REG32(MMU_BASE+0x08)
  1383. +#define MMUSR_SPFN 5
  1384. +#define MMUSR_SPF (1<<MMUSR_SPFN)
  1385. +#define MMUSR_RFN 4
  1386. +#define MMUSR_RF (1<<MMUSR_RFN)
  1387. +#define MMUSR_WFN 3
  1388. +#define MMUSR_WF (1<<MMUSR_WFN)
  1389. +#define MMUSR_HITN 1
  1390. +#define MMUSR_HIT (1<<MMUSR_HITN)
  1391. +
  1392. +#define MMUAR REG32(MMU_BASE+0x10)
  1393. +#define MMUAR_VPN 1
  1394. +#define MMUAR_VP (0xfffffffe)
  1395. +#define MMUAR_SN 0
  1396. +#define MMUAR_S (1<<MMUAR_SN)
  1397. +
  1398. +#define MMUTR REG32(MMU_BASE+0x14)
  1399. +#define MMUTR_VAN 10
  1400. +#define MMUTR_VA (0xfffffc00)
  1401. +#define MMUTR_IDN 2
  1402. +#define MMUTR_ID (0xff<<MMUTR_IDN)
  1403. +#define MMUTR_SGN 1
  1404. +#define MMUTR_SG (1<<MMUTR_SGN)
  1405. +#define MMUTR_VN 0
  1406. +#define MMUTR_V (1<<MMUTR_VN)
  1407. +
  1408. +#define MMUDR REG32(MMU_BASE+0x18)
  1409. +#define MMUDR_PAN 10
  1410. +#define MMUDR_PA (0xfffffc00)
  1411. +#define MMUDR_SZN 8
  1412. +#define MMUDR_SZ_MASK (0x2<<MMUDR_SZN)
  1413. +#define MMUDR_SZ1M (0<<MMUDR_SZN)
  1414. +#define MMUDR_SZ4K (1<<MMUDR_SZN)
  1415. +#define MMUDR_SZ8K (2<<MMUDR_SZN)
  1416. +#define MMUDR_SZ16M (3<<MMUDR_SZN)
  1417. +#define MMUDR_CMN 6
  1418. +#define MMUDR_INC (2<<MMUDR_CMN)
  1419. +#define MMUDR_IC (0<<MMUDR_CMN)
  1420. +#define MMUDR_DWT (0<<MMUDR_CMN)
  1421. +#define MMUDR_DCB (1<<MMUDR_CMN)
  1422. +#define MMUDR_DNCP (2<<MMUDR_CMN)
  1423. +#define MMUDR_DNCIP (3<<MMUDR_CMN)
  1424. +#define MMUDR_SPN 5
  1425. +#define MMUDR_SP (1<<MMUDR_SPN)
  1426. +#define MMUDR_RN 4
  1427. +#define MMUDR_R (1<<MMUDR_RN)
  1428. +#define MMUDR_WN 3
  1429. +#define MMUDR_W (1<<MMUDR_WN)
  1430. +#define MMUDR_XN 2
  1431. +#define MMUDR_X (1<<MMUDR_XN)
  1432. +#define MMUDR_LKN 1
  1433. +#define MMUDR_LK (1<<MMUDR_LKN)
  1434. +
  1435. +
  1436. +#ifndef __ASSEMBLY__
  1437. +#define CF_PMEGS_NUM 256
  1438. +#define CF_INVALID_CONTEXT 255
  1439. +#define CF_PAGE_PGNUM_MASK (PAGE_MASK)
  1440. +
  1441. +extern int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb,
  1442. + int extension_word);
  1443. +#endif /* __ASSEMBLY__*/
  1444. +
  1445. +#endif /* !__CF_MMU_H__ */
  1446. --- /dev/null
  1447. +++ b/include/asm-m68k/coldfire.h
  1448. @@ -0,0 +1,38 @@
  1449. +#ifndef _COLDFIRE_H_
  1450. +#define _COLDFIRE_H_
  1451. +
  1452. +#define MCF_MBAR 0x0
  1453. +#define MCF_RAMBAR1 0x40000000
  1454. +#define MCF_SRAM 0x80000000
  1455. +#define MCF_CLK CONFIG_MCFCLK
  1456. +#define MCF_BUSCLK (CONFIG_MCFCLK/2)
  1457. +
  1458. +#ifdef __ASSEMBLY__
  1459. +#define REG32
  1460. +#define REG16
  1461. +#define REG08
  1462. +#else /* __ASSEMBLY__ */
  1463. +#define REG32(x) ((volatile unsigned long *)(x))
  1464. +#define REG16(x) ((volatile unsigned short *)(x))
  1465. +#define REG08(x) ((volatile unsigned char *)(x))
  1466. +
  1467. +#define MCF_REG32(x) *(volatile unsigned long *)(MCF_MBAR+(x))
  1468. +#define MCF_REG16(x) *(volatile unsigned short *)(MCF_MBAR+(x))
  1469. +#define MCF_REG08(x) *(volatile unsigned char *)(MCF_MBAR+(x))
  1470. +
  1471. +void cacr_set(unsigned long);
  1472. +unsigned long cacr_get(void);
  1473. +
  1474. +#define coldfire_enable_irq0(irq) MCF_INTC0_CIMR = (irq);
  1475. +
  1476. +#define coldfire_enable_irq1(irq) MCF_INTC1_CIMR = (irq);
  1477. +
  1478. +#define coldfire_disable_irq0(irq) MCF_INTC0_SIMR = (irq);
  1479. +
  1480. +#define coldfire_disable_irq1(irq) MCF_INTC1_SIMR = (irq);
  1481. +
  1482. +#define getiprh() MCF_INTC0_IPRH
  1483. +
  1484. +#endif /* __ASSEMBLY__ */
  1485. +
  1486. +#endif /* _COLDFIRE_H_ */
  1487. --- /dev/null
  1488. +++ b/include/asm-m68k/coldfire_edma.h
  1489. @@ -0,0 +1,39 @@
  1490. +#ifndef _LINUX_COLDFIRE_DMA_H
  1491. +#define _LINUX_COLDFIRE_DMA_H
  1492. +
  1493. +#include <linux/interrupt.h>
  1494. +
  1495. +#define EDMA_DRIVER_NAME "ColdFire-eDMA"
  1496. +#define DMA_DEV_MINOR 1
  1497. +
  1498. +#define EDMA_INT_CHANNEL_BASE 8
  1499. +#define EDMA_INT_CONTROLLER_BASE 64
  1500. +#define EDMA_CHANNELS 16
  1501. +
  1502. +#define EDMA_IRQ_LEVEL 5
  1503. +
  1504. +typedef irqreturn_t (*edma_irq_handler)(int, void *);
  1505. +typedef void (*edma_error_handler)(int, void *);
  1506. +
  1507. +void set_edma_params(int channel, u32 source, u32 dest,
  1508. + u32 attr, u32 soff, u32 nbytes, u32 slast,
  1509. + u32 citer, u32 biter, u32 doff, u32 dlast_sga);
  1510. +
  1511. +void start_edma_transfer(int channel, int major_int);
  1512. +
  1513. +void stop_edma_transfer(int channel);
  1514. +
  1515. +void confirm_edma_interrupt_handled(int channel);
  1516. +
  1517. +void init_edma(void);
  1518. +
  1519. +int request_edma_channel(int channel,
  1520. + edma_irq_handler handler,
  1521. + edma_error_handler error_handler,
  1522. + void *dev,
  1523. + spinlock_t *lock,
  1524. + const char *device_id);
  1525. +
  1526. +int free_edma_channel(int channel, void *dev);
  1527. +
  1528. +#endif /* _LINUX_COLDFIRE_DMA_H */
  1529. --- /dev/null
  1530. +++ b/include/asm-m68k/mcfqspi.h
  1531. @@ -0,0 +1,50 @@
  1532. +/****************************************************************************/
  1533. +/*
  1534. + * mcfqspi.h - Master QSPI controller for the ColdFire processors
  1535. + *
  1536. + * (C) Copyright 2005, Intec Automation,
  1537. + * Mike Lavender (mike@steroidmicros)
  1538. + *
  1539. +
  1540. + This program is free software; you can redistribute it and/or modify
  1541. + it under the terms of the GNU General Public License as published by
  1542. + the Free Software Foundation; either version 2 of the License, or
  1543. + (at your option) any later version.
  1544. +
  1545. + This program is distributed in the hope that it will be useful,
  1546. + but WITHOUT ANY WARRANTY; without even the implied warranty of
  1547. + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  1548. + GNU General Public License for more details.
  1549. +
  1550. + You should have received a copy of the GNU General Public License
  1551. + along with this program; if not, write to the Free Software
  1552. + Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
  1553. +/* ------------------------------------------------------------------------- */
  1554. +
  1555. +#ifndef MCFQSPI_H_
  1556. +#define MCFQSPI_H_
  1557. +
  1558. +#define QSPI_CS_INIT 0x01
  1559. +#define QSPI_CS_ASSERT 0x02
  1560. +#define QSPI_CS_DROP 0x04
  1561. +
  1562. +struct coldfire_spi_master {
  1563. + u16 bus_num;
  1564. + u16 num_chipselect;
  1565. + u8 irq_source;
  1566. + u32 irq_vector;
  1567. + u32 irq_mask;
  1568. + u8 irq_lp;
  1569. + u8 par_val;
  1570. + void (*cs_control)(u8 cs, u8 command);
  1571. +};
  1572. +
  1573. +
  1574. +struct coldfire_spi_chip {
  1575. + u8 mode;
  1576. + u8 bits_per_word;
  1577. + u8 del_cs_to_clk;
  1578. + u8 del_after_trans;
  1579. + u16 void_write_data;
  1580. +};
  1581. +#endif /*MCFQSPI_H_*/
  1582. --- /dev/null
  1583. +++ b/include/asm-m68k/mcfsim.h
  1584. @@ -0,0 +1,96 @@
  1585. +/*
  1586. + * mcfsim.h -- ColdFire System Integration Module support.
  1587. + *
  1588. + * (C) Copyright 1999-2003, Greg Ungerer ([email protected])
  1589. + * (C) Copyright 2000, Lineo Inc. (www.lineo.com)
  1590. + */
  1591. +
  1592. +#ifndef mcfsim_h
  1593. +#define mcfsim_h
  1594. +
  1595. +#if defined(CONFIG_COLDFIRE)
  1596. +#include <asm/coldfire.h>
  1597. +#endif
  1598. +
  1599. +#if defined(CONFIG_M54455)
  1600. +#include <asm/mcf5445x_intc.h>
  1601. +#include <asm/mcf5445x_gpio.h>
  1602. +#include <asm/mcf5445x_i2c.h>
  1603. +#include <asm/mcf5445x_ccm.h>
  1604. +#include <asm/mcf5445x_pci.h>
  1605. +#include <asm/mcf5445x_pciarb.h>
  1606. +#include <asm/mcf5445x_eport.h>
  1607. +#endif
  1608. +
  1609. +/*
  1610. + * Define the base address of the SIM within the MBAR address space.
  1611. + */
  1612. +#define MCFSIM_BASE 0x0 /* Base address of SIM */
  1613. +
  1614. +/*
  1615. + * Bit definitions for the ICR family of registers.
  1616. + */
  1617. +#define MCFSIM_ICR_AUTOVEC 0x80 /* Auto-vectored intr */
  1618. +#define MCFSIM_ICR_LEVEL0 0x00 /* Level 0 intr */
  1619. +#define MCFSIM_ICR_LEVEL1 0x04 /* Level 1 intr */
  1620. +#define MCFSIM_ICR_LEVEL2 0x08 /* Level 2 intr */
  1621. +#define MCFSIM_ICR_LEVEL3 0x0c /* Level 3 intr */
  1622. +#define MCFSIM_ICR_LEVEL4 0x10 /* Level 4 intr */
  1623. +#define MCFSIM_ICR_LEVEL5 0x14 /* Level 5 intr */
  1624. +#define MCFSIM_ICR_LEVEL6 0x18 /* Level 6 intr */
  1625. +#define MCFSIM_ICR_LEVEL7 0x1c /* Level 7 intr */
  1626. +
  1627. +#define MCFSIM_ICR_PRI0 0x00 /* Priority 0 intr */
  1628. +#define MCFSIM_ICR_PRI1 0x01 /* Priority 1 intr */
  1629. +#define MCFSIM_ICR_PRI2 0x02 /* Priority 2 intr */
  1630. +#define MCFSIM_ICR_PRI3 0x03 /* Priority 3 intr */
  1631. +
  1632. +/*
  1633. + * Bit definitions for the Interrupt Mask register (IMR).
  1634. + */
  1635. +#define MCFSIM_IMR_EINT1 0x0002 /* External intr # 1 */
  1636. +#define MCFSIM_IMR_EINT2 0x0004 /* External intr # 2 */
  1637. +#define MCFSIM_IMR_EINT3 0x0008 /* External intr # 3 */
  1638. +#define MCFSIM_IMR_EINT4 0x0010 /* External intr # 4 */
  1639. +#define MCFSIM_IMR_EINT5 0x0020 /* External intr # 5 */
  1640. +#define MCFSIM_IMR_EINT6 0x0040 /* External intr # 6 */
  1641. +#define MCFSIM_IMR_EINT7 0x0080 /* External intr # 7 */
  1642. +
  1643. +#define MCFSIM_IMR_SWD 0x0100 /* Software Watchdog intr */
  1644. +#define MCFSIM_IMR_TIMER1 0x0200 /* TIMER 1 intr */
  1645. +#define MCFSIM_IMR_TIMER2 0x0400 /* TIMER 2 intr */
  1646. +#define MCFSIM_IMR_MBUS 0x0800 /* MBUS intr */
  1647. +#define MCFSIM_IMR_UART1 0x1000 /* UART 1 intr */
  1648. +#define MCFSIM_IMR_UART2 0x2000 /* UART 2 intr */
  1649. +
  1650. +/*
  1651. + * Mask for all of the SIM devices. Some parts have more or less
  1652. + * SIM devices. This is a catch-all for the standard set.
  1653. + */
  1654. +#ifndef MCFSIM_IMR_MASKALL
  1655. +#define MCFSIM_IMR_MASKALL 0x3ffe /* All intr sources */
  1656. +#endif
  1657. +
  1658. +
  1659. +/*
  1660. + * PIT interrupt settings, if not found in mXXXXsim.h file.
  1661. + */
  1662. +#ifndef ICR_INTRCONF
  1663. +#define ICR_INTRCONF 0x2b /* PIT1 level 5, priority 3 */
  1664. +#endif
  1665. +#ifndef MCFPIT_IMR
  1666. +#define MCFPIT_IMR MCFINTC_IMRH
  1667. +#endif
  1668. +#ifndef MCFPIT_IMR_IBIT
  1669. +#define MCFPIT_IMR_IBIT (1 << (MCFINT_PIT1 - 32))
  1670. +#endif
  1671. +
  1672. +
  1673. +#ifndef __ASSEMBLY__
  1674. +/*
  1675. + * Definition for the interrupt auto-vectoring support.
  1676. + */
  1677. +extern void mcf_autovector(unsigned int vec);
  1678. +#endif /* __ASSEMBLY__ */
  1679. +
  1680. +#endif /* mcfsim_h */
  1681. --- /dev/null
  1682. +++ b/include/asm-m68k/mcfuart.h
  1683. @@ -0,0 +1,180 @@
  1684. +/*
  1685. + * mcfuart.h -- ColdFire internal UART support defines.
  1686. + *
  1687. + * Matt Waddel [email protected]
  1688. + * Copyright Freescale Semiconductor, Inc. 2007
  1689. + *
  1690. + * Derived from m68knommu version of this same file (Greg Ungerer & Lineo).
  1691. + *
  1692. + */
  1693. +
  1694. +#ifndef mcfuart_h
  1695. +#define mcfuart_h
  1696. +
  1697. +/*
  1698. + * Define the base address of the UARTS within the MBAR address
  1699. + * space.
  1700. + */
  1701. +#if defined(CONFIG_M54455)
  1702. +#include <asm/mcf5445x_intc.h>
  1703. +#define MCFUART_BASE1 0xfc060000 /* Base address of UART1 */
  1704. +#define MCFUART_BASE2 0xfc064000 /* Base address of UART2 */
  1705. +#define MCFUART_BASE3 0xfc068000 /* Base address of UART3 */
  1706. +#define MCFINT_VECBASE 64
  1707. +#define MCFINT_UART0 26
  1708. +#endif
  1709. +
  1710. +
  1711. +/*
  1712. + * Define the ColdFire UART register set addresses.
  1713. + */
  1714. +#define MCFUART_UMR 0x00 /* Mode register (r/w) */
  1715. +#define MCFUART_USR 0x04 /* Status register (r) */
  1716. +#define MCFUART_UCSR 0x04 /* Clock Select (w) */
  1717. +#define MCFUART_UCR 0x08 /* Command register (w) */
  1718. +#define MCFUART_URB 0x0c /* Receiver Buffer (r) */
  1719. +#define MCFUART_UTB 0x0c /* Transmit Buffer (w) */
  1720. +#define MCFUART_UIPCR 0x10 /* Input Port Change (r) */
  1721. +#define MCFUART_UACR 0x10 /* Auxiliary Control (w) */
  1722. +#define MCFUART_UISR 0x14 /* Interrupt Status (r) */
  1723. +#define MCFUART_UIMR 0x14 /* Interrupt Mask (w) */
  1724. +#define MCFUART_UBG1 0x18 /* Baud Rate MSB (r/w) */
  1725. +#define MCFUART_UBG2 0x1c /* Baud Rate LSB (r/w) */
  1726. +#ifdef CONFIG_M5272
  1727. +#define MCFUART_UTF 0x28 /* Transmitter FIFO (r/w) */
  1728. +#define MCFUART_URF 0x2c /* Receiver FIFO (r/w) */
  1729. +#define MCFUART_UFPD 0x30 /* Frac Prec. Divider (r/w) */
  1730. +#else
  1731. +#define MCFUART_UIVR 0x30 /* Interrupt Vector (r/w) */
  1732. +#endif
  1733. +#define MCFUART_UIPR 0x34 /* Input Port (r) */
  1734. +#define MCFUART_UOP1 0x38 /* Output Port Bit Set (w) */
  1735. +#define MCFUART_UOP0 0x3c /* Output Port Bit Reset (w) */
  1736. +
  1737. +
  1738. +/*
  1739. + * Define bit flags in Mode Register 1 (MR1).
  1740. + */
  1741. +#define MCFUART_MR1_RXRTS 0x80 /* Auto RTS flow control */
  1742. +#define MCFUART_MR1_RXIRQFULL 0x40 /* RX IRQ type FULL */
  1743. +#define MCFUART_MR1_RXIRQRDY 0x00 /* RX IRQ type RDY */
  1744. +#define MCFUART_MR1_RXERRBLOCK 0x20 /* RX block error mode */
  1745. +#define MCFUART_MR1_RXERRCHAR 0x00 /* RX char error mode */
  1746. +
  1747. +#define MCFUART_MR1_PARITYNONE 0x10 /* No parity */
  1748. +#define MCFUART_MR1_PARITYEVEN 0x00 /* Even parity */
  1749. +#define MCFUART_MR1_PARITYODD 0x04 /* Odd parity */
  1750. +#define MCFUART_MR1_PARITYSPACE 0x08 /* Space parity */
  1751. +#define MCFUART_MR1_PARITYMARK 0x0c /* Mark parity */
  1752. +
  1753. +#define MCFUART_MR1_CS5 0x00 /* 5 bits per char */
  1754. +#define MCFUART_MR1_CS6 0x01 /* 6 bits per char */
  1755. +#define MCFUART_MR1_CS7 0x02 /* 7 bits per char */
  1756. +#define MCFUART_MR1_CS8 0x03 /* 8 bits per char */
  1757. +
  1758. +/*
  1759. + * Define bit flags in Mode Register 2 (MR2).
  1760. + */
  1761. +#define MCFUART_MR2_LOOPBACK 0x80 /* Loopback mode */
  1762. +#define MCFUART_MR2_REMOTELOOP 0xc0 /* Remote loopback mode */
  1763. +#define MCFUART_MR2_AUTOECHO 0x40 /* Automatic echo */
  1764. +#define MCFUART_MR2_TXRTS 0x20 /* Assert RTS on TX */
  1765. +#define MCFUART_MR2_TXCTS 0x10 /* Auto CTS flow control */
  1766. +
  1767. +#define MCFUART_MR2_STOP1 0x07 /* 1 stop bit */
  1768. +#define MCFUART_MR2_STOP15 0x08 /* 1.5 stop bits */
  1769. +#define MCFUART_MR2_STOP2 0x0f /* 2 stop bits */
  1770. +
  1771. +/*
  1772. + * Define bit flags in Status Register (USR).
  1773. + */
  1774. +#define MCFUART_USR_RXBREAK 0x80 /* Received BREAK */
  1775. +#define MCFUART_USR_RXFRAMING 0x40 /* Received framing error */
  1776. +#define MCFUART_USR_RXPARITY 0x20 /* Received parity error */
  1777. +#define MCFUART_USR_RXOVERRUN 0x10 /* Received overrun error */
  1778. +#define MCFUART_USR_TXEMPTY 0x08 /* Transmitter empty */
  1779. +#define MCFUART_USR_TXREADY 0x04 /* Transmitter ready */
  1780. +#define MCFUART_USR_RXFULL 0x02 /* Receiver full */
  1781. +#define MCFUART_USR_RXREADY 0x01 /* Receiver ready */
  1782. +
  1783. +#define MCFUART_USR_RXERR (MCFUART_USR_RXBREAK | MCFUART_USR_RXFRAMING | \
  1784. + MCFUART_USR_RXPARITY | MCFUART_USR_RXOVERRUN)
  1785. +
  1786. +/*
  1787. + * Define bit flags in Clock Select Register (UCSR).
  1788. + */
  1789. +#define MCFUART_UCSR_RXCLKTIMER 0xd0 /* RX clock is timer */
  1790. +#define MCFUART_UCSR_RXCLKEXT16 0xe0 /* RX clock is external x16 */
  1791. +#define MCFUART_UCSR_RXCLKEXT1 0xf0 /* RX clock is external x1 */
  1792. +
  1793. +#define MCFUART_UCSR_TXCLKTIMER 0x0d /* TX clock is timer */
  1794. +#define MCFUART_UCSR_TXCLKEXT16 0x0e /* TX clock is external x16 */
  1795. +#define MCFUART_UCSR_TXCLKEXT1 0x0f /* TX clock is external x1 */
  1796. +
  1797. +/*
  1798. + * Define bit flags in Command Register (UCR).
  1799. + */
  1800. +#define MCFUART_UCR_CMDNULL 0x00 /* No command */
  1801. +#define MCFUART_UCR_CMDRESETMRPTR 0x10 /* Reset MR pointer */
  1802. +#define MCFUART_UCR_CMDRESETRX 0x20 /* Reset receiver */
  1803. +#define MCFUART_UCR_CMDRESETTX 0x30 /* Reset transmitter */
  1804. +#define MCFUART_UCR_CMDRESETERR 0x40 /* Reset error status */
  1805. +#define MCFUART_UCR_CMDRESETBREAK 0x50 /* Reset BREAK change */
  1806. +#define MCFUART_UCR_CMDBREAKSTART 0x60 /* Start BREAK */
  1807. +#define MCFUART_UCR_CMDBREAKSTOP 0x70 /* Stop BREAK */
  1808. +
  1809. +#define MCFUART_UCR_TXNULL 0x00 /* No TX command */
  1810. +#define MCFUART_UCR_TXENABLE 0x04 /* Enable TX */
  1811. +#define MCFUART_UCR_TXDISABLE 0x08 /* Disable TX */
  1812. +#define MCFUART_UCR_RXNULL 0x00 /* No RX command */
  1813. +#define MCFUART_UCR_RXENABLE 0x01 /* Enable RX */
  1814. +#define MCFUART_UCR_RXDISABLE 0x02 /* Disable RX */
  1815. +
  1816. +/*
  1817. + * Define bit flags in Input Port Change Register (UIPCR).
  1818. + */
  1819. +#define MCFUART_UIPCR_CTSCOS 0x10 /* CTS change of state */
  1820. +#define MCFUART_UIPCR_CTS 0x01 /* CTS value */
  1821. +
  1822. +/*
  1823. + * Define bit flags in Input Port Register (UIP).
  1824. + */
  1825. +#define MCFUART_UIPR_CTS 0x01 /* CTS value */
  1826. +
  1827. +/*
  1828. + * Define bit flags in Output Port Registers (UOP).
  1829. + * Clear bit by writing to UOP0, set by writing to UOP1.
  1830. + */
  1831. +#define MCFUART_UOP_RTS 0x01 /* RTS set or clear */
  1832. +
  1833. +/*
  1834. + * Define bit flags in the Auxiliary Control Register (UACR).
  1835. + */
  1836. +#define MCFUART_UACR_IEC 0x01 /* Input enable control */
  1837. +
  1838. +/*
  1839. + * Define bit flags in Interrupt Status Register (UISR).
  1840. + * These same bits are used for the Interrupt Mask Register (UIMR).
  1841. + */
  1842. +#define MCFUART_UIR_COS 0x80 /* Change of state (CTS) */
  1843. +#define MCFUART_UIR_DELTABREAK 0x04 /* Break start or stop */
  1844. +#define MCFUART_UIR_RXREADY 0x02 /* Receiver ready */
  1845. +#define MCFUART_UIR_TXREADY 0x01 /* Transmitter ready */
  1846. +
  1847. +#ifdef CONFIG_M5272
  1848. +/*
  1849. + * Define bit flags in the Transmitter FIFO Register (UTF).
  1850. + */
  1851. +#define MCFUART_UTF_TXB 0x1f /* Transmitter data level */
  1852. +#define MCFUART_UTF_FULL 0x20 /* Transmitter fifo full */
  1853. +#define MCFUART_UTF_TXS 0xc0 /* Transmitter status */
  1854. +
  1855. +/*
  1856. + * Define bit flags in the Receiver FIFO Register (URF).
  1857. + */
  1858. +#define MCFUART_URF_RXB 0x1f /* Receiver data level */
  1859. +#define MCFUART_URF_FULL 0x20 /* Receiver fifo full */
  1860. +#define MCFUART_URF_RXS 0xc0 /* Receiver status */
  1861. +#endif
  1862. +
  1863. +#endif /* mcfuart_h */