002-mcfv4e_coldfire_headers.patch 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646
  1. From 127797e5cf8a036825007586b914b75897aba554 Mon Sep 17 00:00:00 2001
  2. From: Kurt Mahan <[email protected]>
  3. Date: Wed, 31 Oct 2007 16:39:31 -0600
  4. Subject: [PATCH] Add Coldfire Support into existing headers.
  5. Modifications to the various M68k header files to add
  6. Coldfire processor support.
  7. LTIBName: mcfv4e-coldfire-headers
  8. Signed-off-by: Kurt Mahan <[email protected]>
  9. ---
  10. include/asm-m68k/atomic.h | 23 ++-
  11. include/asm-m68k/bitops.h | 426 ++++++++++++++++++++++++++++++++++++++++
  12. include/asm-m68k/bootinfo.h | 13 ++
  13. include/asm-m68k/byteorder.h | 12 +-
  14. include/asm-m68k/cacheflush.h | 4 +
  15. include/asm-m68k/checksum.h | 10 +
  16. include/asm-m68k/delay.h | 26 +++
  17. include/asm-m68k/div64.h | 4 +
  18. include/asm-m68k/elf.h | 2 +-
  19. include/asm-m68k/fpu.h | 2 +
  20. include/asm-m68k/io.h | 26 +++-
  21. include/asm-m68k/irq.h | 5 +-
  22. include/asm-m68k/machdep.h | 7 +
  23. include/asm-m68k/mmu_context.h | 84 ++++++++-
  24. include/asm-m68k/page.h | 20 ++-
  25. include/asm-m68k/page_offset.h | 7 +-
  26. include/asm-m68k/pci.h | 99 ++++++----
  27. include/asm-m68k/pgalloc.h | 4 +-
  28. include/asm-m68k/pgtable.h | 15 ++
  29. include/asm-m68k/processor.h | 46 ++++-
  30. include/asm-m68k/ptrace.h | 11 +
  31. include/asm-m68k/raw_io.h | 58 ++++++
  32. include/asm-m68k/segment.h | 10 +
  33. include/asm-m68k/setup.h | 27 +++
  34. include/asm-m68k/signal.h | 5 +
  35. include/asm-m68k/string.h | 2 +
  36. include/asm-m68k/system.h | 17 ++-
  37. include/asm-m68k/thread_info.h | 1 +
  38. include/asm-m68k/tlbflush.h | 16 ++-
  39. include/asm-m68k/uaccess.h | 4 +
  40. 30 files changed, 925 insertions(+), 61 deletions(-)
  41. --- a/include/asm-m68k/atomic.h
  42. +++ b/include/asm-m68k/atomic.h
  43. @@ -2,7 +2,7 @@
  44. #define __ARCH_M68K_ATOMIC__
  45. -#include <asm/system.h>
  46. +#include <asm/system.h> /* local_irq_XXX() */
  47. /*
  48. * Atomic operations that C can't guarantee us. Useful for
  49. @@ -21,12 +21,20 @@ typedef struct { int counter; } atomic_t
  50. static inline void atomic_add(int i, atomic_t *v)
  51. {
  52. +#ifndef CONFIG_COLDFIRE
  53. __asm__ __volatile__("addl %1,%0" : "+m" (*v) : "id" (i));
  54. +#else
  55. + __asm__ __volatile__("addl %1,%0" : "=m" (*v) : "d" (i), "m" (*v));
  56. +#endif
  57. }
  58. static inline void atomic_sub(int i, atomic_t *v)
  59. {
  60. +#ifndef CONFIG_COLDFIRE
  61. __asm__ __volatile__("subl %1,%0" : "+m" (*v) : "id" (i));
  62. +#else
  63. + __asm__ __volatile__("subl %1,%0" : "=m" (*v) : "d" (i), "m" (*v));
  64. +#endif
  65. }
  66. static inline void atomic_inc(atomic_t *v)
  67. @@ -46,6 +54,14 @@ static inline int atomic_dec_and_test(at
  68. return c != 0;
  69. }
  70. +static __inline__ int atomic_dec_and_test_lt(volatile atomic_t *v)
  71. +{
  72. + char c;
  73. + __asm__ __volatile__("subql #1,%1; slt %0" : "=d" (c), "=m" (*v)
  74. + : "m" (*v));
  75. + return c != 0 ;
  76. +}
  77. +
  78. static inline int atomic_inc_and_test(atomic_t *v)
  79. {
  80. char c;
  81. @@ -156,7 +172,12 @@ static inline int atomic_sub_and_test(in
  82. static inline int atomic_add_negative(int i, atomic_t *v)
  83. {
  84. char c;
  85. +#ifndef CONFIG_COLDFIRE
  86. __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "+m" (*v): "g" (i));
  87. +#else
  88. + __asm__ __volatile__("addl %2,%1; smi %0" : "=d" (c), "=m" (*v)
  89. + : "d" (i) , "m" (*v));
  90. +#endif
  91. return c != 0;
  92. }
  93. --- a/include/asm-m68k/bitops.h
  94. +++ b/include/asm-m68k/bitops.h
  95. @@ -19,6 +19,7 @@
  96. *
  97. * They use the standard big-endian m680x0 bit ordering.
  98. */
  99. +#ifndef CONFIG_COLDFIRE
  100. #define test_and_set_bit(nr,vaddr) \
  101. (__builtin_constant_p(nr) ? \
  102. @@ -457,4 +458,429 @@ static inline int ext2_find_next_bit(con
  103. #endif /* __KERNEL__ */
  104. +#else /* CONFIG_COLDFIRE */
  105. +
  106. +#define test_and_set_bit(nr,vaddr) \
  107. + (__builtin_constant_p(nr) ? \
  108. + __constant_coldfire_test_and_set_bit(nr, vaddr) : \
  109. + __generic_coldfire_test_and_set_bit(nr, vaddr))
  110. +
  111. +
  112. +static __inline__ int __constant_coldfire_test_and_set_bit(int nr,
  113. + volatile void *vaddr)
  114. +{
  115. + char retval;
  116. + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
  117. +
  118. + __asm__ __volatile__ ("bset %2,%1; sne %0"
  119. + : "=d" (retval), "+QUd" (*p)
  120. + : "di" (nr & 7));
  121. + return retval;
  122. +}
  123. +
  124. +static __inline__ int __generic_coldfire_test_and_set_bit(int nr,
  125. + volatile void *vaddr)
  126. +{
  127. + char retval;
  128. +
  129. + __asm__ __volatile__ ("bset %2,%1; sne %0"
  130. + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
  131. + : "d" (nr)
  132. + : "memory");
  133. + return retval;
  134. +}
  135. +#define __test_and_set_bit(nr, vaddr) test_and_set_bit(nr, vaddr)
  136. +
  137. +#define set_bit(nr,vaddr) \
  138. + (__builtin_constant_p(nr) ? \
  139. + __constant_coldfire_set_bit(nr, vaddr) : \
  140. + __generic_coldfire_set_bit(nr, vaddr))
  141. +
  142. +static __inline__ void __constant_coldfire_set_bit(int nr,
  143. + volatile void *vaddr)
  144. +{
  145. + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
  146. + __asm__ __volatile__ ("bset %1,%0"
  147. + : "+QUd" (*p) : "di" (nr & 7));
  148. +}
  149. +
  150. +static __inline__ void __generic_coldfire_set_bit(int nr, volatile void *vaddr)
  151. +{
  152. + __asm__ __volatile__ ("bset %1,%0"
  153. + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
  154. + : "d" (nr)
  155. + : "memory");
  156. +}
  157. +#define __set_bit(nr, vaddr) set_bit(nr, vaddr)
  158. +
  159. +#define test_and_clear_bit(nr, vaddr) \
  160. + (__builtin_constant_p(nr) ? \
  161. + __constant_coldfire_test_and_clear_bit(nr, vaddr) : \
  162. + __generic_coldfire_test_and_clear_bit(nr, vaddr))
  163. +
  164. +static __inline__ int __constant_coldfire_test_and_clear_bit(int nr,
  165. + volatile void *vaddr)
  166. +{
  167. + char retval;
  168. + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
  169. +
  170. + __asm__ __volatile__ ("bclr %2,%1; sne %0"
  171. + : "=d" (retval), "+QUd" (*p)
  172. + : "id" (nr & 7));
  173. +
  174. + return retval;
  175. +}
  176. +
  177. +static __inline__ int __generic_coldfire_test_and_clear_bit(int nr,
  178. + volatile void *vaddr)
  179. +{
  180. + char retval;
  181. +
  182. + __asm__ __volatile__ ("bclr %2,%1; sne %0"
  183. + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
  184. + : "d" (nr & 7)
  185. + : "memory");
  186. +
  187. + return retval;
  188. +}
  189. +#define __test_and_clear_bit(nr, vaddr) test_and_clear_bit(nr, vaddr)
  190. +
  191. +/*
  192. + * clear_bit() doesn't provide any barrier for the compiler.
  193. + */
  194. +#define smp_mb__before_clear_bit() barrier()
  195. +#define smp_mb__after_clear_bit() barrier()
  196. +
  197. +#define clear_bit(nr,vaddr) \
  198. + (__builtin_constant_p(nr) ? \
  199. + __constant_coldfire_clear_bit(nr, vaddr) : \
  200. + __generic_coldfire_clear_bit(nr, vaddr))
  201. +
  202. +static __inline__ void __constant_coldfire_clear_bit(int nr,
  203. + volatile void *vaddr)
  204. +{
  205. + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
  206. + __asm__ __volatile__ ("bclr %1,%0"
  207. + : "+QUd" (*p) : "id" (nr & 7));
  208. +}
  209. +
  210. +static __inline__ void __generic_coldfire_clear_bit(int nr,
  211. + volatile void *vaddr)
  212. +{
  213. + __asm__ __volatile__ ("bclr %1,%0"
  214. + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
  215. + : "d" (nr)
  216. + : "memory");
  217. +}
  218. +#define __clear_bit(nr, vaddr) clear_bit(nr, vaddr)
  219. +
  220. +#define test_and_change_bit(nr, vaddr) \
  221. + (__builtin_constant_p(nr) ? \
  222. + __constant_coldfire_test_and_change_bit(nr, vaddr) : \
  223. + __generic_coldfire_test_and_change_bit(nr, vaddr))
  224. +
  225. +static __inline__ int __constant_coldfire_test_and_change_bit(int nr,
  226. + volatile void *vaddr)
  227. +{
  228. + char retval;
  229. + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
  230. +
  231. + __asm__ __volatile__ ("bchg %2,%1; sne %0"
  232. + : "=d" (retval), "+QUd" (*p)
  233. + : "id" (nr & 7));
  234. +
  235. + return retval;
  236. +}
  237. +
  238. +static __inline__ int __generic_coldfire_test_and_change_bit(int nr,
  239. + volatile void *vaddr)
  240. +{
  241. + char retval;
  242. +
  243. + __asm__ __volatile__ ("bchg %2,%1; sne %0"
  244. + : "=d" (retval), "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
  245. + : "id" (nr)
  246. + : "memory");
  247. +
  248. + return retval;
  249. +}
  250. +#define __test_and_change_bit(nr, vaddr) test_and_change_bit(nr, vaddr)
  251. +#define __change_bit(nr, vaddr) change_bit(nr, vaddr)
  252. +
  253. +#define change_bit(nr,vaddr) \
  254. + (__builtin_constant_p(nr) ? \
  255. + __constant_coldfire_change_bit(nr, vaddr) : \
  256. + __generic_coldfire_change_bit(nr, vaddr))
  257. +
  258. +static __inline__ void __constant_coldfire_change_bit(int nr,
  259. + volatile void *vaddr)
  260. +{
  261. + volatile char *p = &((volatile char *)vaddr)[(nr^31) >> 3];
  262. + __asm__ __volatile__ ("bchg %1,%0"
  263. + : "+QUd" (*p) : "id" (nr & 7));
  264. +}
  265. +
  266. +static __inline__ void __generic_coldfire_change_bit(int nr,
  267. + volatile void *vaddr)
  268. +{
  269. + __asm__ __volatile__ ("bchg %1,%0"
  270. + : "=m" (((volatile char *)vaddr)[(nr^31) >> 3])
  271. + : "d" (nr)
  272. + : "memory");
  273. +}
  274. +
  275. +static inline int test_bit(int nr, const unsigned long *vaddr)
  276. +{
  277. + return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
  278. +}
  279. +
  280. +static __inline__ unsigned long ffz(unsigned long word)
  281. +{
  282. + unsigned long result = 0;
  283. +
  284. + while (word & 1) {
  285. + result++;
  286. + word >>= 1;
  287. + }
  288. + return result;
  289. +}
  290. +
  291. +/* find_next_zero_bit() finds the first zero bit in a bit string of length
  292. + * 'size' bits, starting the search at bit 'offset'. This is largely based
  293. + * on Linus's ALPHA routines.
  294. + */
  295. +static __inline__ unsigned long find_next_zero_bit(void *addr,
  296. + unsigned long size, unsigned long offset)
  297. +{
  298. + unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
  299. + unsigned long result = offset & ~31UL;
  300. + unsigned long tmp;
  301. +
  302. + if (offset >= size)
  303. + return size;
  304. + size -= result;
  305. + offset &= 31UL;
  306. + if (offset) {
  307. + tmp = *(p++);
  308. + tmp |= ~0UL >> (32-offset);
  309. + if (size < 32)
  310. + goto found_first;
  311. + if (~tmp)
  312. + goto found_middle;
  313. + size -= 32;
  314. + result += 32;
  315. + }
  316. + while (size & ~31UL) {
  317. + tmp = *(p++);
  318. + if (~tmp)
  319. + goto found_middle;
  320. + result += 32;
  321. + size -= 32;
  322. + }
  323. + if (!size)
  324. + return result;
  325. + tmp = *p;
  326. +
  327. +found_first:
  328. + tmp |= ~0UL >> size;
  329. +found_middle:
  330. + return result + ffz(tmp);
  331. +}
  332. +
  333. +#define find_first_zero_bit(addr, size) find_next_zero_bit(((void *)addr), \
  334. + (size), 0)
  335. +
  336. +/* Ported from included/linux/bitops.h */
  337. +static __inline__ int ffs(int x)
  338. +{
  339. + int r = 1;
  340. +
  341. + if (!x)
  342. + return 0;
  343. + if (!(x & 0xffff)) {
  344. + x >>= 16;
  345. + r += 16;
  346. + }
  347. + if (!(x & 0xff)) {
  348. + x >>= 8;
  349. + r += 8;
  350. + }
  351. + if (!(x & 0xf)) {
  352. + x >>= 4;
  353. + r += 4;
  354. + }
  355. + if (!(x & 3)) {
  356. + x >>= 2;
  357. + r += 2;
  358. + }
  359. + if (!(x & 1)) {
  360. + x >>= 1;
  361. + r += 1;
  362. + }
  363. + return r;
  364. +}
  365. +#define __ffs(x) (ffs(x) - 1)
  366. +
  367. +/* find_next_bit - find the next set bit in a memory region
  368. + * (from asm-ppc/bitops.h)
  369. + */
  370. +static __inline__ unsigned long find_next_bit(const unsigned long *addr,
  371. + unsigned long size, unsigned long offset)
  372. +{
  373. + unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
  374. + unsigned int result = offset & ~31UL;
  375. + unsigned int tmp;
  376. +
  377. + if (offset >= size)
  378. + return size;
  379. + size -= result;
  380. + offset &= 31UL;
  381. + if (offset) {
  382. + tmp = *p++;
  383. + tmp &= ~0UL << offset;
  384. + if (size < 32)
  385. + goto found_first;
  386. + if (tmp)
  387. + goto found_middle;
  388. + size -= 32;
  389. + result += 32;
  390. + }
  391. + while (size >= 32) {
  392. + tmp = *p++;
  393. + if (tmp != 0)
  394. + goto found_middle;
  395. + result += 32;
  396. + size -= 32;
  397. + }
  398. + if (!size)
  399. + return result;
  400. + tmp = *p;
  401. +
  402. +found_first:
  403. + tmp &= ~0UL >> (32 - size);
  404. + if (tmp == 0UL) /* Are any bits set? */
  405. + return result + size; /* Nope. */
  406. +found_middle:
  407. + return result + __ffs(tmp);
  408. +}
  409. +
  410. +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
  411. +
  412. +#ifdef __KERNEL__
  413. +
  414. +/* Ported from include/linux/bitops.h */
  415. +static __inline__ int fls(int x)
  416. +{
  417. + int r = 32;
  418. +
  419. + if (!x)
  420. + return 0;
  421. + if (!(x & 0xffff0000u)) {
  422. + x <<= 16;
  423. + r -= 16;
  424. + }
  425. + if (!(x & 0xff000000u)) {
  426. + x <<= 8;
  427. + r -= 8;
  428. + }
  429. + if (!(x & 0xf0000000u)) {
  430. + x <<= 4;
  431. + r -= 4;
  432. + }
  433. + if (!(x & 0xc0000000u)) {
  434. + x <<= 2;
  435. + r -= 2;
  436. + }
  437. + if (!(x & 0x80000000u)) {
  438. + x <<= 1;
  439. + r -= 1;
  440. + }
  441. + return r;
  442. +}
  443. +
  444. +#include <asm-generic/bitops/fls64.h>
  445. +#include <asm-generic/bitops/sched.h>
  446. +#include <asm-generic/bitops/hweight.h>
  447. +
  448. +#define minix_find_first_zero_bit(addr, size) find_next_zero_bit((addr), \
  449. + (size), 0)
  450. +#define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr), \
  451. + (unsigned long *)(addr))
  452. +#define minix_set_bit(nr, addr) set_bit((nr), \
  453. + (unsigned long *)(addr))
  454. +#define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr), \
  455. + (unsigned long *)(addr))
  456. +
  457. +static inline int minix_test_bit(int nr, const volatile unsigned long *vaddr)
  458. +{
  459. + int *a = (int *)vaddr;
  460. + int mask;
  461. +
  462. + a += nr >> 5;
  463. + mask = 1 << (nr & 0x1f);
  464. + return ((mask & *a) != 0);
  465. +}
  466. +
  467. +#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, \
  468. + (unsigned long *)(addr))
  469. +#define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, \
  470. + (unsigned long *)(addr))
  471. +#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, \
  472. + (unsigned long *)(addr))
  473. +#define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, \
  474. + (unsigned long *)(addr))
  475. +
  476. +static inline int ext2_test_bit(int nr, const void *vaddr)
  477. +{
  478. + const unsigned char *p = vaddr;
  479. + return (p[nr >> 3] & (1U << (nr & 7))) != 0;
  480. +}
  481. +
  482. +static inline int ext2_find_first_zero_bit(const void *vaddr, unsigned size)
  483. +{
  484. + const unsigned long *p = vaddr, *addr = vaddr;
  485. + int res;
  486. +
  487. + if (!size)
  488. + return 0;
  489. +
  490. + size = (size >> 5) + ((size & 31) > 0);
  491. + while (*p++ == ~0UL) {
  492. + if (--size == 0)
  493. + return (p - addr) << 5;
  494. + }
  495. +
  496. + --p;
  497. + for (res = 0; res < 32; res++)
  498. + if (!ext2_test_bit (res, p))
  499. + break;
  500. + return (p - addr) * 32 + res;
  501. +}
  502. +
  503. +static inline int ext2_find_next_zero_bit(const void *vaddr, unsigned size,
  504. + unsigned offset)
  505. +{
  506. + const unsigned long *addr = vaddr;
  507. + const unsigned long *p = addr + (offset >> 5);
  508. + int bit = offset & 31UL, res;
  509. +
  510. + if (offset >= size)
  511. + return size;
  512. +
  513. + if (bit) {
  514. + /* Look for zero in first longword */
  515. + for (res = bit; res < 32; res++)
  516. + if (!ext2_test_bit (res, p))
  517. + return (p - addr) * 32 + res;
  518. + p++;
  519. + }
  520. + /* No zero yet, search remaining full bytes for a zero */
  521. + res = ext2_find_first_zero_bit(p, size - 32 * (p - addr));
  522. + return (p - addr) * 32 + res;
  523. +}
  524. +
  525. +#endif /* KERNEL */
  526. +
  527. +#endif /* CONFIG_COLDFIRE */
  528. +
  529. #endif /* _M68K_BITOPS_H */
  530. --- a/include/asm-m68k/bootinfo.h
  531. +++ b/include/asm-m68k/bootinfo.h
  532. @@ -49,6 +49,19 @@ struct bi_record {
  533. #endif /* __ASSEMBLY__ */
  534. +#ifndef __ASSEMBLY__
  535. +
  536. +struct uboot_record {
  537. + unsigned long bd_info;
  538. + unsigned long initrd_start;
  539. + unsigned long initrd_end;
  540. + unsigned long cmd_line_start;
  541. + unsigned long cmd_line_stop;
  542. +};
  543. +
  544. +#endif /* __ASSEMBLY__ */
  545. +
  546. +
  547. /*
  548. * Tag Definitions
  549. *
  550. --- a/include/asm-m68k/byteorder.h
  551. +++ b/include/asm-m68k/byteorder.h
  552. @@ -4,8 +4,15 @@
  553. #include <asm/types.h>
  554. #include <linux/compiler.h>
  555. -#ifdef __GNUC__
  556. -
  557. +#if defined(__GNUC__)
  558. +#if defined(__mcfisaaplus__) || defined(__mcfisac__)
  559. +static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 val)
  560. +{
  561. + __asm__ ("byterev %0" : "=d" (val) : "0" (val));
  562. + return val;
  563. +}
  564. +#define __arch__swab32(x) ___arch__swab32(x)
  565. +#elif !defined(__mcoldfire__)
  566. static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 val)
  567. {
  568. __asm__("rolw #8,%0; swap %0; rolw #8,%0" : "=d" (val) : "0" (val));
  569. @@ -14,6 +21,7 @@ static __inline__ __attribute_const__ __
  570. #define __arch__swab32(x) ___arch__swab32(x)
  571. #endif
  572. +#endif
  573. #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
  574. # define __BYTEORDER_HAS_U64__
  575. --- a/include/asm-m68k/cacheflush.h
  576. +++ b/include/asm-m68k/cacheflush.h
  577. @@ -6,6 +6,9 @@
  578. /* cache code */
  579. #define FLUSH_I_AND_D (0x00000808)
  580. #define FLUSH_I (0x00000008)
  581. +#ifdef CONFIG_COLDFIRE
  582. +#include <asm/cf_cacheflush.h>
  583. +#else /* !CONFIG_COLDFIRE */
  584. /*
  585. * Cache handling functions
  586. @@ -153,4 +156,5 @@ static inline void copy_from_user_page(s
  587. memcpy(dst, src, len);
  588. }
  589. +#endif /* !CONFIG_COLDFIRE */
  590. #endif /* _M68K_CACHEFLUSH_H */
  591. --- a/include/asm-m68k/checksum.h
  592. +++ b/include/asm-m68k/checksum.h
  593. @@ -34,6 +34,7 @@ extern __wsum csum_partial_copy_nocheck(
  594. void *dst, int len,
  595. __wsum sum);
  596. +#ifndef CONFIG_COLDFIRE /* CF has own copy in arch/m68k/lib/checksum.c */
  597. /*
  598. * This is a version of ip_compute_csum() optimized for IP headers,
  599. * which always checksum on 4 octet boundaries.
  600. @@ -59,6 +60,9 @@ static inline __sum16 ip_fast_csum(const
  601. : "memory");
  602. return (__force __sum16)~sum;
  603. }
  604. +#else
  605. +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
  606. +#endif
  607. /*
  608. * Fold a partial checksum
  609. @@ -67,6 +71,11 @@ static inline __sum16 ip_fast_csum(const
  610. static inline __sum16 csum_fold(__wsum sum)
  611. {
  612. unsigned int tmp = (__force u32)sum;
  613. +#ifdef CONFIG_COLDFIRE
  614. + tmp = (tmp & 0xffff) + (tmp >> 16);
  615. + tmp = (tmp & 0xffff) + (tmp >> 16);
  616. + return (__force __sum16) ~tmp;
  617. +#else
  618. __asm__("swap %1\n\t"
  619. "addw %1, %0\n\t"
  620. "clrw %1\n\t"
  621. @@ -74,6 +83,7 @@ static inline __sum16 csum_fold(__wsum s
  622. : "=&d" (sum), "=&d" (tmp)
  623. : "0" (sum), "1" (tmp));
  624. return (__force __sum16)~sum;
  625. +#endif
  626. }
  627. --- a/include/asm-m68k/delay.h
  628. +++ b/include/asm-m68k/delay.h
  629. @@ -11,8 +11,25 @@
  630. static inline void __delay(unsigned long loops)
  631. {
  632. +#if defined(CONFIG_COLDFIRE)
  633. + /* The coldfire runs this loop at significantly different speeds
  634. + * depending upon long word alignment or not. We'll pad it to
  635. + * long word alignment which is the faster version.
  636. + * The 0x4a8e is of course a 'tstl %fp' instruction. This is better
  637. + * than using a NOP (0x4e71) instruction because it executes in one
  638. + * cycle not three and doesn't allow for an arbitrary delay waiting
  639. + * for bus cycles to finish. Also fp/a6 isn't likely to cause a
  640. + * stall waiting for the register to become valid if such is added
  641. + * to the coldfire at some stage.
  642. + */
  643. + __asm__ __volatile__ (".balignw 4, 0x4a8e\n\t"
  644. + "1: subql #1, %0\n\t"
  645. + "jcc 1b"
  646. + : "=d" (loops) : "0" (loops));
  647. +#else
  648. __asm__ __volatile__ ("1: subql #1,%0; jcc 1b"
  649. : "=d" (loops) : "0" (loops));
  650. +#endif
  651. }
  652. extern void __bad_udelay(void);
  653. @@ -26,12 +43,17 @@ extern void __bad_udelay(void);
  654. */
  655. static inline void __const_udelay(unsigned long xloops)
  656. {
  657. +#if defined(CONFIG_COLDFIRE)
  658. +
  659. + __delay(((((unsigned long long) xloops * loops_per_jiffy))>>32)*HZ);
  660. +#else
  661. unsigned long tmp;
  662. __asm__ ("mulul %2,%0:%1"
  663. : "=d" (xloops), "=d" (tmp)
  664. : "d" (xloops), "1" (loops_per_jiffy));
  665. __delay(xloops * HZ);
  666. +#endif
  667. }
  668. static inline void __udelay(unsigned long usecs)
  669. @@ -46,12 +68,16 @@ static inline void __udelay(unsigned lon
  670. static inline unsigned long muldiv(unsigned long a, unsigned long b,
  671. unsigned long c)
  672. {
  673. +#if defined(CONFIG_COLDFIRE)
  674. + return (long)(((unsigned long long)a * b)/c);
  675. +#else
  676. unsigned long tmp;
  677. __asm__ ("mulul %2,%0:%1; divul %3,%0:%1"
  678. : "=d" (tmp), "=d" (a)
  679. : "d" (b), "d" (c), "1" (a));
  680. return a;
  681. +#endif
  682. }
  683. #endif /* defined(_M68K_DELAY_H) */
  684. --- a/include/asm-m68k/div64.h
  685. +++ b/include/asm-m68k/div64.h
  686. @@ -5,6 +5,7 @@
  687. /* n = n / base; return rem; */
  688. +#ifndef CONFIG_COLDFIRE
  689. #define do_div(n, base) ({ \
  690. union { \
  691. unsigned long n32[2]; \
  692. @@ -24,6 +25,9 @@
  693. (n) = __n.n64; \
  694. __rem; \
  695. })
  696. +#else
  697. +# include <asm-generic/div64.h>
  698. +#endif
  699. extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
  700. #endif /* _M68K_DIV64_H */
  701. --- a/include/asm-m68k/elf.h
  702. +++ b/include/asm-m68k/elf.h
  703. @@ -60,7 +60,7 @@ typedef struct user_m68kfp_struct elf_fp
  704. #define ELF_PLAT_INIT(_r, load_addr) _r->a1 = 0
  705. #define USE_ELF_CORE_DUMP
  706. -#ifndef CONFIG_SUN3
  707. +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
  708. #define ELF_EXEC_PAGESIZE 4096
  709. #else
  710. #define ELF_EXEC_PAGESIZE 8192
  711. --- a/include/asm-m68k/fpu.h
  712. +++ b/include/asm-m68k/fpu.h
  713. @@ -12,6 +12,8 @@
  714. #define FPSTATESIZE (96/sizeof(unsigned char))
  715. #elif defined(CONFIG_M68KFPU_EMU)
  716. #define FPSTATESIZE (28/sizeof(unsigned char))
  717. +#elif defined(CONFIG_CFV4E)
  718. +#define FPSTATESIZE (16/sizeof(unsigned char))
  719. #elif defined(CONFIG_M68060)
  720. #define FPSTATESIZE (12/sizeof(unsigned char))
  721. #else
  722. --- a/include/asm-m68k/io.h
  723. +++ b/include/asm-m68k/io.h
  724. @@ -397,10 +397,12 @@ static inline void memcpy_toio(volatile
  725. __builtin_memcpy((void __force *) dst, src, count);
  726. }
  727. -#ifndef CONFIG_SUN3
  728. -#define IO_SPACE_LIMIT 0xffff
  729. -#else
  730. +#if defined(CONFIG_SUN3)
  731. #define IO_SPACE_LIMIT 0x0fffffff
  732. +#elif defined(CONFIG_COLDFIRE)
  733. +#define IO_SPACE_LIMIT 0xffffffff
  734. +#else
  735. +#define IO_SPACE_LIMIT 0xffff
  736. #endif
  737. #endif /* __KERNEL__ */
  738. @@ -418,4 +420,22 @@ static inline void memcpy_toio(volatile
  739. */
  740. #define xlate_dev_kmem_ptr(p) p
  741. +#ifdef CONFIG_COLDFIRE
  742. +
  743. +#define memset_io(a, b, c) memset((void *)(a), (b), (c))
  744. +#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
  745. +#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
  746. +#if !defined(readb)
  747. +#define readb(addr) \
  748. + ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
  749. +#define readw(addr) \
  750. + ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
  751. +#define readl(addr) \
  752. + ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
  753. +#define writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
  754. +#define writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
  755. +#define writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
  756. +#endif /* readb */
  757. +#endif /* CONFIG_COLDFIRE */
  758. +
  759. #endif /* _IO_H */
  760. --- a/include/asm-m68k/irq.h
  761. +++ b/include/asm-m68k/irq.h
  762. @@ -11,7 +11,10 @@
  763. * Currently the Atari has 72 and the Amiga 24, but if both are
  764. * supported in the kernel it is better to make room for 72.
  765. */
  766. -#if defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
  767. +#if defined(CONFIG_COLDFIRE)
  768. +#define SYS_IRQS 256
  769. +#define NR_IRQS SYS_IRQS
  770. +#elif defined(CONFIG_VME) || defined(CONFIG_SUN3) || defined(CONFIG_SUN3X)
  771. #define NR_IRQS 200
  772. #elif defined(CONFIG_ATARI) || defined(CONFIG_MAC)
  773. #define NR_IRQS 72
  774. --- a/include/asm-m68k/machdep.h
  775. +++ b/include/asm-m68k/machdep.h
  776. @@ -32,4 +32,11 @@ extern void (*mach_heartbeat) (int);
  777. extern void (*mach_l2_flush) (int);
  778. extern void (*mach_beep) (unsigned int, unsigned int);
  779. +#ifdef CONFIG_COLDFIRE
  780. +extern void __init config_coldfire(void);
  781. +extern void __init mmu_context_init(void);
  782. +extern irq_handler_t mach_default_handler;
  783. +extern void (*mach_tick)(void);
  784. +#endif
  785. +
  786. #endif /* _M68K_MACHDEP_H */
  787. --- a/include/asm-m68k/mmu_context.h
  788. +++ b/include/asm-m68k/mmu_context.h
  789. @@ -7,7 +7,7 @@ static inline void enter_lazy_tlb(struct
  790. {
  791. }
  792. -#ifndef CONFIG_SUN3
  793. +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
  794. #include <asm/setup.h>
  795. #include <asm/page.h>
  796. @@ -102,7 +102,7 @@ static inline void activate_mm(struct mm
  797. switch_mm_0460(next_mm);
  798. }
  799. -#else /* CONFIG_SUN3 */
  800. +#elif defined(CONFIG_SUN3)
  801. #include <asm/sun3mmu.h>
  802. #include <linux/sched.h>
  803. @@ -150,5 +150,83 @@ static inline void activate_mm(struct mm
  804. activate_context(next_mm);
  805. }
  806. -#endif
  807. +#else /* CONFIG_COLDFIRE */
  808. +
  809. +#include <asm/atomic.h>
  810. +#include <asm/bitops.h>
  811. +#include <asm/mmu.h>
  812. +
  813. +#define NO_CONTEXT 256
  814. +#define LAST_CONTEXT 255
  815. +#define FIRST_CONTEXT 1
  816. +
  817. +extern void set_context(mm_context_t context, pgd_t *pgd);
  818. +extern unsigned long context_map[];
  819. +extern mm_context_t next_mmu_context;
  820. +
  821. +extern atomic_t nr_free_contexts;
  822. +extern struct mm_struct *context_mm[LAST_CONTEXT+1];
  823. +extern void steal_context(void);
  824. +
  825. +static inline void get_mmu_context(struct mm_struct *mm)
  826. +{
  827. + mm_context_t ctx;
  828. +
  829. + if (mm->context != NO_CONTEXT)
  830. + return;
  831. + while (atomic_dec_and_test_lt(&nr_free_contexts)) {
  832. + atomic_inc(&nr_free_contexts);
  833. + steal_context();
  834. + }
  835. + ctx = next_mmu_context;
  836. + while (test_and_set_bit(ctx, context_map)) {
  837. + ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
  838. + if (ctx > LAST_CONTEXT)
  839. + ctx = 0;
  840. + }
  841. + next_mmu_context = (ctx + 1) & LAST_CONTEXT;
  842. + mm->context = ctx;
  843. + context_mm[ctx] = mm;
  844. +}
  845. +
  846. +/*
  847. + * Set up the context for a new address space.
  848. + */
  849. +#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
  850. +
  851. +/*
  852. + * We're finished using the context for an address space.
  853. + */
  854. +static inline void destroy_context(struct mm_struct *mm)
  855. +{
  856. + if (mm->context != NO_CONTEXT) {
  857. + clear_bit(mm->context, context_map);
  858. + mm->context = NO_CONTEXT;
  859. + atomic_inc(&nr_free_contexts);
  860. + }
  861. +}
  862. +
  863. +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  864. + struct task_struct *tsk)
  865. +{
  866. + get_mmu_context(tsk->mm);
  867. + set_context(tsk->mm->context, next->pgd);
  868. +}
  869. +
  870. +/*
  871. + * After we have set current->mm to a new value, this activates
  872. + * the context for the new mm so we see the new mappings.
  873. + */
  874. +static inline void activate_mm(struct mm_struct *active_mm,
  875. + struct mm_struct *mm)
  876. +{
  877. + get_mmu_context(mm);
  878. + set_context(mm->context, mm->pgd);
  879. +}
  880. +
  881. +#define deactivate_mm(tsk, mm) do { } while (0)
  882. +
  883. +extern void mmu_context_init(void);
  884. +
  885. +#endif /* CONFIG_COLDFIRE */
  886. #endif
  887. --- a/include/asm-m68k/page.h
  888. +++ b/include/asm-m68k/page.h
  889. @@ -4,7 +4,7 @@
  890. #include <linux/const.h>
  891. /* PAGE_SHIFT determines the page size */
  892. -#ifndef CONFIG_SUN3
  893. +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
  894. #define PAGE_SHIFT (12)
  895. #else
  896. #define PAGE_SHIFT (13)
  897. @@ -116,10 +116,23 @@ typedef struct page *pgtable_t;
  898. extern unsigned long m68k_memoffset;
  899. -#ifndef CONFIG_SUN3
  900. +#if !defined(CONFIG_SUN3)
  901. #define WANT_PAGE_VIRTUAL
  902. +#if defined(CONFIG_COLDFIRE)
  903. +static inline unsigned long ___pa(void *vaddr)
  904. +{
  905. + return (((unsigned long)vaddr & 0x0fffffff) + CONFIG_SDRAM_BASE);
  906. +}
  907. +#define __pa(vaddr) ___pa((void *)(vaddr))
  908. +
  909. +static inline void *__va(unsigned long paddr)
  910. +{
  911. + return (void *)((paddr & 0x0fffffff) + PAGE_OFFSET);
  912. +}
  913. +
  914. +#else
  915. static inline unsigned long ___pa(void *vaddr)
  916. {
  917. unsigned long paddr;
  918. @@ -141,6 +154,7 @@ static inline void *__va(unsigned long p
  919. : "0" (paddr), "i" (m68k_fixup_memoffset));
  920. return vaddr;
  921. }
  922. +#endif
  923. #else /* !CONFIG_SUN3 */
  924. /* This #define is a horrible hack to suppress lots of warnings. --m */
  925. @@ -172,6 +186,8 @@ static inline void *__va(unsigned long x
  926. * memory node, but we have no highmem, so that works for now.
  927. * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
  928. * of the shifts unnecessary.
  929. + *
  930. + * PFNs are used to map physical pages. So PFN[0] maps to the base phys addr.
  931. */
  932. #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
  933. #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
  934. --- a/include/asm-m68k/page_offset.h
  935. +++ b/include/asm-m68k/page_offset.h
  936. @@ -1,8 +1,11 @@
  937. /* This handles the memory map.. */
  938. -#ifndef CONFIG_SUN3
  939. +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
  940. #define PAGE_OFFSET_RAW 0x00000000
  941. -#else
  942. +#elif defined(CONFIG_SUN3)
  943. #define PAGE_OFFSET_RAW 0x0E000000
  944. +#else /* CONFIG_COLDFIRE */
  945. +#define PAGE_OFFSET_RAW 0xC0000000
  946. +#define PHYS_OFFSET 0x40000000
  947. #endif
  948. --- a/include/asm-m68k/pci.h
  949. +++ b/include/asm-m68k/pci.h
  950. @@ -1,57 +1,86 @@
  951. -#ifndef _ASM_M68K_PCI_H
  952. -#define _ASM_M68K_PCI_H
  953. -
  954. /*
  955. - * asm-m68k/pci_m68k.h - m68k specific PCI declarations.
  956. + * asm-m68k/pci.h - m68k specific PCI declarations.
  957. *
  958. - * Written by Wout Klaren.
  959. + * Coldfire Implementation Copyright (c) 2007 Freescale Semiconductor, Inc.
  960. + * Kurt Mahan <[email protected]>
  961. */
  962. +#ifndef _ASM_M68K_PCI_H
  963. +#define _ASM_M68K_PCI_H
  964. -#include <asm/scatterlist.h>
  965. +#ifdef CONFIG_PCI
  966. -struct pci_ops;
  967. +#include <asm-generic/pci-dma-compat.h>
  968. /*
  969. - * Structure with hardware dependent information and functions of the
  970. - * PCI bus.
  971. + * The PCI address space does equal the physical memory
  972. + * address space. The networking and block device layers use
  973. + * this boolean for bounce buffer decisions.
  974. */
  975. +#define PCI_DMA_BUS_IS_PHYS (1)
  976. -struct pci_bus_info
  977. -{
  978. - /*
  979. - * Resources of the PCI bus.
  980. - */
  981. -
  982. - struct resource mem_space;
  983. - struct resource io_space;
  984. +#define PCIBIOS_MIN_IO 0x00004000
  985. +#define PCIBIOS_MIN_MEM 0x02000000
  986. - /*
  987. - * System dependent functions.
  988. - */
  989. +#define pcibios_assign_all_busses() 0
  990. +#define pcibios_scan_all_fns(a, b) 0
  991. - struct pci_ops *m68k_pci_ops;
  992. +static inline void
  993. +pcibios_set_master(struct pci_dev *dev)
  994. +{
  995. + /* no special bus mastering setup handling */
  996. +}
  997. - void (*fixup)(int pci_modify);
  998. - void (*conf_device)(struct pci_dev *dev);
  999. -};
  1000. +static inline void
  1001. +pcibios_penalize_isa_irq(int irq, int active)
  1002. +{
  1003. + /* no dynamic PCI IRQ allocation */
  1004. +}
  1005. -#define pcibios_assign_all_busses() 0
  1006. -#define pcibios_scan_all_fns(a, b) 0
  1007. +static inline void
  1008. +pcibios_add_platform_entries(struct pci_dev *dev)
  1009. +{
  1010. + /* no special handling */
  1011. +}
  1012. -static inline void pcibios_set_master(struct pci_dev *dev)
  1013. +static inline void
  1014. +pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
  1015. + struct resource *res)
  1016. {
  1017. - /* No special bus mastering setup handling */
  1018. +#ifdef CONFIG_M54455
  1019. + if ((res->start == 0xa0000000) || (res->start == 0xa8000000)) {
  1020. + /* HACK! FIX! kludge to fix bridge mapping */
  1021. + region->start = res->start & 0x0fffffff;
  1022. + region->end = res->end & 0x0fffffff;
  1023. + } else {
  1024. + region->start = res->start;
  1025. + region->end = res->end;
  1026. + }
  1027. +#else
  1028. + region->start = res->start;
  1029. + region->end = res->end;
  1030. +#endif
  1031. }
  1032. -static inline void pcibios_penalize_isa_irq(int irq, int active)
  1033. +static inline void
  1034. +pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
  1035. + struct pci_bus_region *region)
  1036. {
  1037. - /* We don't do dynamic PCI IRQ allocation */
  1038. + res->start = region->start;
  1039. + res->end = region->end;
  1040. }
  1041. -/* The PCI address space does equal the physical memory
  1042. - * address space. The networking and block device layers use
  1043. - * this boolean for bounce buffer decisions.
  1044. - */
  1045. -#define PCI_DMA_BUS_IS_PHYS (1)
  1046. +static inline struct resource *
  1047. +pcibios_select_root(struct pci_dev *pdev, struct resource *res)
  1048. +{
  1049. + struct resource *root = NULL;
  1050. +
  1051. + if (res->flags & IORESOURCE_IO)
  1052. + root = &ioport_resource;
  1053. + if (res->flags & IORESOURCE_MEM)
  1054. + root = &iomem_resource;
  1055. +
  1056. + return root;
  1057. +}
  1058. +#endif /* CONFIG_PCI */
  1059. #endif /* _ASM_M68K_PCI_H */
  1060. --- a/include/asm-m68k/pgalloc.h
  1061. +++ b/include/asm-m68k/pgalloc.h
  1062. @@ -8,8 +8,10 @@
  1063. #include <asm/virtconvert.h>
  1064. -#ifdef CONFIG_SUN3
  1065. +#if defined(CONFIG_SUN3)
  1066. #include <asm/sun3_pgalloc.h>
  1067. +#elif defined(CONFIG_COLDFIRE)
  1068. +#include <asm/cf_pgalloc.h>
  1069. #else
  1070. #include <asm/motorola_pgalloc.h>
  1071. #endif
  1072. --- a/include/asm-m68k/pgtable.h
  1073. +++ b/include/asm-m68k/pgtable.h
  1074. @@ -40,6 +40,8 @@
  1075. /* PGDIR_SHIFT determines what a third-level page table entry can map */
  1076. #ifdef CONFIG_SUN3
  1077. #define PGDIR_SHIFT 17
  1078. +#elif defined(CONFIG_COLDFIRE)
  1079. +#define PGDIR_SHIFT 22
  1080. #else
  1081. #define PGDIR_SHIFT 25
  1082. #endif
  1083. @@ -54,6 +56,10 @@
  1084. #define PTRS_PER_PTE 16
  1085. #define PTRS_PER_PMD 1
  1086. #define PTRS_PER_PGD 2048
  1087. +#elif defined(CONFIG_COLDFIRE)
  1088. +#define PTRS_PER_PTE 512
  1089. +#define PTRS_PER_PMD 1
  1090. +#define PTRS_PER_PGD 1024
  1091. #else
  1092. #define PTRS_PER_PTE 1024
  1093. #define PTRS_PER_PMD 8
  1094. @@ -66,6 +72,9 @@
  1095. #ifdef CONFIG_SUN3
  1096. #define KMAP_START 0x0DC00000
  1097. #define KMAP_END 0x0E000000
  1098. +#elif defined(CONFIG_COLDFIRE)
  1099. +#define KMAP_START 0xe0000000
  1100. +#define KMAP_END 0xf0000000
  1101. #else
  1102. #define KMAP_START 0xd0000000
  1103. #define KMAP_END 0xf0000000
  1104. @@ -130,6 +139,8 @@ static inline void update_mmu_cache(stru
  1105. #ifdef CONFIG_SUN3
  1106. #include <asm/sun3_pgtable.h>
  1107. +#elif defined(CONFIG_COLDFIRE)
  1108. +#include <asm/cf_pgtable.h>
  1109. #else
  1110. #include <asm/motorola_pgtable.h>
  1111. #endif
  1112. @@ -140,6 +151,9 @@ static inline void update_mmu_cache(stru
  1113. /*
  1114. * Macro to mark a page protection value as "uncacheable".
  1115. */
  1116. +#ifdef CONFIG_COLDFIRE
  1117. +# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
  1118. +#else /* CONFIG_COLDFIRE */
  1119. #ifdef SUN3_PAGE_NOCACHE
  1120. # define __SUN3_PAGE_NOCACHE SUN3_PAGE_NOCACHE
  1121. #else
  1122. @@ -154,6 +168,7 @@ static inline void update_mmu_cache(stru
  1123. ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S)) \
  1124. : (prot)))
  1125. +#endif /* CONFIG_COLDFIRE */
  1126. #endif /* !__ASSEMBLY__ */
  1127. /*
  1128. --- a/include/asm-m68k/processor.h
  1129. +++ b/include/asm-m68k/processor.h
  1130. @@ -22,24 +22,38 @@ static inline unsigned long rdusp(void)
  1131. {
  1132. unsigned long usp;
  1133. +#ifndef CONFIG_COLDFIRE
  1134. __asm__ __volatile__("move %/usp,%0" : "=a" (usp));
  1135. +#else
  1136. + __asm__ __volatile__("movel %/usp,%0" : "=a" (usp));
  1137. +#endif
  1138. return usp;
  1139. }
  1140. static inline void wrusp(unsigned long usp)
  1141. {
  1142. +#ifndef CONFIG_COLDFIRE
  1143. __asm__ __volatile__("move %0,%/usp" : : "a" (usp));
  1144. +#else
  1145. + __asm__ __volatile__("movel %0,%/usp" : : "a" (usp));
  1146. +#endif
  1147. }
  1148. /*
  1149. * User space process size: 3.75GB. This is hardcoded into a few places,
  1150. * so don't change it unless you know what you are doing.
  1151. */
  1152. -#ifndef CONFIG_SUN3
  1153. +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
  1154. #define TASK_SIZE (0xF0000000UL)
  1155. +#elif defined(CONFIG_COLDFIRE)
  1156. +#define TASK_SIZE (0xC0000000UL)
  1157. +#else /* CONFIG_SUN3 */
  1158. +#ifdef __ASSEMBLY__
  1159. +#define TASK_SIZE (0x0E000000)
  1160. #else
  1161. #define TASK_SIZE (0x0E000000UL)
  1162. #endif
  1163. +#endif
  1164. #ifdef __KERNEL__
  1165. #define STACK_TOP TASK_SIZE
  1166. @@ -49,9 +63,11 @@ static inline void wrusp(unsigned long u
  1167. /* This decides where the kernel will search for a free chunk of vm
  1168. * space during mmap's.
  1169. */
  1170. -#ifndef CONFIG_SUN3
  1171. -#define TASK_UNMAPPED_BASE 0xC0000000UL
  1172. -#else
  1173. +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
  1174. +#define TASK_UNMAPPED_BASE 0xC0000000UL
  1175. +#elif defined(CONFIG_COLDFIRE)
  1176. +#define TASK_UNMAPPED_BASE 0x80000000UL
  1177. +#else /* CONFIG_SUN3 */
  1178. #define TASK_UNMAPPED_BASE 0x0A000000UL
  1179. #endif
  1180. #define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
  1181. @@ -60,7 +76,11 @@ struct thread_struct {
  1182. unsigned long ksp; /* kernel stack pointer */
  1183. unsigned long usp; /* user stack pointer */
  1184. unsigned short sr; /* saved status register */
  1185. +#ifndef CONFIG_COLDFIRE
  1186. unsigned short fs; /* saved fs (sfc, dfc) */
  1187. +#else
  1188. + mm_segment_t fs;
  1189. +#endif
  1190. unsigned long crp[2]; /* cpu root pointer */
  1191. unsigned long esp0; /* points to SR of stack frame */
  1192. unsigned long faddr; /* info about last fault */
  1193. @@ -81,6 +101,7 @@ struct thread_struct {
  1194. /*
  1195. * Do necessary setup to start up a newly executed thread.
  1196. */
  1197. +#ifndef CONFIG_COLDFIRE
  1198. static inline void start_thread(struct pt_regs * regs, unsigned long pc,
  1199. unsigned long usp)
  1200. {
  1201. @@ -91,6 +112,23 @@ static inline void start_thread(struct p
  1202. regs->sr &= ~0x2000;
  1203. wrusp(usp);
  1204. }
  1205. +#else
  1206. +/*
  1207. + * Do necessary setup to start up a newly executed thread.
  1208. + *
  1209. + * pass the data segment into user programs if it exists,
  1210. + * it can't hurt anything as far as I can tell
  1211. + */
  1212. +#define start_thread(_regs, _pc, _usp) \
  1213. +do { \
  1214. + set_fs(USER_DS); /* reads from user space */ \
  1215. + (_regs)->pc = (_pc); \
  1216. + if (current->mm) \
  1217. + (_regs)->d5 = current->mm->start_data; \
  1218. + (_regs)->sr &= ~0x2000; \
  1219. + wrusp(_usp); \
  1220. +} while (0)
  1221. +#endif
  1222. /* Forward declaration, a strange C thing */
  1223. struct task_struct;
  1224. --- a/include/asm-m68k/ptrace.h
  1225. +++ b/include/asm-m68k/ptrace.h
  1226. @@ -38,10 +38,21 @@ struct pt_regs {
  1227. long d0;
  1228. long orig_d0;
  1229. long stkadj;
  1230. +#ifndef CONFIG_COLDFIRE
  1231. unsigned short sr;
  1232. unsigned long pc;
  1233. unsigned format : 4; /* frame format specifier */
  1234. unsigned vector : 12; /* vector offset */
  1235. +#else
  1236. + unsigned long mmuar;
  1237. + unsigned long mmusr;
  1238. + unsigned format : 4; /* frame format specifier */
  1239. + unsigned fs2 : 2;
   1240. + unsigned vector : 8;
  1241. + unsigned fs1 : 2;
  1242. + unsigned short sr;
  1243. + unsigned long pc;
  1244. +#endif
  1245. };
  1246. /*
  1247. --- a/include/asm-m68k/raw_io.h
  1248. +++ b/include/asm-m68k/raw_io.h
  1249. @@ -77,6 +77,7 @@ static inline void raw_outsb(volatile u8
  1250. out_8(port, *buf++);
  1251. }
  1252. +#ifndef CONFIG_COLDFIRE
  1253. static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
  1254. {
  1255. unsigned int tmp;
  1256. @@ -342,6 +343,63 @@ static inline void raw_outsw_swapw(volat
  1257. : "d0", "a0", "a1", "d6");
  1258. }
  1259. +
   1260. +#else /* CONFIG_COLDFIRE */
  1261. +
   1262. +static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
  1263. +{
  1264. + unsigned int i;
  1265. +
  1266. + for (i = 0; i < nr; i++)
  1267. + *buf++ = raw_inw(port);
  1268. +}
  1269. +
   1270. +static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
  1271. + unsigned int nr)
  1272. +{
  1273. + unsigned int i;
  1274. +
  1275. + for (i = 0; i < nr; i++, buf++)
  1276. + raw_outw(*buf, port);
  1277. +}
  1278. +
   1279. +static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
  1280. +{
  1281. + unsigned int i;
  1282. +
  1283. + for (i = 0; i < nr; i++)
  1284. + *buf++ = raw_inl(port);
  1285. +}
  1286. +
   1287. +static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
  1288. + unsigned int nr)
  1289. +{
  1290. + unsigned int i;
  1291. +
  1292. + for (i = 0; i < nr; i++, buf++)
  1293. + raw_outl(*buf, port);
  1294. +}
  1295. +
   1296. +static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
  1297. + unsigned int nr)
  1298. +{
  1299. + unsigned int i;
  1300. +
  1301. + for (i = 0; i < nr; i++)
  1302. + *buf++ = in_le16(port);
  1303. +
  1304. +}
  1305. +
  1306. +static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
  1307. + unsigned int nr)
  1308. +{
  1309. + unsigned int i;
  1310. +
  1311. + for (i = 0; i < nr; i++, buf++)
  1312. + out_le16(port, *buf);
  1313. +}
   1314. +#endif /* CONFIG_COLDFIRE */
  1315. +
  1316. #endif /* __KERNEL__ */
  1317. #endif /* _RAW_IO_H */
  1318. --- a/include/asm-m68k/segment.h
  1319. +++ b/include/asm-m68k/segment.h
  1320. @@ -29,6 +29,7 @@ typedef struct {
  1321. * Get/set the SFC/DFC registers for MOVES instructions
  1322. */
  1323. +#ifndef CONFIG_COLDFIRE
  1324. static inline mm_segment_t get_fs(void)
  1325. {
  1326. mm_segment_t _v;
  1327. @@ -50,6 +51,15 @@ static inline void set_fs(mm_segment_t v
  1328. : /* no outputs */ : "r" (val.seg) : "memory");
  1329. }
  1330. +#else /* CONFIG_COLDFIRE */
  1331. +
  1332. +#include <asm/current.h>
  1333. +#define get_fs() (current->thread.fs)
  1334. +#define set_fs(val) (current->thread.fs = (val))
  1335. +#define get_ds() (KERNEL_DS)
  1336. +
  1337. +#endif /* CONFIG_COLDFIRE */
  1338. +
  1339. #define segment_eq(a,b) ((a).seg == (b).seg)
  1340. #endif /* __ASSEMBLY__ */
  1341. --- a/include/asm-m68k/setup.h
  1342. +++ b/include/asm-m68k/setup.h
  1343. @@ -40,6 +40,7 @@
  1344. #define MACH_HP300 9
  1345. #define MACH_Q40 10
  1346. #define MACH_SUN3X 11
  1347. +#define MACH_CFMMU 12
  1348. #define COMMAND_LINE_SIZE 256
  1349. @@ -189,6 +190,14 @@ extern unsigned long m68k_machtype;
  1350. # define MACH_TYPE (MACH_SUN3X)
  1351. #endif
  1352. +#if !defined(CONFIG_COLDFIRE)
  1353. +# define MACH_IS_COLDFIRE (0)
  1354. +#else
  1355. +# define CONFIG_COLDFIRE_ONLY
  1356. +# define MACH_IS_COLDFIRE (1)
  1357. +# define MACH_TYPE (MACH_CFMMU)
  1358. +#endif
  1359. +
  1360. #ifndef MACH_TYPE
  1361. # define MACH_TYPE (m68k_machtype)
  1362. #endif
  1363. @@ -211,23 +220,31 @@ extern unsigned long m68k_machtype;
  1364. #define CPUB_68030 1
  1365. #define CPUB_68040 2
  1366. #define CPUB_68060 3
  1367. +#define CPUB_CFV4E 4
  1368. #define CPU_68020 (1<<CPUB_68020)
  1369. #define CPU_68030 (1<<CPUB_68030)
  1370. #define CPU_68040 (1<<CPUB_68040)
  1371. #define CPU_68060 (1<<CPUB_68060)
  1372. +#define CPU_CFV4E (1<<CPUB_CFV4E)
  1373. #define FPUB_68881 0
  1374. #define FPUB_68882 1
  1375. #define FPUB_68040 2 /* Internal FPU */
  1376. #define FPUB_68060 3 /* Internal FPU */
  1377. #define FPUB_SUNFPA 4 /* Sun-3 FPA */
  1378. +#define FPUB_CFV4E 5
  1379. #define FPU_68881 (1<<FPUB_68881)
  1380. #define FPU_68882 (1<<FPUB_68882)
  1381. #define FPU_68040 (1<<FPUB_68040)
  1382. #define FPU_68060 (1<<FPUB_68060)
  1383. #define FPU_SUNFPA (1<<FPUB_SUNFPA)
  1384. +#ifndef CONFIG_M54455
  1385. +#define FPU_CFV4E (1<<FPUB_CFV4E)
  1386. +#else
  1387. +#define FPU_CFV4E 0
  1388. +#endif
  1389. #define MMUB_68851 0
  1390. #define MMUB_68030 1 /* Internal MMU */
  1391. @@ -235,6 +252,7 @@ extern unsigned long m68k_machtype;
  1392. #define MMUB_68060 3 /* Internal MMU */
  1393. #define MMUB_APOLLO 4 /* Custom Apollo */
  1394. #define MMUB_SUN3 5 /* Custom Sun-3 */
  1395. +#define MMUB_CFV4E 6
  1396. #define MMU_68851 (1<<MMUB_68851)
  1397. #define MMU_68030 (1<<MMUB_68030)
  1398. @@ -242,6 +260,7 @@ extern unsigned long m68k_machtype;
  1399. #define MMU_68060 (1<<MMUB_68060)
  1400. #define MMU_SUN3 (1<<MMUB_SUN3)
  1401. #define MMU_APOLLO (1<<MMUB_APOLLO)
  1402. +#define MMU_CFV4E (1<<MMUB_CFV4E)
  1403. #ifdef __KERNEL__
  1404. @@ -341,6 +360,14 @@ extern int m68k_is040or060;
  1405. # endif
  1406. #endif
  1407. +#if !defined(CONFIG_CFV4E)
  1408. +# define CPU_IS_COLDFIRE (0)
  1409. +#else
  1410. +# define CPU_IS_COLDFIRE (1)
  1411. +# define CPU_IS_CFV4E (1)
  1412. +# define MMU_IS_CFV4E (1)
  1413. +#endif
  1414. +
  1415. #define CPU_TYPE (m68k_cputype)
  1416. #ifdef CONFIG_M68KFPU_EMU
  1417. --- a/include/asm-m68k/signal.h
  1418. +++ b/include/asm-m68k/signal.h
  1419. @@ -150,6 +150,7 @@ typedef struct sigaltstack {
  1420. #ifdef __KERNEL__
  1421. #include <asm/sigcontext.h>
  1422. +#ifndef CONFIG_COLDFIRE
  1423. #define __HAVE_ARCH_SIG_BITOPS
  1424. static inline void sigaddset(sigset_t *set, int _sig)
  1425. @@ -200,6 +201,10 @@ static inline int sigfindinword(unsigned
  1426. struct pt_regs;
  1427. extern void ptrace_signal_deliver(struct pt_regs *regs, void *cookie);
  1428. +#else
  1429. +
  1430. +#define ptrace_signal_deliver(regs, cookie) do { } while (0)
  1431. +#endif /* CONFIG_COLDFIRE */
  1432. #endif /* __KERNEL__ */
  1433. --- a/include/asm-m68k/string.h
  1434. +++ b/include/asm-m68k/string.h
  1435. @@ -93,6 +93,7 @@ static inline char *strchr(const char *s
  1436. return (char *)s - 1;
  1437. }
  1438. +#ifndef CONFIG_COLDFIRE
  1439. #define __HAVE_ARCH_STRCMP
  1440. static inline int strcmp(const char *cs, const char *ct)
  1441. {
  1442. @@ -110,6 +111,7 @@ static inline int strcmp(const char *cs,
  1443. : "+a" (cs), "+a" (ct), "=d" (res));
  1444. return res;
  1445. }
  1446. +#endif
  1447. #define __HAVE_ARCH_MEMSET
  1448. extern void *memset(void *, int, __kernel_size_t);
  1449. --- a/include/asm-m68k/system.h
  1450. +++ b/include/asm-m68k/system.h
  1451. @@ -63,16 +63,25 @@ asmlinkage void resume(void);
  1452. #define smp_read_barrier_depends() ((void)0)
  1453. /* interrupt control.. */
  1454. -#if 0
  1455. -#define local_irq_enable() asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory")
  1456. -#else
  1457. #include <linux/hardirq.h>
  1458. +#ifndef CONFIG_COLDFIRE
  1459. #define local_irq_enable() ({ \
  1460. if (MACH_IS_Q40 || !hardirq_count()) \
  1461. asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
  1462. })
  1463. -#endif
  1464. #define local_irq_disable() asm volatile ("oriw #0x0700,%%sr": : : "memory")
  1465. +#else /* CONFIG_COLDFIRE */
  1466. +#define local_irq_enable() \
  1467. + asm volatile ("move.w %%sr, %%d0\n\t" \
  1468. + "andil #0xf8ff,%%d0\n\t" \
  1469. + "move.w %%d0, %%sr\n" \
  1470. + : : : "cc", "d0", "memory")
  1471. +#define local_irq_disable() \
  1472. + asm volatile ("move %/sr,%%d0\n\t" \
  1473. + "ori.l #0x0700,%%d0\n\t" \
  1474. + "move %%d0,%/sr\n" \
  1475. + : : : "cc", "%d0", "memory")
  1476. +#endif
  1477. #define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
  1478. #define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")
  1479. --- a/include/asm-m68k/thread_info.h
  1480. +++ b/include/asm-m68k/thread_info.h
  1481. @@ -58,5 +58,6 @@ struct thread_info {
  1482. #define TIF_DELAYED_TRACE 14 /* single step a syscall */
  1483. #define TIF_SYSCALL_TRACE 15 /* syscall trace active */
  1484. #define TIF_MEMDIE 16
  1485. +#define TIF_FREEZE 17 /* freezing processes */
  1486. #endif /* _ASM_M68K_THREAD_INFO_H */
  1487. --- a/include/asm-m68k/tlbflush.h
  1488. +++ b/include/asm-m68k/tlbflush.h
  1489. @@ -2,7 +2,7 @@
  1490. #define _M68K_TLBFLUSH_H
  1491. -#ifndef CONFIG_SUN3
  1492. +#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE)
  1493. #include <asm/current.h>
  1494. @@ -92,7 +92,12 @@ static inline void flush_tlb_kernel_rang
  1495. flush_tlb_all();
  1496. }
  1497. -#else
  1498. +static inline void flush_tlb_pgtables(struct mm_struct *mm,
  1499. + unsigned long start, unsigned long end)
  1500. +{
  1501. +}
  1502. +
  1503. +#elif defined(CONFIG_SUN3)
  1504. /* Reserved PMEGs. */
  1505. @@ -214,6 +219,13 @@ static inline void flush_tlb_kernel_page
  1506. sun3_put_segmap (addr & ~(SUN3_PMEG_SIZE - 1), SUN3_INVALID_PMEG);
  1507. }
  1508. +static inline void flush_tlb_pgtables(struct mm_struct *mm,
  1509. + unsigned long start, unsigned long end)
  1510. +{
  1511. +}
  1512. +
  1513. +#else /* CONFIG_COLDFIRE */
  1514. +#include <asm/cf_tlbflush.h>
  1515. #endif
  1516. #endif /* _M68K_TLBFLUSH_H */
  1517. --- a/include/asm-m68k/uaccess.h
  1518. +++ b/include/asm-m68k/uaccess.h
  1519. @@ -1,6 +1,9 @@
  1520. #ifndef __M68K_UACCESS_H
  1521. #define __M68K_UACCESS_H
  1522. +#ifdef CONFIG_COLDFIRE
  1523. +#include <asm/cf_uaccess.h>
  1524. +#else
  1525. /*
  1526. * User space memory access functions
  1527. */
  1528. @@ -367,4 +370,5 @@ unsigned long __clear_user(void __user *
  1529. #define strlen_user(str) strnlen_user(str, 32767)
  1530. +#endif /* CONFIG_COLDFIRE */
  1531. #endif /* _M68K_UACCESS_H */