001-fix_make_headers_install.patch 100 KB

  1. From faec6b6c2cc0219e74569c13f581fc11d8f3fc57 Mon Sep 17 00:00:00 2001
  2. From: Florian Fainelli <[email protected]>
  3. Date: Sun, 17 Mar 2013 20:12:10 +0100
  4. Subject: [PATCH] UM: fix make headers_install after UAPI header installation
  5. Commit 10b63956 (UAPI: Plumb the UAPI Kbuilds into the user
  6. header installation and checking) breaks UML make headers_install with
  7. the following:
  8. $ ARCH=um make headers_install
  9. CHK include/generated/uapi/linux/version.h
  10. UPD include/generated/uapi/linux/version.h
  11. HOSTCC scripts/basic/fixdep
  12. WRAP arch/um/include/generated/asm/bug.h
  13. [snip]
  14. WRAP arch/um/include/generated/asm/trace_clock.h
  15. SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_32.h
  16. SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_64.h
  17. SYSHDR arch/x86/syscalls/../include/generated/uapi/asm/unistd_x32.h
  18. SYSTBL arch/x86/syscalls/../include/generated/asm/syscalls_32.h
  19. HOSTCC scripts/unifdef
  20. Makefile:912: *** Headers not exportable for the um architecture. Stop.
  21. zsh: exit 2 ARCH=um make headers_install
  22. The reason for that is because the top-level Makefile does the
  23. following:
  24. $(if $(wildcard $(srctree)/arch/$(hdr-arch)/include/uapi/asm/Kbuild),, \
  25. $(error Headers not exportable for the $(SRCARCH) architecture))
  26. we end-up in the else part of the $(if) statement because UML still uses
  27. the old path in arch/um/include/asm/Kbuild. This patch fixes the issue
  28. by moving the header files to be in arch/um/include/uapi/asm/ thus
  29. making headers_install (and other make targets checking for uapi) to
  30. succeed.
  31. Signed-off-by: Florian Fainelli <[email protected]>
  32. ---
  33. Richard, this has been broken for 3.7+ onwards, if you want me to send
  34. you separate patches for 3.7 and 3.8 let me know. Thanks!
  35. arch/um/include/{ => uapi}/asm/Kbuild | 0
  36. arch/um/include/{ => uapi}/asm/a.out-core.h | 0
  37. arch/um/include/{ => uapi}/asm/bugs.h | 0
  38. arch/um/include/{ => uapi}/asm/cache.h | 0
  39. arch/um/include/{ => uapi}/asm/common.lds.S | 0
  40. arch/um/include/{ => uapi}/asm/dma.h | 0
  41. arch/um/include/{ => uapi}/asm/fixmap.h | 0
  42. arch/um/include/{ => uapi}/asm/irq.h | 0
  43. arch/um/include/{ => uapi}/asm/irqflags.h | 0
  44. arch/um/include/{ => uapi}/asm/kmap_types.h | 0
  45. arch/um/include/{ => uapi}/asm/kvm_para.h | 0
  46. arch/um/include/{ => uapi}/asm/mmu.h | 0
  47. arch/um/include/{ => uapi}/asm/mmu_context.h | 0
  48. arch/um/include/{ => uapi}/asm/page.h | 0
  49. arch/um/include/{ => uapi}/asm/pgalloc.h | 0
  50. arch/um/include/{ => uapi}/asm/pgtable-2level.h | 0
  51. arch/um/include/{ => uapi}/asm/pgtable-3level.h | 0
  52. arch/um/include/{ => uapi}/asm/pgtable.h | 0
  53. arch/um/include/{ => uapi}/asm/processor-generic.h | 0
  54. arch/um/include/{ => uapi}/asm/ptrace-generic.h | 0
  55. arch/um/include/{ => uapi}/asm/setup.h | 0
  56. arch/um/include/{ => uapi}/asm/smp.h | 0
  57. arch/um/include/{ => uapi}/asm/sysrq.h | 0
  58. arch/um/include/{ => uapi}/asm/thread_info.h | 0
  59. arch/um/include/{ => uapi}/asm/timex.h | 0
  60. arch/um/include/{ => uapi}/asm/tlb.h | 0
  61. arch/um/include/{ => uapi}/asm/tlbflush.h | 0
  62. arch/um/include/{ => uapi}/asm/uaccess.h | 0
  63. 28 files changed, 0 insertions(+), 0 deletions(-)
  64. rename arch/um/include/{ => uapi}/asm/Kbuild (100%)
  65. rename arch/um/include/{ => uapi}/asm/a.out-core.h (100%)
  66. rename arch/um/include/{ => uapi}/asm/bugs.h (100%)
  67. rename arch/um/include/{ => uapi}/asm/cache.h (100%)
  68. rename arch/um/include/{ => uapi}/asm/common.lds.S (100%)
  69. rename arch/um/include/{ => uapi}/asm/dma.h (100%)
  70. rename arch/um/include/{ => uapi}/asm/fixmap.h (100%)
  71. rename arch/um/include/{ => uapi}/asm/irq.h (100%)
  72. rename arch/um/include/{ => uapi}/asm/irqflags.h (100%)
  73. rename arch/um/include/{ => uapi}/asm/kmap_types.h (100%)
  74. rename arch/um/include/{ => uapi}/asm/kvm_para.h (100%)
  75. rename arch/um/include/{ => uapi}/asm/mmu.h (100%)
  76. rename arch/um/include/{ => uapi}/asm/mmu_context.h (100%)
  77. rename arch/um/include/{ => uapi}/asm/page.h (100%)
  78. rename arch/um/include/{ => uapi}/asm/pgalloc.h (100%)
  79. rename arch/um/include/{ => uapi}/asm/pgtable-2level.h (100%)
  80. rename arch/um/include/{ => uapi}/asm/pgtable-3level.h (100%)
  81. rename arch/um/include/{ => uapi}/asm/pgtable.h (100%)
  82. rename arch/um/include/{ => uapi}/asm/processor-generic.h (100%)
  83. rename arch/um/include/{ => uapi}/asm/ptrace-generic.h (100%)
  84. rename arch/um/include/{ => uapi}/asm/setup.h (100%)
  85. rename arch/um/include/{ => uapi}/asm/smp.h (100%)
  86. rename arch/um/include/{ => uapi}/asm/sysrq.h (100%)
  87. rename arch/um/include/{ => uapi}/asm/thread_info.h (100%)
  88. rename arch/um/include/{ => uapi}/asm/timex.h (100%)
  89. rename arch/um/include/{ => uapi}/asm/tlb.h (100%)
  90. rename arch/um/include/{ => uapi}/asm/tlbflush.h (100%)
  91. rename arch/um/include/{ => uapi}/asm/uaccess.h (100%)
  92. --- a/arch/um/include/asm/Kbuild
  93. +++ /dev/null
  94. @@ -1,8 +0,0 @@
  95. -generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
  96. -generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
  97. -generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
  98. -generic-y += switch_to.h clkdev.h
  99. -generic-y += trace_clock.h
  100. -generic-y += preempt.h
  101. -generic-y += hash.h
  102. -generic-y += barrier.h
  103. --- a/arch/um/include/asm/a.out-core.h
  104. +++ /dev/null
  105. @@ -1,27 +0,0 @@
  106. -/* a.out coredump register dumper
  107. - *
  108. - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  109. - * Written by David Howells ([email protected])
  110. - *
  111. - * This program is free software; you can redistribute it and/or
  112. - * modify it under the terms of the GNU General Public Licence
  113. - * as published by the Free Software Foundation; either version
  114. - * 2 of the Licence, or (at your option) any later version.
  115. - */
  116. -
  117. -#ifndef __UM_A_OUT_CORE_H
  118. -#define __UM_A_OUT_CORE_H
  119. -
  120. -#ifdef __KERNEL__
  121. -
  122. -#include <linux/user.h>
  123. -
  124. -/*
  125. - * fill in the user structure for an a.out core dump
  126. - */
  127. -static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
  128. -{
  129. -}
  130. -
  131. -#endif /* __KERNEL__ */
  132. -#endif /* __UM_A_OUT_CORE_H */
  133. --- a/arch/um/include/asm/bugs.h
  134. +++ /dev/null
  135. @@ -1,6 +0,0 @@
  136. -#ifndef __UM_BUGS_H
  137. -#define __UM_BUGS_H
  138. -
  139. -void check_bugs(void);
  140. -
  141. -#endif
  142. --- a/arch/um/include/asm/cache.h
  143. +++ /dev/null
  144. @@ -1,17 +0,0 @@
  145. -#ifndef __UM_CACHE_H
  146. -#define __UM_CACHE_H
  147. -
  148. -
  149. -#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
  150. -# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
  151. -#elif defined(CONFIG_UML_X86) /* 64-bit */
  152. -# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
  153. -#else
  154. -/* XXX: this was taken from x86, now it's completely random. Luckily only
  155. - * affects SMP padding. */
  156. -# define L1_CACHE_SHIFT 5
  157. -#endif
  158. -
  159. -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
  160. -
  161. -#endif
  162. --- a/arch/um/include/asm/common.lds.S
  163. +++ /dev/null
  164. @@ -1,107 +0,0 @@
  165. -#include <asm-generic/vmlinux.lds.h>
  166. -
  167. - .fini : { *(.fini) } =0x9090
  168. - _etext = .;
  169. - PROVIDE (etext = .);
  170. -
  171. - . = ALIGN(4096);
  172. - _sdata = .;
  173. - PROVIDE (sdata = .);
  174. -
  175. - RODATA
  176. -
  177. - .unprotected : { *(.unprotected) }
  178. - . = ALIGN(4096);
  179. - PROVIDE (_unprotected_end = .);
  180. -
  181. - . = ALIGN(4096);
  182. - .note : { *(.note.*) }
  183. - EXCEPTION_TABLE(0)
  184. -
  185. - BUG_TABLE
  186. -
  187. - .uml.setup.init : {
  188. - __uml_setup_start = .;
  189. - *(.uml.setup.init)
  190. - __uml_setup_end = .;
  191. - }
  192. -
  193. - .uml.help.init : {
  194. - __uml_help_start = .;
  195. - *(.uml.help.init)
  196. - __uml_help_end = .;
  197. - }
  198. -
  199. - .uml.postsetup.init : {
  200. - __uml_postsetup_start = .;
  201. - *(.uml.postsetup.init)
  202. - __uml_postsetup_end = .;
  203. - }
  204. -
  205. - .init.setup : {
  206. - INIT_SETUP(0)
  207. - }
  208. -
  209. - PERCPU_SECTION(32)
  210. -
  211. - .initcall.init : {
  212. - INIT_CALLS
  213. - }
  214. -
  215. - .con_initcall.init : {
  216. - CON_INITCALL
  217. - }
  218. -
  219. - .uml.initcall.init : {
  220. - __uml_initcall_start = .;
  221. - *(.uml.initcall.init)
  222. - __uml_initcall_end = .;
  223. - }
  224. -
  225. - SECURITY_INIT
  226. -
  227. - .exitcall : {
  228. - __exitcall_begin = .;
  229. - *(.exitcall.exit)
  230. - __exitcall_end = .;
  231. - }
  232. -
  233. - .uml.exitcall : {
  234. - __uml_exitcall_begin = .;
  235. - *(.uml.exitcall.exit)
  236. - __uml_exitcall_end = .;
  237. - }
  238. -
  239. - . = ALIGN(4);
  240. - .altinstructions : {
  241. - __alt_instructions = .;
  242. - *(.altinstructions)
  243. - __alt_instructions_end = .;
  244. - }
  245. - .altinstr_replacement : { *(.altinstr_replacement) }
  246. - /* .exit.text is discard at runtime, not link time, to deal with references
  247. - from .altinstructions and .eh_frame */
  248. - .exit.text : { *(.exit.text) }
  249. - .exit.data : { *(.exit.data) }
  250. -
  251. - .preinit_array : {
  252. - __preinit_array_start = .;
  253. - *(.preinit_array)
  254. - __preinit_array_end = .;
  255. - }
  256. - .init_array : {
  257. - __init_array_start = .;
  258. - *(.init_array)
  259. - __init_array_end = .;
  260. - }
  261. - .fini_array : {
  262. - __fini_array_start = .;
  263. - *(.fini_array)
  264. - __fini_array_end = .;
  265. - }
  266. -
  267. - . = ALIGN(4096);
  268. - .init.ramfs : {
  269. - INIT_RAM_FS
  270. - }
  271. -
  272. --- a/arch/um/include/asm/dma.h
  273. +++ /dev/null
  274. @@ -1,10 +0,0 @@
  275. -#ifndef __UM_DMA_H
  276. -#define __UM_DMA_H
  277. -
  278. -#include <asm/io.h>
  279. -
  280. -extern unsigned long uml_physmem;
  281. -
  282. -#define MAX_DMA_ADDRESS (uml_physmem)
  283. -
  284. -#endif
  285. --- a/arch/um/include/asm/fixmap.h
  286. +++ /dev/null
  287. @@ -1,60 +0,0 @@
  288. -#ifndef __UM_FIXMAP_H
  289. -#define __UM_FIXMAP_H
  290. -
  291. -#include <asm/processor.h>
  292. -#include <asm/kmap_types.h>
  293. -#include <asm/archparam.h>
  294. -#include <asm/page.h>
  295. -#include <linux/threads.h>
  296. -
  297. -/*
  298. - * Here we define all the compile-time 'special' virtual
  299. - * addresses. The point is to have a constant address at
  300. - * compile time, but to set the physical address only
  301. - * in the boot process. We allocate these special addresses
  302. - * from the end of virtual memory (0xfffff000) backwards.
  303. - * Also this lets us do fail-safe vmalloc(), we
  304. - * can guarantee that these special addresses and
  305. - * vmalloc()-ed addresses never overlap.
  306. - *
  307. - * these 'compile-time allocated' memory buffers are
  308. - * fixed-size 4k pages. (or larger if used with an increment
  309. - * highger than 1) use fixmap_set(idx,phys) to associate
  310. - * physical memory with fixmap indices.
  311. - *
  312. - * TLB entries of such buffers will not be flushed across
  313. - * task switches.
  314. - */
  315. -
  316. -/*
  317. - * on UP currently we will have no trace of the fixmap mechanizm,
  318. - * no page table allocations, etc. This might change in the
  319. - * future, say framebuffers for the console driver(s) could be
  320. - * fix-mapped?
  321. - */
  322. -enum fixed_addresses {
  323. -#ifdef CONFIG_HIGHMEM
  324. - FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
  325. - FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
  326. -#endif
  327. - __end_of_fixed_addresses
  328. -};
  329. -
  330. -extern void __set_fixmap (enum fixed_addresses idx,
  331. - unsigned long phys, pgprot_t flags);
  332. -
  333. -/*
  334. - * used by vmalloc.c.
  335. - *
  336. - * Leave one empty page between vmalloc'ed areas and
  337. - * the start of the fixmap, and leave one page empty
  338. - * at the top of mem..
  339. - */
  340. -
  341. -#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
  342. -#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
  343. -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
  344. -
  345. -#include <asm-generic/fixmap.h>
  346. -
  347. -#endif
  348. --- a/arch/um/include/asm/irq.h
  349. +++ /dev/null
  350. @@ -1,23 +0,0 @@
  351. -#ifndef __UM_IRQ_H
  352. -#define __UM_IRQ_H
  353. -
  354. -#define TIMER_IRQ 0
  355. -#define UMN_IRQ 1
  356. -#define CONSOLE_IRQ 2
  357. -#define CONSOLE_WRITE_IRQ 3
  358. -#define UBD_IRQ 4
  359. -#define UM_ETH_IRQ 5
  360. -#define SSL_IRQ 6
  361. -#define SSL_WRITE_IRQ 7
  362. -#define ACCEPT_IRQ 8
  363. -#define MCONSOLE_IRQ 9
  364. -#define WINCH_IRQ 10
  365. -#define SIGIO_WRITE_IRQ 11
  366. -#define TELNETD_IRQ 12
  367. -#define XTERM_IRQ 13
  368. -#define RANDOM_IRQ 14
  369. -
  370. -#define LAST_IRQ RANDOM_IRQ
  371. -#define NR_IRQS (LAST_IRQ + 1)
  372. -
  373. -#endif
  374. --- a/arch/um/include/asm/irqflags.h
  375. +++ /dev/null
  376. @@ -1,42 +0,0 @@
  377. -#ifndef __UM_IRQFLAGS_H
  378. -#define __UM_IRQFLAGS_H
  379. -
  380. -extern int get_signals(void);
  381. -extern int set_signals(int enable);
  382. -extern void block_signals(void);
  383. -extern void unblock_signals(void);
  384. -
  385. -static inline unsigned long arch_local_save_flags(void)
  386. -{
  387. - return get_signals();
  388. -}
  389. -
  390. -static inline void arch_local_irq_restore(unsigned long flags)
  391. -{
  392. - set_signals(flags);
  393. -}
  394. -
  395. -static inline void arch_local_irq_enable(void)
  396. -{
  397. - unblock_signals();
  398. -}
  399. -
  400. -static inline void arch_local_irq_disable(void)
  401. -{
  402. - block_signals();
  403. -}
  404. -
  405. -static inline unsigned long arch_local_irq_save(void)
  406. -{
  407. - unsigned long flags;
  408. - flags = arch_local_save_flags();
  409. - arch_local_irq_disable();
  410. - return flags;
  411. -}
  412. -
  413. -static inline bool arch_irqs_disabled(void)
  414. -{
  415. - return arch_local_save_flags() == 0;
  416. -}
  417. -
  418. -#endif
  419. --- a/arch/um/include/asm/kmap_types.h
  420. +++ /dev/null
  421. @@ -1,13 +0,0 @@
  422. -/*
  423. - * Copyright (C) 2002 Jeff Dike ([email protected])
  424. - * Licensed under the GPL
  425. - */
  426. -
  427. -#ifndef __UM_KMAP_TYPES_H
  428. -#define __UM_KMAP_TYPES_H
  429. -
  430. -/* No more #include "asm/arch/kmap_types.h" ! */
  431. -
  432. -#define KM_TYPE_NR 14
  433. -
  434. -#endif
  435. --- a/arch/um/include/asm/kvm_para.h
  436. +++ /dev/null
  437. @@ -1 +0,0 @@
  438. -#include <asm-generic/kvm_para.h>
  439. --- a/arch/um/include/asm/mmu.h
  440. +++ /dev/null
  441. @@ -1,24 +0,0 @@
  442. -/*
  443. - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  444. - * Licensed under the GPL
  445. - */
  446. -
  447. -#ifndef __ARCH_UM_MMU_H
  448. -#define __ARCH_UM_MMU_H
  449. -
  450. -#include <mm_id.h>
  451. -#include <asm/mm_context.h>
  452. -
  453. -typedef struct mm_context {
  454. - struct mm_id id;
  455. - struct uml_arch_mm_context arch;
  456. - struct page *stub_pages[2];
  457. -} mm_context_t;
  458. -
  459. -extern void __switch_mm(struct mm_id * mm_idp);
  460. -
  461. -/* Avoid tangled inclusion with asm/ldt.h */
  462. -extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
  463. -extern void free_ldt(struct mm_context *mm);
  464. -
  465. -#endif
  466. --- a/arch/um/include/asm/mmu_context.h
  467. +++ /dev/null
  468. @@ -1,58 +0,0 @@
  469. -/*
  470. - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  471. - * Licensed under the GPL
  472. - */
  473. -
  474. -#ifndef __UM_MMU_CONTEXT_H
  475. -#define __UM_MMU_CONTEXT_H
  476. -
  477. -#include <linux/sched.h>
  478. -#include <asm/mmu.h>
  479. -
  480. -extern void uml_setup_stubs(struct mm_struct *mm);
  481. -extern void arch_exit_mmap(struct mm_struct *mm);
  482. -
  483. -#define deactivate_mm(tsk,mm) do { } while (0)
  484. -
  485. -extern void force_flush_all(void);
  486. -
  487. -static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
  488. -{
  489. - /*
  490. - * This is called by fs/exec.c and sys_unshare()
  491. - * when the new ->mm is used for the first time.
  492. - */
  493. - __switch_mm(&new->context.id);
  494. - down_write(&new->mmap_sem);
  495. - uml_setup_stubs(new);
  496. - up_write(&new->mmap_sem);
  497. -}
  498. -
  499. -static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  500. - struct task_struct *tsk)
  501. -{
  502. - unsigned cpu = smp_processor_id();
  503. -
  504. - if(prev != next){
  505. - cpumask_clear_cpu(cpu, mm_cpumask(prev));
  506. - cpumask_set_cpu(cpu, mm_cpumask(next));
  507. - if(next != &init_mm)
  508. - __switch_mm(&next->context.id);
  509. - }
  510. -}
  511. -
  512. -static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
  513. -{
  514. - uml_setup_stubs(mm);
  515. -}
  516. -
  517. -static inline void enter_lazy_tlb(struct mm_struct *mm,
  518. - struct task_struct *tsk)
  519. -{
  520. -}
  521. -
  522. -extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
  523. -
  524. -extern void destroy_context(struct mm_struct *mm);
  525. -
  526. -#endif
  527. --- a/arch/um/include/asm/page.h
  528. +++ /dev/null
  529. @@ -1,122 +0,0 @@
  530. -/*
  531. - * Copyright (C) 2000 - 2003 Jeff Dike ([email protected])
  532. - * Copyright 2003 PathScale, Inc.
  533. - * Licensed under the GPL
  534. - */
  535. -
  536. -#ifndef __UM_PAGE_H
  537. -#define __UM_PAGE_H
  538. -
  539. -#include <linux/const.h>
  540. -
  541. -/* PAGE_SHIFT determines the page size */
  542. -#define PAGE_SHIFT 12
  543. -#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
  544. -#define PAGE_MASK (~(PAGE_SIZE-1))
  545. -
  546. -#ifndef __ASSEMBLY__
  547. -
  548. -struct page;
  549. -
  550. -#include <linux/types.h>
  551. -#include <asm/vm-flags.h>
  552. -
  553. -/*
  554. - * These are used to make use of C type-checking..
  555. - */
  556. -
  557. -#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
  558. -#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
  559. -
  560. -#define clear_user_page(page, vaddr, pg) clear_page(page)
  561. -#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
  562. -
  563. -#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
  564. -
  565. -typedef struct { unsigned long pte_low, pte_high; } pte_t;
  566. -typedef struct { unsigned long pmd; } pmd_t;
  567. -typedef struct { unsigned long pgd; } pgd_t;
  568. -#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
  569. -
  570. -#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
  571. -#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
  572. -#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
  573. -#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
  574. - smp_wmb(); \
  575. - (to).pte_low = (from).pte_low; })
  576. -#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
  577. -#define pte_set_val(pte, phys, prot) \
  578. - ({ (pte).pte_high = (phys) >> 32; \
  579. - (pte).pte_low = (phys) | pgprot_val(prot); })
  580. -
  581. -#define pmd_val(x) ((x).pmd)
  582. -#define __pmd(x) ((pmd_t) { (x) } )
  583. -
  584. -typedef unsigned long long pfn_t;
  585. -typedef unsigned long long phys_t;
  586. -
  587. -#else
  588. -
  589. -typedef struct { unsigned long pte; } pte_t;
  590. -typedef struct { unsigned long pgd; } pgd_t;
  591. -
  592. -#ifdef CONFIG_3_LEVEL_PGTABLES
  593. -typedef struct { unsigned long pmd; } pmd_t;
  594. -#define pmd_val(x) ((x).pmd)
  595. -#define __pmd(x) ((pmd_t) { (x) } )
  596. -#endif
  597. -
  598. -#define pte_val(x) ((x).pte)
  599. -
  600. -
  601. -#define pte_get_bits(p, bits) ((p).pte & (bits))
  602. -#define pte_set_bits(p, bits) ((p).pte |= (bits))
  603. -#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
  604. -#define pte_copy(to, from) ((to).pte = (from).pte)
  605. -#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
  606. -#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
  607. -
  608. -typedef unsigned long pfn_t;
  609. -typedef unsigned long phys_t;
  610. -
  611. -#endif
  612. -
  613. -typedef struct { unsigned long pgprot; } pgprot_t;
  614. -
  615. -typedef struct page *pgtable_t;
  616. -
  617. -#define pgd_val(x) ((x).pgd)
  618. -#define pgprot_val(x) ((x).pgprot)
  619. -
  620. -#define __pte(x) ((pte_t) { (x) } )
  621. -#define __pgd(x) ((pgd_t) { (x) } )
  622. -#define __pgprot(x) ((pgprot_t) { (x) } )
  623. -
  624. -extern unsigned long uml_physmem;
  625. -
  626. -#define PAGE_OFFSET (uml_physmem)
  627. -#define KERNELBASE PAGE_OFFSET
  628. -
  629. -#define __va_space (8*1024*1024)
  630. -
  631. -#include <mem.h>
  632. -
  633. -/* Cast to unsigned long before casting to void * to avoid a warning from
  634. - * mmap_kmem about cutting a long long down to a void *. Not sure that
  635. - * casting is the right thing, but 32-bit UML can't have 64-bit virtual
  636. - * addresses
  637. - */
  638. -#define __pa(virt) to_phys((void *) (unsigned long) (virt))
  639. -#define __va(phys) to_virt((unsigned long) (phys))
  640. -
  641. -#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
  642. -#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
  643. -
  644. -#define pfn_valid(pfn) ((pfn) < max_mapnr)
  645. -#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
  646. -
  647. -#include <asm-generic/memory_model.h>
  648. -#include <asm-generic/getorder.h>
  649. -
  650. -#endif /* __ASSEMBLY__ */
  651. -#endif /* __UM_PAGE_H */
  652. --- a/arch/um/include/asm/pgalloc.h
  653. +++ /dev/null
  654. @@ -1,61 +0,0 @@
  655. -/*
  656. - * Copyright (C) 2000, 2001, 2002 Jeff Dike ([email protected])
  657. - * Copyright 2003 PathScale, Inc.
  658. - * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
  659. - * Licensed under the GPL
  660. - */
  661. -
  662. -#ifndef __UM_PGALLOC_H
  663. -#define __UM_PGALLOC_H
  664. -
  665. -#include <linux/mm.h>
  666. -
  667. -#define pmd_populate_kernel(mm, pmd, pte) \
  668. - set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
  669. -
  670. -#define pmd_populate(mm, pmd, pte) \
  671. - set_pmd(pmd, __pmd(_PAGE_TABLE + \
  672. - ((unsigned long long)page_to_pfn(pte) << \
  673. - (unsigned long long) PAGE_SHIFT)))
  674. -#define pmd_pgtable(pmd) pmd_page(pmd)
  675. -
  676. -/*
  677. - * Allocate and free page tables.
  678. - */
  679. -extern pgd_t *pgd_alloc(struct mm_struct *);
  680. -extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  681. -
  682. -extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
  683. -extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
  684. -
  685. -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  686. -{
  687. - free_page((unsigned long) pte);
  688. -}
  689. -
  690. -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
  691. -{
  692. - pgtable_page_dtor(pte);
  693. - __free_page(pte);
  694. -}
  695. -
  696. -#define __pte_free_tlb(tlb,pte, address) \
  697. -do { \
  698. - pgtable_page_dtor(pte); \
  699. - tlb_remove_page((tlb),(pte)); \
  700. -} while (0)
  701. -
  702. -#ifdef CONFIG_3_LEVEL_PGTABLES
  703. -
  704. -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
  705. -{
  706. - free_page((unsigned long)pmd);
  707. -}
  708. -
  709. -#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
  710. -#endif
  711. -
  712. -#define check_pgt_cache() do { } while (0)
  713. -
  714. -#endif
  715. -
  716. --- a/arch/um/include/asm/pgtable-2level.h
  717. +++ /dev/null
  718. @@ -1,53 +0,0 @@
  719. -/*
  720. - * Copyright (C) 2000, 2001, 2002 Jeff Dike ([email protected])
  721. - * Copyright 2003 PathScale, Inc.
  722. - * Derived from include/asm-i386/pgtable.h
  723. - * Licensed under the GPL
  724. - */
  725. -
  726. -#ifndef __UM_PGTABLE_2LEVEL_H
  727. -#define __UM_PGTABLE_2LEVEL_H
  728. -
  729. -#include <asm-generic/pgtable-nopmd.h>
  730. -
  731. -/* PGDIR_SHIFT determines what a third-level page table entry can map */
  732. -
  733. -#define PGDIR_SHIFT 22
  734. -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
  735. -#define PGDIR_MASK (~(PGDIR_SIZE-1))
  736. -
  737. -/*
  738. - * entries per page directory level: the i386 is two-level, so
  739. - * we don't really have any PMD directory physically.
  740. - */
  741. -#define PTRS_PER_PTE 1024
  742. -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
  743. -#define PTRS_PER_PGD 1024
  744. -#define FIRST_USER_ADDRESS 0
  745. -
  746. -#define pte_ERROR(e) \
  747. - printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
  748. - pte_val(e))
  749. -#define pgd_ERROR(e) \
  750. - printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
  751. - pgd_val(e))
  752. -
  753. -static inline int pgd_newpage(pgd_t pgd) { return 0; }
  754. -static inline void pgd_mkuptodate(pgd_t pgd) { }
  755. -
  756. -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
  757. -
  758. -#define pte_pfn(x) phys_to_pfn(pte_val(x))
  759. -#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
  760. -#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
  761. -
  762. -/*
  763. - * Bits 0 through 4 are taken
  764. - */
  765. -#define PTE_FILE_MAX_BITS 27
  766. -
  767. -#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
  768. -
  769. -#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
  770. -
  771. -#endif
  772. --- a/arch/um/include/asm/pgtable-3level.h
  773. +++ /dev/null
  774. @@ -1,136 +0,0 @@
  775. -/*
  776. - * Copyright 2003 PathScale Inc
  777. - * Derived from include/asm-i386/pgtable.h
  778. - * Licensed under the GPL
  779. - */
  780. -
  781. -#ifndef __UM_PGTABLE_3LEVEL_H
  782. -#define __UM_PGTABLE_3LEVEL_H
  783. -
  784. -#include <asm-generic/pgtable-nopud.h>
  785. -
  786. -/* PGDIR_SHIFT determines what a third-level page table entry can map */
  787. -
  788. -#ifdef CONFIG_64BIT
  789. -#define PGDIR_SHIFT 30
  790. -#else
  791. -#define PGDIR_SHIFT 31
  792. -#endif
  793. -#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
  794. -#define PGDIR_MASK (~(PGDIR_SIZE-1))
  795. -
  796. -/* PMD_SHIFT determines the size of the area a second-level page table can
  797. - * map
  798. - */
  799. -
  800. -#define PMD_SHIFT 21
  801. -#define PMD_SIZE (1UL << PMD_SHIFT)
  802. -#define PMD_MASK (~(PMD_SIZE-1))
  803. -
  804. -/*
  805. - * entries per page directory level
  806. - */
  807. -
  808. -#define PTRS_PER_PTE 512
  809. -#ifdef CONFIG_64BIT
  810. -#define PTRS_PER_PMD 512
  811. -#define PTRS_PER_PGD 512
  812. -#else
  813. -#define PTRS_PER_PMD 1024
  814. -#define PTRS_PER_PGD 1024
  815. -#endif
  816. -
  817. -#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
  818. -#define FIRST_USER_ADDRESS 0
  819. -
  820. -#define pte_ERROR(e) \
  821. - printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  822. - pte_val(e))
  823. -#define pmd_ERROR(e) \
  824. - printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  825. - pmd_val(e))
  826. -#define pgd_ERROR(e) \
  827. - printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  828. - pgd_val(e))
  829. -
  830. -#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
  831. -#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
  832. -#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
  833. -#define pud_populate(mm, pud, pmd) \
  834. - set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
  835. -
  836. -#ifdef CONFIG_64BIT
  837. -#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
  838. -#else
  839. -#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
  840. -#endif
  841. -
  842. -static inline int pgd_newpage(pgd_t pgd)
  843. -{
  844. - return(pgd_val(pgd) & _PAGE_NEWPAGE);
  845. -}
  846. -
  847. -static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
  848. -
  849. -#ifdef CONFIG_64BIT
  850. -#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
  851. -#else
  852. -#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
  853. -#endif
  854. -
  855. -struct mm_struct;
  856. -extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
  857. -
  858. -static inline void pud_clear (pud_t *pud)
  859. -{
  860. - set_pud(pud, __pud(_PAGE_NEWPAGE));
  861. -}
  862. -
  863. -#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
  864. -#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
  865. -
  866. -/* Find an entry in the second-level page table.. */
  867. -#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
  868. - pmd_index(address))
  869. -
  870. -static inline unsigned long pte_pfn(pte_t pte)
  871. -{
  872. - return phys_to_pfn(pte_val(pte));
  873. -}
  874. -
  875. -static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
  876. -{
  877. - pte_t pte;
  878. - phys_t phys = pfn_to_phys(page_nr);
  879. -
  880. - pte_set_val(pte, phys, pgprot);
  881. - return pte;
  882. -}
  883. -
  884. -static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
  885. -{
  886. - return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
  887. -}
  888. -
  889. -/*
  890. - * Bits 0 through 3 are taken in the low part of the pte,
  891. - * put the 32 bits of offset into the high part.
  892. - */
  893. -#define PTE_FILE_MAX_BITS 32
  894. -
  895. -#ifdef CONFIG_64BIT
  896. -
  897. -#define pte_to_pgoff(p) ((p).pte >> 32)
  898. -
  899. -#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
  900. -
  901. -#else
  902. -
  903. -#define pte_to_pgoff(pte) ((pte).pte_high)
  904. -
  905. -#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
  906. -
  907. -#endif
  908. -
  909. -#endif
  910. -
  911. --- a/arch/um/include/asm/pgtable.h
  912. +++ /dev/null
  913. @@ -1,375 +0,0 @@
  914. -/*
  915. - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  916. - * Copyright 2003 PathScale, Inc.
  917. - * Derived from include/asm-i386/pgtable.h
  918. - * Licensed under the GPL
  919. - */
  920. -
  921. -#ifndef __UM_PGTABLE_H
  922. -#define __UM_PGTABLE_H
  923. -
  924. -#include <asm/fixmap.h>
  925. -
  926. -#define _PAGE_PRESENT 0x001
  927. -#define _PAGE_NEWPAGE 0x002
  928. -#define _PAGE_NEWPROT 0x004
  929. -#define _PAGE_RW 0x020
  930. -#define _PAGE_USER 0x040
  931. -#define _PAGE_ACCESSED 0x080
  932. -#define _PAGE_DIRTY 0x100
  933. -/* If _PAGE_PRESENT is clear, we use these: */
  934. -#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
  935. -#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
  936. - pte_present gives true */
  937. -
  938. -#ifdef CONFIG_3_LEVEL_PGTABLES
  939. -#include <asm/pgtable-3level.h>
  940. -#else
  941. -#include <asm/pgtable-2level.h>
  942. -#endif
  943. -
  944. -extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  945. -
  946. -/* zero page used for uninitialized stuff */
  947. -extern unsigned long *empty_zero_page;
  948. -
  949. -#define pgtable_cache_init() do ; while (0)
  950. -
  951. -/* Just any arbitrary offset to the start of the vmalloc VM area: the
  952. - * current 8MB value just means that there will be a 8MB "hole" after the
  953. - * physical memory until the kernel virtual memory starts. That means that
  954. - * any out-of-bounds memory accesses will hopefully be caught.
  955. - * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  956. - * area for the same reason. ;)
  957. - */
  958. -
  959. -extern unsigned long end_iomem;
  960. -
  961. -#define VMALLOC_OFFSET (__va_space)
  962. -#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
  963. -#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
  964. -#ifdef CONFIG_HIGHMEM
  965. -# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
  966. -#else
  967. -# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
  968. -#endif
  969. -#define MODULES_VADDR VMALLOC_START
  970. -#define MODULES_END VMALLOC_END
  971. -#define MODULES_LEN (MODULES_VADDR - MODULES_END)
  972. -
  973. -#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
  974. -#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
  975. -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
  976. -#define __PAGE_KERNEL_EXEC \
  977. - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  978. -#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
  979. -#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
  980. -#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  981. -#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  982. -#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  983. -#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
  984. -
  985. -/*
  986. - * The i386 can't do page protection for execute, and considers that the same
  987. - * are read.
  988. - * Also, write permissions imply read permissions. This is the closest we can
  989. - * get..
  990. - */
  991. -#define __P000 PAGE_NONE
  992. -#define __P001 PAGE_READONLY
  993. -#define __P010 PAGE_COPY
  994. -#define __P011 PAGE_COPY
  995. -#define __P100 PAGE_READONLY
  996. -#define __P101 PAGE_READONLY
  997. -#define __P110 PAGE_COPY
  998. -#define __P111 PAGE_COPY
  999. -
  1000. -#define __S000 PAGE_NONE
  1001. -#define __S001 PAGE_READONLY
  1002. -#define __S010 PAGE_SHARED
  1003. -#define __S011 PAGE_SHARED
  1004. -#define __S100 PAGE_READONLY
  1005. -#define __S101 PAGE_READONLY
  1006. -#define __S110 PAGE_SHARED
  1007. -#define __S111 PAGE_SHARED
  1008. -
  1009. -/*
  1010. - * ZERO_PAGE is a global shared page that is always zero: used
  1011. - * for zero-mapped memory areas etc..
  1012. - */
  1013. -#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
  1014. -
  1015. -#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
  1016. -
  1017. -#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
  1018. -#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
  1019. -
  1020. -#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
  1021. -#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
  1022. -
  1023. -#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
  1024. -#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
  1025. -
  1026. -#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
  1027. -#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
  1028. -
  1029. -#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
  1030. -
  1031. -#define pte_page(x) pfn_to_page(pte_pfn(x))
  1032. -
  1033. -#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
  1034. -
  1035. -/*
  1036. - * =================================
  1037. - * Flags checking section.
  1038. - * =================================
  1039. - */
  1040. -
  1041. -static inline int pte_none(pte_t pte)
  1042. -{
  1043. - return pte_is_zero(pte);
  1044. -}
  1045. -
  1046. -/*
  1047. - * The following only work if pte_present() is true.
  1048. - * Undefined behaviour if not..
  1049. - */
  1050. -static inline int pte_read(pte_t pte)
  1051. -{
  1052. - return((pte_get_bits(pte, _PAGE_USER)) &&
  1053. - !(pte_get_bits(pte, _PAGE_PROTNONE)));
  1054. -}
  1055. -
  1056. -static inline int pte_exec(pte_t pte){
  1057. - return((pte_get_bits(pte, _PAGE_USER)) &&
  1058. - !(pte_get_bits(pte, _PAGE_PROTNONE)));
  1059. -}
  1060. -
  1061. -static inline int pte_write(pte_t pte)
  1062. -{
  1063. - return((pte_get_bits(pte, _PAGE_RW)) &&
  1064. - !(pte_get_bits(pte, _PAGE_PROTNONE)));
  1065. -}
  1066. -
  1067. -/*
  1068. - * The following only works if pte_present() is not true.
  1069. - */
  1070. -static inline int pte_file(pte_t pte)
  1071. -{
  1072. - return pte_get_bits(pte, _PAGE_FILE);
  1073. -}
  1074. -
  1075. -static inline int pte_dirty(pte_t pte)
  1076. -{
  1077. - return pte_get_bits(pte, _PAGE_DIRTY);
  1078. -}
  1079. -
  1080. -static inline int pte_young(pte_t pte)
  1081. -{
  1082. - return pte_get_bits(pte, _PAGE_ACCESSED);
  1083. -}
  1084. -
  1085. -static inline int pte_newpage(pte_t pte)
  1086. -{
  1087. - return pte_get_bits(pte, _PAGE_NEWPAGE);
  1088. -}
  1089. -
  1090. -static inline int pte_newprot(pte_t pte)
  1091. -{
  1092. - return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
  1093. -}
  1094. -
  1095. -static inline int pte_special(pte_t pte)
  1096. -{
  1097. - return 0;
  1098. -}
  1099. -
  1100. -/*
  1101. - * =================================
  1102. - * Flags setting section.
  1103. - * =================================
  1104. - */
  1105. -
  1106. -static inline pte_t pte_mknewprot(pte_t pte)
  1107. -{
  1108. - pte_set_bits(pte, _PAGE_NEWPROT);
  1109. - return(pte);
  1110. -}
  1111. -
  1112. -static inline pte_t pte_mkclean(pte_t pte)
  1113. -{
  1114. - pte_clear_bits(pte, _PAGE_DIRTY);
  1115. - return(pte);
  1116. -}
  1117. -
  1118. -static inline pte_t pte_mkold(pte_t pte)
  1119. -{
  1120. - pte_clear_bits(pte, _PAGE_ACCESSED);
  1121. - return(pte);
  1122. -}
  1123. -
  1124. -static inline pte_t pte_wrprotect(pte_t pte)
  1125. -{
  1126. - pte_clear_bits(pte, _PAGE_RW);
  1127. - return(pte_mknewprot(pte));
  1128. -}
  1129. -
  1130. -static inline pte_t pte_mkread(pte_t pte)
  1131. -{
  1132. - pte_set_bits(pte, _PAGE_USER);
  1133. - return(pte_mknewprot(pte));
  1134. -}
  1135. -
  1136. -static inline pte_t pte_mkdirty(pte_t pte)
  1137. -{
  1138. - pte_set_bits(pte, _PAGE_DIRTY);
  1139. - return(pte);
  1140. -}
  1141. -
  1142. -static inline pte_t pte_mkyoung(pte_t pte)
  1143. -{
  1144. - pte_set_bits(pte, _PAGE_ACCESSED);
  1145. - return(pte);
  1146. -}
  1147. -
  1148. -static inline pte_t pte_mkwrite(pte_t pte)
  1149. -{
  1150. - pte_set_bits(pte, _PAGE_RW);
  1151. - return(pte_mknewprot(pte));
  1152. -}
  1153. -
  1154. -static inline pte_t pte_mkuptodate(pte_t pte)
  1155. -{
  1156. - pte_clear_bits(pte, _PAGE_NEWPAGE);
  1157. - if(pte_present(pte))
  1158. - pte_clear_bits(pte, _PAGE_NEWPROT);
  1159. - return(pte);
  1160. -}
  1161. -
  1162. -static inline pte_t pte_mknewpage(pte_t pte)
  1163. -{
  1164. - pte_set_bits(pte, _PAGE_NEWPAGE);
  1165. - return(pte);
  1166. -}
  1167. -
  1168. -static inline pte_t pte_mkspecial(pte_t pte)
  1169. -{
  1170. - return(pte);
  1171. -}
  1172. -
  1173. -static inline void set_pte(pte_t *pteptr, pte_t pteval)
  1174. -{
  1175. - pte_copy(*pteptr, pteval);
  1176. -
  1177. - /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
  1178. - * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
  1179. - * mapped pages.
  1180. - */
  1181. -
  1182. - *pteptr = pte_mknewpage(*pteptr);
  1183. - if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
  1184. -}
  1185. -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
  1186. -
  1187. -#define __HAVE_ARCH_PTE_SAME
  1188. -static inline int pte_same(pte_t pte_a, pte_t pte_b)
  1189. -{
  1190. - return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
  1191. -}
  1192. -
  1193. -/*
  1194. - * Conversion functions: convert a page and protection to a page entry,
  1195. - * and a page entry and page directory to the page they refer to.
  1196. - */
  1197. -
  1198. -#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
  1199. -#define __virt_to_page(virt) phys_to_page(__pa(virt))
  1200. -#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
  1201. -#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
  1202. -
  1203. -#define mk_pte(page, pgprot) \
  1204. - ({ pte_t pte; \
  1205. - \
  1206. - pte_set_val(pte, page_to_phys(page), (pgprot)); \
  1207. - if (pte_present(pte)) \
  1208. - pte_mknewprot(pte_mknewpage(pte)); \
  1209. - pte;})
  1210. -
  1211. -static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  1212. -{
  1213. - pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
  1214. - return pte;
  1215. -}
  1216. -
  1217. -/*
  1218. - * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
  1219. - *
  1220. - * this macro returns the index of the entry in the pgd page which would
  1221. - * control the given virtual address
  1222. - */
  1223. -#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
  1224. -
  1225. -/*
  1226. - * pgd_offset() returns a (pgd_t *)
  1227. - * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  1228. - */
  1229. -#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
  1230. -
  1231. -/*
  1232. - * a shortcut which implies the use of the kernel's pgd, instead
  1233. - * of a process's
  1234. - */
  1235. -#define pgd_offset_k(address) pgd_offset(&init_mm, address)
  1236. -
  1237. -/*
  1238. - * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
  1239. - *
  1240. - * this macro returns the index of the entry in the pmd page which would
  1241. - * control the given virtual address
  1242. - */
  1243. -#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
  1244. -#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
  1245. -
  1246. -#define pmd_page_vaddr(pmd) \
  1247. - ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
  1248. -
  1249. -/*
  1250. - * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
  1251. - *
  1252. - * this macro returns the index of the entry in the pte page which would
  1253. - * control the given virtual address
  1254. - */
  1255. -#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
  1256. -#define pte_offset_kernel(dir, address) \
  1257. - ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
  1258. -#define pte_offset_map(dir, address) \
  1259. - ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
  1260. -#define pte_unmap(pte) do { } while (0)
  1261. -
  1262. -struct mm_struct;
  1263. -extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
  1264. -
  1265. -#define update_mmu_cache(vma,address,ptep) do ; while (0)
  1266. -
  1267. -/* Encode and de-code a swap entry */
  1268. -#define __swp_type(x) (((x).val >> 5) & 0x1f)
  1269. -#define __swp_offset(x) ((x).val >> 11)
  1270. -
  1271. -#define __swp_entry(type, offset) \
  1272. - ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
  1273. -#define __pte_to_swp_entry(pte) \
  1274. - ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
  1275. -#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
  1276. -
  1277. -#define kern_addr_valid(addr) (1)
  1278. -
  1279. -#include <asm-generic/pgtable.h>
  1280. -
  1281. -/* Clear a kernel PTE and flush it from the TLB */
  1282. -#define kpte_clear_flush(ptep, vaddr) \
  1283. -do { \
  1284. - pte_clear(&init_mm, (vaddr), (ptep)); \
  1285. - __flush_tlb_one((vaddr)); \
  1286. -} while (0)
  1287. -
  1288. -#endif
  1289. --- a/arch/um/include/asm/processor-generic.h
  1290. +++ /dev/null
  1291. @@ -1,115 +0,0 @@
  1292. -/*
  1293. - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  1294. - * Licensed under the GPL
  1295. - */
  1296. -
  1297. -#ifndef __UM_PROCESSOR_GENERIC_H
  1298. -#define __UM_PROCESSOR_GENERIC_H
  1299. -
  1300. -struct pt_regs;
  1301. -
  1302. -struct task_struct;
  1303. -
  1304. -#include <asm/ptrace.h>
  1305. -#include <registers.h>
  1306. -#include <sysdep/archsetjmp.h>
  1307. -
  1308. -#include <linux/prefetch.h>
  1309. -
  1310. -struct mm_struct;
  1311. -
  1312. -struct thread_struct {
  1313. - struct pt_regs regs;
  1314. - struct pt_regs *segv_regs;
  1315. - int singlestep_syscall;
  1316. - void *fault_addr;
  1317. - jmp_buf *fault_catcher;
  1318. - struct task_struct *prev_sched;
  1319. - struct arch_thread arch;
  1320. - jmp_buf switch_buf;
  1321. - struct {
  1322. - int op;
  1323. - union {
  1324. - struct {
  1325. - int pid;
  1326. - } fork, exec;
  1327. - struct {
  1328. - int (*proc)(void *);
  1329. - void *arg;
  1330. - } thread;
  1331. - struct {
  1332. - void (*proc)(void *);
  1333. - void *arg;
  1334. - } cb;
  1335. - } u;
  1336. - } request;
  1337. -};
  1338. -
  1339. -#define INIT_THREAD \
  1340. -{ \
  1341. - .regs = EMPTY_REGS, \
  1342. - .fault_addr = NULL, \
  1343. - .prev_sched = NULL, \
  1344. - .arch = INIT_ARCH_THREAD, \
  1345. - .request = { 0 } \
  1346. -}
  1347. -
  1348. -static inline void release_thread(struct task_struct *task)
  1349. -{
  1350. -}
  1351. -
  1352. -extern unsigned long thread_saved_pc(struct task_struct *t);
  1353. -
  1354. -static inline void mm_copy_segments(struct mm_struct *from_mm,
  1355. - struct mm_struct *new_mm)
  1356. -{
  1357. -}
  1358. -
  1359. -#define init_stack (init_thread_union.stack)
  1360. -
  1361. -/*
  1362. - * User space process size: 3GB (default).
  1363. - */
  1364. -extern unsigned long task_size;
  1365. -
  1366. -#define TASK_SIZE (task_size)
  1367. -
  1368. -#undef STACK_TOP
  1369. -#undef STACK_TOP_MAX
  1370. -
  1371. -extern unsigned long stacksizelim;
  1372. -
  1373. -#define STACK_ROOM (stacksizelim)
  1374. -#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
  1375. -#define STACK_TOP_MAX STACK_TOP
  1376. -
  1377. -/* This decides where the kernel will search for a free chunk of vm
  1378. - * space during mmap's.
  1379. - */
  1380. -#define TASK_UNMAPPED_BASE (0x40000000)
  1381. -
  1382. -extern void start_thread(struct pt_regs *regs, unsigned long entry,
  1383. - unsigned long stack);
  1384. -
  1385. -struct cpuinfo_um {
  1386. - unsigned long loops_per_jiffy;
  1387. - int ipi_pipe[2];
  1388. -};
  1389. -
  1390. -extern struct cpuinfo_um boot_cpu_data;
  1391. -
  1392. -#define my_cpu_data cpu_data[smp_processor_id()]
  1393. -
  1394. -#ifdef CONFIG_SMP
  1395. -extern struct cpuinfo_um cpu_data[];
  1396. -#define current_cpu_data cpu_data[smp_processor_id()]
  1397. -#else
  1398. -#define cpu_data (&boot_cpu_data)
  1399. -#define current_cpu_data boot_cpu_data
  1400. -#endif
  1401. -
  1402. -
  1403. -#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
  1404. -extern unsigned long get_wchan(struct task_struct *p);
  1405. -
  1406. -#endif
  1407. --- a/arch/um/include/asm/ptrace-generic.h
  1408. +++ /dev/null
  1409. @@ -1,45 +0,0 @@
  1410. -/*
  1411. - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  1412. - * Licensed under the GPL
  1413. - */
  1414. -
  1415. -#ifndef __UM_PTRACE_GENERIC_H
  1416. -#define __UM_PTRACE_GENERIC_H
  1417. -
  1418. -#ifndef __ASSEMBLY__
  1419. -
  1420. -#include <asm/ptrace-abi.h>
  1421. -#include <sysdep/ptrace.h>
  1422. -
  1423. -struct pt_regs {
  1424. - struct uml_pt_regs regs;
  1425. -};
  1426. -
  1427. -#define arch_has_single_step() (1)
  1428. -
  1429. -#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
  1430. -
  1431. -#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
  1432. -#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
  1433. -
  1434. -#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
  1435. -
  1436. -#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
  1437. -
  1438. -#define instruction_pointer(regs) PT_REGS_IP(regs)
  1439. -
  1440. -struct task_struct;
  1441. -
  1442. -extern long subarch_ptrace(struct task_struct *child, long request,
  1443. - unsigned long addr, unsigned long data);
  1444. -extern unsigned long getreg(struct task_struct *child, int regno);
  1445. -extern int putreg(struct task_struct *child, int regno, unsigned long value);
  1446. -
  1447. -extern int arch_copy_tls(struct task_struct *new);
  1448. -extern void clear_flushed_tls(struct task_struct *task);
  1449. -extern void syscall_trace_enter(struct pt_regs *regs);
  1450. -extern void syscall_trace_leave(struct pt_regs *regs);
  1451. -
  1452. -#endif
  1453. -
  1454. -#endif
  1455. --- a/arch/um/include/asm/setup.h
  1456. +++ /dev/null
  1457. @@ -1,10 +0,0 @@
  1458. -#ifndef SETUP_H_INCLUDED
  1459. -#define SETUP_H_INCLUDED
  1460. -
  1461. -/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
  1462. - * command line, so this choice is ok.
  1463. - */
  1464. -
  1465. -#define COMMAND_LINE_SIZE 4096
  1466. -
  1467. -#endif /* SETUP_H_INCLUDED */
  1468. --- a/arch/um/include/asm/smp.h
  1469. +++ /dev/null
  1470. @@ -1,32 +0,0 @@
  1471. -#ifndef __UM_SMP_H
  1472. -#define __UM_SMP_H
  1473. -
  1474. -#ifdef CONFIG_SMP
  1475. -
  1476. -#include <linux/bitops.h>
  1477. -#include <asm/current.h>
  1478. -#include <linux/cpumask.h>
  1479. -
  1480. -#define raw_smp_processor_id() (current_thread->cpu)
  1481. -
  1482. -#define cpu_logical_map(n) (n)
  1483. -#define cpu_number_map(n) (n)
  1484. -extern int hard_smp_processor_id(void);
  1485. -#define NO_PROC_ID -1
  1486. -
  1487. -extern int ncpus;
  1488. -
  1489. -
  1490. -static inline void smp_cpus_done(unsigned int maxcpus)
  1491. -{
  1492. -}
  1493. -
  1494. -extern struct task_struct *idle_threads[NR_CPUS];
  1495. -
  1496. -#else
  1497. -
  1498. -#define hard_smp_processor_id() 0
  1499. -
  1500. -#endif
  1501. -
  1502. -#endif
  1503. --- a/arch/um/include/asm/sysrq.h
  1504. +++ /dev/null
  1505. @@ -1,7 +0,0 @@
  1506. -#ifndef __UM_SYSRQ_H
  1507. -#define __UM_SYSRQ_H
  1508. -
  1509. -struct task_struct;
  1510. -extern void show_trace(struct task_struct* task, unsigned long *stack);
  1511. -
  1512. -#endif
  1513. --- a/arch/um/include/asm/thread_info.h
  1514. +++ /dev/null
  1515. @@ -1,78 +0,0 @@
  1516. -/*
  1517. - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  1518. - * Licensed under the GPL
  1519. - */
  1520. -
  1521. -#ifndef __UM_THREAD_INFO_H
  1522. -#define __UM_THREAD_INFO_H
  1523. -
  1524. -#ifndef __ASSEMBLY__
  1525. -
  1526. -#include <asm/types.h>
  1527. -#include <asm/page.h>
  1528. -#include <asm/uaccess.h>
  1529. -
  1530. -struct thread_info {
  1531. - struct task_struct *task; /* main task structure */
  1532. - struct exec_domain *exec_domain; /* execution domain */
  1533. - unsigned long flags; /* low level flags */
  1534. - __u32 cpu; /* current CPU */
  1535. - int preempt_count; /* 0 => preemptable,
  1536. - <0 => BUG */
  1537. - mm_segment_t addr_limit; /* thread address space:
  1538. - 0-0xBFFFFFFF for user
  1539. - 0-0xFFFFFFFF for kernel */
  1540. - struct restart_block restart_block;
  1541. - struct thread_info *real_thread; /* Points to non-IRQ stack */
  1542. -};
  1543. -
  1544. -#define INIT_THREAD_INFO(tsk) \
  1545. -{ \
  1546. - .task = &tsk, \
  1547. - .exec_domain = &default_exec_domain, \
  1548. - .flags = 0, \
  1549. - .cpu = 0, \
  1550. - .preempt_count = INIT_PREEMPT_COUNT, \
  1551. - .addr_limit = KERNEL_DS, \
  1552. - .restart_block = { \
  1553. - .fn = do_no_restart_syscall, \
  1554. - }, \
  1555. - .real_thread = NULL, \
  1556. -}
  1557. -
  1558. -#define init_thread_info (init_thread_union.thread_info)
  1559. -#define init_stack (init_thread_union.stack)
  1560. -
  1561. -#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
  1562. -/* how to get the thread information struct from C */
  1563. -static inline struct thread_info *current_thread_info(void)
  1564. -{
  1565. - struct thread_info *ti;
  1566. - unsigned long mask = THREAD_SIZE - 1;
  1567. - void *p;
  1568. -
  1569. - asm volatile ("" : "=r" (p) : "0" (&ti));
  1570. - ti = (struct thread_info *) (((unsigned long)p) & ~mask);
  1571. - return ti;
  1572. -}
  1573. -
  1574. -#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
  1575. -
  1576. -#endif
  1577. -
  1578. -#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
  1579. -#define TIF_SIGPENDING 1 /* signal pending */
  1580. -#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
  1581. -#define TIF_RESTART_BLOCK 4
  1582. -#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
  1583. -#define TIF_SYSCALL_AUDIT 6
  1584. -#define TIF_RESTORE_SIGMASK 7
  1585. -#define TIF_NOTIFY_RESUME 8
  1586. -
  1587. -#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
  1588. -#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
  1589. -#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
  1590. -#define _TIF_MEMDIE (1 << TIF_MEMDIE)
  1591. -#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
  1592. -
  1593. -#endif
  1594. --- a/arch/um/include/asm/timex.h
  1595. +++ /dev/null
  1596. @@ -1,13 +0,0 @@
  1597. -#ifndef __UM_TIMEX_H
  1598. -#define __UM_TIMEX_H
  1599. -
  1600. -typedef unsigned long cycles_t;
  1601. -
  1602. -static inline cycles_t get_cycles (void)
  1603. -{
  1604. - return 0;
  1605. -}
  1606. -
  1607. -#define CLOCK_TICK_RATE (HZ)
  1608. -
  1609. -#endif
  1610. --- a/arch/um/include/asm/tlb.h
  1611. +++ /dev/null
  1612. @@ -1,122 +0,0 @@
  1613. -#ifndef __UM_TLB_H
  1614. -#define __UM_TLB_H
  1615. -
  1616. -#include <linux/pagemap.h>
  1617. -#include <linux/swap.h>
  1618. -#include <asm/percpu.h>
  1619. -#include <asm/pgalloc.h>
  1620. -#include <asm/tlbflush.h>
  1621. -
  1622. -#define tlb_start_vma(tlb, vma) do { } while (0)
  1623. -#define tlb_end_vma(tlb, vma) do { } while (0)
  1624. -#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
  1625. -
  1626. -/* struct mmu_gather is an opaque type used by the mm code for passing around
  1627. - * any data needed by arch specific code for tlb_remove_page.
  1628. - */
  1629. -struct mmu_gather {
  1630. - struct mm_struct *mm;
  1631. - unsigned int need_flush; /* Really unmapped some ptes? */
  1632. - unsigned long start;
  1633. - unsigned long end;
  1634. - unsigned int fullmm; /* non-zero means full mm flush */
  1635. -};
  1636. -
  1637. -static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
  1638. - unsigned long address)
  1639. -{
  1640. - if (tlb->start > address)
  1641. - tlb->start = address;
  1642. - if (tlb->end < address + PAGE_SIZE)
  1643. - tlb->end = address + PAGE_SIZE;
  1644. -}
  1645. -
  1646. -static inline void init_tlb_gather(struct mmu_gather *tlb)
  1647. -{
  1648. - tlb->need_flush = 0;
  1649. -
  1650. - tlb->start = TASK_SIZE;
  1651. - tlb->end = 0;
  1652. -
  1653. - if (tlb->fullmm) {
  1654. - tlb->start = 0;
  1655. - tlb->end = TASK_SIZE;
  1656. - }
  1657. -}
  1658. -
  1659. -static inline void
  1660. -tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
  1661. -{
  1662. - tlb->mm = mm;
  1663. - tlb->start = start;
  1664. - tlb->end = end;
  1665. - tlb->fullmm = !(start | (end+1));
  1666. -
  1667. - init_tlb_gather(tlb);
  1668. -}
  1669. -
  1670. -extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
  1671. - unsigned long end);
  1672. -
  1673. -static inline void
  1674. -tlb_flush_mmu(struct mmu_gather *tlb)
  1675. -{
  1676. - if (!tlb->need_flush)
  1677. - return;
  1678. -
  1679. - flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
  1680. - init_tlb_gather(tlb);
  1681. -}
  1682. -
  1683. -/* tlb_finish_mmu
  1684. - * Called at the end of the shootdown operation to free up any resources
  1685. - * that were required.
  1686. - */
  1687. -static inline void
  1688. -tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  1689. -{
  1690. - tlb_flush_mmu(tlb);
  1691. -
  1692. - /* keep the page table cache within bounds */
  1693. - check_pgt_cache();
  1694. -}
  1695. -
  1696. -/* tlb_remove_page
  1697. - * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
  1698. - * while handling the additional races in SMP caused by other CPUs
  1699. - * caching valid mappings in their TLBs.
  1700. - */
  1701. -static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  1702. -{
  1703. - tlb->need_flush = 1;
  1704. - free_page_and_swap_cache(page);
  1705. - return 1; /* avoid calling tlb_flush_mmu */
  1706. -}
  1707. -
  1708. -static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  1709. -{
  1710. - __tlb_remove_page(tlb, page);
  1711. -}
  1712. -
  1713. -/**
  1714. - * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  1715. - *
  1716. - * Record the fact that pte's were really umapped in ->need_flush, so we can
  1717. - * later optimise away the tlb invalidate. This helps when userspace is
  1718. - * unmapping already-unmapped pages, which happens quite a lot.
  1719. - */
  1720. -#define tlb_remove_tlb_entry(tlb, ptep, address) \
  1721. - do { \
  1722. - tlb->need_flush = 1; \
  1723. - __tlb_remove_tlb_entry(tlb, ptep, address); \
  1724. - } while (0)
  1725. -
  1726. -#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
  1727. -
  1728. -#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
  1729. -
  1730. -#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
  1731. -
  1732. -#define tlb_migrate_finish(mm) do {} while (0)
  1733. -
  1734. -#endif
  1735. --- a/arch/um/include/asm/tlbflush.h
  1736. +++ /dev/null
  1737. @@ -1,31 +0,0 @@
  1738. -/*
  1739. - * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  1740. - * Licensed under the GPL
  1741. - */
  1742. -
  1743. -#ifndef __UM_TLBFLUSH_H
  1744. -#define __UM_TLBFLUSH_H
  1745. -
  1746. -#include <linux/mm.h>
  1747. -
  1748. -/*
  1749. - * TLB flushing:
  1750. - *
  1751. - * - flush_tlb() flushes the current mm struct TLBs
  1752. - * - flush_tlb_all() flushes all processes TLBs
  1753. - * - flush_tlb_mm(mm) flushes the specified mm context TLB's
  1754. - * - flush_tlb_page(vma, vmaddr) flushes one page
  1755. - * - flush_tlb_kernel_vm() flushes the kernel vm area
  1756. - * - flush_tlb_range(vma, start, end) flushes a range of pages
  1757. - */
  1758. -
  1759. -extern void flush_tlb_all(void);
  1760. -extern void flush_tlb_mm(struct mm_struct *mm);
  1761. -extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  1762. - unsigned long end);
  1763. -extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
  1764. -extern void flush_tlb_kernel_vm(void);
  1765. -extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  1766. -extern void __flush_tlb_one(unsigned long addr);
  1767. -
  1768. -#endif
  1769. --- a/arch/um/include/asm/uaccess.h
  1770. +++ /dev/null
  1771. @@ -1,178 +0,0 @@
  1772. -/*
  1773. - * Copyright (C) 2002 Jeff Dike ([email protected])
  1774. - * Licensed under the GPL
  1775. - */
  1776. -
  1777. -#ifndef __UM_UACCESS_H
  1778. -#define __UM_UACCESS_H
  1779. -
  1780. -/* thread_info has a mm_segment_t in it, so put the definition up here */
  1781. -typedef struct {
  1782. - unsigned long seg;
  1783. -} mm_segment_t;
  1784. -
  1785. -#include <linux/thread_info.h>
  1786. -#include <linux/errno.h>
  1787. -#include <asm/processor.h>
  1788. -#include <asm/elf.h>
  1789. -
  1790. -#define VERIFY_READ 0
  1791. -#define VERIFY_WRITE 1
  1792. -
  1793. -/*
  1794. - * The fs value determines whether argument validity checking should be
  1795. - * performed or not. If get_fs() == USER_DS, checking is performed, with
  1796. - * get_fs() == KERNEL_DS, checking is bypassed.
  1797. - *
  1798. - * For historical reasons, these macros are grossly misnamed.
  1799. - */
  1800. -
  1801. -#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
  1802. -
  1803. -#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
  1804. -#define USER_DS MAKE_MM_SEG(TASK_SIZE)
  1805. -
  1806. -#define get_ds() (KERNEL_DS)
  1807. -#define get_fs() (current_thread_info()->addr_limit)
  1808. -#define set_fs(x) (current_thread_info()->addr_limit = (x))
  1809. -
  1810. -#define segment_eq(a, b) ((a).seg == (b).seg)
  1811. -
  1812. -#define __under_task_size(addr, size) \
  1813. - (((unsigned long) (addr) < TASK_SIZE) && \
  1814. - (((unsigned long) (addr) + (size)) < TASK_SIZE))
  1815. -
  1816. -#define __access_ok_vsyscall(type, addr, size) \
  1817. - ((type == VERIFY_READ) && \
  1818. - ((unsigned long) (addr) >= FIXADDR_USER_START) && \
  1819. - ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
  1820. - ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
  1821. -
  1822. -#define __addr_range_nowrap(addr, size) \
  1823. - ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
  1824. -
  1825. -#define access_ok(type, addr, size) \
  1826. - (__addr_range_nowrap(addr, size) && \
  1827. - (__under_task_size(addr, size) || \
  1828. - __access_ok_vsyscall(type, addr, size) || \
  1829. - segment_eq(get_fs(), KERNEL_DS)))
  1830. -
  1831. -extern int copy_from_user(void *to, const void __user *from, int n);
  1832. -extern int copy_to_user(void __user *to, const void *from, int n);
  1833. -
  1834. -/*
  1835. - * strncpy_from_user: - Copy a NUL terminated string from userspace.
  1836. - * @dst: Destination address, in kernel space. This buffer must be at
  1837. - * least @count bytes long.
  1838. - * @src: Source address, in user space.
  1839. - * @count: Maximum number of bytes to copy, including the trailing NUL.
  1840. - *
  1841. - * Copies a NUL-terminated string from userspace to kernel space.
  1842. - *
  1843. - * On success, returns the length of the string (not including the trailing
  1844. - * NUL).
  1845. - *
  1846. - * If access to userspace fails, returns -EFAULT (some data may have been
  1847. - * copied).
  1848. - *
  1849. - * If @count is smaller than the length of the string, copies @count bytes
  1850. - * and returns @count.
  1851. - */
  1852. -
  1853. -extern int strncpy_from_user(char *dst, const char __user *src, int count);
  1854. -
  1855. -/*
  1856. - * __clear_user: - Zero a block of memory in user space, with less checking.
  1857. - * @to: Destination address, in user space.
  1858. - * @n: Number of bytes to zero.
  1859. - *
  1860. - * Zero a block of memory in user space. Caller must check
  1861. - * the specified block with access_ok() before calling this function.
  1862. - *
  1863. - * Returns number of bytes that could not be cleared.
  1864. - * On success, this will be zero.
  1865. - */
  1866. -extern int __clear_user(void __user *mem, int len);
  1867. -
  1868. -/*
  1869. - * clear_user: - Zero a block of memory in user space.
  1870. - * @to: Destination address, in user space.
  1871. - * @n: Number of bytes to zero.
  1872. - *
  1873. - * Zero a block of memory in user space.
  1874. - *
  1875. - * Returns number of bytes that could not be cleared.
  1876. - * On success, this will be zero.
  1877. - */
  1878. -extern int clear_user(void __user *mem, int len);
  1879. -
  1880. -/*
  1881. - * strlen_user: - Get the size of a string in user space.
  1882. - * @str: The string to measure.
  1883. - * @n: The maximum valid length
  1884. - *
  1885. - * Get the size of a NUL-terminated string in user space.
  1886. - *
  1887. - * Returns the size of the string INCLUDING the terminating NUL.
  1888. - * On exception, returns 0.
  1889. - * If the string is too long, returns a value greater than @n.
  1890. - */
  1891. -extern int strnlen_user(const void __user *str, int len);
  1892. -
  1893. -#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
  1894. -
  1895. -#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
  1896. -
  1897. -#define __copy_to_user_inatomic __copy_to_user
  1898. -#define __copy_from_user_inatomic __copy_from_user
  1899. -
  1900. -#define __get_user(x, ptr) \
  1901. -({ \
  1902. - const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
  1903. - __typeof__(x) __private_val; \
  1904. - int __private_ret = -EFAULT; \
  1905. - (x) = (__typeof__(*(__private_ptr)))0; \
  1906. - if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
  1907. - sizeof(*(__private_ptr))) == 0) { \
  1908. - (x) = (__typeof__(*(__private_ptr))) __private_val; \
  1909. - __private_ret = 0; \
  1910. - } \
  1911. - __private_ret; \
  1912. -})
  1913. -
  1914. -#define get_user(x, ptr) \
  1915. -({ \
  1916. - const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
  1917. - (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
  1918. - __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
  1919. -})
  1920. -
  1921. -#define __put_user(x, ptr) \
  1922. -({ \
  1923. - __typeof__(*(ptr)) __user *__private_ptr = ptr; \
  1924. - __typeof__(*(__private_ptr)) __private_val; \
  1925. - int __private_ret = -EFAULT; \
  1926. - __private_val = (__typeof__(*(__private_ptr))) (x); \
  1927. - if (__copy_to_user((__private_ptr), &__private_val, \
  1928. - sizeof(*(__private_ptr))) == 0) { \
  1929. - __private_ret = 0; \
  1930. - } \
  1931. - __private_ret; \
  1932. -})
  1933. -
  1934. -#define put_user(x, ptr) \
  1935. -({ \
  1936. - __typeof__(*(ptr)) __user *private_ptr = (ptr); \
  1937. - (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
  1938. - __put_user(x, private_ptr) : -EFAULT); \
  1939. -})
  1940. -
  1941. -#define strlen_user(str) strnlen_user(str, ~0U >> 1)
  1942. -
  1943. -struct exception_table_entry
  1944. -{
  1945. - unsigned long insn;
  1946. - unsigned long fixup;
  1947. -};
  1948. -
  1949. -#endif
  1950. --- /dev/null
  1951. +++ b/arch/um/include/uapi/asm/Kbuild
  1952. @@ -0,0 +1,8 @@
  1953. +generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
  1954. +generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
  1955. +generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
  1956. +generic-y += switch_to.h clkdev.h
  1957. +generic-y += trace_clock.h
  1958. +generic-y += preempt.h
  1959. +generic-y += hash.h
  1960. +generic-y += barrier.h
  1961. --- /dev/null
  1962. +++ b/arch/um/include/uapi/asm/a.out-core.h
  1963. @@ -0,0 +1,27 @@
  1964. +/* a.out coredump register dumper
  1965. + *
  1966. + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  1967. + * Written by David Howells ([email protected])
  1968. + *
  1969. + * This program is free software; you can redistribute it and/or
  1970. + * modify it under the terms of the GNU General Public Licence
  1971. + * as published by the Free Software Foundation; either version
  1972. + * 2 of the Licence, or (at your option) any later version.
  1973. + */
  1974. +
  1975. +#ifndef __UM_A_OUT_CORE_H
  1976. +#define __UM_A_OUT_CORE_H
  1977. +
  1978. +#ifdef __KERNEL__
  1979. +
  1980. +#include <linux/user.h>
  1981. +
  1982. +/*
  1983. + * fill in the user structure for an a.out core dump
  1984. + */
  1985. +static inline void aout_dump_thread(struct pt_regs *regs, struct user *u)
  1986. +{
  1987. +}
  1988. +
  1989. +#endif /* __KERNEL__ */
  1990. +#endif /* __UM_A_OUT_CORE_H */
  1991. --- /dev/null
  1992. +++ b/arch/um/include/uapi/asm/bugs.h
  1993. @@ -0,0 +1,6 @@
  1994. +#ifndef __UM_BUGS_H
  1995. +#define __UM_BUGS_H
  1996. +
  1997. +void check_bugs(void);
  1998. +
  1999. +#endif
  2000. --- /dev/null
  2001. +++ b/arch/um/include/uapi/asm/cache.h
  2002. @@ -0,0 +1,17 @@
  2003. +#ifndef __UM_CACHE_H
  2004. +#define __UM_CACHE_H
  2005. +
  2006. +
  2007. +#if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
  2008. +# define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
  2009. +#elif defined(CONFIG_UML_X86) /* 64-bit */
  2010. +# define L1_CACHE_SHIFT 6 /* Should be 7 on Intel */
  2011. +#else
  2012. +/* XXX: this was taken from x86, now it's completely random. Luckily only
  2013. + * affects SMP padding. */
  2014. +# define L1_CACHE_SHIFT 5
  2015. +#endif
  2016. +
  2017. +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
  2018. +
  2019. +#endif
  2020. --- /dev/null
  2021. +++ b/arch/um/include/uapi/asm/common.lds.S
  2022. @@ -0,0 +1,107 @@
  2023. +#include <asm-generic/vmlinux.lds.h>
  2024. +
  2025. + .fini : { *(.fini) } =0x9090
  2026. + _etext = .;
  2027. + PROVIDE (etext = .);
  2028. +
  2029. + . = ALIGN(4096);
  2030. + _sdata = .;
  2031. + PROVIDE (sdata = .);
  2032. +
  2033. + RODATA
  2034. +
  2035. + .unprotected : { *(.unprotected) }
  2036. + . = ALIGN(4096);
  2037. + PROVIDE (_unprotected_end = .);
  2038. +
  2039. + . = ALIGN(4096);
  2040. + .note : { *(.note.*) }
  2041. + EXCEPTION_TABLE(0)
  2042. +
  2043. + BUG_TABLE
  2044. +
  2045. + .uml.setup.init : {
  2046. + __uml_setup_start = .;
  2047. + *(.uml.setup.init)
  2048. + __uml_setup_end = .;
  2049. + }
  2050. +
  2051. + .uml.help.init : {
  2052. + __uml_help_start = .;
  2053. + *(.uml.help.init)
  2054. + __uml_help_end = .;
  2055. + }
  2056. +
  2057. + .uml.postsetup.init : {
  2058. + __uml_postsetup_start = .;
  2059. + *(.uml.postsetup.init)
  2060. + __uml_postsetup_end = .;
  2061. + }
  2062. +
  2063. + .init.setup : {
  2064. + INIT_SETUP(0)
  2065. + }
  2066. +
  2067. + PERCPU_SECTION(32)
  2068. +
  2069. + .initcall.init : {
  2070. + INIT_CALLS
  2071. + }
  2072. +
  2073. + .con_initcall.init : {
  2074. + CON_INITCALL
  2075. + }
  2076. +
  2077. + .uml.initcall.init : {
  2078. + __uml_initcall_start = .;
  2079. + *(.uml.initcall.init)
  2080. + __uml_initcall_end = .;
  2081. + }
  2082. +
  2083. + SECURITY_INIT
  2084. +
  2085. + .exitcall : {
  2086. + __exitcall_begin = .;
  2087. + *(.exitcall.exit)
  2088. + __exitcall_end = .;
  2089. + }
  2090. +
  2091. + .uml.exitcall : {
  2092. + __uml_exitcall_begin = .;
  2093. + *(.uml.exitcall.exit)
  2094. + __uml_exitcall_end = .;
  2095. + }
  2096. +
  2097. + . = ALIGN(4);
  2098. + .altinstructions : {
  2099. + __alt_instructions = .;
  2100. + *(.altinstructions)
  2101. + __alt_instructions_end = .;
  2102. + }
  2103. + .altinstr_replacement : { *(.altinstr_replacement) }
  2104. + /* .exit.text is discarded at runtime, not link time, to deal with references
  2105. + from .altinstructions and .eh_frame */
  2106. + .exit.text : { *(.exit.text) }
  2107. + .exit.data : { *(.exit.data) }
  2108. +
  2109. + .preinit_array : {
  2110. + __preinit_array_start = .;
  2111. + *(.preinit_array)
  2112. + __preinit_array_end = .;
  2113. + }
  2114. + .init_array : {
  2115. + __init_array_start = .;
  2116. + *(.init_array)
  2117. + __init_array_end = .;
  2118. + }
  2119. + .fini_array : {
  2120. + __fini_array_start = .;
  2121. + *(.fini_array)
  2122. + __fini_array_end = .;
  2123. + }
  2124. +
  2125. + . = ALIGN(4096);
  2126. + .init.ramfs : {
  2127. + INIT_RAM_FS
  2128. + }
  2129. +
  2130. --- /dev/null
  2131. +++ b/arch/um/include/uapi/asm/dma.h
  2132. @@ -0,0 +1,10 @@
  2133. +#ifndef __UM_DMA_H
  2134. +#define __UM_DMA_H
  2135. +
  2136. +#include <asm/io.h>
  2137. +
  2138. +extern unsigned long uml_physmem;
  2139. +
  2140. +#define MAX_DMA_ADDRESS (uml_physmem)
  2141. +
  2142. +#endif
  2143. --- /dev/null
  2144. +++ b/arch/um/include/uapi/asm/fixmap.h
  2145. @@ -0,0 +1,60 @@
  2146. +#ifndef __UM_FIXMAP_H
  2147. +#define __UM_FIXMAP_H
  2148. +
  2149. +#include <asm/processor.h>
  2150. +#include <asm/kmap_types.h>
  2151. +#include <asm/archparam.h>
  2152. +#include <asm/page.h>
  2153. +#include <linux/threads.h>
  2154. +
  2155. +/*
  2156. + * Here we define all the compile-time 'special' virtual
  2157. + * addresses. The point is to have a constant address at
  2158. + * compile time, but to set the physical address only
  2159. + * in the boot process. We allocate these special addresses
  2160. + * from the end of virtual memory (0xfffff000) backwards.
  2161. + * Also this lets us do fail-safe vmalloc(), we
  2162. + * can guarantee that these special addresses and
  2163. + * vmalloc()-ed addresses never overlap.
  2164. + *
  2165. + * these 'compile-time allocated' memory buffers are
  2166. + * fixed-size 4k pages. (or larger if used with an increment
  2167. + * higher than 1) use fixmap_set(idx,phys) to associate
  2168. + * physical memory with fixmap indices.
  2169. + *
  2170. + * TLB entries of such buffers will not be flushed across
  2171. + * task switches.
  2172. + */
  2173. +
  2174. +/*
  2175. + * on UP currently we will have no trace of the fixmap mechanism,
  2176. + * no page table allocations, etc. This might change in the
  2177. + * future, say framebuffers for the console driver(s) could be
  2178. + * fix-mapped?
  2179. + */
  2180. +enum fixed_addresses {
  2181. +#ifdef CONFIG_HIGHMEM
  2182. + FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
  2183. + FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
  2184. +#endif
  2185. + __end_of_fixed_addresses
  2186. +};
  2187. +
  2188. +extern void __set_fixmap (enum fixed_addresses idx,
  2189. + unsigned long phys, pgprot_t flags);
  2190. +
  2191. +/*
  2192. + * used by vmalloc.c.
  2193. + *
  2194. + * Leave one empty page between vmalloc'ed areas and
  2195. + * the start of the fixmap, and leave one page empty
  2196. + * at the top of mem..
  2197. + */
  2198. +
  2199. +#define FIXADDR_TOP (TASK_SIZE - 2 * PAGE_SIZE)
  2200. +#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
  2201. +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
  2202. +
  2203. +#include <asm-generic/fixmap.h>
  2204. +
  2205. +#endif
  2206. --- /dev/null
  2207. +++ b/arch/um/include/uapi/asm/irq.h
  2208. @@ -0,0 +1,23 @@
  2209. +#ifndef __UM_IRQ_H
  2210. +#define __UM_IRQ_H
  2211. +
  2212. +#define TIMER_IRQ 0
  2213. +#define UMN_IRQ 1
  2214. +#define CONSOLE_IRQ 2
  2215. +#define CONSOLE_WRITE_IRQ 3
  2216. +#define UBD_IRQ 4
  2217. +#define UM_ETH_IRQ 5
  2218. +#define SSL_IRQ 6
  2219. +#define SSL_WRITE_IRQ 7
  2220. +#define ACCEPT_IRQ 8
  2221. +#define MCONSOLE_IRQ 9
  2222. +#define WINCH_IRQ 10
  2223. +#define SIGIO_WRITE_IRQ 11
  2224. +#define TELNETD_IRQ 12
  2225. +#define XTERM_IRQ 13
  2226. +#define RANDOM_IRQ 14
  2227. +
  2228. +#define LAST_IRQ RANDOM_IRQ
  2229. +#define NR_IRQS (LAST_IRQ + 1)
  2230. +
  2231. +#endif
  2232. --- /dev/null
  2233. +++ b/arch/um/include/uapi/asm/irqflags.h
  2234. @@ -0,0 +1,42 @@
  2235. +#ifndef __UM_IRQFLAGS_H
  2236. +#define __UM_IRQFLAGS_H
  2237. +
  2238. +extern int get_signals(void);
  2239. +extern int set_signals(int enable);
  2240. +extern void block_signals(void);
  2241. +extern void unblock_signals(void);
  2242. +
  2243. +static inline unsigned long arch_local_save_flags(void)
  2244. +{
  2245. + return get_signals();
  2246. +}
  2247. +
  2248. +static inline void arch_local_irq_restore(unsigned long flags)
  2249. +{
  2250. + set_signals(flags);
  2251. +}
  2252. +
  2253. +static inline void arch_local_irq_enable(void)
  2254. +{
  2255. + unblock_signals();
  2256. +}
  2257. +
  2258. +static inline void arch_local_irq_disable(void)
  2259. +{
  2260. + block_signals();
  2261. +}
  2262. +
  2263. +static inline unsigned long arch_local_irq_save(void)
  2264. +{
  2265. + unsigned long flags;
  2266. + flags = arch_local_save_flags();
  2267. + arch_local_irq_disable();
  2268. + return flags;
  2269. +}
  2270. +
  2271. +static inline bool arch_irqs_disabled(void)
  2272. +{
  2273. + return arch_local_save_flags() == 0;
  2274. +}
  2275. +
  2276. +#endif
  2277. --- /dev/null
  2278. +++ b/arch/um/include/uapi/asm/kmap_types.h
  2279. @@ -0,0 +1,13 @@
  2280. +/*
  2281. + * Copyright (C) 2002 Jeff Dike ([email protected])
  2282. + * Licensed under the GPL
  2283. + */
  2284. +
  2285. +#ifndef __UM_KMAP_TYPES_H
  2286. +#define __UM_KMAP_TYPES_H
  2287. +
  2288. +/* No more #include "asm/arch/kmap_types.h" ! */
  2289. +
  2290. +#define KM_TYPE_NR 14
  2291. +
  2292. +#endif
  2293. --- /dev/null
  2294. +++ b/arch/um/include/uapi/asm/kvm_para.h
  2295. @@ -0,0 +1 @@
  2296. +#include <asm-generic/kvm_para.h>
  2297. --- /dev/null
  2298. +++ b/arch/um/include/uapi/asm/mmu.h
  2299. @@ -0,0 +1,24 @@
  2300. +/*
  2301. + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  2302. + * Licensed under the GPL
  2303. + */
  2304. +
  2305. +#ifndef __ARCH_UM_MMU_H
  2306. +#define __ARCH_UM_MMU_H
  2307. +
  2308. +#include <mm_id.h>
  2309. +#include <asm/mm_context.h>
  2310. +
  2311. +typedef struct mm_context {
  2312. + struct mm_id id;
  2313. + struct uml_arch_mm_context arch;
  2314. + struct page *stub_pages[2];
  2315. +} mm_context_t;
  2316. +
  2317. +extern void __switch_mm(struct mm_id * mm_idp);
  2318. +
  2319. +/* Avoid tangled inclusion with asm/ldt.h */
  2320. +extern long init_new_ldt(struct mm_context *to_mm, struct mm_context *from_mm);
  2321. +extern void free_ldt(struct mm_context *mm);
  2322. +
  2323. +#endif
  2324. --- /dev/null
  2325. +++ b/arch/um/include/uapi/asm/mmu_context.h
  2326. @@ -0,0 +1,58 @@
  2327. +/*
  2328. + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  2329. + * Licensed under the GPL
  2330. + */
  2331. +
  2332. +#ifndef __UM_MMU_CONTEXT_H
  2333. +#define __UM_MMU_CONTEXT_H
  2334. +
  2335. +#include <linux/sched.h>
  2336. +#include <asm/mmu.h>
  2337. +
  2338. +extern void uml_setup_stubs(struct mm_struct *mm);
  2339. +extern void arch_exit_mmap(struct mm_struct *mm);
  2340. +
  2341. +#define deactivate_mm(tsk,mm) do { } while (0)
  2342. +
  2343. +extern void force_flush_all(void);
  2344. +
  2345. +static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
  2346. +{
  2347. + /*
  2348. + * This is called by fs/exec.c and sys_unshare()
  2349. + * when the new ->mm is used for the first time.
  2350. + */
  2351. + __switch_mm(&new->context.id);
  2352. + down_write(&new->mmap_sem);
  2353. + uml_setup_stubs(new);
  2354. + up_write(&new->mmap_sem);
  2355. +}
  2356. +
  2357. +static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
  2358. + struct task_struct *tsk)
  2359. +{
  2360. + unsigned cpu = smp_processor_id();
  2361. +
  2362. + if(prev != next){
  2363. + cpumask_clear_cpu(cpu, mm_cpumask(prev));
  2364. + cpumask_set_cpu(cpu, mm_cpumask(next));
  2365. + if(next != &init_mm)
  2366. + __switch_mm(&next->context.id);
  2367. + }
  2368. +}
  2369. +
  2370. +static inline void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
  2371. +{
  2372. + uml_setup_stubs(mm);
  2373. +}
  2374. +
  2375. +static inline void enter_lazy_tlb(struct mm_struct *mm,
  2376. + struct task_struct *tsk)
  2377. +{
  2378. +}
  2379. +
  2380. +extern int init_new_context(struct task_struct *task, struct mm_struct *mm);
  2381. +
  2382. +extern void destroy_context(struct mm_struct *mm);
  2383. +
  2384. +#endif
  2385. --- /dev/null
  2386. +++ b/arch/um/include/uapi/asm/page.h
  2387. @@ -0,0 +1,122 @@
  2388. +/*
  2389. + * Copyright (C) 2000 - 2003 Jeff Dike ([email protected])
  2390. + * Copyright 2003 PathScale, Inc.
  2391. + * Licensed under the GPL
  2392. + */
  2393. +
  2394. +#ifndef __UM_PAGE_H
  2395. +#define __UM_PAGE_H
  2396. +
  2397. +#include <linux/const.h>
  2398. +
  2399. +/* PAGE_SHIFT determines the page size */
  2400. +#define PAGE_SHIFT 12
  2401. +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
  2402. +#define PAGE_MASK (~(PAGE_SIZE-1))
  2403. +
  2404. +#ifndef __ASSEMBLY__
  2405. +
  2406. +struct page;
  2407. +
  2408. +#include <linux/types.h>
  2409. +#include <asm/vm-flags.h>
  2410. +
  2411. +/*
  2412. + * These are used to make use of C type-checking..
  2413. + */
  2414. +
  2415. +#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
  2416. +#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
  2417. +
  2418. +#define clear_user_page(page, vaddr, pg) clear_page(page)
  2419. +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
  2420. +
  2421. +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT)
  2422. +
  2423. +typedef struct { unsigned long pte_low, pte_high; } pte_t;
  2424. +typedef struct { unsigned long pmd; } pmd_t;
  2425. +typedef struct { unsigned long pgd; } pgd_t;
  2426. +#define pte_val(x) ((x).pte_low | ((unsigned long long) (x).pte_high << 32))
  2427. +
  2428. +#define pte_get_bits(pte, bits) ((pte).pte_low & (bits))
  2429. +#define pte_set_bits(pte, bits) ((pte).pte_low |= (bits))
  2430. +#define pte_clear_bits(pte, bits) ((pte).pte_low &= ~(bits))
  2431. +#define pte_copy(to, from) ({ (to).pte_high = (from).pte_high; \
  2432. + smp_wmb(); \
  2433. + (to).pte_low = (from).pte_low; })
  2434. +#define pte_is_zero(pte) (!((pte).pte_low & ~_PAGE_NEWPAGE) && !(pte).pte_high)
  2435. +#define pte_set_val(pte, phys, prot) \
  2436. + ({ (pte).pte_high = (phys) >> 32; \
  2437. + (pte).pte_low = (phys) | pgprot_val(prot); })
  2438. +
  2439. +#define pmd_val(x) ((x).pmd)
  2440. +#define __pmd(x) ((pmd_t) { (x) } )
  2441. +
  2442. +typedef unsigned long long pfn_t;
  2443. +typedef unsigned long long phys_t;
  2444. +
  2445. +#else
  2446. +
  2447. +typedef struct { unsigned long pte; } pte_t;
  2448. +typedef struct { unsigned long pgd; } pgd_t;
  2449. +
  2450. +#ifdef CONFIG_3_LEVEL_PGTABLES
  2451. +typedef struct { unsigned long pmd; } pmd_t;
  2452. +#define pmd_val(x) ((x).pmd)
  2453. +#define __pmd(x) ((pmd_t) { (x) } )
  2454. +#endif
  2455. +
  2456. +#define pte_val(x) ((x).pte)
  2457. +
  2458. +
  2459. +#define pte_get_bits(p, bits) ((p).pte & (bits))
  2460. +#define pte_set_bits(p, bits) ((p).pte |= (bits))
  2461. +#define pte_clear_bits(p, bits) ((p).pte &= ~(bits))
  2462. +#define pte_copy(to, from) ((to).pte = (from).pte)
  2463. +#define pte_is_zero(p) (!((p).pte & ~_PAGE_NEWPAGE))
  2464. +#define pte_set_val(p, phys, prot) (p).pte = (phys | pgprot_val(prot))
  2465. +
  2466. +typedef unsigned long pfn_t;
  2467. +typedef unsigned long phys_t;
  2468. +
  2469. +#endif
  2470. +
  2471. +typedef struct { unsigned long pgprot; } pgprot_t;
  2472. +
  2473. +typedef struct page *pgtable_t;
  2474. +
  2475. +#define pgd_val(x) ((x).pgd)
  2476. +#define pgprot_val(x) ((x).pgprot)
  2477. +
  2478. +#define __pte(x) ((pte_t) { (x) } )
  2479. +#define __pgd(x) ((pgd_t) { (x) } )
  2480. +#define __pgprot(x) ((pgprot_t) { (x) } )
  2481. +
  2482. +extern unsigned long uml_physmem;
  2483. +
  2484. +#define PAGE_OFFSET (uml_physmem)
  2485. +#define KERNELBASE PAGE_OFFSET
  2486. +
  2487. +#define __va_space (8*1024*1024)
  2488. +
  2489. +#include <mem.h>
  2490. +
  2491. +/* Cast to unsigned long before casting to void * to avoid a warning from
  2492. + * mmap_kmem about cutting a long long down to a void *. Not sure that
  2493. + * casting is the right thing, but 32-bit UML can't have 64-bit virtual
  2494. + * addresses
  2495. + */
  2496. +#define __pa(virt) to_phys((void *) (unsigned long) (virt))
  2497. +#define __va(phys) to_virt((unsigned long) (phys))
  2498. +
  2499. +#define phys_to_pfn(p) ((pfn_t) ((p) >> PAGE_SHIFT))
  2500. +#define pfn_to_phys(pfn) ((phys_t) ((pfn) << PAGE_SHIFT))
  2501. +
  2502. +#define pfn_valid(pfn) ((pfn) < max_mapnr)
  2503. +#define virt_addr_valid(v) pfn_valid(phys_to_pfn(__pa(v)))
  2504. +
  2505. +#include <asm-generic/memory_model.h>
  2506. +#include <asm-generic/getorder.h>
  2507. +
  2508. +#endif /* __ASSEMBLY__ */
  2509. +#endif /* __UM_PAGE_H */
  2510. --- /dev/null
  2511. +++ b/arch/um/include/uapi/asm/pgalloc.h
  2512. @@ -0,0 +1,61 @@
  2513. +/*
  2514. + * Copyright (C) 2000, 2001, 2002 Jeff Dike ([email protected])
  2515. + * Copyright 2003 PathScale, Inc.
  2516. + * Derived from include/asm-i386/pgalloc.h and include/asm-i386/pgtable.h
  2517. + * Licensed under the GPL
  2518. + */
  2519. +
  2520. +#ifndef __UM_PGALLOC_H
  2521. +#define __UM_PGALLOC_H
  2522. +
  2523. +#include <linux/mm.h>
  2524. +
  2525. +#define pmd_populate_kernel(mm, pmd, pte) \
  2526. + set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) __pa(pte)))
  2527. +
  2528. +#define pmd_populate(mm, pmd, pte) \
  2529. + set_pmd(pmd, __pmd(_PAGE_TABLE + \
  2530. + ((unsigned long long)page_to_pfn(pte) << \
  2531. + (unsigned long long) PAGE_SHIFT)))
  2532. +#define pmd_pgtable(pmd) pmd_page(pmd)
  2533. +
  2534. +/*
  2535. + * Allocate and free page tables.
  2536. + */
  2537. +extern pgd_t *pgd_alloc(struct mm_struct *);
  2538. +extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  2539. +
  2540. +extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
  2541. +extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
  2542. +
  2543. +static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  2544. +{
  2545. + free_page((unsigned long) pte);
  2546. +}
  2547. +
  2548. +static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
  2549. +{
  2550. + pgtable_page_dtor(pte);
  2551. + __free_page(pte);
  2552. +}
  2553. +
  2554. +#define __pte_free_tlb(tlb,pte, address) \
  2555. +do { \
  2556. + pgtable_page_dtor(pte); \
  2557. + tlb_remove_page((tlb),(pte)); \
  2558. +} while (0)
  2559. +
  2560. +#ifdef CONFIG_3_LEVEL_PGTABLES
  2561. +
  2562. +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
  2563. +{
  2564. + free_page((unsigned long)pmd);
  2565. +}
  2566. +
  2567. +#define __pmd_free_tlb(tlb,x, address) tlb_remove_page((tlb),virt_to_page(x))
  2568. +#endif
  2569. +
  2570. +#define check_pgt_cache() do { } while (0)
  2571. +
  2572. +#endif
  2573. +
  2574. --- /dev/null
  2575. +++ b/arch/um/include/uapi/asm/pgtable-2level.h
  2576. @@ -0,0 +1,53 @@
  2577. +/*
  2578. + * Copyright (C) 2000, 2001, 2002 Jeff Dike ([email protected])
  2579. + * Copyright 2003 PathScale, Inc.
  2580. + * Derived from include/asm-i386/pgtable.h
  2581. + * Licensed under the GPL
  2582. + */
  2583. +
  2584. +#ifndef __UM_PGTABLE_2LEVEL_H
  2585. +#define __UM_PGTABLE_2LEVEL_H
  2586. +
  2587. +#include <asm-generic/pgtable-nopmd.h>
  2588. +
  2589. +/* PGDIR_SHIFT determines what a third-level page table entry can map */
  2590. +
  2591. +#define PGDIR_SHIFT 22
  2592. +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
  2593. +#define PGDIR_MASK (~(PGDIR_SIZE-1))
  2594. +
  2595. +/*
  2596. + * entries per page directory level: the i386 is two-level, so
  2597. + * we don't really have any PMD directory physically.
  2598. + */
  2599. +#define PTRS_PER_PTE 1024
  2600. +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
  2601. +#define PTRS_PER_PGD 1024
  2602. +#define FIRST_USER_ADDRESS 0
  2603. +
  2604. +#define pte_ERROR(e) \
  2605. + printk("%s:%d: bad pte %p(%08lx).\n", __FILE__, __LINE__, &(e), \
  2606. + pte_val(e))
  2607. +#define pgd_ERROR(e) \
  2608. + printk("%s:%d: bad pgd %p(%08lx).\n", __FILE__, __LINE__, &(e), \
  2609. + pgd_val(e))
  2610. +
  2611. +static inline int pgd_newpage(pgd_t pgd) { return 0; }
  2612. +static inline void pgd_mkuptodate(pgd_t pgd) { }
  2613. +
  2614. +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
  2615. +
  2616. +#define pte_pfn(x) phys_to_pfn(pte_val(x))
  2617. +#define pfn_pte(pfn, prot) __pte(pfn_to_phys(pfn) | pgprot_val(prot))
  2618. +#define pfn_pmd(pfn, prot) __pmd(pfn_to_phys(pfn) | pgprot_val(prot))
  2619. +
  2620. +/*
  2621. + * Bits 0 through 4 are taken
  2622. + */
  2623. +#define PTE_FILE_MAX_BITS 27
  2624. +
  2625. +#define pte_to_pgoff(pte) (pte_val(pte) >> 5)
  2626. +
  2627. +#define pgoff_to_pte(off) ((pte_t) { ((off) << 5) + _PAGE_FILE })
  2628. +
  2629. +#endif
  2630. --- /dev/null
  2631. +++ b/arch/um/include/uapi/asm/pgtable-3level.h
  2632. @@ -0,0 +1,136 @@
  2633. +/*
  2634. + * Copyright 2003 PathScale Inc
  2635. + * Derived from include/asm-i386/pgtable.h
  2636. + * Licensed under the GPL
  2637. + */
  2638. +
  2639. +#ifndef __UM_PGTABLE_3LEVEL_H
  2640. +#define __UM_PGTABLE_3LEVEL_H
  2641. +
  2642. +#include <asm-generic/pgtable-nopud.h>
  2643. +
  2644. +/* PGDIR_SHIFT determines what a third-level page table entry can map */
  2645. +
  2646. +#ifdef CONFIG_64BIT
  2647. +#define PGDIR_SHIFT 30
  2648. +#else
  2649. +#define PGDIR_SHIFT 31
  2650. +#endif
  2651. +#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
  2652. +#define PGDIR_MASK (~(PGDIR_SIZE-1))
  2653. +
  2654. +/* PMD_SHIFT determines the size of the area a second-level page table can
  2655. + * map
  2656. + */
  2657. +
  2658. +#define PMD_SHIFT 21
  2659. +#define PMD_SIZE (1UL << PMD_SHIFT)
  2660. +#define PMD_MASK (~(PMD_SIZE-1))
  2661. +
  2662. +/*
  2663. + * entries per page directory level
  2664. + */
  2665. +
  2666. +#define PTRS_PER_PTE 512
  2667. +#ifdef CONFIG_64BIT
  2668. +#define PTRS_PER_PMD 512
  2669. +#define PTRS_PER_PGD 512
  2670. +#else
  2671. +#define PTRS_PER_PMD 1024
  2672. +#define PTRS_PER_PGD 1024
  2673. +#endif
  2674. +
  2675. +#define USER_PTRS_PER_PGD ((TASK_SIZE + (PGDIR_SIZE - 1)) / PGDIR_SIZE)
  2676. +#define FIRST_USER_ADDRESS 0
  2677. +
  2678. +#define pte_ERROR(e) \
  2679. + printk("%s:%d: bad pte %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  2680. + pte_val(e))
  2681. +#define pmd_ERROR(e) \
  2682. + printk("%s:%d: bad pmd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  2683. + pmd_val(e))
  2684. +#define pgd_ERROR(e) \
  2685. + printk("%s:%d: bad pgd %p(%016lx).\n", __FILE__, __LINE__, &(e), \
  2686. + pgd_val(e))
  2687. +
  2688. +#define pud_none(x) (!(pud_val(x) & ~_PAGE_NEWPAGE))
  2689. +#define pud_bad(x) ((pud_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
  2690. +#define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
  2691. +#define pud_populate(mm, pud, pmd) \
  2692. + set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
  2693. +
  2694. +#ifdef CONFIG_64BIT
  2695. +#define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
  2696. +#else
  2697. +#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
  2698. +#endif
  2699. +
  2700. +static inline int pgd_newpage(pgd_t pgd)
  2701. +{
  2702. + return(pgd_val(pgd) & _PAGE_NEWPAGE);
  2703. +}
  2704. +
  2705. +static inline void pgd_mkuptodate(pgd_t pgd) { pgd_val(pgd) &= ~_PAGE_NEWPAGE; }
  2706. +
  2707. +#ifdef CONFIG_64BIT
  2708. +#define set_pmd(pmdptr, pmdval) set_64bit((u64 *) (pmdptr), pmd_val(pmdval))
  2709. +#else
  2710. +#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
  2711. +#endif
  2712. +
  2713. +struct mm_struct;
  2714. +extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
  2715. +
  2716. +static inline void pud_clear (pud_t *pud)
  2717. +{
  2718. + set_pud(pud, __pud(_PAGE_NEWPAGE));
  2719. +}
  2720. +
  2721. +#define pud_page(pud) phys_to_page(pud_val(pud) & PAGE_MASK)
  2722. +#define pud_page_vaddr(pud) ((unsigned long) __va(pud_val(pud) & PAGE_MASK))
  2723. +
  2724. +/* Find an entry in the second-level page table.. */
  2725. +#define pmd_offset(pud, address) ((pmd_t *) pud_page_vaddr(*(pud)) + \
  2726. + pmd_index(address))
  2727. +
  2728. +static inline unsigned long pte_pfn(pte_t pte)
  2729. +{
  2730. + return phys_to_pfn(pte_val(pte));
  2731. +}
  2732. +
  2733. +static inline pte_t pfn_pte(pfn_t page_nr, pgprot_t pgprot)
  2734. +{
  2735. + pte_t pte;
  2736. + phys_t phys = pfn_to_phys(page_nr);
  2737. +
  2738. + pte_set_val(pte, phys, pgprot);
  2739. + return pte;
  2740. +}
  2741. +
  2742. +static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot)
  2743. +{
  2744. + return __pmd((page_nr << PAGE_SHIFT) | pgprot_val(pgprot));
  2745. +}
  2746. +
  2747. +/*
  2748. + * Bits 0 through 3 are taken in the low part of the pte,
  2749. + * put the 32 bits of offset into the high part.
  2750. + */
  2751. +#define PTE_FILE_MAX_BITS 32
  2752. +
  2753. +#ifdef CONFIG_64BIT
  2754. +
  2755. +#define pte_to_pgoff(p) ((p).pte >> 32)
  2756. +
  2757. +#define pgoff_to_pte(off) ((pte_t) { ((off) << 32) | _PAGE_FILE })
  2758. +
  2759. +#else
  2760. +
  2761. +#define pte_to_pgoff(pte) ((pte).pte_high)
  2762. +
  2763. +#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
  2764. +
  2765. +#endif
  2766. +
  2767. +#endif
  2768. +
  2769. --- /dev/null
  2770. +++ b/arch/um/include/uapi/asm/pgtable.h
  2771. @@ -0,0 +1,375 @@
  2772. +/*
  2773. + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  2774. + * Copyright 2003 PathScale, Inc.
  2775. + * Derived from include/asm-i386/pgtable.h
  2776. + * Licensed under the GPL
  2777. + */
  2778. +
  2779. +#ifndef __UM_PGTABLE_H
  2780. +#define __UM_PGTABLE_H
  2781. +
  2782. +#include <asm/fixmap.h>
  2783. +
  2784. +#define _PAGE_PRESENT 0x001
  2785. +#define _PAGE_NEWPAGE 0x002
  2786. +#define _PAGE_NEWPROT 0x004
  2787. +#define _PAGE_RW 0x020
  2788. +#define _PAGE_USER 0x040
  2789. +#define _PAGE_ACCESSED 0x080
  2790. +#define _PAGE_DIRTY 0x100
  2791. +/* If _PAGE_PRESENT is clear, we use these: */
  2792. +#define _PAGE_FILE 0x008 /* nonlinear file mapping, saved PTE; unset:swap */
  2793. +#define _PAGE_PROTNONE 0x010 /* if the user mapped it with PROT_NONE;
  2794. + pte_present gives true */
  2795. +
  2796. +#ifdef CONFIG_3_LEVEL_PGTABLES
  2797. +#include <asm/pgtable-3level.h>
  2798. +#else
  2799. +#include <asm/pgtable-2level.h>
  2800. +#endif
  2801. +
  2802. +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  2803. +
  2804. +/* zero page used for uninitialized stuff */
  2805. +extern unsigned long *empty_zero_page;
  2806. +
  2807. +#define pgtable_cache_init() do ; while (0)
  2808. +
  2809. +/* Just any arbitrary offset to the start of the vmalloc VM area: the
  2810. + * current 8MB value just means that there will be a 8MB "hole" after the
  2811. + * physical memory until the kernel virtual memory starts. That means that
  2812. + * any out-of-bounds memory accesses will hopefully be caught.
  2813. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  2814. + * area for the same reason. ;)
  2815. + */
  2816. +
  2817. +extern unsigned long end_iomem;
  2818. +
  2819. +#define VMALLOC_OFFSET (__va_space)
  2820. +#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
  2821. +#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
  2822. +#ifdef CONFIG_HIGHMEM
  2823. +# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
  2824. +#else
  2825. +# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
  2826. +#endif
  2827. +#define MODULES_VADDR VMALLOC_START
  2828. +#define MODULES_END VMALLOC_END
  2829. +#define MODULES_LEN (MODULES_VADDR - MODULES_END)
  2830. +
  2831. +#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
  2832. +#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
  2833. +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
  2834. +#define __PAGE_KERNEL_EXEC \
  2835. + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  2836. +#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
  2837. +#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
  2838. +#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  2839. +#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
  2840. +#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
  2841. +#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
  2842. +
  2843. +/*
  2844. + * The i386 can't do page protection for execute, and considers that the same
  2845. + * are read.
  2846. + * Also, write permissions imply read permissions. This is the closest we can
  2847. + * get..
  2848. + */
  2849. +#define __P000 PAGE_NONE
  2850. +#define __P001 PAGE_READONLY
  2851. +#define __P010 PAGE_COPY
  2852. +#define __P011 PAGE_COPY
  2853. +#define __P100 PAGE_READONLY
  2854. +#define __P101 PAGE_READONLY
  2855. +#define __P110 PAGE_COPY
  2856. +#define __P111 PAGE_COPY
  2857. +
  2858. +#define __S000 PAGE_NONE
  2859. +#define __S001 PAGE_READONLY
  2860. +#define __S010 PAGE_SHARED
  2861. +#define __S011 PAGE_SHARED
  2862. +#define __S100 PAGE_READONLY
  2863. +#define __S101 PAGE_READONLY
  2864. +#define __S110 PAGE_SHARED
  2865. +#define __S111 PAGE_SHARED
  2866. +
  2867. +/*
  2868. + * ZERO_PAGE is a global shared page that is always zero: used
  2869. + * for zero-mapped memory areas etc..
  2870. + */
  2871. +#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
  2872. +
  2873. +#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))
  2874. +
  2875. +#define pmd_none(x) (!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
  2876. +#define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
  2877. +
  2878. +#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
  2879. +#define pmd_clear(xp) do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)
  2880. +
  2881. +#define pmd_newpage(x) (pmd_val(x) & _PAGE_NEWPAGE)
  2882. +#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)
  2883. +
  2884. +#define pud_newpage(x) (pud_val(x) & _PAGE_NEWPAGE)
  2885. +#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)
  2886. +
  2887. +#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)
  2888. +
  2889. +#define pte_page(x) pfn_to_page(pte_pfn(x))
  2890. +
  2891. +#define pte_present(x) pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
  2892. +
  2893. +/*
  2894. + * =================================
  2895. + * Flags checking section.
  2896. + * =================================
  2897. + */
  2898. +
  2899. +static inline int pte_none(pte_t pte)
  2900. +{
  2901. + return pte_is_zero(pte);
  2902. +}
  2903. +
  2904. +/*
  2905. + * The following only work if pte_present() is true.
  2906. + * Undefined behaviour if not..
  2907. + */
  2908. +static inline int pte_read(pte_t pte)
  2909. +{
  2910. + return((pte_get_bits(pte, _PAGE_USER)) &&
  2911. + !(pte_get_bits(pte, _PAGE_PROTNONE)));
  2912. +}
  2913. +
  2914. +static inline int pte_exec(pte_t pte){
  2915. + return((pte_get_bits(pte, _PAGE_USER)) &&
  2916. + !(pte_get_bits(pte, _PAGE_PROTNONE)));
  2917. +}
  2918. +
  2919. +static inline int pte_write(pte_t pte)
  2920. +{
  2921. + return((pte_get_bits(pte, _PAGE_RW)) &&
  2922. + !(pte_get_bits(pte, _PAGE_PROTNONE)));
  2923. +}
  2924. +
  2925. +/*
  2926. + * The following only works if pte_present() is not true.
  2927. + */
  2928. +static inline int pte_file(pte_t pte)
  2929. +{
  2930. + return pte_get_bits(pte, _PAGE_FILE);
  2931. +}
  2932. +
  2933. +static inline int pte_dirty(pte_t pte)
  2934. +{
  2935. + return pte_get_bits(pte, _PAGE_DIRTY);
  2936. +}
  2937. +
  2938. +static inline int pte_young(pte_t pte)
  2939. +{
  2940. + return pte_get_bits(pte, _PAGE_ACCESSED);
  2941. +}
  2942. +
  2943. +static inline int pte_newpage(pte_t pte)
  2944. +{
  2945. + return pte_get_bits(pte, _PAGE_NEWPAGE);
  2946. +}
  2947. +
  2948. +static inline int pte_newprot(pte_t pte)
  2949. +{
  2950. + return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
  2951. +}
  2952. +
  2953. +static inline int pte_special(pte_t pte)
  2954. +{
  2955. + return 0;
  2956. +}
  2957. +
  2958. +/*
  2959. + * =================================
  2960. + * Flags setting section.
  2961. + * =================================
  2962. + */
  2963. +
  2964. +static inline pte_t pte_mknewprot(pte_t pte)
  2965. +{
  2966. + pte_set_bits(pte, _PAGE_NEWPROT);
  2967. + return(pte);
  2968. +}
  2969. +
  2970. +static inline pte_t pte_mkclean(pte_t pte)
  2971. +{
  2972. + pte_clear_bits(pte, _PAGE_DIRTY);
  2973. + return(pte);
  2974. +}
  2975. +
  2976. +static inline pte_t pte_mkold(pte_t pte)
  2977. +{
  2978. + pte_clear_bits(pte, _PAGE_ACCESSED);
  2979. + return(pte);
  2980. +}
  2981. +
  2982. +static inline pte_t pte_wrprotect(pte_t pte)
  2983. +{
  2984. + pte_clear_bits(pte, _PAGE_RW);
  2985. + return(pte_mknewprot(pte));
  2986. +}
  2987. +
  2988. +static inline pte_t pte_mkread(pte_t pte)
  2989. +{
  2990. + pte_set_bits(pte, _PAGE_USER);
  2991. + return(pte_mknewprot(pte));
  2992. +}
  2993. +
  2994. +static inline pte_t pte_mkdirty(pte_t pte)
  2995. +{
  2996. + pte_set_bits(pte, _PAGE_DIRTY);
  2997. + return(pte);
  2998. +}
  2999. +
  3000. +static inline pte_t pte_mkyoung(pte_t pte)
  3001. +{
  3002. + pte_set_bits(pte, _PAGE_ACCESSED);
  3003. + return(pte);
  3004. +}
  3005. +
  3006. +static inline pte_t pte_mkwrite(pte_t pte)
  3007. +{
  3008. + pte_set_bits(pte, _PAGE_RW);
  3009. + return(pte_mknewprot(pte));
  3010. +}
  3011. +
  3012. +static inline pte_t pte_mkuptodate(pte_t pte)
  3013. +{
  3014. + pte_clear_bits(pte, _PAGE_NEWPAGE);
  3015. + if(pte_present(pte))
  3016. + pte_clear_bits(pte, _PAGE_NEWPROT);
  3017. + return(pte);
  3018. +}
  3019. +
  3020. +static inline pte_t pte_mknewpage(pte_t pte)
  3021. +{
  3022. + pte_set_bits(pte, _PAGE_NEWPAGE);
  3023. + return(pte);
  3024. +}
  3025. +
  3026. +static inline pte_t pte_mkspecial(pte_t pte)
  3027. +{
  3028. + return(pte);
  3029. +}
  3030. +
  3031. +static inline void set_pte(pte_t *pteptr, pte_t pteval)
  3032. +{
  3033. + pte_copy(*pteptr, pteval);
  3034. +
  3035. + /* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
  3036. + * fix_range knows to unmap it. _PAGE_NEWPROT is specific to
  3037. + * mapped pages.
  3038. + */
  3039. +
  3040. + *pteptr = pte_mknewpage(*pteptr);
  3041. + if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
  3042. +}
  3043. +#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
  3044. +
  3045. +#define __HAVE_ARCH_PTE_SAME
  3046. +static inline int pte_same(pte_t pte_a, pte_t pte_b)
  3047. +{
  3048. + return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
  3049. +}
  3050. +
  3051. +/*
  3052. + * Conversion functions: convert a page and protection to a page entry,
  3053. + * and a page entry and page directory to the page they refer to.
  3054. + */
  3055. +
  3056. +#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
  3057. +#define __virt_to_page(virt) phys_to_page(__pa(virt))
  3058. +#define page_to_phys(page) pfn_to_phys((pfn_t) page_to_pfn(page))
  3059. +#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)
  3060. +
  3061. +#define mk_pte(page, pgprot) \
  3062. + ({ pte_t pte; \
  3063. + \
  3064. + pte_set_val(pte, page_to_phys(page), (pgprot)); \
  3065. + if (pte_present(pte)) \
  3066. + pte_mknewprot(pte_mknewpage(pte)); \
  3067. + pte;})
  3068. +
  3069. +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
  3070. +{
  3071. + pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
  3072. + return pte;
  3073. +}
  3074. +
  3075. +/*
  3076. + * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
  3077. + *
  3078. + * this macro returns the index of the entry in the pgd page which would
  3079. + * control the given virtual address
  3080. + */
  3081. +#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
  3082. +
  3083. +/*
  3084. + * pgd_offset() returns a (pgd_t *)
  3085. + * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
  3086. + */
  3087. +#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
  3088. +
  3089. +/*
  3090. + * a shortcut which implies the use of the kernel's pgd, instead
  3091. + * of a process's
  3092. + */
  3093. +#define pgd_offset_k(address) pgd_offset(&init_mm, address)
  3094. +
  3095. +/*
  3096. + * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
  3097. + *
  3098. + * this macro returns the index of the entry in the pmd page which would
  3099. + * control the given virtual address
  3100. + */
  3101. +#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
  3102. +#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
  3103. +
  3104. +#define pmd_page_vaddr(pmd) \
  3105. + ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
  3106. +
  3107. +/*
  3108. + * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
  3109. + *
  3110. + * this macro returns the index of the entry in the pte page which would
  3111. + * control the given virtual address
  3112. + */
  3113. +#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
  3114. +#define pte_offset_kernel(dir, address) \
  3115. + ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
  3116. +#define pte_offset_map(dir, address) \
  3117. + ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
  3118. +#define pte_unmap(pte) do { } while (0)
  3119. +
  3120. +struct mm_struct;
  3121. +extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
  3122. +
  3123. +#define update_mmu_cache(vma,address,ptep) do ; while (0)
  3124. +
  3125. +/* Encode and de-code a swap entry */
  3126. +#define __swp_type(x) (((x).val >> 5) & 0x1f)
  3127. +#define __swp_offset(x) ((x).val >> 11)
  3128. +
  3129. +#define __swp_entry(type, offset) \
  3130. + ((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
  3131. +#define __pte_to_swp_entry(pte) \
  3132. + ((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
  3133. +#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
  3134. +
  3135. +#define kern_addr_valid(addr) (1)
  3136. +
  3137. +#include <asm-generic/pgtable.h>
  3138. +
  3139. +/* Clear a kernel PTE and flush it from the TLB */
  3140. +#define kpte_clear_flush(ptep, vaddr) \
  3141. +do { \
  3142. + pte_clear(&init_mm, (vaddr), (ptep)); \
  3143. + __flush_tlb_one((vaddr)); \
  3144. +} while (0)
  3145. +
  3146. +#endif
  3147. --- /dev/null
  3148. +++ b/arch/um/include/uapi/asm/processor-generic.h
  3149. @@ -0,0 +1,115 @@
  3150. +/*
  3151. + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3152. + * Licensed under the GPL
  3153. + */
  3154. +
  3155. +#ifndef __UM_PROCESSOR_GENERIC_H
  3156. +#define __UM_PROCESSOR_GENERIC_H
  3157. +
  3158. +struct pt_regs;
  3159. +
  3160. +struct task_struct;
  3161. +
  3162. +#include <asm/ptrace.h>
  3163. +#include <registers.h>
  3164. +#include <sysdep/archsetjmp.h>
  3165. +
  3166. +#include <linux/prefetch.h>
  3167. +
  3168. +struct mm_struct;
  3169. +
  3170. +struct thread_struct {
  3171. + struct pt_regs regs;
  3172. + struct pt_regs *segv_regs;
  3173. + int singlestep_syscall;
  3174. + void *fault_addr;
  3175. + jmp_buf *fault_catcher;
  3176. + struct task_struct *prev_sched;
  3177. + struct arch_thread arch;
  3178. + jmp_buf switch_buf;
  3179. + struct {
  3180. + int op;
  3181. + union {
  3182. + struct {
  3183. + int pid;
  3184. + } fork, exec;
  3185. + struct {
  3186. + int (*proc)(void *);
  3187. + void *arg;
  3188. + } thread;
  3189. + struct {
  3190. + void (*proc)(void *);
  3191. + void *arg;
  3192. + } cb;
  3193. + } u;
  3194. + } request;
  3195. +};
  3196. +
  3197. +#define INIT_THREAD \
  3198. +{ \
  3199. + .regs = EMPTY_REGS, \
  3200. + .fault_addr = NULL, \
  3201. + .prev_sched = NULL, \
  3202. + .arch = INIT_ARCH_THREAD, \
  3203. + .request = { 0 } \
  3204. +}
  3205. +
  3206. +static inline void release_thread(struct task_struct *task)
  3207. +{
  3208. +}
  3209. +
  3210. +extern unsigned long thread_saved_pc(struct task_struct *t);
  3211. +
  3212. +static inline void mm_copy_segments(struct mm_struct *from_mm,
  3213. + struct mm_struct *new_mm)
  3214. +{
  3215. +}
  3216. +
  3217. +#define init_stack (init_thread_union.stack)
  3218. +
  3219. +/*
  3220. + * User space process size: 3GB (default).
  3221. + */
  3222. +extern unsigned long task_size;
  3223. +
  3224. +#define TASK_SIZE (task_size)
  3225. +
  3226. +#undef STACK_TOP
  3227. +#undef STACK_TOP_MAX
  3228. +
  3229. +extern unsigned long stacksizelim;
  3230. +
  3231. +#define STACK_ROOM (stacksizelim)
  3232. +#define STACK_TOP (TASK_SIZE - 2 * PAGE_SIZE)
  3233. +#define STACK_TOP_MAX STACK_TOP
  3234. +
  3235. +/* This decides where the kernel will search for a free chunk of vm
  3236. + * space during mmap's.
  3237. + */
  3238. +#define TASK_UNMAPPED_BASE (0x40000000)
  3239. +
  3240. +extern void start_thread(struct pt_regs *regs, unsigned long entry,
  3241. + unsigned long stack);
  3242. +
  3243. +struct cpuinfo_um {
  3244. + unsigned long loops_per_jiffy;
  3245. + int ipi_pipe[2];
  3246. +};
  3247. +
  3248. +extern struct cpuinfo_um boot_cpu_data;
  3249. +
  3250. +#define my_cpu_data cpu_data[smp_processor_id()]
  3251. +
  3252. +#ifdef CONFIG_SMP
  3253. +extern struct cpuinfo_um cpu_data[];
  3254. +#define current_cpu_data cpu_data[smp_processor_id()]
  3255. +#else
  3256. +#define cpu_data (&boot_cpu_data)
  3257. +#define current_cpu_data boot_cpu_data
  3258. +#endif
  3259. +
  3260. +
  3261. +#define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf)
  3262. +extern unsigned long get_wchan(struct task_struct *p);
  3263. +
  3264. +#endif
  3265. --- /dev/null
  3266. +++ b/arch/um/include/uapi/asm/ptrace-generic.h
  3267. @@ -0,0 +1,45 @@
  3268. +/*
  3269. + * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3270. + * Licensed under the GPL
  3271. + */
  3272. +
  3273. +#ifndef __UM_PTRACE_GENERIC_H
  3274. +#define __UM_PTRACE_GENERIC_H
  3275. +
  3276. +#ifndef __ASSEMBLY__
  3277. +
  3278. +#include <asm/ptrace-abi.h>
  3279. +#include <sysdep/ptrace.h>
  3280. +
  3281. +struct pt_regs {
  3282. + struct uml_pt_regs regs;
  3283. +};
  3284. +
  3285. +#define arch_has_single_step() (1)
  3286. +
  3287. +#define EMPTY_REGS { .regs = EMPTY_UML_PT_REGS }
  3288. +
  3289. +#define PT_REGS_IP(r) UPT_IP(&(r)->regs)
  3290. +#define PT_REGS_SP(r) UPT_SP(&(r)->regs)
  3291. +
  3292. +#define PT_REGS_RESTART_SYSCALL(r) UPT_RESTART_SYSCALL(&(r)->regs)
  3293. +
  3294. +#define PT_REGS_SYSCALL_NR(r) UPT_SYSCALL_NR(&(r)->regs)
  3295. +
  3296. +#define instruction_pointer(regs) PT_REGS_IP(regs)
  3297. +
  3298. +struct task_struct;
  3299. +
  3300. +extern long subarch_ptrace(struct task_struct *child, long request,
  3301. + unsigned long addr, unsigned long data);
  3302. +extern unsigned long getreg(struct task_struct *child, int regno);
  3303. +extern int putreg(struct task_struct *child, int regno, unsigned long value);
  3304. +
  3305. +extern int arch_copy_tls(struct task_struct *new);
  3306. +extern void clear_flushed_tls(struct task_struct *task);
  3307. +extern void syscall_trace_enter(struct pt_regs *regs);
  3308. +extern void syscall_trace_leave(struct pt_regs *regs);
  3309. +
  3310. +#endif
  3311. +
  3312. +#endif
  3313. --- /dev/null
  3314. +++ b/arch/um/include/uapi/asm/setup.h
  3315. @@ -0,0 +1,10 @@
  3316. +#ifndef SETUP_H_INCLUDED
  3317. +#define SETUP_H_INCLUDED
  3318. +
  3319. +/* POSIX mandated with _POSIX_ARG_MAX that we can rely on 4096 chars in the
  3320. + * command line, so this choice is ok.
  3321. + */
  3322. +
  3323. +#define COMMAND_LINE_SIZE 4096
  3324. +
  3325. +#endif /* SETUP_H_INCLUDED */
  3326. --- /dev/null
  3327. +++ b/arch/um/include/uapi/asm/smp.h
  3328. @@ -0,0 +1,32 @@
  3329. +#ifndef __UM_SMP_H
  3330. +#define __UM_SMP_H
  3331. +
  3332. +#ifdef CONFIG_SMP
  3333. +
  3334. +#include <linux/bitops.h>
  3335. +#include <asm/current.h>
  3336. +#include <linux/cpumask.h>
  3337. +
  3338. +#define raw_smp_processor_id() (current_thread->cpu)
  3339. +
  3340. +#define cpu_logical_map(n) (n)
  3341. +#define cpu_number_map(n) (n)
  3342. +extern int hard_smp_processor_id(void);
  3343. +#define NO_PROC_ID -1
  3344. +
  3345. +extern int ncpus;
  3346. +
  3347. +
  3348. +static inline void smp_cpus_done(unsigned int maxcpus)
  3349. +{
  3350. +}
  3351. +
  3352. +extern struct task_struct *idle_threads[NR_CPUS];
  3353. +
  3354. +#else
  3355. +
  3356. +#define hard_smp_processor_id() 0
  3357. +
  3358. +#endif
  3359. +
  3360. +#endif
  3361. --- /dev/null
  3362. +++ b/arch/um/include/uapi/asm/sysrq.h
  3363. @@ -0,0 +1,7 @@
  3364. +#ifndef __UM_SYSRQ_H
  3365. +#define __UM_SYSRQ_H
  3366. +
  3367. +struct task_struct;
  3368. +extern void show_trace(struct task_struct* task, unsigned long *stack);
  3369. +
  3370. +#endif
  3371. --- /dev/null
  3372. +++ b/arch/um/include/uapi/asm/thread_info.h
  3373. @@ -0,0 +1,78 @@
  3374. +/*
  3375. + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3376. + * Licensed under the GPL
  3377. + */
  3378. +
  3379. +#ifndef __UM_THREAD_INFO_H
  3380. +#define __UM_THREAD_INFO_H
  3381. +
  3382. +#ifndef __ASSEMBLY__
  3383. +
  3384. +#include <asm/types.h>
  3385. +#include <asm/page.h>
  3386. +#include <asm/uaccess.h>
  3387. +
  3388. +struct thread_info {
  3389. + struct task_struct *task; /* main task structure */
  3390. + struct exec_domain *exec_domain; /* execution domain */
  3391. + unsigned long flags; /* low level flags */
  3392. + __u32 cpu; /* current CPU */
  3393. + int preempt_count; /* 0 => preemptable,
  3394. + <0 => BUG */
  3395. + mm_segment_t addr_limit; /* thread address space:
  3396. + 0-0xBFFFFFFF for user
  3397. + 0-0xFFFFFFFF for kernel */
  3398. + struct restart_block restart_block;
  3399. + struct thread_info *real_thread; /* Points to non-IRQ stack */
  3400. +};
  3401. +
  3402. +#define INIT_THREAD_INFO(tsk) \
  3403. +{ \
  3404. + .task = &tsk, \
  3405. + .exec_domain = &default_exec_domain, \
  3406. + .flags = 0, \
  3407. + .cpu = 0, \
  3408. + .preempt_count = INIT_PREEMPT_COUNT, \
  3409. + .addr_limit = KERNEL_DS, \
  3410. + .restart_block = { \
  3411. + .fn = do_no_restart_syscall, \
  3412. + }, \
  3413. + .real_thread = NULL, \
  3414. +}
  3415. +
  3416. +#define init_thread_info (init_thread_union.thread_info)
  3417. +#define init_stack (init_thread_union.stack)
  3418. +
  3419. +#define THREAD_SIZE ((1 << CONFIG_KERNEL_STACK_ORDER) * PAGE_SIZE)
  3420. +/* how to get the thread information struct from C */
  3421. +static inline struct thread_info *current_thread_info(void)
  3422. +{
  3423. + struct thread_info *ti;
  3424. + unsigned long mask = THREAD_SIZE - 1;
  3425. + void *p;
  3426. +
  3427. + asm volatile ("" : "=r" (p) : "0" (&ti));
  3428. + ti = (struct thread_info *) (((unsigned long)p) & ~mask);
  3429. + return ti;
  3430. +}
  3431. +
  3432. +#define THREAD_SIZE_ORDER CONFIG_KERNEL_STACK_ORDER
  3433. +
  3434. +#endif
  3435. +
  3436. +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
  3437. +#define TIF_SIGPENDING 1 /* signal pending */
  3438. +#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
  3439. +#define TIF_RESTART_BLOCK 4
  3440. +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
  3441. +#define TIF_SYSCALL_AUDIT 6
  3442. +#define TIF_RESTORE_SIGMASK 7
  3443. +#define TIF_NOTIFY_RESUME 8
  3444. +
  3445. +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
  3446. +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
  3447. +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
  3448. +#define _TIF_MEMDIE (1 << TIF_MEMDIE)
  3449. +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
  3450. +
  3451. +#endif
  3452. --- /dev/null
  3453. +++ b/arch/um/include/uapi/asm/timex.h
  3454. @@ -0,0 +1,13 @@
  3455. +#ifndef __UM_TIMEX_H
  3456. +#define __UM_TIMEX_H
  3457. +
  3458. +typedef unsigned long cycles_t;
  3459. +
  3460. +static inline cycles_t get_cycles (void)
  3461. +{
  3462. + return 0;
  3463. +}
  3464. +
  3465. +#define CLOCK_TICK_RATE (HZ)
  3466. +
  3467. +#endif
  3468. --- /dev/null
  3469. +++ b/arch/um/include/uapi/asm/tlb.h
  3470. @@ -0,0 +1,122 @@
  3471. +#ifndef __UM_TLB_H
  3472. +#define __UM_TLB_H
  3473. +
  3474. +#include <linux/pagemap.h>
  3475. +#include <linux/swap.h>
  3476. +#include <asm/percpu.h>
  3477. +#include <asm/pgalloc.h>
  3478. +#include <asm/tlbflush.h>
  3479. +
  3480. +#define tlb_start_vma(tlb, vma) do { } while (0)
  3481. +#define tlb_end_vma(tlb, vma) do { } while (0)
  3482. +#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
  3483. +
  3484. +/* struct mmu_gather is an opaque type used by the mm code for passing around
  3485. + * any data needed by arch specific code for tlb_remove_page.
  3486. + */
  3487. +struct mmu_gather {
  3488. + struct mm_struct *mm;
  3489. + unsigned int need_flush; /* Really unmapped some ptes? */
  3490. + unsigned long start;
  3491. + unsigned long end;
  3492. + unsigned int fullmm; /* non-zero means full mm flush */
  3493. +};
  3494. +
  3495. +static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep,
  3496. + unsigned long address)
  3497. +{
  3498. + if (tlb->start > address)
  3499. + tlb->start = address;
  3500. + if (tlb->end < address + PAGE_SIZE)
  3501. + tlb->end = address + PAGE_SIZE;
  3502. +}
  3503. +
  3504. +static inline void init_tlb_gather(struct mmu_gather *tlb)
  3505. +{
  3506. + tlb->need_flush = 0;
  3507. +
  3508. + tlb->start = TASK_SIZE;
  3509. + tlb->end = 0;
  3510. +
  3511. + if (tlb->fullmm) {
  3512. + tlb->start = 0;
  3513. + tlb->end = TASK_SIZE;
  3514. + }
  3515. +}
  3516. +
  3517. +static inline void
  3518. +tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
  3519. +{
  3520. + tlb->mm = mm;
  3521. + tlb->start = start;
  3522. + tlb->end = end;
  3523. + tlb->fullmm = !(start | (end+1));
  3524. +
  3525. + init_tlb_gather(tlb);
  3526. +}
  3527. +
  3528. +extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
  3529. + unsigned long end);
  3530. +
  3531. +static inline void
  3532. +tlb_flush_mmu(struct mmu_gather *tlb)
  3533. +{
  3534. + if (!tlb->need_flush)
  3535. + return;
  3536. +
  3537. + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end);
  3538. + init_tlb_gather(tlb);
  3539. +}
  3540. +
  3541. +/* tlb_finish_mmu
  3542. + * Called at the end of the shootdown operation to free up any resources
  3543. + * that were required.
  3544. + */
  3545. +static inline void
  3546. +tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
  3547. +{
  3548. + tlb_flush_mmu(tlb);
  3549. +
  3550. + /* keep the page table cache within bounds */
  3551. + check_pgt_cache();
  3552. +}
  3553. +
  3554. +/* tlb_remove_page
  3555. + * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)),
  3556. + * while handling the additional races in SMP caused by other CPUs
  3557. + * caching valid mappings in their TLBs.
  3558. + */
  3559. +static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  3560. +{
  3561. + tlb->need_flush = 1;
  3562. + free_page_and_swap_cache(page);
  3563. + return 1; /* avoid calling tlb_flush_mmu */
  3564. +}
  3565. +
  3566. +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
  3567. +{
  3568. + __tlb_remove_page(tlb, page);
  3569. +}
  3570. +
  3571. +/**
  3572. + * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
  3573. + *
  3574. + * Record the fact that pte's were really umapped in ->need_flush, so we can
  3575. + * later optimise away the tlb invalidate. This helps when userspace is
  3576. + * unmapping already-unmapped pages, which happens quite a lot.
  3577. + */
  3578. +#define tlb_remove_tlb_entry(tlb, ptep, address) \
  3579. + do { \
  3580. + tlb->need_flush = 1; \
  3581. + __tlb_remove_tlb_entry(tlb, ptep, address); \
  3582. + } while (0)
  3583. +
  3584. +#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
  3585. +
  3586. +#define pud_free_tlb(tlb, pudp, addr) __pud_free_tlb(tlb, pudp, addr)
  3587. +
  3588. +#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
  3589. +
  3590. +#define tlb_migrate_finish(mm) do {} while (0)
  3591. +
  3592. +#endif
  3593. --- /dev/null
  3594. +++ b/arch/um/include/uapi/asm/tlbflush.h
  3595. @@ -0,0 +1,31 @@
  3596. +/*
  3597. + * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
  3598. + * Licensed under the GPL
  3599. + */
  3600. +
  3601. +#ifndef __UM_TLBFLUSH_H
  3602. +#define __UM_TLBFLUSH_H
  3603. +
  3604. +#include <linux/mm.h>
  3605. +
  3606. +/*
  3607. + * TLB flushing:
  3608. + *
  3609. + * - flush_tlb() flushes the current mm struct TLBs
  3610. + * - flush_tlb_all() flushes all processes TLBs
  3611. + * - flush_tlb_mm(mm) flushes the specified mm context TLB's
  3612. + * - flush_tlb_page(vma, vmaddr) flushes one page
  3613. + * - flush_tlb_kernel_vm() flushes the kernel vm area
  3614. + * - flush_tlb_range(vma, start, end) flushes a range of pages
  3615. + */
  3616. +
  3617. +extern void flush_tlb_all(void);
  3618. +extern void flush_tlb_mm(struct mm_struct *mm);
  3619. +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  3620. + unsigned long end);
  3621. +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long address);
  3622. +extern void flush_tlb_kernel_vm(void);
  3623. +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
  3624. +extern void __flush_tlb_one(unsigned long addr);
  3625. +
  3626. +#endif
  3627. --- /dev/null
  3628. +++ b/arch/um/include/uapi/asm/uaccess.h
  3629. @@ -0,0 +1,178 @@
  3630. +/*
  3631. + * Copyright (C) 2002 Jeff Dike ([email protected])
  3632. + * Licensed under the GPL
  3633. + */
  3634. +
  3635. +#ifndef __UM_UACCESS_H
  3636. +#define __UM_UACCESS_H
  3637. +
  3638. +/* thread_info has a mm_segment_t in it, so put the definition up here */
  3639. +typedef struct {
  3640. + unsigned long seg;
  3641. +} mm_segment_t;
  3642. +
  3643. +#include <linux/thread_info.h>
  3644. +#include <linux/errno.h>
  3645. +#include <asm/processor.h>
  3646. +#include <asm/elf.h>
  3647. +
  3648. +#define VERIFY_READ 0
  3649. +#define VERIFY_WRITE 1
  3650. +
  3651. +/*
  3652. + * The fs value determines whether argument validity checking should be
  3653. + * performed or not. If get_fs() == USER_DS, checking is performed, with
  3654. + * get_fs() == KERNEL_DS, checking is bypassed.
  3655. + *
  3656. + * For historical reasons, these macros are grossly misnamed.
  3657. + */
  3658. +
  3659. +#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
  3660. +
  3661. +#define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
  3662. +#define USER_DS MAKE_MM_SEG(TASK_SIZE)
  3663. +
  3664. +#define get_ds() (KERNEL_DS)
  3665. +#define get_fs() (current_thread_info()->addr_limit)
  3666. +#define set_fs(x) (current_thread_info()->addr_limit = (x))
  3667. +
  3668. +#define segment_eq(a, b) ((a).seg == (b).seg)
  3669. +
  3670. +#define __under_task_size(addr, size) \
  3671. + (((unsigned long) (addr) < TASK_SIZE) && \
  3672. + (((unsigned long) (addr) + (size)) < TASK_SIZE))
  3673. +
  3674. +#define __access_ok_vsyscall(type, addr, size) \
  3675. + ((type == VERIFY_READ) && \
  3676. + ((unsigned long) (addr) >= FIXADDR_USER_START) && \
  3677. + ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \
  3678. + ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))
  3679. +
  3680. +#define __addr_range_nowrap(addr, size) \
  3681. + ((unsigned long) (addr) <= ((unsigned long) (addr) + (size)))
  3682. +
  3683. +#define access_ok(type, addr, size) \
  3684. + (__addr_range_nowrap(addr, size) && \
  3685. + (__under_task_size(addr, size) || \
  3686. + __access_ok_vsyscall(type, addr, size) || \
  3687. + segment_eq(get_fs(), KERNEL_DS)))
  3688. +
  3689. +extern int copy_from_user(void *to, const void __user *from, int n);
  3690. +extern int copy_to_user(void __user *to, const void *from, int n);
  3691. +
  3692. +/*
  3693. + * strncpy_from_user: - Copy a NUL terminated string from userspace.
  3694. + * @dst: Destination address, in kernel space. This buffer must be at
  3695. + * least @count bytes long.
  3696. + * @src: Source address, in user space.
  3697. + * @count: Maximum number of bytes to copy, including the trailing NUL.
  3698. + *
  3699. + * Copies a NUL-terminated string from userspace to kernel space.
  3700. + *
  3701. + * On success, returns the length of the string (not including the trailing
  3702. + * NUL).
  3703. + *
  3704. + * If access to userspace fails, returns -EFAULT (some data may have been
  3705. + * copied).
  3706. + *
  3707. + * If @count is smaller than the length of the string, copies @count bytes
  3708. + * and returns @count.
  3709. + */
  3710. +
  3711. +extern int strncpy_from_user(char *dst, const char __user *src, int count);
  3712. +
  3713. +/*
  3714. + * __clear_user: - Zero a block of memory in user space, with less checking.
  3715. + * @to: Destination address, in user space.
  3716. + * @n: Number of bytes to zero.
  3717. + *
  3718. + * Zero a block of memory in user space. Caller must check
  3719. + * the specified block with access_ok() before calling this function.
  3720. + *
  3721. + * Returns number of bytes that could not be cleared.
  3722. + * On success, this will be zero.
  3723. + */
  3724. +extern int __clear_user(void __user *mem, int len);
  3725. +
  3726. +/*
  3727. + * clear_user: - Zero a block of memory in user space.
  3728. + * @to: Destination address, in user space.
  3729. + * @n: Number of bytes to zero.
  3730. + *
  3731. + * Zero a block of memory in user space.
  3732. + *
  3733. + * Returns number of bytes that could not be cleared.
  3734. + * On success, this will be zero.
  3735. + */
  3736. +extern int clear_user(void __user *mem, int len);
  3737. +
  3738. +/*
  3739. + * strlen_user: - Get the size of a string in user space.
  3740. + * @str: The string to measure.
  3741. + * @n: The maximum valid length
  3742. + *
  3743. + * Get the size of a NUL-terminated string in user space.
  3744. + *
  3745. + * Returns the size of the string INCLUDING the terminating NUL.
  3746. + * On exception, returns 0.
  3747. + * If the string is too long, returns a value greater than @n.
  3748. + */
  3749. +extern int strnlen_user(const void __user *str, int len);
  3750. +
  3751. +#define __copy_from_user(to, from, n) copy_from_user(to, from, n)
  3752. +
  3753. +#define __copy_to_user(to, from, n) copy_to_user(to, from, n)
  3754. +
  3755. +#define __copy_to_user_inatomic __copy_to_user
  3756. +#define __copy_from_user_inatomic __copy_from_user
  3757. +
  3758. +#define __get_user(x, ptr) \
  3759. +({ \
  3760. + const __typeof__(*(ptr)) __user *__private_ptr = (ptr); \
  3761. + __typeof__(x) __private_val; \
  3762. + int __private_ret = -EFAULT; \
  3763. + (x) = (__typeof__(*(__private_ptr)))0; \
  3764. + if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
  3765. + sizeof(*(__private_ptr))) == 0) { \
  3766. + (x) = (__typeof__(*(__private_ptr))) __private_val; \
  3767. + __private_ret = 0; \
  3768. + } \
  3769. + __private_ret; \
  3770. +})
  3771. +
  3772. +#define get_user(x, ptr) \
  3773. +({ \
  3774. + const __typeof__((*(ptr))) __user *private_ptr = (ptr); \
  3775. + (access_ok(VERIFY_READ, private_ptr, sizeof(*private_ptr)) ? \
  3776. + __get_user(x, private_ptr) : ((x) = (__typeof__(*ptr))0, -EFAULT)); \
  3777. +})
  3778. +
  3779. +#define __put_user(x, ptr) \
  3780. +({ \
  3781. + __typeof__(*(ptr)) __user *__private_ptr = ptr; \
  3782. + __typeof__(*(__private_ptr)) __private_val; \
  3783. + int __private_ret = -EFAULT; \
  3784. + __private_val = (__typeof__(*(__private_ptr))) (x); \
  3785. + if (__copy_to_user((__private_ptr), &__private_val, \
  3786. + sizeof(*(__private_ptr))) == 0) { \
  3787. + __private_ret = 0; \
  3788. + } \
  3789. + __private_ret; \
  3790. +})
  3791. +
  3792. +#define put_user(x, ptr) \
  3793. +({ \
  3794. + __typeof__(*(ptr)) __user *private_ptr = (ptr); \
  3795. + (access_ok(VERIFY_WRITE, private_ptr, sizeof(*private_ptr)) ? \
  3796. + __put_user(x, private_ptr) : -EFAULT); \
  3797. +})
  3798. +
  3799. +#define strlen_user(str) strnlen_user(str, ~0U >> 1)
  3800. +
  3801. +struct exception_table_entry
  3802. +{
  3803. + unsigned long insn;
  3804. + unsigned long fixup;
  3805. +};
  3806. +
  3807. +#endif