000-update-to-git-2016-01-22.patch

--- a/.gitignore
+++ b/.gitignore
@@ -5,9 +5,6 @@
*.so.1
arch/*/bits/alltypes.h
config.mak
-include/bits
-tools/musl-gcc
-tools/musl-clang
-tools/ld.musl-clang
lib/musl-gcc.specs
src/internal/version.h
+/obj/
--- a/Makefile
+++ b/Makefile
@@ -8,6 +8,7 @@
# Do not make changes here.
#
+srcdir = .
exec_prefix = /usr/local
bindir = $(exec_prefix)/bin
@@ -16,31 +17,38 @@ includedir = $(prefix)/include
libdir = $(prefix)/lib
syslibdir = /lib
-SRCS = $(sort $(wildcard src/*/*.c arch/$(ARCH)/src/*.c))
-OBJS = $(SRCS:.c=.o)
+BASE_SRCS = $(sort $(wildcard $(srcdir)/src/*/*.c $(srcdir)/arch/$(ARCH)/src/*.[csS]))
+BASE_OBJS = $(patsubst $(srcdir)/%,%.o,$(basename $(BASE_SRCS)))
+ARCH_SRCS = $(wildcard $(srcdir)/src/*/$(ARCH)/*.[csS])
+ARCH_OBJS = $(patsubst $(srcdir)/%,%.o,$(basename $(ARCH_SRCS)))
+REPLACED_OBJS = $(sort $(subst /$(ARCH)/,/,$(ARCH_OBJS)))
+OBJS = $(addprefix obj/, $(filter-out $(REPLACED_OBJS), $(sort $(BASE_OBJS) $(ARCH_OBJS))))
LOBJS = $(OBJS:.o=.lo)
-GENH = include/bits/alltypes.h
-GENH_INT = src/internal/version.h
-IMPH = src/internal/stdio_impl.h src/internal/pthread_impl.h src/internal/libc.h
+GENH = obj/include/bits/alltypes.h
+GENH_INT = obj/src/internal/version.h
+IMPH = $(addprefix $(srcdir)/, src/internal/stdio_impl.h src/internal/pthread_impl.h src/internal/libc.h)
-LDFLAGS =
+LDFLAGS =
+LDFLAGS_AUTO =
LIBCC = -lgcc
CPPFLAGS =
-CFLAGS = -Os -pipe
+CFLAGS =
+CFLAGS_AUTO = -Os -pipe
CFLAGS_C99FSE = -std=c99 -ffreestanding -nostdinc
CFLAGS_ALL = $(CFLAGS_C99FSE)
-CFLAGS_ALL += -D_XOPEN_SOURCE=700 -I./arch/$(ARCH) -I./src/internal -I./include
-CFLAGS_ALL += $(CPPFLAGS) $(CFLAGS)
-CFLAGS_ALL_STATIC = $(CFLAGS_ALL)
-CFLAGS_ALL_SHARED = $(CFLAGS_ALL) -fPIC -DSHARED
+CFLAGS_ALL += -D_XOPEN_SOURCE=700 -I$(srcdir)/arch/$(ARCH) -Iobj/src/internal -I$(srcdir)/src/internal -Iobj/include -I$(srcdir)/include
+CFLAGS_ALL += $(CPPFLAGS) $(CFLAGS_AUTO) $(CFLAGS)
+
+LDFLAGS_ALL = $(LDFLAGS_AUTO) $(LDFLAGS)
AR = $(CROSS_COMPILE)ar
RANLIB = $(CROSS_COMPILE)ranlib
-INSTALL = ./tools/install.sh
+INSTALL = $(srcdir)/tools/install.sh
-ARCH_INCLUDES = $(wildcard arch/$(ARCH)/bits/*.h)
-ALL_INCLUDES = $(sort $(wildcard include/*.h include/*/*.h) $(GENH) $(ARCH_INCLUDES:arch/$(ARCH)/%=include/%))
+ARCH_INCLUDES = $(wildcard $(srcdir)/arch/$(ARCH)/bits/*.h)
+INCLUDES = $(wildcard $(srcdir)/include/*.h $(srcdir)/include/*/*.h)
+ALL_INCLUDES = $(sort $(INCLUDES:$(srcdir)/%=%) $(GENH:obj/%=%) $(ARCH_INCLUDES:$(srcdir)/arch/$(ARCH)/%=include/%))
EMPTY_LIB_NAMES = m rt pthread crypt util xnet resolv dl
EMPTY_LIBS = $(EMPTY_LIB_NAMES:%=lib/lib%.a)
@@ -49,7 +57,7 @@ STATIC_LIBS = lib/libc.a
SHARED_LIBS = lib/libc.so
TOOL_LIBS = lib/musl-gcc.specs
ALL_LIBS = $(CRT_LIBS) $(STATIC_LIBS) $(SHARED_LIBS) $(EMPTY_LIBS) $(TOOL_LIBS)
-ALL_TOOLS = tools/musl-gcc
+ALL_TOOLS = obj/musl-gcc
WRAPCC_GCC = gcc
WRAPCC_CLANG = clang
@@ -58,95 +66,93 @@ LDSO_PATHNAME = $(syslibdir)/ld-musl-$(A
-include config.mak
+ifeq ($(ARCH),)
+$(error Please set ARCH in config.mak before running make.)
+endif
+
all: $(ALL_LIBS) $(ALL_TOOLS)
+OBJ_DIRS = $(sort $(patsubst %/,%,$(dir $(ALL_LIBS) $(ALL_TOOLS) $(OBJS) $(GENH) $(GENH_INT))) $(addprefix obj/, crt crt/$(ARCH) include))
+
+$(ALL_LIBS) $(ALL_TOOLS) $(CRT_LIBS:lib/%=obj/crt/%) $(OBJS) $(LOBJS) $(GENH) $(GENH_INT): | $(OBJ_DIRS)
+
+$(OBJ_DIRS):
+ mkdir -p $@
+
install: install-libs install-headers install-tools
clean:
- rm -f crt/*.o
- rm -f $(OBJS)
- rm -f $(LOBJS)
- rm -f $(ALL_LIBS) lib/*.[ao] lib/*.so
- rm -f $(ALL_TOOLS)
- rm -f $(GENH) $(GENH_INT)
- rm -f include/bits
+ rm -rf obj lib
distclean: clean
rm -f config.mak
-include/bits:
- @test "$(ARCH)" || { echo "Please set ARCH in config.mak before running make." ; exit 1 ; }
- ln -sf ../arch/$(ARCH)/bits $@
+obj/include/bits/alltypes.h: $(srcdir)/arch/$(ARCH)/bits/alltypes.h.in $(srcdir)/include/alltypes.h.in $(srcdir)/tools/mkalltypes.sed
+ sed -f $(srcdir)/tools/mkalltypes.sed $(srcdir)/arch/$(ARCH)/bits/alltypes.h.in $(srcdir)/include/alltypes.h.in > $@
-include/bits/alltypes.h.in: include/bits
+obj/src/internal/version.h: $(wildcard $(srcdir)/VERSION $(srcdir)/.git)
+ printf '#define VERSION "%s"\n' "$$(cd $(srcdir); sh tools/version.sh)" > $@
-include/bits/alltypes.h: include/bits/alltypes.h.in include/alltypes.h.in tools/mkalltypes.sed
- sed -f tools/mkalltypes.sed include/bits/alltypes.h.in include/alltypes.h.in > $@
+obj/src/internal/version.o obj/src/internal/version.lo: obj/src/internal/version.h
-src/internal/version.h: $(wildcard VERSION .git)
- printf '#define VERSION "%s"\n' "$$(sh tools/version.sh)" > $@
+obj/crt/rcrt1.o obj/src/ldso/dlstart.lo obj/src/ldso/dynlink.lo: $(srcdir)/src/internal/dynlink.h $(srcdir)/arch/$(ARCH)/reloc.h
-src/internal/version.lo: src/internal/version.h
+obj/crt/crt1.o obj/crt/scrt1.o obj/crt/rcrt1.o obj/src/ldso/dlstart.lo: $(srcdir)/arch/$(ARCH)/crt_arch.h
-crt/rcrt1.o src/ldso/dlstart.lo src/ldso/dynlink.lo: src/internal/dynlink.h arch/$(ARCH)/reloc.h
+obj/crt/rcrt1.o: $(srcdir)/src/ldso/dlstart.c
-crt/crt1.o crt/Scrt1.o crt/rcrt1.o src/ldso/dlstart.lo: $(wildcard arch/$(ARCH)/crt_arch.h)
+obj/crt/Scrt1.o obj/crt/rcrt1.o: CFLAGS_ALL += -fPIC
-crt/rcrt1.o: src/ldso/dlstart.c
+obj/crt/$(ARCH)/crti.o: $(srcdir)/crt/$(ARCH)/crti.s
-crt/Scrt1.o crt/rcrt1.o: CFLAGS += -fPIC
+obj/crt/$(ARCH)/crtn.o: $(srcdir)/crt/$(ARCH)/crtn.s
-OPTIMIZE_SRCS = $(wildcard $(OPTIMIZE_GLOBS:%=src/%))
-$(OPTIMIZE_SRCS:%.c=%.o) $(OPTIMIZE_SRCS:%.c=%.lo): CFLAGS += -O3
+OPTIMIZE_SRCS = $(wildcard $(OPTIMIZE_GLOBS:%=$(srcdir)/src/%))
+$(OPTIMIZE_SRCS:$(srcdir)/%.c=obj/%.o) $(OPTIMIZE_SRCS:$(srcdir)/%.c=obj/%.lo): CFLAGS += -O3
MEMOPS_SRCS = src/string/memcpy.c src/string/memmove.c src/string/memcmp.c src/string/memset.c
-$(MEMOPS_SRCS:%.c=%.o) $(MEMOPS_SRCS:%.c=%.lo): CFLAGS += $(CFLAGS_MEMOPS)
+$(MEMOPS_SRCS:%.c=obj/%.o) $(MEMOPS_SRCS:%.c=obj/%.lo): CFLAGS_ALL += $(CFLAGS_MEMOPS)
NOSSP_SRCS = $(wildcard crt/*.c) \
src/env/__libc_start_main.c src/env/__init_tls.c \
src/thread/__set_thread_area.c src/env/__stack_chk_fail.c \
src/string/memset.c src/string/memcpy.c \
src/ldso/dlstart.c src/ldso/dynlink.c
-$(NOSSP_SRCS:%.c=%.o) $(NOSSP_SRCS:%.c=%.lo): CFLAGS += $(CFLAGS_NOSSP)
+$(NOSSP_SRCS:%.c=obj/%.o) $(NOSSP_SRCS:%.c=obj/%.lo): CFLAGS_ALL += $(CFLAGS_NOSSP)
+
+$(CRT_LIBS:lib/%=obj/crt/%): CFLAGS_ALL += -DCRT
-$(CRT_LIBS:lib/%=crt/%): CFLAGS += -DCRT
+$(LOBJS): CFLAGS_ALL += -fPIC -DSHARED
-# This incantation ensures that changes to any subarch asm files will
-# force the corresponding object file to be rebuilt, even if the implicit
-# rule below goes indirectly through a .sub file.
-define mkasmdep
-$(dir $(patsubst %/,%,$(dir $(1))))$(notdir $(1:.s=.o)): $(1)
-endef
-$(foreach s,$(wildcard src/*/$(ARCH)*/*.s),$(eval $(call mkasmdep,$(s))))
+CC_CMD = $(CC) $(CFLAGS_ALL) -c -o $@ $<
# Choose invocation of assembler to be used
-# $(1) is input file, $(2) is output file, $(3) is assembler flags
ifeq ($(ADD_CFI),yes)
- AS_CMD = LC_ALL=C awk -f tools/add-cfi.common.awk -f tools/add-cfi.$(ARCH).awk $< | $(CC) -x assembler -c -o $@ -
+ AS_CMD = LC_ALL=C awk -f $(srcdir)/tools/add-cfi.common.awk -f $(srcdir)/tools/add-cfi.$(ARCH).awk $< | $(CC) $(CFLAGS_ALL) -x assembler -c -o $@ -
else
- AS_CMD = $(CC) -c -o $@ $<
+ AS_CMD = $(CC_CMD)
endif
-%.o: $(ARCH)$(ASMSUBARCH)/%.sub
- $(CC) $(CFLAGS_ALL_STATIC) -c -o $@ $(dir $<)$(shell cat $<)
+obj/%.o: $(srcdir)/%.s
+ $(AS_CMD)
-%.o: $(ARCH)/%.s
- $(AS_CMD) $(CFLAGS_ALL_STATIC)
+obj/%.o: $(srcdir)/%.S
+ $(CC_CMD)
-%.o: %.c $(GENH) $(IMPH)
- $(CC) $(CFLAGS_ALL_STATIC) -c -o $@ $<
+obj/%.o: $(srcdir)/%.c $(GENH) $(IMPH)
+ $(CC_CMD)
-%.lo: $(ARCH)$(ASMSUBARCH)/%.sub
- $(CC) $(CFLAGS_ALL_SHARED) -c -o $@ $(dir $<)$(shell cat $<)
+obj/%.lo: $(srcdir)/%.s
+ $(AS_CMD)
-%.lo: $(ARCH)/%.s
- $(AS_CMD) $(CFLAGS_ALL_SHARED)
+obj/%.lo: $(srcdir)/%.S
+ $(CC_CMD)
-%.lo: %.c $(GENH) $(IMPH)
- $(CC) $(CFLAGS_ALL_SHARED) -c -o $@ $<
+obj/%.lo: $(srcdir)/%.c $(GENH) $(IMPH)
+ $(CC_CMD)
lib/libc.so: $(LOBJS)
- $(CC) $(CFLAGS_ALL_SHARED) $(LDFLAGS) -nostdlib -shared \
+ $(CC) $(CFLAGS_ALL) $(LDFLAGS_ALL) -nostdlib -shared \
-Wl,-e,_dlstart -Wl,-Bsymbolic-functions \
-o $@ $(LOBJS) $(LIBCC)
@@ -159,21 +165,27 @@ $(EMPTY_LIBS):
rm -f $@
$(AR) rc $@
-lib/%.o: crt/%.o
+lib/%.o: obj/crt/%.o
cp $< $@
-lib/musl-gcc.specs: tools/musl-gcc.specs.sh config.mak
+lib/crti.o: obj/crt/$(ARCH)/crti.o
+ cp $< $@
+
+lib/crtn.o: obj/crt/$(ARCH)/crtn.o
+ cp $< $@
+
+lib/musl-gcc.specs: $(srcdir)/tools/musl-gcc.specs.sh config.mak
sh $< "$(includedir)" "$(libdir)" "$(LDSO_PATHNAME)" > $@
-tools/musl-gcc: config.mak
+obj/musl-gcc: config.mak
printf '#!/bin/sh\nexec "$${REALGCC:-$(WRAPCC_GCC)}" "$$@" -specs "%s/musl-gcc.specs"\n' "$(libdir)" > $@
chmod +x $@
-tools/%-clang: tools/%-clang.in config.mak
+obj/%-clang: $(srcdir)/tools/%-clang.in config.mak
sed -e 's!@CC@!$(WRAPCC_CLANG)!g' -e 's!@PREFIX@!$(prefix)!g' -e 's!@INCDIR@!$(includedir)!g' -e 's!@LIBDIR@!$(libdir)!g' -e 's!@LDSO@!$(LDSO_PATHNAME)!g' $< > $@
chmod +x $@
-$(DESTDIR)$(bindir)/%: tools/%
+$(DESTDIR)$(bindir)/%: obj/%
$(INSTALL) -D $< $@
$(DESTDIR)$(libdir)/%.so: lib/%.so
@@ -182,10 +194,13 @@ $(DESTDIR)$(libdir)/%.so: lib/%.so
$(DESTDIR)$(libdir)/%: lib/%
$(INSTALL) -D -m 644 $< $@
-$(DESTDIR)$(includedir)/bits/%: arch/$(ARCH)/bits/%
+$(DESTDIR)$(includedir)/bits/%: $(srcdir)/arch/$(ARCH)/bits/%
+ $(INSTALL) -D -m 644 $< $@
+
+$(DESTDIR)$(includedir)/bits/%: obj/include/bits/%
$(INSTALL) -D -m 644 $< $@
-$(DESTDIR)$(includedir)/%: include/%
+$(DESTDIR)$(includedir)/%: $(srcdir)/include/%
$(INSTALL) -D -m 644 $< $@
$(DESTDIR)$(LDSO_PATHNAME): $(DESTDIR)$(libdir)/libc.so
@@ -195,12 +210,12 @@ install-libs: $(ALL_LIBS:lib/%=$(DESTDIR
install-headers: $(ALL_INCLUDES:include/%=$(DESTDIR)$(includedir)/%)
-install-tools: $(ALL_TOOLS:tools/%=$(DESTDIR)$(bindir)/%)
+install-tools: $(ALL_TOOLS:obj/%=$(DESTDIR)$(bindir)/%)
musl-git-%.tar.gz: .git
- git archive --format=tar.gz --prefix=$(patsubst %.tar.gz,%,$@)/ -o $@ $(patsubst musl-git-%.tar.gz,%,$@)
+ git --git-dir=$(srcdir)/.git archive --format=tar.gz --prefix=$(patsubst %.tar.gz,%,$@)/ -o $@ $(patsubst musl-git-%.tar.gz,%,$@)
musl-%.tar.gz: .git
- git archive --format=tar.gz --prefix=$(patsubst %.tar.gz,%,$@)/ -o $@ v$(patsubst musl-%.tar.gz,%,$@)
+ git --git-dir=$(srcdir)/.git archive --format=tar.gz --prefix=$(patsubst %.tar.gz,%,$@)/ -o $@ v$(patsubst musl-%.tar.gz,%,$@)
.PHONY: all clean install install-libs install-headers install-tools
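
[Editor's note: the Makefile rewrite above is what enables out-of-tree builds: every source path is prefixed with $(srcdir), all generated files land under obj/, and ARCH must come from config.mak. A rough sketch of a build configured under this scheme follows; the directory names and ARCH value are illustrative assumptions, not taken from the patch.]

# config.mak (normally written by configure; hand-written sketch)
srcdir = ../musl        # path back to the source tree
ARCH = aarch64          # required: make stops with an error if ARCH is empty
CFLAGS = -g             # user flags; probed defaults now live in CFLAGS_AUTO

# From the (otherwise empty) build directory:
#   make            # objects go to ./obj/, libraries to ./lib/
#   make install    # installs headers from both $(srcdir)/include and obj/include

[Note the CFLAGS/LDFLAGS vs. CFLAGS_AUTO/LDFLAGS_AUTO split: user-supplied flags no longer clobber the configure-detected ones.]
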
--- a/arch/aarch64/atomic.h
+++ /dev/null
@@ -1,206 +0,0 @@
-#ifndef _INTERNAL_ATOMIC_H
-#define _INTERNAL_ATOMIC_H
-
-#include <stdint.h>
-
-static inline int a_ctz_64(uint64_t x)
-{
- __asm__(
- " rbit %0, %1\n"
- " clz %0, %0\n"
- : "=r"(x) : "r"(x));
- return x;
-}
-
-static inline int a_ctz_l(unsigned long x)
-{
- return a_ctz_64(x);
-}
-
-static inline void a_barrier()
-{
- __asm__ __volatile__("dmb ish");
-}
-
-static inline void *a_cas_p(volatile void *p, void *t, void *s)
-{
- void *old;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %0,%3\n"
- " cmp %0,%1\n"
- " b.ne 1f\n"
- " stxr %w0,%2,%3\n"
- " cbnz %w0,1b\n"
- " mov %0,%1\n"
- "1: dmb ish\n"
- : "=&r"(old)
- : "r"(t), "r"(s), "Q"(*(long*)p)
- : "memory", "cc");
- return old;
-}
-
-static inline int a_cas(volatile int *p, int t, int s)
-{
- int old;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " cmp %w0,%w1\n"
- " b.ne 1f\n"
- " stxr %w0,%w2,%3\n"
- " cbnz %w0,1b\n"
- " mov %w0,%w1\n"
- "1: dmb ish\n"
- : "=&r"(old)
- : "r"(t), "r"(s), "Q"(*p)
- : "memory", "cc");
- return old;
-}
-
-static inline int a_swap(volatile int *x, int v)
-{
- int old, tmp;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " stxr %w1,%w2,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(old), "=&r"(tmp)
- : "r"(v), "Q"(*x)
- : "memory", "cc" );
- return old;
-}
-
-static inline int a_fetch_add(volatile int *x, int v)
-{
- int old, tmp;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " add %w0,%w0,%w2\n"
- " stxr %w1,%w0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(old), "=&r"(tmp)
- : "r"(v), "Q"(*x)
- : "memory", "cc" );
- return old-v;
-}
-
-static inline void a_inc(volatile int *x)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%2\n"
- " add %w0,%w0,#1\n"
- " stxr %w1,%w0,%2\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "Q"(*x)
- : "memory", "cc" );
-}
-
-static inline void a_dec(volatile int *x)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%2\n"
- " sub %w0,%w0,#1\n"
- " stxr %w1,%w0,%2\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "Q"(*x)
- : "memory", "cc" );
-}
-
-static inline void a_and_64(volatile uint64_t *p, uint64_t v)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %0,%3\n"
- " and %0,%0,%2\n"
- " stxr %w1,%0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "r"(v), "Q"(*p)
- : "memory", "cc" );
-}
-
-static inline void a_and(volatile int *p, int v)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " and %w0,%w0,%w2\n"
- " stxr %w1,%w0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "r"(v), "Q"(*p)
- : "memory", "cc" );
-}
-
-static inline void a_or_64(volatile uint64_t *p, uint64_t v)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %0,%3\n"
- " orr %0,%0,%2\n"
- " stxr %w1,%0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "r"(v), "Q"(*p)
- : "memory", "cc" );
-}
-
-static inline void a_or_l(volatile void *p, long v)
-{
- return a_or_64(p, v);
-}
-
-static inline void a_or(volatile int *p, int v)
-{
- int tmp, tmp2;
- __asm__ __volatile__(
- " dmb ish\n"
- "1: ldxr %w0,%3\n"
- " orr %w0,%w0,%w2\n"
- " stxr %w1,%w0,%3\n"
- " cbnz %w1,1b\n"
- " dmb ish\n"
- : "=&r"(tmp), "=&r"(tmp2)
- : "r"(v), "Q"(*p)
- : "memory", "cc" );
-}
-
-static inline void a_store(volatile int *p, int x)
-{
- __asm__ __volatile__(
- " dmb ish\n"
- " str %w1,%0\n"
- " dmb ish\n"
- : "=m"(*p)
- : "r"(x)
- : "memory", "cc" );
-}
-
-#define a_spin a_barrier
-
-static inline void a_crash()
-{
- *(volatile char *)0=0;
-}
-
-
-#endif
--- /dev/null
+++ b/arch/aarch64/atomic_arch.h
@@ -0,0 +1,53 @@
+#define a_ll a_ll
+static inline int a_ll(volatile int *p)
+{
+ int v;
+ __asm__ __volatile__ ("ldxr %0, %1" : "=r"(v) : "Q"(*p));
+ return v;
+}
+
+#define a_sc a_sc
+static inline int a_sc(volatile int *p, int v)
+{
+ int r;
+ __asm__ __volatile__ ("stxr %w0,%1,%2" : "=&r"(r) : "r"(v), "Q"(*p) : "memory");
+ return !r;
+}
+
+#define a_barrier a_barrier
+static inline void a_barrier()
+{
+ __asm__ __volatile__ ("dmb ish" : : : "memory");
+}
+
+#define a_pre_llsc a_barrier
+#define a_post_llsc a_barrier
+
+#define a_cas_p a_cas_p
+static inline void *a_cas_p(volatile void *p, void *t, void *s)
+{
+ void *old;
+ __asm__ __volatile__(
+ " dmb ish\n"
+ "1: ldxr %0,%3\n"
+ " cmp %0,%1\n"
+ " b.ne 1f\n"
+ " stxr %w0,%2,%3\n"
+ " cbnz %w0,1b\n"
+ " mov %0,%1\n"
+ "1: dmb ish\n"
+ : "=&r"(old)
+ : "r"(t), "r"(s), "Q"(*(void *volatile *)p)
+ : "memory", "cc");
+ return old;
+}
+
+#define a_ctz_64 a_ctz_64
+static inline int a_ctz_64(uint64_t x)
+{
+ __asm__(
+ " rbit %0, %1\n"
+ " clz %0, %0\n"
+ : "=r"(x) : "r"(x));
+ return x;
+}
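
[Editor's note: the new atomic_arch.h is deliberately minimal — the arch now exposes only its native LL/SC primitives (ldxr/stxr as a_ll/a_sc) plus barriers, and a shared generic header synthesizes the rest. A minimal sketch of that composition, modeled on musl's src/internal/atomic.h of this period (simplified, not the verbatim header):]

/* Generic-layer sketch: derive a_cas and a_swap from the
 * arch-provided a_ll/a_sc and the a_pre_llsc/a_post_llsc barriers. */
#ifndef a_cas
#define a_cas a_cas
static inline int a_cas(volatile int *p, int t, int s)
{
	int old;
	a_pre_llsc();
	do old = a_ll(p);                /* load-linked */
	while (old == t && !a_sc(p, s)); /* retry until store-conditional sticks */
	a_post_llsc();
	return old;
}
#endif

#ifndef a_swap
#define a_swap a_swap
static inline int a_swap(volatile int *p, int v)
{
	int old;
	a_pre_llsc();
	do old = a_ll(p);
	while (!a_sc(p, v));
	a_post_llsc();
	return old;
}
#endif

[With this split, deleting the 200-line per-arch atomic.h files costs nothing: each operation is generated once, with barriers placed consistently.]
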
  507. --- a/arch/aarch64/pthread_arch.h
  508. +++ b/arch/aarch64/pthread_arch.h
  509. @@ -8,4 +8,4 @@ static inline struct pthread *__pthread_
  510. #define TLS_ABOVE_TP
  511. #define TP_ADJ(p) ((char *)(p) + sizeof(struct pthread) - 16)
  512. -#define CANCEL_REG_IP 33
  513. +#define MC_PC pc
  514. --- a/arch/arm/atomic.h
  515. +++ /dev/null
  516. @@ -1,261 +0,0 @@
  517. -#ifndef _INTERNAL_ATOMIC_H
  518. -#define _INTERNAL_ATOMIC_H
  519. -
  520. -#include <stdint.h>
  521. -
  522. -static inline int a_ctz_l(unsigned long x)
  523. -{
  524. - static const char debruijn32[32] = {
  525. - 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  526. - 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  527. - };
  528. - return debruijn32[(x&-x)*0x076be629 >> 27];
  529. -}
  530. -
  531. -static inline int a_ctz_64(uint64_t x)
  532. -{
  533. - uint32_t y = x;
  534. - if (!y) {
  535. - y = x>>32;
  536. - return 32 + a_ctz_l(y);
  537. - }
  538. - return a_ctz_l(y);
  539. -}
  540. -
  541. -#if __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
  542. -
  543. -static inline void a_barrier()
  544. -{
  545. - __asm__ __volatile__("dmb ish");
  546. -}
  547. -
  548. -static inline int a_cas(volatile int *p, int t, int s)
  549. -{
  550. - int old;
  551. - __asm__ __volatile__(
  552. - " dmb ish\n"
  553. - "1: ldrex %0,%3\n"
  554. - " cmp %0,%1\n"
  555. - " bne 1f\n"
  556. - " strex %0,%2,%3\n"
  557. - " cmp %0, #0\n"
  558. - " bne 1b\n"
  559. - " mov %0, %1\n"
  560. - "1: dmb ish\n"
  561. - : "=&r"(old)
  562. - : "r"(t), "r"(s), "Q"(*p)
  563. - : "memory", "cc" );
  564. - return old;
  565. -}
  566. -
  567. -static inline int a_swap(volatile int *x, int v)
  568. -{
  569. - int old, tmp;
  570. - __asm__ __volatile__(
  571. - " dmb ish\n"
  572. - "1: ldrex %0,%3\n"
  573. - " strex %1,%2,%3\n"
  574. - " cmp %1, #0\n"
  575. - " bne 1b\n"
  576. - " dmb ish\n"
  577. - : "=&r"(old), "=&r"(tmp)
  578. - : "r"(v), "Q"(*x)
  579. - : "memory", "cc" );
  580. - return old;
  581. -}
  582. -
  583. -static inline int a_fetch_add(volatile int *x, int v)
  584. -{
  585. - int old, tmp;
  586. - __asm__ __volatile__(
  587. - " dmb ish\n"
  588. - "1: ldrex %0,%3\n"
  589. - " add %0,%0,%2\n"
  590. - " strex %1,%0,%3\n"
  591. - " cmp %1, #0\n"
  592. - " bne 1b\n"
  593. - " dmb ish\n"
  594. - : "=&r"(old), "=&r"(tmp)
  595. - : "r"(v), "Q"(*x)
  596. - : "memory", "cc" );
  597. - return old-v;
  598. -}
  599. -
  600. -static inline void a_inc(volatile int *x)
  601. -{
  602. - int tmp, tmp2;
  603. - __asm__ __volatile__(
  604. - " dmb ish\n"
  605. - "1: ldrex %0,%2\n"
  606. - " add %0,%0,#1\n"
  607. - " strex %1,%0,%2\n"
  608. - " cmp %1, #0\n"
  609. - " bne 1b\n"
  610. - " dmb ish\n"
  611. - : "=&r"(tmp), "=&r"(tmp2)
  612. - : "Q"(*x)
  613. - : "memory", "cc" );
  614. -}
  615. -
  616. -static inline void a_dec(volatile int *x)
  617. -{
  618. - int tmp, tmp2;
  619. - __asm__ __volatile__(
  620. - " dmb ish\n"
  621. - "1: ldrex %0,%2\n"
  622. - " sub %0,%0,#1\n"
  623. - " strex %1,%0,%2\n"
  624. - " cmp %1, #0\n"
  625. - " bne 1b\n"
  626. - " dmb ish\n"
  627. - : "=&r"(tmp), "=&r"(tmp2)
  628. - : "Q"(*x)
  629. - : "memory", "cc" );
  630. -}
  631. -
  632. -static inline void a_and(volatile int *x, int v)
  633. -{
  634. - int tmp, tmp2;
  635. - __asm__ __volatile__(
  636. - " dmb ish\n"
  637. - "1: ldrex %0,%3\n"
  638. - " and %0,%0,%2\n"
  639. - " strex %1,%0,%3\n"
  640. - " cmp %1, #0\n"
  641. - " bne 1b\n"
  642. - " dmb ish\n"
  643. - : "=&r"(tmp), "=&r"(tmp2)
  644. - : "r"(v), "Q"(*x)
  645. - : "memory", "cc" );
  646. -}
  647. -
  648. -static inline void a_or(volatile int *x, int v)
  649. -{
  650. - int tmp, tmp2;
  651. - __asm__ __volatile__(
  652. - " dmb ish\n"
  653. - "1: ldrex %0,%3\n"
  654. - " orr %0,%0,%2\n"
  655. - " strex %1,%0,%3\n"
  656. - " cmp %1, #0\n"
  657. - " bne 1b\n"
  658. - " dmb ish\n"
  659. - : "=&r"(tmp), "=&r"(tmp2)
  660. - : "r"(v), "Q"(*x)
  661. - : "memory", "cc" );
  662. -}
  663. -
  664. -static inline void a_store(volatile int *p, int x)
  665. -{
  666. - __asm__ __volatile__(
  667. - " dmb ish\n"
  668. - " str %1,%0\n"
  669. - " dmb ish\n"
  670. - : "=m"(*p)
  671. - : "r"(x)
  672. - : "memory", "cc" );
  673. -}
  674. -
  675. -#else
  676. -
  677. -int __a_cas(int, int, volatile int *) __attribute__((__visibility__("hidden")));
  678. -#define __k_cas __a_cas
  679. -
  680. -static inline void a_barrier()
  681. -{
  682. - __asm__ __volatile__("bl __a_barrier"
  683. - : : : "memory", "cc", "ip", "lr" );
  684. -}
  685. -
  686. -static inline int a_cas(volatile int *p, int t, int s)
  687. -{
  688. - int old;
  689. - for (;;) {
  690. - if (!__k_cas(t, s, p))
  691. - return t;
  692. - if ((old=*p) != t)
  693. - return old;
  694. - }
  695. -}
  696. -
  697. -static inline int a_swap(volatile int *x, int v)
  698. -{
  699. - int old;
  700. - do old = *x;
  701. - while (__k_cas(old, v, x));
  702. - return old;
  703. -}
  704. -
  705. -static inline int a_fetch_add(volatile int *x, int v)
  706. -{
  707. - int old;
  708. - do old = *x;
  709. - while (__k_cas(old, old+v, x));
  710. - return old;
  711. -}
  712. -
  713. -static inline void a_inc(volatile int *x)
  714. -{
  715. - a_fetch_add(x, 1);
  716. -}
  717. -
  718. -static inline void a_dec(volatile int *x)
  719. -{
  720. - a_fetch_add(x, -1);
  721. -}
  722. -
  723. -static inline void a_store(volatile int *p, int x)
  724. -{
  725. - a_barrier();
  726. - *p = x;
  727. - a_barrier();
  728. -}
  729. -
  730. -static inline void a_and(volatile int *p, int v)
  731. -{
  732. - int old;
  733. - do old = *p;
  734. - while (__k_cas(old, old&v, p));
  735. -}
  736. -
  737. -static inline void a_or(volatile int *p, int v)
  738. -{
  739. - int old;
  740. - do old = *p;
  741. - while (__k_cas(old, old|v, p));
  742. -}
  743. -
  744. -#endif
  745. -
  746. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  747. -{
  748. - return (void *)a_cas(p, (int)t, (int)s);
  749. -}
  750. -
  751. -#define a_spin a_barrier
  752. -
  753. -static inline void a_crash()
  754. -{
  755. - *(volatile char *)0=0;
  756. -}
  757. -
  758. -static inline void a_or_l(volatile void *p, long v)
  759. -{
  760. - a_or(p, v);
  761. -}
  762. -
  763. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  764. -{
  765. - union { uint64_t v; uint32_t r[2]; } u = { v };
  766. - a_and((int *)p, u.r[0]);
  767. - a_and((int *)p+1, u.r[1]);
  768. -}
  769. -
  770. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  771. -{
  772. - union { uint64_t v; uint32_t r[2]; } u = { v };
  773. - a_or((int *)p, u.r[0]);
  774. - a_or((int *)p+1, u.r[1]);
  775. -}
  776. -
  777. -#endif
  778. --- /dev/null
  779. +++ b/arch/arm/atomic_arch.h
  780. @@ -0,0 +1,64 @@
  781. +__attribute__((__visibility__("hidden")))
  782. +extern const void *__arm_atomics[3]; /* gettp, cas, barrier */
  783. +
  784. +#if ((__ARM_ARCH_6__ || __ARM_ARCH_6K__ || __ARM_ARCH_6ZK__) && !__thumb__) \
  785. + || __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
  786. +
  787. +#define a_ll a_ll
  788. +static inline int a_ll(volatile int *p)
  789. +{
  790. + int v;
  791. + __asm__ __volatile__ ("ldrex %0, %1" : "=r"(v) : "Q"(*p));
  792. + return v;
  793. +}
  794. +
  795. +#define a_sc a_sc
  796. +static inline int a_sc(volatile int *p, int v)
  797. +{
  798. + int r;
  799. + __asm__ __volatile__ ("strex %0,%1,%2" : "=&r"(r) : "r"(v), "Q"(*p) : "memory");
  800. + return !r;
  801. +}
  802. +
  803. +#if __ARM_ARCH_7A__ || __ARM_ARCH_7R__ || __ARM_ARCH >= 7
  804. +
  805. +#define a_barrier a_barrier
  806. +static inline void a_barrier()
  807. +{
  808. + __asm__ __volatile__ ("dmb ish" : : : "memory");
  809. +}
  810. +
  811. +#endif
  812. +
  813. +#define a_pre_llsc a_barrier
  814. +#define a_post_llsc a_barrier
  815. +
  816. +#else
  817. +
  818. +#define a_cas a_cas
  819. +static inline int a_cas(volatile int *p, int t, int s)
  820. +{
  821. + for (;;) {
  822. + register int r0 __asm__("r0") = t;
  823. + register int r1 __asm__("r1") = s;
  824. + register volatile int *r2 __asm__("r2") = p;
  825. + int old;
  826. + __asm__ __volatile__ (
  827. + "bl __a_cas"
  828. + : "+r"(r0) : "r"(r1), "r"(r2)
  829. + : "memory", "r3", "lr", "ip", "cc" );
  830. + if (!r0) return t;
  831. + if ((old=*p)!=t) return old;
  832. + }
  833. +}
  834. +
  835. +#endif
  836. +
  837. +#ifndef a_barrier
  838. +#define a_barrier a_barrier
  839. +static inline void a_barrier()
  840. +{
  841. + __asm__ __volatile__("bl __a_barrier"
  842. + : : : "memory", "cc", "ip", "lr" );
  843. +}
  844. +#endif
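The point of exporting only a_ll/a_sc (plus a_pre_llsc/a_post_llsc) on v6/v7 is that the shared atomic layer can synthesize the rest. As a sketch of that generic construction, assuming only the primitives defined above, compare-and-swap comes out as:

    /* Sketch: CAS synthesized from the arch's LL/SC primitives.
     * a_pre_llsc/a_post_llsc supply the ordering around the loop. */
    static inline int a_cas(volatile int *p, int t, int s)
    {
        int old;
        a_pre_llsc();
        do old = a_ll(p);                /* load-linked */
        while (old == t && !a_sc(p, s)); /* retry on failed store-conditional */
        a_post_llsc();
        return old;
    }

On pre-v6 hardware the #else branch instead routes everything through the __a_cas trampoline, selected at startup (see the deleted __set_thread_area.c below).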
  845. --- a/arch/arm/pthread_arch.h
  846. +++ b/arch/arm/pthread_arch.h
  847. @@ -27,4 +27,4 @@ static inline pthread_t __pthread_self()
  848. #define TLS_ABOVE_TP
  849. #define TP_ADJ(p) ((char *)(p) + sizeof(struct pthread) - 8)
  850. -#define CANCEL_REG_IP 18
  851. +#define MC_PC arm_pc
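The CANCEL_REG_IP -> MC_PC renames in this patch (here and in the other arch pthread_arch.h files) replace a raw word offset into mcontext_t with the name of its PC member, so the consumer can use plain struct access. Roughly, in the shape of a SA_SIGINFO cancellation handler (a sketch; the surrounding logic is assumed):

    /* Sketch: read the interrupted PC by member name instead of
     * indexing the context as an array of words. */
    #include <signal.h>
    #include <stdint.h>
    #include <ucontext.h>

    static void cancel_handler(int sig, siginfo_t *si, void *ctx)
    {
        ucontext_t *uc = ctx;
        uintptr_t pc = (uintptr_t)(uc->uc_mcontext.MC_PC);
        (void)sig; (void)si; (void)pc;
        /* ... compare pc against the cancellable code range ... */
    }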
  852. --- a/arch/arm/reloc.h
  853. +++ b/arch/arm/reloc.h
  854. @@ -6,10 +6,10 @@
  855. #define ENDIAN_SUFFIX ""
  856. #endif
  857. -#if __SOFTFP__
  858. -#define FP_SUFFIX ""
  859. -#else
  860. +#if __ARM_PCS_VFP
  861. #define FP_SUFFIX "hf"
  862. +#else
  863. +#define FP_SUFFIX ""
  864. #endif
  865. #define LDSO_ARCH "arm" ENDIAN_SUFFIX FP_SUFFIX
  866. @@ -28,10 +28,5 @@
  867. #define REL_TPOFF R_ARM_TLS_TPOFF32
  868. //#define REL_TLSDESC R_ARM_TLS_DESC
  869. -#ifdef __thumb__
  870. #define CRTJMP(pc,sp) __asm__ __volatile__( \
  871. "mov sp,%1 ; bx %0" : : "r"(pc), "r"(sp) : "memory" )
  872. -#else
  873. -#define CRTJMP(pc,sp) __asm__ __volatile__( \
  874. - "mov sp,%1 ; tst %0,#1 ; moveq pc,%0 ; bx %0" : : "r"(pc), "r"(sp) : "memory" )
  875. -#endif
  876. --- a/arch/arm/src/__aeabi_atexit.c
  877. +++ /dev/null
  878. @@ -1,6 +0,0 @@
  879. -int __cxa_atexit(void (*func)(void *), void *arg, void *dso);
  880. -
  881. -int __aeabi_atexit (void *obj, void (*func) (void *), void *d)
  882. -{
  883. - return __cxa_atexit (func, obj, d);
  884. -}
  885. --- a/arch/arm/src/__aeabi_memclr.c
  886. +++ /dev/null
  887. @@ -1,9 +0,0 @@
  888. -#include <string.h>
  889. -#include "libc.h"
  890. -
  891. -void __aeabi_memclr(void *dest, size_t n)
  892. -{
  893. - memset(dest, 0, n);
  894. -}
  895. -weak_alias(__aeabi_memclr, __aeabi_memclr4);
  896. -weak_alias(__aeabi_memclr, __aeabi_memclr8);
  897. --- a/arch/arm/src/__aeabi_memcpy.c
  898. +++ /dev/null
  899. @@ -1,9 +0,0 @@
  900. -#include <string.h>
  901. -#include "libc.h"
  902. -
  903. -void __aeabi_memcpy(void *restrict dest, const void *restrict src, size_t n)
  904. -{
  905. - memcpy(dest, src, n);
  906. -}
  907. -weak_alias(__aeabi_memcpy, __aeabi_memcpy4);
  908. -weak_alias(__aeabi_memcpy, __aeabi_memcpy8);
  909. --- a/arch/arm/src/__aeabi_memmove.c
  910. +++ /dev/null
  911. @@ -1,9 +0,0 @@
  912. -#include <string.h>
  913. -#include "libc.h"
  914. -
  915. -void __aeabi_memmove(void *dest, const void *src, size_t n)
  916. -{
  917. - memmove(dest, src, n);
  918. -}
  919. -weak_alias(__aeabi_memmove, __aeabi_memmove4);
  920. -weak_alias(__aeabi_memmove, __aeabi_memmove8);
  921. --- a/arch/arm/src/__aeabi_memset.c
  922. +++ /dev/null
  923. @@ -1,9 +0,0 @@
  924. -#include <string.h>
  925. -#include "libc.h"
  926. -
  927. -void __aeabi_memset(void *dest, size_t n, int c)
  928. -{
  929. - memset(dest, c, n);
  930. -}
  931. -weak_alias(__aeabi_memset, __aeabi_memset4);
  932. -weak_alias(__aeabi_memset, __aeabi_memset8);
  933. --- a/arch/arm/src/__set_thread_area.c
  934. +++ /dev/null
  935. @@ -1,49 +0,0 @@
  936. -#include <stdint.h>
  937. -#include <elf.h>
  938. -#include "pthread_impl.h"
  939. -#include "libc.h"
  940. -
  941. -#define HWCAP_TLS (1 << 15)
  942. -
  943. -extern const unsigned char __attribute__((__visibility__("hidden")))
  944. - __a_barrier_dummy[], __a_barrier_oldkuser[],
  945. - __a_barrier_v6[], __a_barrier_v7[],
  946. - __a_cas_dummy[], __a_cas_v6[], __a_cas_v7[],
  947. - __a_gettp_dummy[];
  948. -
  949. -#define __a_barrier_kuser 0xffff0fa0
  950. -#define __a_cas_kuser 0xffff0fc0
  951. -#define __a_gettp_kuser 0xffff0fe0
  952. -
  953. -extern uintptr_t __attribute__((__visibility__("hidden")))
  954. - __a_barrier_ptr, __a_cas_ptr, __a_gettp_ptr;
  955. -
  956. -#define SET(op,ver) (__a_##op##_ptr = \
  957. - (uintptr_t)__a_##op##_##ver - (uintptr_t)__a_##op##_dummy)
  958. -
  959. -int __set_thread_area(void *p)
  960. -{
  961. -#if !__ARM_ARCH_7A__ && !__ARM_ARCH_7R__ && __ARM_ARCH < 7
  962. - if (__hwcap & HWCAP_TLS) {
  963. - size_t *aux;
  964. - SET(cas, v7);
  965. - SET(barrier, v7);
  966. - for (aux=libc.auxv; *aux; aux+=2) {
  967. - if (*aux != AT_PLATFORM) continue;
  968. - const char *s = (void *)aux[1];
  969. - if (s[0]!='v' || s[1]!='6' || s[2]-'0'<10u) break;
  970. - SET(cas, v6);
  971. - SET(barrier, v6);
  972. - break;
  973. - }
  974. - } else {
  975. - int ver = *(int *)0xffff0ffc;
  976. - SET(gettp, kuser);
  977. - SET(cas, kuser);
  978. - SET(barrier, kuser);
  979. - if (ver < 2) a_crash();
  980. - if (ver < 3) SET(barrier, oldkuser);
  981. - }
  982. -#endif
  983. - return __syscall(0xf0005, p);
  984. -}
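The SET() macro above patched each assembly trampoline with a relative offset (chosen implementation minus the dummy) rather than an absolute pointer, so the dispatch works before relocations are processed. With ordinary function pointers the same runtime selection would read roughly as follows (hypothetical names; illustration only):

    /* Illustration only: the real code stores pc-relative offsets, not
     * pointers, so it is valid in a not-yet-relocated shared libc. */
    typedef int cas_fn(int t, int s, volatile int *p);
    extern cas_fn cas_v6_impl, cas_v7_impl;  /* assumed implementations */
    static cas_fn *cas_impl;

    static void select_cas(int have_tls_hwcap, int plat_is_v6)
    {
        if (have_tls_hwcap)
            cas_impl = plat_is_v6 ? cas_v6_impl : cas_v7_impl;
        else
            cas_impl = (cas_fn *)0xffff0fc0; /* kuser_helper cmpxchg entry */
    }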
  985. --- a/arch/arm/src/arm/atomics.s
  986. +++ /dev/null
  987. @@ -1,116 +0,0 @@
  988. -.text
  989. -
  990. -.global __a_barrier
  991. -.hidden __a_barrier
  992. -.type __a_barrier,%function
  993. -__a_barrier:
  994. - ldr ip,1f
  995. - ldr ip,[pc,ip]
  996. - add pc,pc,ip
  997. -1: .word __a_barrier_ptr-1b
  998. -.global __a_barrier_dummy
  999. -.hidden __a_barrier_dummy
  1000. -__a_barrier_dummy:
  1001. - tst lr,#1
  1002. - moveq pc,lr
  1003. - bx lr
  1004. -.global __a_barrier_oldkuser
  1005. -.hidden __a_barrier_oldkuser
  1006. -__a_barrier_oldkuser:
  1007. - push {r0,r1,r2,r3,ip,lr}
  1008. - mov r1,r0
  1009. - mov r2,sp
  1010. - ldr ip,=0xffff0fc0
  1011. - mov lr,pc
  1012. - mov pc,ip
  1013. - pop {r0,r1,r2,r3,ip,lr}
  1014. - tst lr,#1
  1015. - moveq pc,lr
  1016. - bx lr
  1017. -.global __a_barrier_v6
  1018. -.hidden __a_barrier_v6
  1019. -__a_barrier_v6:
  1020. - mcr p15,0,r0,c7,c10,5
  1021. - bx lr
  1022. -.global __a_barrier_v7
  1023. -.hidden __a_barrier_v7
  1024. -__a_barrier_v7:
  1025. - .word 0xf57ff05b /* dmb ish */
  1026. - bx lr
  1027. -
  1028. -.global __a_cas
  1029. -.hidden __a_cas
  1030. -.type __a_cas,%function
  1031. -__a_cas:
  1032. - ldr ip,1f
  1033. - ldr ip,[pc,ip]
  1034. - add pc,pc,ip
  1035. -1: .word __a_cas_ptr-1b
  1036. -.global __a_cas_dummy
  1037. -.hidden __a_cas_dummy
  1038. -__a_cas_dummy:
  1039. - mov r3,r0
  1040. - ldr r0,[r2]
  1041. - subs r0,r3,r0
  1042. - streq r1,[r2]
  1043. - tst lr,#1
  1044. - moveq pc,lr
  1045. - bx lr
  1046. -.global __a_cas_v6
  1047. -.hidden __a_cas_v6
  1048. -__a_cas_v6:
  1049. - mov r3,r0
  1050. - mcr p15,0,r0,c7,c10,5
  1051. -1: .word 0xe1920f9f /* ldrex r0,[r2] */
  1052. - subs r0,r3,r0
  1053. - .word 0x01820f91 /* strexeq r0,r1,[r2] */
  1054. - teqeq r0,#1
  1055. - beq 1b
  1056. - mcr p15,0,r0,c7,c10,5
  1057. - bx lr
  1058. -.global __a_cas_v7
  1059. -.hidden __a_cas_v7
  1060. -__a_cas_v7:
  1061. - mov r3,r0
  1062. - .word 0xf57ff05b /* dmb ish */
  1063. -1: .word 0xe1920f9f /* ldrex r0,[r2] */
  1064. - subs r0,r3,r0
  1065. - .word 0x01820f91 /* strexeq r0,r1,[r2] */
  1066. - teqeq r0,#1
  1067. - beq 1b
  1068. - .word 0xf57ff05b /* dmb ish */
  1069. - bx lr
  1070. -
  1071. -.global __aeabi_read_tp
  1072. -.type __aeabi_read_tp,%function
  1073. -__aeabi_read_tp:
  1074. -
  1075. -.global __a_gettp
  1076. -.hidden __a_gettp
  1077. -.type __a_gettp,%function
  1078. -__a_gettp:
  1079. - ldr r0,1f
  1080. - ldr r0,[pc,r0]
  1081. - add pc,pc,r0
  1082. -1: .word __a_gettp_ptr-1b
  1083. -.global __a_gettp_dummy
  1084. -.hidden __a_gettp_dummy
  1085. -__a_gettp_dummy:
  1086. - mrc p15,0,r0,c13,c0,3
  1087. - bx lr
  1088. -
  1089. -.data
  1090. -.global __a_barrier_ptr
  1091. -.hidden __a_barrier_ptr
  1092. -__a_barrier_ptr:
  1093. - .word 0
  1094. -
  1095. -.global __a_cas_ptr
  1096. -.hidden __a_cas_ptr
  1097. -__a_cas_ptr:
  1098. - .word 0
  1099. -
  1100. -.global __a_gettp_ptr
  1101. -.hidden __a_gettp_ptr
  1102. -__a_gettp_ptr:
  1103. - .word 0
  1104. --- a/arch/arm/src/find_exidx.c
  1105. +++ /dev/null
  1106. @@ -1,42 +0,0 @@
  1107. -#define _GNU_SOURCE
  1108. -#include <link.h>
  1109. -#include <stdint.h>
  1110. -
  1111. -struct find_exidx_data {
  1112. - uintptr_t pc, exidx_start;
  1113. - int exidx_len;
  1114. -};
  1115. -
  1116. -static int find_exidx(struct dl_phdr_info *info, size_t size, void *ptr)
  1117. -{
  1118. - struct find_exidx_data *data = ptr;
  1119. - const ElfW(Phdr) *phdr = info->dlpi_phdr;
  1120. - uintptr_t addr, exidx_start = 0;
  1121. - int i, match = 0, exidx_len = 0;
  1122. -
  1123. - for (i = info->dlpi_phnum; i > 0; i--, phdr++) {
  1124. - addr = info->dlpi_addr + phdr->p_vaddr;
  1125. - switch (phdr->p_type) {
  1126. - case PT_LOAD:
  1127. - match |= data->pc >= addr && data->pc < addr + phdr->p_memsz;
  1128. - break;
  1129. - case PT_ARM_EXIDX:
  1130. - exidx_start = addr;
  1131. - exidx_len = phdr->p_memsz;
  1132. - break;
  1133. - }
  1134. - }
  1135. - data->exidx_start = exidx_start;
  1136. - data->exidx_len = exidx_len;
  1137. - return match;
  1138. -}
  1139. -
  1140. -uintptr_t __gnu_Unwind_Find_exidx(uintptr_t pc, int *pcount)
  1141. -{
  1142. - struct find_exidx_data data;
  1143. - data.pc = pc;
  1144. - if (dl_iterate_phdr(find_exidx, &data) <= 0)
  1145. - return 0;
  1146. - *pcount = data.exidx_len / 8;
  1147. - return data.exidx_start;
  1148. -}
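find_exidx.c is not dropped here, only moved out of arch/arm/src (presumably to a src/ subdirectory elsewhere in this patch); the symbol stays part of the ARM EABI unwinding contract. A caller-side sketch of the interface:

    /* Sketch: how an unwinder consumes __gnu_Unwind_Find_exidx. */
    #include <stdint.h>
    uintptr_t __gnu_Unwind_Find_exidx(uintptr_t pc, int *pcount);

    static uintptr_t exidx_for(uintptr_t pc)
    {
        int count;
        uintptr_t base = __gnu_Unwind_Find_exidx(pc, &count);
        return base; /* 0 if pc is uncovered; else count 8-byte entries at base */
    }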
  1149. --- a/arch/i386/atomic.h
  1150. +++ /dev/null
  1151. @@ -1,110 +0,0 @@
  1152. -#ifndef _INTERNAL_ATOMIC_H
  1153. -#define _INTERNAL_ATOMIC_H
  1154. -
  1155. -#include <stdint.h>
  1156. -
  1157. -static inline int a_ctz_64(uint64_t x)
  1158. -{
  1159. - int r;
  1160. - __asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; addl $32,%0\n1:"
  1161. - : "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
  1162. - return r;
  1163. -}
  1164. -
  1165. -static inline int a_ctz_l(unsigned long x)
  1166. -{
  1167. - long r;
  1168. - __asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
  1169. - return r;
  1170. -}
  1171. -
  1172. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  1173. -{
  1174. - __asm__( "lock ; andl %1, (%0) ; lock ; andl %2, 4(%0)"
  1175. - : : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
  1176. -}
  1177. -
  1178. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  1179. -{
  1180. - __asm__( "lock ; orl %1, (%0) ; lock ; orl %2, 4(%0)"
  1181. - : : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
  1182. -}
  1183. -
  1184. -static inline void a_or_l(volatile void *p, long v)
  1185. -{
  1186. - __asm__( "lock ; orl %1, %0"
  1187. - : "=m"(*(long *)p) : "r"(v) : "memory" );
  1188. -}
  1189. -
  1190. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  1191. -{
  1192. - __asm__( "lock ; cmpxchg %3, %1"
  1193. - : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
  1194. - return t;
  1195. -}
  1196. -
  1197. -static inline int a_cas(volatile int *p, int t, int s)
  1198. -{
  1199. - __asm__( "lock ; cmpxchg %3, %1"
  1200. - : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
  1201. - return t;
  1202. -}
  1203. -
  1204. -static inline void a_or(volatile int *p, int v)
  1205. -{
  1206. - __asm__( "lock ; orl %1, %0"
  1207. - : "=m"(*p) : "r"(v) : "memory" );
  1208. -}
  1209. -
  1210. -static inline void a_and(volatile int *p, int v)
  1211. -{
  1212. - __asm__( "lock ; andl %1, %0"
  1213. - : "=m"(*p) : "r"(v) : "memory" );
  1214. -}
  1215. -
  1216. -static inline int a_swap(volatile int *x, int v)
  1217. -{
  1218. - __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  1219. - return v;
  1220. -}
  1221. -
  1222. -#define a_xchg a_swap
  1223. -
  1224. -static inline int a_fetch_add(volatile int *x, int v)
  1225. -{
  1226. - __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  1227. - return v;
  1228. -}
  1229. -
  1230. -static inline void a_inc(volatile int *x)
  1231. -{
  1232. - __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
  1233. -}
  1234. -
  1235. -static inline void a_dec(volatile int *x)
  1236. -{
  1237. - __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
  1238. -}
  1239. -
  1240. -static inline void a_store(volatile int *p, int x)
  1241. -{
  1242. - __asm__( "movl %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory" );
  1243. -}
  1244. -
  1245. -static inline void a_spin()
  1246. -{
  1247. - __asm__ __volatile__( "pause" : : : "memory" );
  1248. -}
  1249. -
  1250. -static inline void a_barrier()
  1251. -{
  1252. - __asm__ __volatile__( "" : : : "memory" );
  1253. -}
  1254. -
  1255. -static inline void a_crash()
  1256. -{
  1257. - __asm__ __volatile__( "hlt" : : : "memory" );
  1258. -}
  1259. -
  1260. -
  1261. -#endif
  1262. --- /dev/null
  1263. +++ b/arch/i386/atomic_arch.h
  1264. @@ -0,0 +1,109 @@
  1265. +#define a_ctz_64 a_ctz_64
  1266. +static inline int a_ctz_64(uint64_t x)
  1267. +{
  1268. + int r;
  1269. + __asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; addl $32,%0\n1:"
  1270. + : "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) );
  1271. + return r;
  1272. +}
  1273. +
  1274. +#define a_ctz_l a_ctz_l
  1275. +static inline int a_ctz_l(unsigned long x)
  1276. +{
  1277. + long r;
  1278. + __asm__( "bsf %1,%0" : "=r"(r) : "r"(x) );
  1279. + return r;
  1280. +}
  1281. +
  1282. +#define a_and_64 a_and_64
  1283. +static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  1284. +{
  1285. + __asm__( "lock ; andl %1, (%0) ; lock ; andl %2, 4(%0)"
  1286. + : : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
  1287. +}
  1288. +
  1289. +#define a_or_64 a_or_64
  1290. +static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  1291. +{
  1292. + __asm__( "lock ; orl %1, (%0) ; lock ; orl %2, 4(%0)"
  1293. + : : "r"((long *)p), "r"((unsigned)v), "r"((unsigned)(v>>32)) : "memory" );
  1294. +}
  1295. +
  1296. +#define a_or_l a_or_l
  1297. +static inline void a_or_l(volatile void *p, long v)
  1298. +{
  1299. + __asm__( "lock ; orl %1, %0"
  1300. + : "=m"(*(long *)p) : "r"(v) : "memory" );
  1301. +}
  1302. +
  1303. +#define a_cas a_cas
  1304. +static inline int a_cas(volatile int *p, int t, int s)
  1305. +{
  1306. + __asm__( "lock ; cmpxchg %3, %1"
  1307. + : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
  1308. + return t;
  1309. +}
  1310. +
  1311. +#define a_or a_or
  1312. +static inline void a_or(volatile int *p, int v)
  1313. +{
  1314. + __asm__( "lock ; orl %1, %0"
  1315. + : "=m"(*p) : "r"(v) : "memory" );
  1316. +}
  1317. +
  1318. +#define a_and a_and
  1319. +static inline void a_and(volatile int *p, int v)
  1320. +{
  1321. + __asm__( "lock ; andl %1, %0"
  1322. + : "=m"(*p) : "r"(v) : "memory" );
  1323. +}
  1324. +
  1325. +#define a_swap a_swap
  1326. +static inline int a_swap(volatile int *x, int v)
  1327. +{
  1328. + __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  1329. + return v;
  1330. +}
  1331. +
  1332. +#define a_fetch_add a_fetch_add
  1333. +static inline int a_fetch_add(volatile int *x, int v)
  1334. +{
  1335. + __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  1336. + return v;
  1337. +}
  1338. +
  1339. +#define a_inc a_inc
  1340. +static inline void a_inc(volatile int *x)
  1341. +{
  1342. + __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
  1343. +}
  1344. +
  1345. +#define a_dec a_dec
  1346. +static inline void a_dec(volatile int *x)
  1347. +{
  1348. + __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
  1349. +}
  1350. +
  1351. +#define a_store a_store
  1352. +static inline void a_store(volatile int *p, int x)
  1353. +{
  1354. + __asm__( "movl %1, %0 ; lock ; orl $0,(%%esp)" : "=m"(*p) : "r"(x) : "memory" );
  1355. +}
  1356. +
  1357. +#define a_spin a_spin
  1358. +static inline void a_spin()
  1359. +{
  1360. + __asm__ __volatile__( "pause" : : : "memory" );
  1361. +}
  1362. +
  1363. +#define a_barrier a_barrier
  1364. +static inline void a_barrier()
  1365. +{
  1366. + __asm__ __volatile__( "" : : : "memory" );
  1367. +}
  1368. +
  1369. +#define a_crash a_crash
  1370. +static inline void a_crash()
  1371. +{
  1372. + __asm__ __volatile__( "hlt" : : : "memory" );
  1373. +}
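Note the a_store above: a plain mov is not a full barrier on x86, so it is followed by a locked no-op on the stack (lock ; orl $0,(%esp)) to restore sequential consistency without requiring SSE2's mfence. In C11 terms, the guarantee being implemented is roughly:

    /* Sketch: the C11 equivalent of the store-plus-locked-no-op idiom. */
    #include <stdatomic.h>

    static inline void a_store_c11(_Atomic int *p, int x)
    {
        atomic_store_explicit(p, x, memory_order_seq_cst);
    }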
  1374. --- a/arch/i386/bits/alltypes.h.in
  1375. +++ b/arch/i386/bits/alltypes.h.in
  1376. @@ -26,10 +26,12 @@ TYPEDEF long double float_t;
  1377. TYPEDEF long double double_t;
  1378. #endif
  1379. -#ifdef __cplusplus
  1380. -TYPEDEF struct { alignas(8) long long __ll; long double __ld; } max_align_t;
  1381. -#else
  1382. +#if !defined(__cplusplus)
  1383. TYPEDEF struct { _Alignas(8) long long __ll; long double __ld; } max_align_t;
  1384. +#elif defined(__GNUC__)
  1385. +TYPEDEF struct { __attribute__((__aligned__(8))) long long __ll; long double __ld; } max_align_t;
  1386. +#else
  1387. +TYPEDEF struct { alignas(8) long long __ll; long double __ld; } max_align_t;
  1388. #endif
  1389. TYPEDEF long time_t;
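The reordered conditional keeps _Alignas for C, but now prefers the GNU aligned attribute whenever __GNUC__ is defined, falling back to the C++11 alignas keyword only for non-GNU C++ compilers (the attribute is presumably preferred because it also works in pre-C++11 g++ modes). All three spellings request the same layout; illustratively:

    /* The same 8-byte alignment request in each dialect (illustrative). */
    struct c11_way { _Alignas(8) long long ll; };                     /* C11 */
    struct gnu_way { __attribute__((__aligned__(8))) long long ll; }; /* GNU */
    /* struct cxx_way { alignas(8) long long ll; };                      C++11 */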
  1390. --- a/arch/i386/pthread_arch.h
  1391. +++ b/arch/i386/pthread_arch.h
  1392. @@ -7,4 +7,4 @@ static inline struct pthread *__pthread_
  1393. #define TP_ADJ(p) (p)
  1394. -#define CANCEL_REG_IP 14
  1395. +#define MC_PC gregs[REG_EIP]
  1396. --- a/arch/microblaze/atomic.h
  1397. +++ /dev/null
  1398. @@ -1,143 +0,0 @@
  1399. -#ifndef _INTERNAL_ATOMIC_H
  1400. -#define _INTERNAL_ATOMIC_H
  1401. -
  1402. -#include <stdint.h>
  1403. -
  1404. -static inline int a_ctz_l(unsigned long x)
  1405. -{
  1406. - static const char debruijn32[32] = {
  1407. - 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  1408. - 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  1409. - };
  1410. - return debruijn32[(x&-x)*0x076be629 >> 27];
  1411. -}
  1412. -
  1413. -static inline int a_ctz_64(uint64_t x)
  1414. -{
  1415. - uint32_t y = x;
  1416. - if (!y) {
  1417. - y = x>>32;
  1418. - return 32 + a_ctz_l(y);
  1419. - }
  1420. - return a_ctz_l(y);
  1421. -}
  1422. -
  1423. -static inline int a_cas(volatile int *p, int t, int s)
  1424. -{
  1425. - register int old, tmp;
  1426. - __asm__ __volatile__ (
  1427. - " addi %0, r0, 0\n"
  1428. - "1: lwx %0, %2, r0\n"
  1429. - " rsubk %1, %0, %3\n"
  1430. - " bnei %1, 1f\n"
  1431. - " swx %4, %2, r0\n"
  1432. - " addic %1, r0, 0\n"
  1433. - " bnei %1, 1b\n"
  1434. - "1: "
  1435. - : "=&r"(old), "=&r"(tmp)
  1436. - : "r"(p), "r"(t), "r"(s)
  1437. - : "cc", "memory" );
  1438. - return old;
  1439. -}
  1440. -
  1441. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  1442. -{
  1443. - return (void *)a_cas(p, (int)t, (int)s);
  1444. -}
  1445. -
  1446. -static inline int a_swap(volatile int *x, int v)
  1447. -{
  1448. - register int old, tmp;
  1449. - __asm__ __volatile__ (
  1450. - " addi %0, r0, 0\n"
  1451. - "1: lwx %0, %2, r0\n"
  1452. - " swx %3, %2, r0\n"
  1453. - " addic %1, r0, 0\n"
  1454. - " bnei %1, 1b\n"
  1455. - "1: "
  1456. - : "=&r"(old), "=&r"(tmp)
  1457. - : "r"(x), "r"(v)
  1458. - : "cc", "memory" );
  1459. - return old;
  1460. -}
  1461. -
  1462. -static inline int a_fetch_add(volatile int *x, int v)
  1463. -{
  1464. - register int new, tmp;
  1465. - __asm__ __volatile__ (
  1466. - " addi %0, r0, 0\n"
  1467. - "1: lwx %0, %2, r0\n"
  1468. - " addk %0, %0, %3\n"
  1469. - " swx %0, %2, r0\n"
  1470. - " addic %1, r0, 0\n"
  1471. - " bnei %1, 1b\n"
  1472. - "1: "
  1473. - : "=&r"(new), "=&r"(tmp)
  1474. - : "r"(x), "r"(v)
  1475. - : "cc", "memory" );
  1476. - return new-v;
  1477. -}
  1478. -
  1479. -static inline void a_inc(volatile int *x)
  1480. -{
  1481. - a_fetch_add(x, 1);
  1482. -}
  1483. -
  1484. -static inline void a_dec(volatile int *x)
  1485. -{
  1486. - a_fetch_add(x, -1);
  1487. -}
  1488. -
  1489. -static inline void a_store(volatile int *p, int x)
  1490. -{
  1491. - __asm__ __volatile__ (
  1492. - "swi %1, %0"
  1493. - : "=m"(*p) : "r"(x) : "memory" );
  1494. -}
  1495. -
  1496. -#define a_spin a_barrier
  1497. -
  1498. -static inline void a_barrier()
  1499. -{
  1500. - a_cas(&(int){0}, 0, 0);
  1501. -}
  1502. -
  1503. -static inline void a_crash()
  1504. -{
  1505. - *(volatile char *)0=0;
  1506. -}
  1507. -
  1508. -static inline void a_and(volatile int *p, int v)
  1509. -{
  1510. - int old;
  1511. - do old = *p;
  1512. - while (a_cas(p, old, old&v) != old);
  1513. -}
  1514. -
  1515. -static inline void a_or(volatile int *p, int v)
  1516. -{
  1517. - int old;
  1518. - do old = *p;
  1519. - while (a_cas(p, old, old|v) != old);
  1520. -}
  1521. -
  1522. -static inline void a_or_l(volatile void *p, long v)
  1523. -{
  1524. - a_or(p, v);
  1525. -}
  1526. -
  1527. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  1528. -{
  1529. - union { uint64_t v; uint32_t r[2]; } u = { v };
  1530. - a_and((int *)p, u.r[0]);
  1531. - a_and((int *)p+1, u.r[1]);
  1532. -}
  1533. -
  1534. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  1535. -{
  1536. - union { uint64_t v; uint32_t r[2]; } u = { v };
  1537. - a_or((int *)p, u.r[0]);
  1538. - a_or((int *)p+1, u.r[1]);
  1539. -}
  1540. -
  1541. -#endif
  1542. --- /dev/null
  1543. +++ b/arch/microblaze/atomic_arch.h
  1544. @@ -0,0 +1,53 @@
  1545. +#define a_cas a_cas
  1546. +static inline int a_cas(volatile int *p, int t, int s)
  1547. +{
  1548. + register int old, tmp;
  1549. + __asm__ __volatile__ (
  1550. + " addi %0, r0, 0\n"
  1551. + "1: lwx %0, %2, r0\n"
  1552. + " rsubk %1, %0, %3\n"
  1553. + " bnei %1, 1f\n"
  1554. + " swx %4, %2, r0\n"
  1555. + " addic %1, r0, 0\n"
  1556. + " bnei %1, 1b\n"
  1557. + "1: "
  1558. + : "=&r"(old), "=&r"(tmp)
  1559. + : "r"(p), "r"(t), "r"(s)
  1560. + : "cc", "memory" );
  1561. + return old;
  1562. +}
  1563. +
  1564. +#define a_swap a_swap
  1565. +static inline int a_swap(volatile int *x, int v)
  1566. +{
  1567. + register int old, tmp;
  1568. + __asm__ __volatile__ (
  1569. + " addi %0, r0, 0\n"
  1570. + "1: lwx %0, %2, r0\n"
  1571. + " swx %3, %2, r0\n"
  1572. + " addic %1, r0, 0\n"
  1573. + " bnei %1, 1b\n"
  1574. + "1: "
  1575. + : "=&r"(old), "=&r"(tmp)
  1576. + : "r"(x), "r"(v)
  1577. + : "cc", "memory" );
  1578. + return old;
  1579. +}
  1580. +
  1581. +#define a_fetch_add a_fetch_add
  1582. +static inline int a_fetch_add(volatile int *x, int v)
  1583. +{
  1584. + register int new, tmp;
  1585. + __asm__ __volatile__ (
  1586. + " addi %0, r0, 0\n"
  1587. + "1: lwx %0, %2, r0\n"
  1588. + " addk %0, %0, %3\n"
  1589. + " swx %0, %2, r0\n"
  1590. + " addic %1, r0, 0\n"
  1591. + " bnei %1, 1b\n"
  1592. + "1: "
  1593. + : "=&r"(new), "=&r"(tmp)
  1594. + : "r"(x), "r"(v)
  1595. + : "cc", "memory" );
  1596. + return new-v;
  1597. +}
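Everything the deleted header defined beyond these three primitives (a_inc, a_dec, a_and, a_or, the split 64-bit ops, a_barrier, a_crash, ...) now comes from shared fallbacks, so each arch keeps only what it can do natively. For example, the increment/decrement wrappers every arch used to copy reduce to a sketch like:

    /* Sketch: shared fallbacks replacing the identical per-arch copies. */
    static inline void a_inc(volatile int *x)
    {
        a_fetch_add(x, 1);
    }

    static inline void a_dec(volatile int *x)
    {
        a_fetch_add(x, -1);
    }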
  1598. --- a/arch/microblaze/pthread_arch.h
  1599. +++ b/arch/microblaze/pthread_arch.h
  1600. @@ -7,4 +7,4 @@ static inline struct pthread *__pthread_
  1601. #define TP_ADJ(p) (p)
  1602. -#define CANCEL_REG_IP 32
  1603. +#define MC_PC regs.pc
  1604. --- a/arch/mips/atomic.h
  1605. +++ /dev/null
  1606. @@ -1,205 +0,0 @@
  1607. -#ifndef _INTERNAL_ATOMIC_H
  1608. -#define _INTERNAL_ATOMIC_H
  1609. -
  1610. -#include <stdint.h>
  1611. -
  1612. -static inline int a_ctz_l(unsigned long x)
  1613. -{
  1614. - static const char debruijn32[32] = {
  1615. - 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  1616. - 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  1617. - };
  1618. - return debruijn32[(x&-x)*0x076be629 >> 27];
  1619. -}
  1620. -
  1621. -static inline int a_ctz_64(uint64_t x)
  1622. -{
  1623. - uint32_t y = x;
  1624. - if (!y) {
  1625. - y = x>>32;
  1626. - return 32 + a_ctz_l(y);
  1627. - }
  1628. - return a_ctz_l(y);
  1629. -}
  1630. -
  1631. -static inline int a_cas(volatile int *p, int t, int s)
  1632. -{
  1633. - int dummy;
  1634. - __asm__ __volatile__(
  1635. - ".set push\n"
  1636. - ".set mips2\n"
  1637. - ".set noreorder\n"
  1638. - " sync\n"
  1639. - "1: ll %0, %2\n"
  1640. - " bne %0, %3, 1f\n"
  1641. - " addu %1, %4, $0\n"
  1642. - " sc %1, %2\n"
  1643. - " beq %1, $0, 1b\n"
  1644. - " nop\n"
  1645. - " sync\n"
  1646. - "1: \n"
  1647. - ".set pop\n"
  1648. - : "=&r"(t), "=&r"(dummy), "+m"(*p) : "r"(t), "r"(s) : "memory" );
  1649. - return t;
  1650. -}
  1651. -
  1652. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  1653. -{
  1654. - return (void *)a_cas(p, (int)t, (int)s);
  1655. -}
  1656. -
  1657. -static inline int a_swap(volatile int *x, int v)
  1658. -{
  1659. - int old, dummy;
  1660. - __asm__ __volatile__(
  1661. - ".set push\n"
  1662. - ".set mips2\n"
  1663. - ".set noreorder\n"
  1664. - " sync\n"
  1665. - "1: ll %0, %2\n"
  1666. - " addu %1, %3, $0\n"
  1667. - " sc %1, %2\n"
  1668. - " beq %1, $0, 1b\n"
  1669. - " nop\n"
  1670. - " sync\n"
  1671. - ".set pop\n"
  1672. - : "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
  1673. - return old;
  1674. -}
  1675. -
  1676. -static inline int a_fetch_add(volatile int *x, int v)
  1677. -{
  1678. - int old, dummy;
  1679. - __asm__ __volatile__(
  1680. - ".set push\n"
  1681. - ".set mips2\n"
  1682. - ".set noreorder\n"
  1683. - " sync\n"
  1684. - "1: ll %0, %2\n"
  1685. - " addu %1, %0, %3\n"
  1686. - " sc %1, %2\n"
  1687. - " beq %1, $0, 1b\n"
  1688. - " nop\n"
  1689. - " sync\n"
  1690. - ".set pop\n"
  1691. - : "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
  1692. - return old;
  1693. -}
  1694. -
  1695. -static inline void a_inc(volatile int *x)
  1696. -{
  1697. - int dummy;
  1698. - __asm__ __volatile__(
  1699. - ".set push\n"
  1700. - ".set mips2\n"
  1701. - ".set noreorder\n"
  1702. - " sync\n"
  1703. - "1: ll %0, %1\n"
  1704. - " addu %0, %0, 1\n"
  1705. - " sc %0, %1\n"
  1706. - " beq %0, $0, 1b\n"
  1707. - " nop\n"
  1708. - " sync\n"
  1709. - ".set pop\n"
  1710. - : "=&r"(dummy), "+m"(*x) : : "memory" );
  1711. -}
  1712. -
  1713. -static inline void a_dec(volatile int *x)
  1714. -{
  1715. - int dummy;
  1716. - __asm__ __volatile__(
  1717. - ".set push\n"
  1718. - ".set mips2\n"
  1719. - ".set noreorder\n"
  1720. - " sync\n"
  1721. - "1: ll %0, %1\n"
  1722. - " subu %0, %0, 1\n"
  1723. - " sc %0, %1\n"
  1724. - " beq %0, $0, 1b\n"
  1725. - " nop\n"
  1726. - " sync\n"
  1727. - ".set pop\n"
  1728. - : "=&r"(dummy), "+m"(*x) : : "memory" );
  1729. -}
  1730. -
  1731. -static inline void a_store(volatile int *p, int x)
  1732. -{
  1733. - __asm__ __volatile__(
  1734. - ".set push\n"
  1735. - ".set mips2\n"
  1736. - ".set noreorder\n"
  1737. - " sync\n"
  1738. - " sw %1, %0\n"
  1739. - " sync\n"
  1740. - ".set pop\n"
  1741. - : "+m"(*p) : "r"(x) : "memory" );
  1742. -}
  1743. -
  1744. -#define a_spin a_barrier
  1745. -
  1746. -static inline void a_barrier()
  1747. -{
  1748. - a_cas(&(int){0}, 0, 0);
  1749. -}
  1750. -
  1751. -static inline void a_crash()
  1752. -{
  1753. - *(volatile char *)0=0;
  1754. -}
  1755. -
  1756. -static inline void a_and(volatile int *p, int v)
  1757. -{
  1758. - int dummy;
  1759. - __asm__ __volatile__(
  1760. - ".set push\n"
  1761. - ".set mips2\n"
  1762. - ".set noreorder\n"
  1763. - " sync\n"
  1764. - "1: ll %0, %1\n"
  1765. - " and %0, %0, %2\n"
  1766. - " sc %0, %1\n"
  1767. - " beq %0, $0, 1b\n"
  1768. - " nop\n"
  1769. - " sync\n"
  1770. - ".set pop\n"
  1771. - : "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
  1772. -}
  1773. -
  1774. -static inline void a_or(volatile int *p, int v)
  1775. -{
  1776. - int dummy;
  1777. - __asm__ __volatile__(
  1778. - ".set push\n"
  1779. - ".set mips2\n"
  1780. - ".set noreorder\n"
  1781. - " sync\n"
  1782. - "1: ll %0, %1\n"
  1783. - " or %0, %0, %2\n"
  1784. - " sc %0, %1\n"
  1785. - " beq %0, $0, 1b\n"
  1786. - " nop\n"
  1787. - " sync\n"
  1788. - ".set pop\n"
  1789. - : "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
  1790. -}
  1791. -
  1792. -static inline void a_or_l(volatile void *p, long v)
  1793. -{
  1794. - a_or(p, v);
  1795. -}
  1796. -
  1797. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  1798. -{
  1799. - union { uint64_t v; uint32_t r[2]; } u = { v };
  1800. - a_and((int *)p, u.r[0]);
  1801. - a_and((int *)p+1, u.r[1]);
  1802. -}
  1803. -
  1804. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  1805. -{
  1806. - union { uint64_t v; uint32_t r[2]; } u = { v };
  1807. - a_or((int *)p, u.r[0]);
  1808. - a_or((int *)p+1, u.r[1]);
  1809. -}
  1810. -
  1811. -#endif
  1812. --- /dev/null
  1813. +++ b/arch/mips/atomic_arch.h
  1814. @@ -0,0 +1,39 @@
  1815. +#define a_ll a_ll
  1816. +static inline int a_ll(volatile int *p)
  1817. +{
  1818. + int v;
  1819. + __asm__ __volatile__ (
  1820. + ".set push ; .set mips2\n\t"
  1821. + "ll %0, %1"
  1822. + "\n\t.set pop"
  1823. + : "=r"(v) : "m"(*p));
  1824. + return v;
  1825. +}
  1826. +
  1827. +#define a_sc a_sc
  1828. +static inline int a_sc(volatile int *p, int v)
  1829. +{
  1830. + int r;
  1831. + __asm__ __volatile__ (
  1832. + ".set push ; .set mips2\n\t"
  1833. + "sc %0, %1"
  1834. + "\n\t.set pop"
  1835. + : "=r"(r), "=m"(*p) : "0"(v) : "memory");
  1836. + return r;
  1837. +}
  1838. +
  1839. +#define a_barrier a_barrier
  1840. +static inline void a_barrier()
  1841. +{
  1842. + /* mips2 sync, but using too many directives causes
  1843. + * gcc not to inline it, so encode with .long instead. */
  1844. + __asm__ __volatile__ (".long 0xf" : : : "memory");
  1845. +#if 0
  1846. + __asm__ __volatile__ (
  1847. + ".set push ; .set mips2 ; sync ; .set pop"
  1848. + : : : "memory");
  1849. +#endif
  1850. +}
  1851. +
  1852. +#define a_pre_llsc a_barrier
  1853. +#define a_post_llsc a_barrier
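(The .long 0xf in a_barrier is the raw encoding of the mips2 sync instruction, per the comment.) As on arm, providing just a_ll/a_sc and the barriers is enough for the shared layer to build the rest; an atomic swap, for instance, falls out of the same loop shape (a sketch assuming these primitives):

    /* Sketch: generic swap from LL/SC; retry until the
     * store-conditional succeeds, then return the old value. */
    static inline int a_swap(volatile int *p, int v)
    {
        int old;
        a_pre_llsc();
        do old = a_ll(p);
        while (!a_sc(p, v));
        a_post_llsc();
        return old;
    }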
  1854. --- a/arch/mips/crt_arch.h
  1855. +++ b/arch/mips/crt_arch.h
  1856. @@ -4,13 +4,16 @@ __asm__(
  1857. ".text \n"
  1858. ".global _" START "\n"
  1859. ".global " START "\n"
  1860. +".global " START "_data\n"
  1861. ".type _" START ", @function\n"
  1862. ".type " START ", @function\n"
  1863. +".type " START "_data, @function\n"
  1864. "_" START ":\n"
  1865. "" START ":\n"
  1866. " bal 1f \n"
  1867. " move $fp, $0 \n"
  1868. -"2: .gpword 2b \n"
  1869. +"" START "_data: \n"
  1870. +" .gpword " START "_data \n"
  1871. " .gpword " START "_c \n"
  1872. ".weak _DYNAMIC \n"
  1873. ".hidden _DYNAMIC \n"
  1874. --- a/arch/mips/pthread_arch.h
  1875. +++ b/arch/mips/pthread_arch.h
  1876. @@ -16,4 +16,4 @@ static inline struct pthread *__pthread_
  1877. #define DTP_OFFSET 0x8000
  1878. -#define CANCEL_REG_IP (3-(union {int __i; char __b;}){1}.__b)
  1879. +#define MC_PC pc
  1880. --- a/arch/mips/syscall_arch.h
  1881. +++ b/arch/mips/syscall_arch.h
  1882. @@ -3,9 +3,7 @@
  1883. ((union { long long ll; long l[2]; }){ .ll = x }).l[1]
  1884. #define __SYSCALL_LL_O(x) 0, __SYSCALL_LL_E((x))
  1885. -#ifdef SHARED
  1886. __attribute__((visibility("hidden")))
  1887. -#endif
  1888. long (__syscall)(long, ...);
  1889. #define SYSCALL_RLIM_INFINITY (-1UL/2)
  1890. --- a/arch/or1k/atomic.h
  1891. +++ /dev/null
  1892. @@ -1,120 +0,0 @@
  1893. -#ifndef _INTERNAL_ATOMIC_H
  1894. -#define _INTERNAL_ATOMIC_H
  1895. -
  1896. -#include <stdint.h>
  1897. -
  1898. -static inline int a_ctz_l(unsigned long x)
  1899. -{
  1900. - static const char debruijn32[32] = {
  1901. - 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  1902. - 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  1903. - };
  1904. - return debruijn32[(x&-x)*0x076be629 >> 27];
  1905. -}
  1906. -
  1907. -static inline int a_ctz_64(uint64_t x)
  1908. -{
  1909. - uint32_t y = x;
  1910. - if (!y) {
  1911. - y = x>>32;
  1912. - return 32 + a_ctz_l(y);
  1913. - }
  1914. - return a_ctz_l(y);
  1915. -}
  1916. -
  1917. -static inline int a_cas(volatile int *p, int t, int s)
  1918. -{
  1919. - __asm__("1: l.lwa %0, %1\n"
  1920. - " l.sfeq %0, %2\n"
  1921. - " l.bnf 1f\n"
  1922. - " l.nop\n"
  1923. - " l.swa %1, %3\n"
  1924. - " l.bnf 1b\n"
  1925. - " l.nop\n"
  1926. - "1: \n"
  1927. - : "=&r"(t), "+m"(*p) : "r"(t), "r"(s) : "cc", "memory" );
  1928. - return t;
  1929. -}
  1930. -
  1931. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  1932. -{
  1933. - return (void *)a_cas(p, (int)t, (int)s);
  1934. -}
  1935. -
  1936. -static inline int a_swap(volatile int *x, int v)
  1937. -{
  1938. - int old;
  1939. - do old = *x;
  1940. - while (a_cas(x, old, v) != old);
  1941. - return old;
  1942. -}
  1943. -
  1944. -static inline int a_fetch_add(volatile int *x, int v)
  1945. -{
  1946. - int old;
  1947. - do old = *x;
  1948. - while (a_cas(x, old, old+v) != old);
  1949. - return old;
  1950. -}
  1951. -
  1952. -static inline void a_inc(volatile int *x)
  1953. -{
  1954. - a_fetch_add(x, 1);
  1955. -}
  1956. -
  1957. -static inline void a_dec(volatile int *x)
  1958. -{
  1959. - a_fetch_add(x, -1);
  1960. -}
  1961. -
  1962. -static inline void a_store(volatile int *p, int x)
  1963. -{
  1964. - a_swap(p, x);
  1965. -}
  1966. -
  1967. -#define a_spin a_barrier
  1968. -
  1969. -static inline void a_barrier()
  1970. -{
  1971. - a_cas(&(int){0}, 0, 0);
  1972. -}
  1973. -
  1974. -static inline void a_crash()
  1975. -{
  1976. - *(volatile char *)0=0;
  1977. -}
  1978. -
  1979. -static inline void a_and(volatile int *p, int v)
  1980. -{
  1981. - int old;
  1982. - do old = *p;
  1983. - while (a_cas(p, old, old&v) != old);
  1984. -}
  1985. -
  1986. -static inline void a_or(volatile int *p, int v)
  1987. -{
  1988. - int old;
  1989. - do old = *p;
  1990. - while (a_cas(p, old, old|v) != old);
  1991. -}
  1992. -
  1993. -static inline void a_or_l(volatile void *p, long v)
  1994. -{
  1995. - a_or(p, v);
  1996. -}
  1997. -
  1998. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  1999. -{
  2000. - union { uint64_t v; uint32_t r[2]; } u = { v };
  2001. - a_and((int *)p, u.r[0]);
  2002. - a_and((int *)p+1, u.r[1]);
  2003. -}
  2004. -
  2005. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  2006. -{
  2007. - union { uint64_t v; uint32_t r[2]; } u = { v };
  2008. - a_or((int *)p, u.r[0]);
  2009. - a_or((int *)p+1, u.r[1]);
  2010. -}
  2011. -
  2012. -#endif
  2013. --- /dev/null
  2014. +++ b/arch/or1k/atomic_arch.h
  2015. @@ -0,0 +1,14 @@
  2016. +#define a_cas a_cas
  2017. +static inline int a_cas(volatile int *p, int t, int s)
  2018. +{
  2019. + __asm__("1: l.lwa %0, %1\n"
  2020. + " l.sfeq %0, %2\n"
  2021. + " l.bnf 1f\n"
  2022. + " l.nop\n"
  2023. + " l.swa %1, %3\n"
  2024. + " l.bnf 1b\n"
  2025. + " l.nop\n"
  2026. + "1: \n"
  2027. + : "=&r"(t), "+m"(*p) : "r"(t), "r"(s) : "cc", "memory" );
  2028. + return t;
  2029. +}
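or1k now provides only a_cas, which is sufficient: a CAS-only arch gets the remaining operations from retry loops in the shared layer. Fetch-and-add, matching the loop the deleted header carried, is roughly:

    /* Sketch: fetch_add from CAS; retry until no other thread touched
     * *p between the plain load and the CAS. */
    static inline int a_fetch_add(volatile int *p, int v)
    {
        int old;
        do old = *p;
        while (a_cas(p, old, old+v) != old);
        return old;
    }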
  2030. --- a/arch/or1k/pthread_arch.h
  2031. +++ b/arch/or1k/pthread_arch.h
  2032. @@ -14,5 +14,4 @@ static inline struct pthread *__pthread_
  2033. #define TLS_ABOVE_TP
  2034. #define TP_ADJ(p) ((char *)(p) + sizeof(struct pthread))
  2035. -/* word-offset to 'pc' in mcontext_t */
  2036. -#define CANCEL_REG_IP 32
  2037. +#define MC_PC regs.pc
  2038. --- a/arch/powerpc/atomic.h
  2039. +++ /dev/null
  2040. @@ -1,126 +0,0 @@
  2041. -#ifndef _INTERNAL_ATOMIC_H
  2042. -#define _INTERNAL_ATOMIC_H
  2043. -
  2044. -#include <stdint.h>
  2045. -#include <endian.h>
  2046. -
  2047. -static inline int a_ctz_l(unsigned long x)
  2048. -{
  2049. - static const char debruijn32[32] = {
  2050. - 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  2051. - 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  2052. - };
  2053. - return debruijn32[(x&-x)*0x076be629 >> 27];
  2054. -}
  2055. -
  2056. -static inline int a_ctz_64(uint64_t x)
  2057. -{
  2058. - uint32_t y = x;
  2059. - if (!y) {
  2060. - y = x>>32;
  2061. - return 32 + a_ctz_l(y);
  2062. - }
  2063. - return a_ctz_l(y);
  2064. -}
  2065. -
  2066. -static inline int a_cas(volatile int *p, int t, int s)
  2067. -{
  2068. - __asm__("\n"
  2069. - " sync\n"
  2070. - "1: lwarx %0, 0, %4\n"
  2071. - " cmpw %0, %2\n"
  2072. - " bne 1f\n"
  2073. - " stwcx. %3, 0, %4\n"
  2074. - " bne- 1b\n"
  2075. - " isync\n"
  2076. - "1: \n"
  2077. - : "=&r"(t), "+m"(*p) : "r"(t), "r"(s), "r"(p) : "cc", "memory" );
  2078. - return t;
  2079. -}
  2080. -
  2081. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  2082. -{
  2083. - return (void *)a_cas(p, (int)t, (int)s);
  2084. -}
  2085. -
  2086. -static inline int a_swap(volatile int *x, int v)
  2087. -{
  2088. - int old;
  2089. - do old = *x;
  2090. - while (a_cas(x, old, v) != old);
  2091. - return old;
  2092. -}
  2093. -
  2094. -static inline int a_fetch_add(volatile int *x, int v)
  2095. -{
  2096. - int old;
  2097. - do old = *x;
  2098. - while (a_cas(x, old, old+v) != old);
  2099. - return old;
  2100. -}
  2101. -
  2102. -static inline void a_inc(volatile int *x)
  2103. -{
  2104. - a_fetch_add(x, 1);
  2105. -}
  2106. -
  2107. -static inline void a_dec(volatile int *x)
  2108. -{
  2109. - a_fetch_add(x, -1);
  2110. -}
  2111. -
  2112. -static inline void a_store(volatile int *p, int x)
  2113. -{
  2114. - __asm__ __volatile__ ("\n"
  2115. - " sync\n"
  2116. - " stw %1, %0\n"
  2117. - " isync\n"
  2118. - : "=m"(*p) : "r"(x) : "memory" );
  2119. -}
  2120. -
  2121. -#define a_spin a_barrier
  2122. -
  2123. -static inline void a_barrier()
  2124. -{
  2125. - a_cas(&(int){0}, 0, 0);
  2126. -}
  2127. -
  2128. -static inline void a_crash()
  2129. -{
  2130. - *(volatile char *)0=0;
  2131. -}
  2132. -
  2133. -static inline void a_and(volatile int *p, int v)
  2134. -{
  2135. - int old;
  2136. - do old = *p;
  2137. - while (a_cas(p, old, old&v) != old);
  2138. -}
  2139. -
  2140. -static inline void a_or(volatile int *p, int v)
  2141. -{
  2142. - int old;
  2143. - do old = *p;
  2144. - while (a_cas(p, old, old|v) != old);
  2145. -}
  2146. -
  2147. -static inline void a_or_l(volatile void *p, long v)
  2148. -{
  2149. - a_or(p, v);
  2150. -}
  2151. -
  2152. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  2153. -{
  2154. - union { uint64_t v; uint32_t r[2]; } u = { v };
  2155. - a_and((int *)p, u.r[0]);
  2156. - a_and((int *)p+1, u.r[1]);
  2157. -}
  2158. -
  2159. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  2160. -{
  2161. - union { uint64_t v; uint32_t r[2]; } u = { v };
  2162. - a_or((int *)p, u.r[0]);
  2163. - a_or((int *)p+1, u.r[1]);
  2164. -}
  2165. -
  2166. -#endif
  2167. --- /dev/null
  2168. +++ b/arch/powerpc/atomic_arch.h
  2169. @@ -0,0 +1,15 @@
  2170. +#define a_cas a_cas
  2171. +static inline int a_cas(volatile int *p, int t, int s)
  2172. +{
  2173. + __asm__("\n"
  2174. + " sync\n"
  2175. + "1: lwarx %0, 0, %4\n"
  2176. + " cmpw %0, %2\n"
  2177. + " bne 1f\n"
  2178. + " stwcx. %3, 0, %4\n"
  2179. + " bne- 1b\n"
  2180. + " isync\n"
  2181. + "1: \n"
  2182. + : "=&r"(t), "+m"(*p) : "r"(t), "r"(s), "r"(p) : "cc", "memory" );
  2183. + return t;
  2184. +}
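powerpc likewise keeps only a_cas. With no arch-specific a_barrier, the fallback barrier is a dummy CAS on a stack slot, exactly as the deleted header spelled it; since this a_cas brackets the lwarx/stwcx. sequence with sync/isync, the dummy CAS acts as a full barrier:

    /* Sketch: generic barrier via a CAS on a throwaway stack slot. */
    static inline void a_barrier()
    {
        a_cas(&(int){0}, 0, 0);
    }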
  2185. --- a/arch/powerpc/pthread_arch.h
  2186. +++ b/arch/powerpc/pthread_arch.h
  2187. @@ -15,9 +15,8 @@ static inline struct pthread *__pthread_
  2188. #define DTP_OFFSET 0x8000
  2189. -// offset of the PC register in mcontext_t, divided by the system wordsize
  2190. // the kernel calls the ip "nip", it's the first saved value after the 32
  2191. // GPRs.
  2192. -#define CANCEL_REG_IP 32
  2193. +#define MC_PC gregs[32]
  2194. #define CANARY canary_at_end
  2195. --- a/arch/sh/atomic.h
  2196. +++ /dev/null
  2197. @@ -1,168 +0,0 @@
  2198. -#ifndef _INTERNAL_ATOMIC_H
  2199. -#define _INTERNAL_ATOMIC_H
  2200. -
  2201. -#include <stdint.h>
  2202. -
  2203. -static inline int a_ctz_l(unsigned long x)
  2204. -{
  2205. - static const char debruijn32[32] = {
  2206. - 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  2207. - 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  2208. - };
  2209. - return debruijn32[(x&-x)*0x076be629 >> 27];
  2210. -}
  2211. -
  2212. -static inline int a_ctz_64(uint64_t x)
  2213. -{
  2214. - uint32_t y = x;
  2215. - if (!y) {
  2216. - y = x>>32;
  2217. - return 32 + a_ctz_l(y);
  2218. - }
  2219. - return a_ctz_l(y);
  2220. -}
  2221. -
  2222. -#define LLSC_CLOBBERS "r0", "t", "memory"
  2223. -#define LLSC_START(mem) "synco\n" \
  2224. - "0: movli.l @" mem ", r0\n"
  2225. -#define LLSC_END(mem) \
  2226. - "1: movco.l r0, @" mem "\n" \
  2227. - " bf 0b\n" \
  2228. - " synco\n"
  2229. -
  2230. -static inline int __sh_cas_llsc(volatile int *p, int t, int s)
  2231. -{
  2232. - int old;
  2233. - __asm__ __volatile__(
  2234. - LLSC_START("%1")
  2235. - " mov r0, %0\n"
  2236. - " cmp/eq %0, %2\n"
  2237. - " bf 1f\n"
  2238. - " mov %3, r0\n"
  2239. - LLSC_END("%1")
  2240. - : "=&r"(old) : "r"(p), "r"(t), "r"(s) : LLSC_CLOBBERS);
  2241. - return old;
  2242. -}
  2243. -
  2244. -static inline int __sh_swap_llsc(volatile int *x, int v)
  2245. -{
  2246. - int old;
  2247. - __asm__ __volatile__(
  2248. - LLSC_START("%1")
  2249. - " mov r0, %0\n"
  2250. - " mov %2, r0\n"
  2251. - LLSC_END("%1")
  2252. - : "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
  2253. - return old;
  2254. -}
  2255. -
  2256. -static inline int __sh_fetch_add_llsc(volatile int *x, int v)
  2257. -{
  2258. - int old;
  2259. - __asm__ __volatile__(
  2260. - LLSC_START("%1")
  2261. - " mov r0, %0\n"
  2262. - " add %2, r0\n"
  2263. - LLSC_END("%1")
  2264. - : "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
  2265. - return old;
  2266. -}
  2267. -
  2268. -static inline void __sh_store_llsc(volatile int *p, int x)
  2269. -{
  2270. - __asm__ __volatile__(
  2271. - " synco\n"
  2272. - " mov.l %1, @%0\n"
  2273. - " synco\n"
  2274. - : : "r"(p), "r"(x) : "memory");
  2275. -}
  2276. -
  2277. -static inline void __sh_and_llsc(volatile int *x, int v)
  2278. -{
  2279. - __asm__ __volatile__(
  2280. - LLSC_START("%0")
  2281. - " and %1, r0\n"
  2282. - LLSC_END("%0")
  2283. - : : "r"(x), "r"(v) : LLSC_CLOBBERS);
  2284. -}
  2285. -
  2286. -static inline void __sh_or_llsc(volatile int *x, int v)
  2287. -{
  2288. - __asm__ __volatile__(
  2289. - LLSC_START("%0")
  2290. - " or %1, r0\n"
  2291. - LLSC_END("%0")
  2292. - : : "r"(x), "r"(v) : LLSC_CLOBBERS);
  2293. -}
  2294. -
  2295. -#ifdef __SH4A__
  2296. -#define a_cas(p,t,s) __sh_cas_llsc(p,t,s)
  2297. -#define a_swap(x,v) __sh_swap_llsc(x,v)
  2298. -#define a_fetch_add(x,v) __sh_fetch_add_llsc(x, v)
  2299. -#define a_store(x,v) __sh_store_llsc(x, v)
  2300. -#define a_and(x,v) __sh_and_llsc(x, v)
  2301. -#define a_or(x,v) __sh_or_llsc(x, v)
  2302. -#else
  2303. -
  2304. -int __sh_cas(volatile int *, int, int);
  2305. -int __sh_swap(volatile int *, int);
  2306. -int __sh_fetch_add(volatile int *, int);
  2307. -void __sh_store(volatile int *, int);
  2308. -void __sh_and(volatile int *, int);
  2309. -void __sh_or(volatile int *, int);
  2310. -
  2311. -#define a_cas(p,t,s) __sh_cas(p,t,s)
  2312. -#define a_swap(x,v) __sh_swap(x,v)
  2313. -#define a_fetch_add(x,v) __sh_fetch_add(x, v)
  2314. -#define a_store(x,v) __sh_store(x, v)
  2315. -#define a_and(x,v) __sh_and(x, v)
  2316. -#define a_or(x,v) __sh_or(x, v)
  2317. -#endif
  2318. -
  2319. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  2320. -{
  2321. - return (void *)a_cas(p, (int)t, (int)s);
  2322. -}
  2323. -
  2324. -static inline void a_inc(volatile int *x)
  2325. -{
  2326. - a_fetch_add(x, 1);
  2327. -}
  2328. -
  2329. -static inline void a_dec(volatile int *x)
  2330. -{
  2331. - a_fetch_add(x, -1);
  2332. -}
  2333. -
  2334. -#define a_spin a_barrier
  2335. -
  2336. -static inline void a_barrier()
  2337. -{
  2338. - a_cas(&(int){0}, 0, 0);
  2339. -}
  2340. -
  2341. -static inline void a_crash()
  2342. -{
  2343. - *(volatile char *)0=0;
  2344. -}
  2345. -
  2346. -static inline void a_or_l(volatile void *p, long v)
  2347. -{
  2348. - a_or(p, v);
  2349. -}
  2350. -
  2351. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  2352. -{
  2353. - union { uint64_t v; uint32_t r[2]; } u = { v };
  2354. - a_and((int *)p, u.r[0]);
  2355. - a_and((int *)p+1, u.r[1]);
  2356. -}
  2357. -
  2358. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  2359. -{
  2360. - union { uint64_t v; uint32_t r[2]; } u = { v };
  2361. - a_or((int *)p, u.r[0]);
  2362. - a_or((int *)p+1, u.r[1]);
  2363. -}
  2364. -
  2365. -#endif
  2366. --- /dev/null
  2367. +++ b/arch/sh/atomic_arch.h
  2368. @@ -0,0 +1,46 @@
  2369. +#if defined(__SH4A__)
  2370. +
  2371. +#define a_ll a_ll
  2372. +static inline int a_ll(volatile int *p)
  2373. +{
  2374. + int v;
  2375. + __asm__ __volatile__ ("movli.l @%1, %0" : "=z"(v) : "r"(p), "m"(*p));
  2376. + return v;
  2377. +}
  2378. +
  2379. +#define a_sc a_sc
  2380. +static inline int a_sc(volatile int *p, int v)
  2381. +{
  2382. + int r;
  2383. + __asm__ __volatile__ (
  2384. + "movco.l %2, @%3 ; movt %0"
  2385. + : "=r"(r), "=m"(*p) : "z"(v), "r"(p) : "memory", "cc");
  2386. + return r;
  2387. +}
  2388. +
  2389. +#define a_barrier a_barrier
  2390. +static inline void a_barrier()
  2391. +{
  2392. + __asm__ __volatile__ ("synco" : : "memory");
  2393. +}
  2394. +
  2395. +#define a_pre_llsc a_barrier
  2396. +#define a_post_llsc a_barrier
  2397. +
  2398. +#else
  2399. +
  2400. +#define a_cas a_cas
  2401. +__attribute__((__visibility__("hidden"))) extern const void *__sh_cas_ptr;
  2402. +static inline int a_cas(volatile int *p, int t, int s)
  2403. +{
  2404. + register int r1 __asm__("r1");
  2405. + register int r2 __asm__("r2") = t;
  2406. + register int r3 __asm__("r3") = s;
  2407. + __asm__ __volatile__ (
  2408. + "jsr @%4 ; nop"
  2409. + : "=r"(r1), "+r"(r3) : "z"(p), "r"(r2), "r"(__sh_cas_ptr)
  2410. + : "memory", "pr", "cc");
  2411. + return r3;
  2412. +}
  2413. +
  2414. +#endif
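On pre-SH4A builds, a_cas now funnels through one runtime-selected routine reached via __sh_cas_ptr, replacing the per-operation __sh_atomic_model switches in arch/sh/src/atomic.c (deleted below). The remaining read-modify-write ops then derive from a_cas like everywhere else; the bitwise pair, as the removed per-arch headers wrote them:

    /* Sketch: a_and/a_or from CAS, matching the removed per-arch copies. */
    static inline void a_and(volatile int *p, int v)
    {
        int old;
        do old = *p;
        while (a_cas(p, old, old & v) != old);
    }

    static inline void a_or(volatile int *p, int v)
    {
        int old;
        do old = *p;
        while (a_cas(p, old, old | v) != old);
    }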
  2415. --- a/arch/sh/crt_arch.h
  2416. +++ b/arch/sh/crt_arch.h
  2417. @@ -22,7 +22,8 @@ START ": \n"
  2418. " mov.l 1f, r5 \n"
  2419. " mov.l 1f+4, r6 \n"
  2420. " add r0, r5 \n"
  2421. -" bsr __fdpic_fixup \n"
  2422. +" mov.l 4f, r1 \n"
  2423. +"5: bsrf r1 \n"
  2424. " add r0, r6 \n"
  2425. " mov r0, r12 \n"
  2426. #endif
  2427. @@ -31,11 +32,16 @@ START ": \n"
  2428. " mov.l r9, @-r15 \n"
  2429. " mov.l r8, @-r15 \n"
  2430. " mov #-16, r0 \n"
  2431. -" bsr " START "_c \n"
  2432. +" mov.l 2f, r1 \n"
  2433. +"3: bsrf r1 \n"
  2434. " and r0, r15 \n"
  2435. ".align 2 \n"
  2436. "1: .long __ROFIXUP_LIST__@PCREL \n"
  2437. " .long __ROFIXUP_END__@PCREL + 4 \n"
  2438. +"2: .long " START "_c@PCREL - (3b+4-.) \n"
  2439. +#ifndef SHARED
  2440. +"4: .long __fdpic_fixup@PCREL - (5b+4-.) \n"
  2441. +#endif
  2442. );
  2443. #ifndef SHARED
  2444. @@ -53,13 +59,14 @@ START ": \n"
  2445. " add r0, r5 \n"
  2446. " mov r15, r4 \n"
  2447. " mov #-16, r0 \n"
  2448. -" and r0, r15 \n"
  2449. -" bsr " START "_c \n"
  2450. -" nop \n"
  2451. +" mov.l 2f, r1 \n"
  2452. +"3: bsrf r1 \n"
  2453. +" and r0, r15 \n"
  2454. ".align 2 \n"
  2455. ".weak _DYNAMIC \n"
  2456. ".hidden _DYNAMIC \n"
  2457. "1: .long _DYNAMIC-. \n"
  2458. +"2: .long " START "_c@PCREL - (3b+4-.) \n"
  2459. );
  2460. #endif
  2461. --- a/arch/sh/pthread_arch.h
  2462. +++ b/arch/sh/pthread_arch.h
  2463. @@ -8,4 +8,4 @@ static inline struct pthread *__pthread_
  2464. #define TLS_ABOVE_TP
  2465. #define TP_ADJ(p) ((char *)(p) + sizeof(struct pthread) - 8)
  2466. -#define CANCEL_REG_IP 17
  2467. +#define MC_PC sc_pc
  2468. --- a/arch/sh/reloc.h
  2469. +++ b/arch/sh/reloc.h
  2470. @@ -32,6 +32,8 @@
  2471. #define REL_DTPOFF R_SH_TLS_DTPOFF32
  2472. #define REL_TPOFF R_SH_TLS_TPOFF32
  2473. +#define DL_NOMMU_SUPPORT 1
  2474. +
  2475. #if __SH_FDPIC__
  2476. #define REL_FUNCDESC R_SH_FUNCDESC
  2477. #define REL_FUNCDESC_VAL R_SH_FUNCDESC_VALUE
  2478. --- a/arch/sh/src/__set_thread_area.c
  2479. +++ /dev/null
  2480. @@ -1,34 +0,0 @@
  2481. -#include "pthread_impl.h"
  2482. -#include "libc.h"
  2483. -#include "sh_atomic.h"
  2484. -#include <elf.h>
  2485. -
  2486. -/* Also perform sh-specific init */
  2487. -
  2488. -#define CPU_HAS_LLSC 0x0040
  2489. -
  2490. -__attribute__((__visibility__("hidden"))) unsigned __sh_atomic_model, __sh_nommu;
  2491. -
  2492. -int __set_thread_area(void *p)
  2493. -{
  2494. - size_t *aux;
  2495. - __asm__ __volatile__ ( "ldc %0, gbr" : : "r"(p) : "memory" );
  2496. -#ifndef __SH4A__
  2497. - if (__hwcap & CPU_HAS_LLSC) {
  2498. - __sh_atomic_model = SH_A_LLSC;
  2499. - return 0;
  2500. - }
  2501. -#if !defined(__SH3__) && !defined(__SH4__)
  2502. - for (aux=libc.auxv; *aux; aux+=2) {
  2503. - if (*aux != AT_PLATFORM) continue;
  2504. - const char *s = (void *)aux[1];
  2505. - if (s[0]!='s' || s[1]!='h' || s[2]!='2' || s[3]-'0'<10u) break;
  2506. - __sh_atomic_model = SH_A_IMASK;
  2507. - __sh_nommu = 1;
  2508. - return 0;
  2509. - }
  2510. -#endif
  2511. - /* __sh_atomic_model = SH_A_GUSA; */ /* 0, default */
  2512. -#endif
  2513. - return 0;
  2514. -}
  2515. --- a/arch/sh/src/atomic.c
  2516. +++ /dev/null
  2517. @@ -1,158 +0,0 @@
  2518. -#ifndef __SH4A__
  2519. -
  2520. -#include "sh_atomic.h"
  2521. -#include "atomic.h"
  2522. -#include "libc.h"
  2523. -
  2524. -static inline unsigned mask()
  2525. -{
  2526. - unsigned sr;
  2527. - __asm__ __volatile__ ( "\n"
  2528. - " stc sr,r0 \n"
  2529. - " mov r0,%0 \n"
  2530. - " or #0xf0,r0 \n"
  2531. - " ldc r0,sr \n"
  2532. - : "=&r"(sr) : : "memory", "r0" );
  2533. - return sr;
  2534. -}
  2535. -
  2536. -static inline void unmask(unsigned sr)
  2537. -{
  2538. - __asm__ __volatile__ ( "ldc %0,sr" : : "r"(sr) : "memory" );
  2539. -}
  2540. -
  2541. -/* gusa is a hack in the kernel which lets you create a sequence of instructions
  2542. - * which will be restarted if the process is preempted in the middle of the
  2543. - * sequence. It will do for implementing atomics on non-smp systems. ABI is:
  2544. - * r0 = address of first instruction after the atomic sequence
  2545. - * r1 = original stack pointer
  2546. - * r15 = -1 * length of atomic sequence in bytes
  2547. - */
  2548. -#define GUSA_CLOBBERS "r0", "r1", "memory"
  2549. -#define GUSA_START(mem,old,nop) \
  2550. - " .align 2\n" \
  2551. - " mova 1f, r0\n" \
  2552. - nop \
  2553. - " mov r15, r1\n" \
  2554. - " mov #(0f-1f), r15\n" \
  2555. - "0: mov.l @" mem ", " old "\n"
  2556. -/* the target of mova must be 4 byte aligned, so we may need a nop */
  2557. -#define GUSA_START_ODD(mem,old) GUSA_START(mem,old,"")
  2558. -#define GUSA_START_EVEN(mem,old) GUSA_START(mem,old,"\tnop\n")
  2559. -#define GUSA_END(mem,new) \
  2560. - " mov.l " new ", @" mem "\n" \
  2561. - "1: mov r1, r15\n"
  2562. -
  2563. -int __sh_cas(volatile int *p, int t, int s)
  2564. -{
  2565. - if (__sh_atomic_model == SH_A_LLSC) return __sh_cas_llsc(p, t, s);
  2566. -
  2567. - if (__sh_atomic_model == SH_A_IMASK) {
  2568. - unsigned sr = mask();
  2569. - int old = *p;
  2570. - if (old==t) *p = s;
  2571. - unmask(sr);
  2572. - return old;
  2573. - }
  2574. -
  2575. - int old;
  2576. - __asm__ __volatile__(
  2577. - GUSA_START_EVEN("%1", "%0")
  2578. - " cmp/eq %0, %2\n"
  2579. - " bf 1f\n"
  2580. - GUSA_END("%1", "%3")
  2581. - : "=&r"(old) : "r"(p), "r"(t), "r"(s) : GUSA_CLOBBERS, "t");
  2582. - return old;
  2583. -}
  2584. -
  2585. -int __sh_swap(volatile int *x, int v)
  2586. -{
  2587. - if (__sh_atomic_model == SH_A_LLSC) return __sh_swap_llsc(x, v);
  2588. -
  2589. - if (__sh_atomic_model == SH_A_IMASK) {
  2590. - unsigned sr = mask();
  2591. - int old = *x;
  2592. - *x = v;
  2593. - unmask(sr);
  2594. - return old;
  2595. - }
  2596. -
  2597. - int old;
  2598. - __asm__ __volatile__(
  2599. - GUSA_START_EVEN("%1", "%0")
  2600. - GUSA_END("%1", "%2")
  2601. - : "=&r"(old) : "r"(x), "r"(v) : GUSA_CLOBBERS);
  2602. - return old;
  2603. -}
  2604. -
  2605. -int __sh_fetch_add(volatile int *x, int v)
  2606. -{
  2607. - if (__sh_atomic_model == SH_A_LLSC) return __sh_fetch_add_llsc(x, v);
  2608. -
  2609. - if (__sh_atomic_model == SH_A_IMASK) {
  2610. - unsigned sr = mask();
  2611. - int old = *x;
  2612. - *x = old + v;
  2613. - unmask(sr);
  2614. - return old;
  2615. - }
  2616. -
  2617. - int old, dummy;
  2618. - __asm__ __volatile__(
  2619. - GUSA_START_EVEN("%2", "%0")
  2620. - " mov %0, %1\n"
  2621. - " add %3, %1\n"
  2622. - GUSA_END("%2", "%1")
  2623. - : "=&r"(old), "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
  2624. - return old;
  2625. -}
  2626. -
  2627. -void __sh_store(volatile int *p, int x)
  2628. -{
  2629. - if (__sh_atomic_model == SH_A_LLSC) return __sh_store_llsc(p, x);
  2630. - __asm__ __volatile__(
  2631. - " mov.l %1, @%0\n"
  2632. - : : "r"(p), "r"(x) : "memory");
  2633. -}
  2634. -
  2635. -void __sh_and(volatile int *x, int v)
  2636. -{
  2637. - if (__sh_atomic_model == SH_A_LLSC) return __sh_and_llsc(x, v);
  2638. -
  2639. - if (__sh_atomic_model == SH_A_IMASK) {
  2640. - unsigned sr = mask();
  2641. - int old = *x;
  2642. - *x = old & v;
  2643. - unmask(sr);
  2644. - return;
  2645. - }
  2646. -
  2647. - int dummy;
  2648. - __asm__ __volatile__(
  2649. - GUSA_START_ODD("%1", "%0")
  2650. - " and %2, %0\n"
  2651. - GUSA_END("%1", "%0")
  2652. - : "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
  2653. -}
  2654. -
  2655. -void __sh_or(volatile int *x, int v)
  2656. -{
  2657. - if (__sh_atomic_model == SH_A_LLSC) return __sh_or_llsc(x, v);
  2658. -
  2659. - if (__sh_atomic_model == SH_A_IMASK) {
  2660. - unsigned sr = mask();
  2661. - int old = *x;
  2662. - *x = old | v;
  2663. - unmask(sr);
  2664. - return;
  2665. - }
  2666. -
  2667. - int dummy;
  2668. - __asm__ __volatile__(
  2669. - GUSA_START_ODD("%1", "%0")
  2670. - " or %2, %0\n"
  2671. - GUSA_END("%1", "%0")
  2672. - : "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
  2673. -}
  2674. -
  2675. -#endif
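For reference, the SH_A_IMASK model above is the SH-2/no-MMU strategy: on a uniprocessor, raising the interrupt mask around a plain read-modify-write makes it atomic. Schematically, using the mask()/unmask() helpers defined in the deleted file:

    /* Sketch of the imask model: uniprocessor-only atomicity by blocking
     * interrupts around an ordinary read-modify-write. */
    static int cas_imask(volatile int *p, int t, int s)
    {
        unsigned sr = mask();   /* save SR, raise the interrupt mask */
        int old = *p;
        if (old == t) *p = s;
        unmask(sr);             /* restore the saved SR */
        return old;
    }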
  2676. --- a/arch/sh/src/sh_atomic.h
  2677. +++ /dev/null
  2678. @@ -1,15 +0,0 @@
  2679. -#ifndef _SH_ATOMIC_H
  2680. -#define _SH_ATOMIC_H
  2681. -
  2682. -#define SH_A_GUSA 0
  2683. -#define SH_A_LLSC 1
  2684. -#define SH_A_CAS 2
  2685. -#if !defined(__SH3__) && !defined(__SH4__)
  2686. -#define SH_A_IMASK 3
  2687. -#else
  2688. -#define SH_A_IMASK -1LL /* unmatchable by unsigned int */
  2689. -#endif
  2690. -
  2691. -extern __attribute__((__visibility__("hidden"))) unsigned __sh_atomic_model;
  2692. -
  2693. -#endif
  2694. --- a/arch/x32/atomic.h
  2695. +++ /dev/null
  2696. @@ -1,105 +0,0 @@
  2697. -#ifndef _INTERNAL_ATOMIC_H
  2698. -#define _INTERNAL_ATOMIC_H
  2699. -
  2700. -#include <stdint.h>
  2701. -
  2702. -static inline int a_ctz_64(uint64_t x)
  2703. -{
  2704. - __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
  2705. - return x;
  2706. -}
  2707. -
  2708. -static inline int a_ctz_l(unsigned long x)
  2709. -{
  2710. - __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
  2711. - return x;
  2712. -}
  2713. -
  2714. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  2715. -{
  2716. - __asm__( "lock ; and %1, %0"
  2717. - : "=m"(*p) : "r"(v) : "memory" );
  2718. -}
  2719. -
  2720. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  2721. -{
  2722. - __asm__( "lock ; or %1, %0"
  2723. - : "=m"(*p) : "r"(v) : "memory" );
  2724. -}
  2725. -
  2726. -static inline void a_or_l(volatile void *p, long v)
  2727. -{
  2728. - __asm__( "lock ; or %1, %0"
  2729. - : "=m"(*(long *)p) : "r"(v) : "memory" );
  2730. -}
  2731. -
  2732. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  2733. -{
  2734. - __asm__( "lock ; cmpxchg %3, %1"
  2735. - : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
  2736. - return t;
  2737. -}
  2738. -
  2739. -static inline int a_cas(volatile int *p, int t, int s)
  2740. -{
  2741. - __asm__( "lock ; cmpxchg %3, %1"
  2742. - : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
  2743. - return t;
  2744. -}
  2745. -
  2746. -static inline void a_or(volatile int *p, int v)
  2747. -{
  2748. - __asm__( "lock ; or %1, %0"
  2749. - : "=m"(*p) : "r"(v) : "memory" );
  2750. -}
  2751. -
  2752. -static inline void a_and(volatile int *p, int v)
  2753. -{
  2754. - __asm__( "lock ; and %1, %0"
  2755. - : "=m"(*p) : "r"(v) : "memory" );
  2756. -}
  2757. -
  2758. -static inline int a_swap(volatile int *x, int v)
  2759. -{
  2760. - __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  2761. - return v;
  2762. -}
  2763. -
  2764. -static inline int a_fetch_add(volatile int *x, int v)
  2765. -{
  2766. - __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  2767. - return v;
  2768. -}
  2769. -
  2770. -static inline void a_inc(volatile int *x)
  2771. -{
  2772. - __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
  2773. -}
  2774. -
  2775. -static inline void a_dec(volatile int *x)
  2776. -{
  2777. - __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
  2778. -}
  2779. -
  2780. -static inline void a_store(volatile int *p, int x)
  2781. -{
  2782. - __asm__( "mov %1, %0 ; lock ; orl $0,(%%rsp)" : "=m"(*p) : "r"(x) : "memory" );
  2783. -}
  2784. -
  2785. -static inline void a_spin()
  2786. -{
  2787. - __asm__ __volatile__( "pause" : : : "memory" );
  2788. -}
  2789. -
  2790. -static inline void a_barrier()
  2791. -{
  2792. - __asm__ __volatile__( "" : : : "memory" );
  2793. -}
  2794. -
  2795. -static inline void a_crash()
  2796. -{
  2797. - __asm__ __volatile__( "hlt" : : : "memory" );
  2798. -}
  2799. -
  2800. -
  2801. -#endif
  2802. --- /dev/null
  2803. +++ b/arch/x32/atomic_arch.h
  2804. @@ -0,0 +1,106 @@
  2805. +#define a_ctz_64 a_ctz_64
  2806. +static inline int a_ctz_64(uint64_t x)
  2807. +{
  2808. + __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
  2809. + return x;
  2810. +}
  2811. +
  2812. +#define a_ctz_l a_ctz_l
  2813. +static inline int a_ctz_l(unsigned long x)
  2814. +{
  2815. + __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
  2816. + return x;
  2817. +}
  2818. +
  2819. +#define a_and_64 a_and_64
  2820. +static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  2821. +{
  2822. + __asm__( "lock ; and %1, %0"
  2823. + : "=m"(*p) : "r"(v) : "memory" );
  2824. +}
  2825. +
  2826. +#define a_or_64 a_or_64
  2827. +static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  2828. +{
  2829. + __asm__( "lock ; or %1, %0"
  2830. + : "=m"(*p) : "r"(v) : "memory" );
  2831. +}
  2832. +
  2833. +#define a_or_l a_or_l
  2834. +static inline void a_or_l(volatile void *p, long v)
  2835. +{
  2836. + __asm__( "lock ; or %1, %0"
  2837. + : "=m"(*(long *)p) : "r"(v) : "memory" );
  2838. +}
  2839. +
  2840. +#define a_cas a_cas
  2841. +static inline int a_cas(volatile int *p, int t, int s)
  2842. +{
  2843. + __asm__( "lock ; cmpxchg %3, %1"
  2844. + : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
  2845. + return t;
  2846. +}
  2847. +
  2848. +#define a_or a_or
  2849. +static inline void a_or(volatile int *p, int v)
  2850. +{
  2851. + __asm__( "lock ; or %1, %0"
  2852. + : "=m"(*p) : "r"(v) : "memory" );
  2853. +}
  2854. +
  2855. +#define a_and a_and
  2856. +static inline void a_and(volatile int *p, int v)
  2857. +{
  2858. + __asm__( "lock ; and %1, %0"
  2859. + : "=m"(*p) : "r"(v) : "memory" );
  2860. +}
  2861. +
  2862. +#define a_swap a_swap
  2863. +static inline int a_swap(volatile int *x, int v)
  2864. +{
  2865. + __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  2866. + return v;
  2867. +}
  2868. +
  2869. +#define a_fetch_add a_fetch_add
  2870. +static inline int a_fetch_add(volatile int *x, int v)
  2871. +{
  2872. + __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  2873. + return v;
  2874. +}
  2875. +
  2876. +#define a_inc a_inc
  2877. +static inline void a_inc(volatile int *x)
  2878. +{
  2879. + __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
  2880. +}
  2881. +
  2882. +#define a_dec a_dec
  2883. +static inline void a_dec(volatile int *x)
  2884. +{
  2885. + __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
  2886. +}
  2887. +
  2888. +#define a_store a_store
  2889. +static inline void a_store(volatile int *p, int x)
  2890. +{
  2891. + __asm__( "mov %1, %0 ; lock ; orl $0,(%%rsp)" : "=m"(*p) : "r"(x) : "memory" );
  2892. +}
  2893. +
  2894. +#define a_spin a_spin
  2895. +static inline void a_spin()
  2896. +{
  2897. + __asm__ __volatile__( "pause" : : : "memory" );
  2898. +}
  2899. +
  2900. +#define a_barrier a_barrier
  2901. +static inline void a_barrier()
  2902. +{
  2903. + __asm__ __volatile__( "" : : : "memory" );
  2904. +}
  2905. +
  2906. +#define a_crash a_crash
  2907. +static inline void a_crash()
  2908. +{
  2909. + __asm__ __volatile__( "hlt" : : : "memory" );
  2910. +}
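
The "#define a_cas a_cas" pattern above is how the new per-arch headers advertise which primitives they provide: the generic src/internal/atomic.h added later in this patch wraps every portable fallback in #ifndef, so an arch definition simply pre-empts it. A minimal sketch of the idiom with hypothetical names (not musl identifiers):

    /* arch header: defining the macro marks the op as provided */
    #define my_op my_op
    static inline int my_op(int x) { return x + 1; }

    /* generic header: fallback compiles only if no arch version exists */
    #ifndef my_op
    #define my_op my_op
    static inline int my_op(int x) { return x; }
    #endif
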
  2911. --- a/arch/x32/pthread_arch.h
  2912. +++ b/arch/x32/pthread_arch.h
  2913. @@ -7,6 +7,6 @@ static inline struct pthread *__pthread_
  2914. #define TP_ADJ(p) (p)
  2915. -#define CANCEL_REG_IP 32
  2916. +#define MC_PC gregs[REG_RIP]
  2917. #define CANARY canary2
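
CANCEL_REG_IP was a raw index into the signal frame's register array; MC_PC instead names the program-counter field inside mcontext_t, so shared code can read and patch the PC through one macro. A sketch of how a signal handler reaches that field (assumes _GNU_SOURCE so the gregs/REG_RIP names are visible; the handler itself is hypothetical):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <ucontext.h>
    #include <stdint.h>

    static void handler(int sig, siginfo_t *si, void *ctx)
    {
        ucontext_t *uc = ctx;
        /* MC_PC expands to gregs[REG_RIP] on x32/x86_64 */
        uintptr_t pc = (uintptr_t)uc->uc_mcontext.gregs[REG_RIP];
        (void)sig; (void)si; (void)pc;
    }
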
  2918. --- a/arch/x32/src/syscall_cp_fixup.c
  2919. +++ b/arch/x32/src/syscall_cp_fixup.c
  2920. @@ -1,8 +1,6 @@
  2921. #include <sys/syscall.h>
  2922. -#ifdef SHARED
  2923. __attribute__((__visibility__("hidden")))
  2924. -#endif
  2925. long __syscall_cp_internal(volatile void*, long long, long long, long long, long long,
  2926. long long, long long, long long);
  2927. @@ -14,9 +12,7 @@ struct __timespec_kernel { long long tv_
  2928. ts->tv_nsec = __tsc(X)->tv_nsec; \
  2929. (X) = (unsigned long)ts; } } while(0)
  2930. -#ifdef SHARED
  2931. __attribute__((__visibility__("hidden")))
  2932. -#endif
  2933. long __syscall_cp_asm (volatile void * foo, long long n, long long a1, long long a2, long long a3,
  2934. long long a4, long long a5, long long a6)
  2935. {
  2936. --- a/arch/x86_64/atomic.h
  2937. +++ /dev/null
  2938. @@ -1,105 +0,0 @@
  2939. -#ifndef _INTERNAL_ATOMIC_H
  2940. -#define _INTERNAL_ATOMIC_H
  2941. -
  2942. -#include <stdint.h>
  2943. -
  2944. -static inline int a_ctz_64(uint64_t x)
  2945. -{
  2946. - __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
  2947. - return x;
  2948. -}
  2949. -
  2950. -static inline int a_ctz_l(unsigned long x)
  2951. -{
  2952. - __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
  2953. - return x;
  2954. -}
  2955. -
  2956. -static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  2957. -{
  2958. - __asm__( "lock ; and %1, %0"
  2959. - : "=m"(*p) : "r"(v) : "memory" );
  2960. -}
  2961. -
  2962. -static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  2963. -{
  2964. - __asm__( "lock ; or %1, %0"
  2965. - : "=m"(*p) : "r"(v) : "memory" );
  2966. -}
  2967. -
  2968. -static inline void a_or_l(volatile void *p, long v)
  2969. -{
  2970. - __asm__( "lock ; or %1, %0"
  2971. - : "=m"(*(long *)p) : "r"(v) : "memory" );
  2972. -}
  2973. -
  2974. -static inline void *a_cas_p(volatile void *p, void *t, void *s)
  2975. -{
  2976. - __asm__( "lock ; cmpxchg %3, %1"
  2977. - : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
  2978. - return t;
  2979. -}
  2980. -
  2981. -static inline int a_cas(volatile int *p, int t, int s)
  2982. -{
  2983. - __asm__( "lock ; cmpxchg %3, %1"
  2984. - : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
  2985. - return t;
  2986. -}
  2987. -
  2988. -static inline void a_or(volatile int *p, int v)
  2989. -{
  2990. - __asm__( "lock ; or %1, %0"
  2991. - : "=m"(*p) : "r"(v) : "memory" );
  2992. -}
  2993. -
  2994. -static inline void a_and(volatile int *p, int v)
  2995. -{
  2996. - __asm__( "lock ; and %1, %0"
  2997. - : "=m"(*p) : "r"(v) : "memory" );
  2998. -}
  2999. -
  3000. -static inline int a_swap(volatile int *x, int v)
  3001. -{
  3002. - __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  3003. - return v;
  3004. -}
  3005. -
  3006. -static inline int a_fetch_add(volatile int *x, int v)
  3007. -{
  3008. - __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  3009. - return v;
  3010. -}
  3011. -
  3012. -static inline void a_inc(volatile int *x)
  3013. -{
  3014. - __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
  3015. -}
  3016. -
  3017. -static inline void a_dec(volatile int *x)
  3018. -{
  3019. - __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
  3020. -}
  3021. -
  3022. -static inline void a_store(volatile int *p, int x)
  3023. -{
  3024. - __asm__( "mov %1, %0 ; lock ; orl $0,(%%rsp)" : "=m"(*p) : "r"(x) : "memory" );
  3025. -}
  3026. -
  3027. -static inline void a_spin()
  3028. -{
  3029. - __asm__ __volatile__( "pause" : : : "memory" );
  3030. -}
  3031. -
  3032. -static inline void a_barrier()
  3033. -{
  3034. - __asm__ __volatile__( "" : : : "memory" );
  3035. -}
  3036. -
  3037. -static inline void a_crash()
  3038. -{
  3039. - __asm__ __volatile__( "hlt" : : : "memory" );
  3040. -}
  3041. -
  3042. -
  3043. -#endif
  3044. --- /dev/null
  3045. +++ b/arch/x86_64/atomic_arch.h
  3046. @@ -0,0 +1,107 @@
  3047. +#define a_ctz_64 a_ctz_64
  3048. +static inline int a_ctz_64(uint64_t x)
  3049. +{
  3050. + __asm__( "bsf %1,%0" : "=r"(x) : "r"(x) );
  3051. + return x;
  3052. +}
  3053. +
  3054. +#define a_and_64 a_and_64
  3055. +static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  3056. +{
  3057. + __asm__( "lock ; and %1, %0"
  3058. + : "=m"(*p) : "r"(v) : "memory" );
  3059. +}
  3060. +
  3061. +#define a_or_64 a_or_64
  3062. +static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  3063. +{
  3064. + __asm__( "lock ; or %1, %0"
  3065. + : "=m"(*p) : "r"(v) : "memory" );
  3066. +}
  3067. +
  3068. +#define a_or_l a_or_l
  3069. +static inline void a_or_l(volatile void *p, long v)
  3070. +{
  3071. + __asm__( "lock ; or %1, %0"
  3072. + : "=m"(*(long *)p) : "r"(v) : "memory" );
  3073. +}
  3074. +
  3075. +#define a_cas_p a_cas_p
  3076. +static inline void *a_cas_p(volatile void *p, void *t, void *s)
  3077. +{
  3078. + __asm__( "lock ; cmpxchg %3, %1"
  3079. + : "=a"(t), "=m"(*(long *)p) : "a"(t), "r"(s) : "memory" );
  3080. + return t;
  3081. +}
  3082. +
  3083. +#define a_cas a_cas
  3084. +static inline int a_cas(volatile int *p, int t, int s)
  3085. +{
  3086. + __asm__( "lock ; cmpxchg %3, %1"
  3087. + : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" );
  3088. + return t;
  3089. +}
  3090. +
  3091. +#define a_or a_or
  3092. +static inline void a_or(volatile int *p, int v)
  3093. +{
  3094. + __asm__( "lock ; or %1, %0"
  3095. + : "=m"(*p) : "r"(v) : "memory" );
  3096. +}
  3097. +
  3098. +#define a_and a_and
  3099. +static inline void a_and(volatile int *p, int v)
  3100. +{
  3101. + __asm__( "lock ; and %1, %0"
  3102. + : "=m"(*p) : "r"(v) : "memory" );
  3103. +}
  3104. +
  3105. +#define a_swap a_swap
  3106. +static inline int a_swap(volatile int *x, int v)
  3107. +{
  3108. + __asm__( "xchg %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  3109. + return v;
  3110. +}
  3111. +
  3112. +#define a_fetch_add a_fetch_add
  3113. +static inline int a_fetch_add(volatile int *x, int v)
  3114. +{
  3115. + __asm__( "lock ; xadd %0, %1" : "=r"(v), "=m"(*x) : "0"(v) : "memory" );
  3116. + return v;
  3117. +}
  3118. +
  3119. +#define a_inc a_inc
  3120. +static inline void a_inc(volatile int *x)
  3121. +{
  3122. + __asm__( "lock ; incl %0" : "=m"(*x) : "m"(*x) : "memory" );
  3123. +}
  3124. +
  3125. +#define a_dec a_dec
  3126. +static inline void a_dec(volatile int *x)
  3127. +{
  3128. + __asm__( "lock ; decl %0" : "=m"(*x) : "m"(*x) : "memory" );
  3129. +}
  3130. +
  3131. +#define a_store a_store
  3132. +static inline void a_store(volatile int *p, int x)
  3133. +{
  3134. + __asm__( "mov %1, %0 ; lock ; orl $0,(%%rsp)" : "=m"(*p) : "r"(x) : "memory" );
  3135. +}
  3136. +
  3137. +#define a_spin a_spin
  3138. +static inline void a_spin()
  3139. +{
  3140. + __asm__ __volatile__( "pause" : : : "memory" );
  3141. +}
  3142. +
  3143. +#define a_barrier a_barrier
  3144. +static inline void a_barrier()
  3145. +{
  3146. + __asm__ __volatile__( "" : : : "memory" );
  3147. +}
  3148. +
  3149. +#define a_crash a_crash
  3150. +static inline void a_crash()
  3151. +{
  3152. + __asm__ __volatile__( "hlt" : : : "memory" );
  3153. +}
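
These are drop-in x86_64 implementations of the same contract: every op is a full barrier, and the value-returning forms return the old value. As a rough usage illustration only (lock_acquire/lock_release are not musl functions), a test-and-set spinlock needs nothing beyond a_cas, a_spin and a_store:

    static volatile int lock;

    static void lock_acquire(void)
    {
        while (a_cas(&lock, 0, 1))   /* old value 0 means we took the lock */
            a_spin();
    }

    static void lock_release(void)
    {
        a_store(&lock, 0);           /* a_store provides the release barrier */
    }
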
  3154. --- a/arch/x86_64/pthread_arch.h
  3155. +++ b/arch/x86_64/pthread_arch.h
  3156. @@ -7,4 +7,4 @@ static inline struct pthread *__pthread_
  3157. #define TP_ADJ(p) (p)
  3158. -#define CANCEL_REG_IP 16
  3159. +#define MC_PC gregs[REG_RIP]
  3160. --- a/configure
  3161. +++ b/configure
  3162. @@ -9,6 +9,9 @@ VAR=VALUE. See below for descriptions o
  3163. Defaults for the options are specified in brackets.
  3164. +Configuration:
  3165. + --srcdir=DIR source directory [detected]
  3166. +
  3167. Installation directories:
  3168. --prefix=PREFIX main installation prefix [/usr/local/musl]
  3169. --exec-prefix=EPREFIX installation prefix for executable files [PREFIX]
  3170. @@ -117,6 +120,7 @@ CFLAGS_TRY=
  3171. LDFLAGS_AUTO=
  3172. LDFLAGS_TRY=
  3173. OPTIMIZE_GLOBS=
  3174. +srcdir=
  3175. prefix=/usr/local/musl
  3176. exec_prefix='$(prefix)'
  3177. bindir='$(exec_prefix)/bin'
  3178. @@ -139,6 +143,7 @@ clang_wrapper=no
  3179. for arg ; do
  3180. case "$arg" in
  3181. --help) usage ;;
  3182. +--srcdir=*) srcdir=${arg#*=} ;;
  3183. --prefix=*) prefix=${arg#*=} ;;
  3184. --exec-prefix=*) exec_prefix=${arg#*=} ;;
  3185. --bindir=*) bindir=${arg#*=} ;;
  3186. @@ -179,11 +184,23 @@ LIBCC=*) LIBCC=${arg#*=} ;;
  3187. esac
  3188. done
  3189. -for i in prefix exec_prefix bindir libdir includedir syslibdir ; do
  3190. +for i in srcdir prefix exec_prefix bindir libdir includedir syslibdir ; do
  3191. stripdir $i
  3192. done
  3193. #
  3194. +# Get the source dir for out-of-tree builds
  3195. +#
  3196. +if test -z "$srcdir" ; then
  3197. +srcdir="${0%/configure}"
  3198. +stripdir srcdir
  3199. +fi
  3200. +abs_builddir="$(pwd)" || fail "$0: cannot determine working directory"
  3201. +abs_srcdir="$(cd $srcdir && pwd)" || fail "$0: invalid source directory $srcdir"
  3202. +test "$abs_srcdir" = "$abs_builddir" && srcdir=.
  3203. +test "$srcdir" != "." -a -f Makefile -a ! -h Makefile && fail "$0: Makefile already exists in the working directory"
  3204. +
  3205. +#
  3206. # Get a temp filename we can use
  3207. #
  3208. i=0
  3209. @@ -263,11 +280,11 @@ fi
  3210. fi
  3211. if test "$gcc_wrapper" = yes ; then
  3212. -tools="$tools tools/musl-gcc"
  3213. +tools="$tools obj/musl-gcc"
  3214. tool_libs="$tool_libs lib/musl-gcc.specs"
  3215. fi
  3216. if test "$clang_wrapper" = yes ; then
  3217. -tools="$tools tools/musl-clang tools/ld.musl-clang"
  3218. +tools="$tools obj/musl-clang obj/ld.musl-clang"
  3219. fi
  3220. #
  3221. @@ -321,7 +338,7 @@ __attribute__((__may_alias__))
  3222. #endif
  3223. x;
  3224. EOF
  3225. -if $CC $CFLAGS_C99FSE -I./arch/$ARCH -I./include $CPPFLAGS $CFLAGS \
  3226. +if $CC $CFLAGS_C99FSE -I$srcdir/arch/$ARCH -I$srcdir/include $CPPFLAGS $CFLAGS \
  3227. -c -o /dev/null "$tmpc" >/dev/null 2>&1 ; then
  3228. printf "no\n"
  3229. else
  3230. @@ -330,6 +347,13 @@ CFLAGS_C99FSE="$CFLAGS_C99FSE -D__may_al
  3231. fi
  3232. #
  3233. +# The GNU toolchain defaults to assuming unmarked files need an
  3234. +# executable stack, potentially exposing vulnerabilities in programs
  3235. +# linked with such object files. Fix this.
  3236. +#
  3237. +tryflag CFLAGS_C99FSE -Wa,--noexecstack
  3238. +
  3239. +#
  3240. # Check for options to disable stack protector, which needs to be
  3241. # disabled for a few early-bootstrap translation units. If not found,
  3242. # this is not an error; we assume the toolchain does not do ssp.
  3243. @@ -430,11 +454,15 @@ tryflag CFLAGS_AUTO -fno-unwind-tables
  3244. tryflag CFLAGS_AUTO -fno-asynchronous-unwind-tables
  3245. #
  3246. -# The GNU toolchain defaults to assuming unmarked files need an
  3247. -# executable stack, potentially exposing vulnerabilities in programs
  3248. -# linked with such object files. Fix this.
  3249. +# Attempt to put each function and each data object in its own
  3250. +# section. This both allows additional size optimizations at link
  3251. +# time and works around a dangerous class of compiler/assembler bugs
  3252. +# whereby relative address expressions are constant-folded by the
  3253. +# assembler even when one or more of the symbols involved is
  3254. +# replaceable. See gas pr 18561 and gcc pr 66609, 68178, etc.
  3255. #
  3256. -tryflag CFLAGS_AUTO -Wa,--noexecstack
  3257. +tryflag CFLAGS_AUTO -ffunction-sections
  3258. +tryflag CFLAGS_AUTO -fdata-sections
  3259. #
  3260. # On x86, make sure we don't have incompatible instruction set
  3261. @@ -489,7 +517,7 @@ int foo(void) { }
  3262. int bar(void) { fp = foo; return foo(); }
  3263. EOF
  3264. if $CC $CFLAGS_C99FSE $CPPFLAGS $CFLAGS \
  3265. - -DSHARED -fPIC -I./src/internal -include vis.h \
  3266. + -DSHARED -fPIC -I$srcdir/src/internal -include vis.h \
  3267. -nostdlib -shared -Wl,-Bsymbolic-functions \
  3268. -o /dev/null "$tmpc" >/dev/null 2>&1 ; then
  3269. visibility=yes
  3270. @@ -504,6 +532,16 @@ CFLAGS_AUTO="$CFLAGS_AUTO -include vis.h
  3271. CFLAGS_AUTO="${CFLAGS_AUTO# }"
  3272. fi
  3273. +# Reduce space lost to padding for alignment purposes by sorting data
  3274. +# objects according to their alignment requirements. This approximates
  3275. +# optimal packing.
  3276. +tryldflag LDFLAGS_AUTO -Wl,--sort-section,alignment
  3277. +tryldflag LDFLAGS_AUTO -Wl,--sort-common
  3278. +
  3279. +# When linking the shared library, drop dummy weak definitions that were
  3280. +# replaced by strong definitions from other translation units.
  3281. +tryldflag LDFLAGS_AUTO -Wl,--gc-sections
  3282. +
  3283. # Some patched GCC builds have these defaults messed up...
  3284. tryldflag LDFLAGS_AUTO -Wl,--hash-style=both
  3285. @@ -513,6 +551,11 @@ tryldflag LDFLAGS_AUTO -Wl,--hash-style=
  3286. # runtime library; implementation error is also a possibility.
  3287. tryldflag LDFLAGS_AUTO -Wl,--no-undefined
  3288. +# Avoid exporting symbols from compiler runtime libraries. They
  3289. +# should be hidden anyway, but some toolchains including old gcc
  3290. +# versions built without shared library support and pcc are broken.
  3291. +tryldflag LDFLAGS_AUTO -Wl,--exclude-libs=ALL
  3292. +
  3293. test "$shared" = "no" || {
  3294. # Disable dynamic linking if ld is broken and can't do -Bsymbolic-functions
  3295. LDFLAGS_DUMMY=
  3296. @@ -599,7 +642,7 @@ echo '#include <float.h>' > "$tmpc"
  3297. echo '#if LDBL_MANT_DIG == 53' >> "$tmpc"
  3298. echo 'typedef char ldcheck[9-(int)sizeof(long double)];' >> "$tmpc"
  3299. echo '#endif' >> "$tmpc"
  3300. -if $CC $CFLAGS_C99FSE -I./arch/$ARCH -I./include $CPPFLAGS $CFLAGS \
  3301. +if $CC $CFLAGS_C99FSE -I$srcdir/arch/$ARCH -I$srcdir/include $CPPFLAGS $CFLAGS \
  3302. -c -o /dev/null "$tmpc" >/dev/null 2>&1 ; then
  3303. printf "yes\n"
  3304. else
  3305. @@ -622,6 +665,7 @@ cat << EOF
  3306. ARCH = $ARCH
  3307. SUBARCH = $SUBARCH
  3308. ASMSUBARCH = $ASMSUBARCH
  3309. +srcdir = $srcdir
  3310. prefix = $prefix
  3311. exec_prefix = $exec_prefix
  3312. bindir = $bindir
  3313. @@ -629,12 +673,14 @@ libdir = $libdir
  3314. includedir = $includedir
  3315. syslibdir = $syslibdir
  3316. CC = $CC
  3317. -CFLAGS = $CFLAGS_AUTO $CFLAGS
  3318. +CFLAGS = $CFLAGS
  3319. +CFLAGS_AUTO = $CFLAGS_AUTO
  3320. CFLAGS_C99FSE = $CFLAGS_C99FSE
  3321. CFLAGS_MEMOPS = $CFLAGS_MEMOPS
  3322. CFLAGS_NOSSP = $CFLAGS_NOSSP
  3323. CPPFLAGS = $CPPFLAGS
  3324. -LDFLAGS = $LDFLAGS_AUTO $LDFLAGS
  3325. +LDFLAGS = $LDFLAGS
  3326. +LDFLAGS_AUTO = $LDFLAGS_AUTO
  3327. CROSS_COMPILE = $CROSS_COMPILE
  3328. LIBCC = $LIBCC
  3329. OPTIMIZE_GLOBS = $OPTIMIZE_GLOBS
  3330. @@ -648,4 +694,6 @@ test "x$cc_family" = xgcc && echo 'WRAPC
  3331. test "x$cc_family" = xclang && echo 'WRAPCC_CLANG = $(CC)'
  3332. exec 1>&3 3>&-
  3333. +test "$srcdir" = "." || ln -sf $srcdir/Makefile .
  3334. +
  3335. printf "done\n"
  3336. --- a/crt/arm/crti.s
  3337. +++ b/crt/arm/crti.s
  3338. @@ -1,3 +1,5 @@
  3339. +.syntax unified
  3340. +
  3341. .section .init
  3342. .global _init
  3343. .type _init,%function
  3344. --- a/crt/arm/crtn.s
  3345. +++ b/crt/arm/crtn.s
  3346. @@ -1,11 +1,9 @@
  3347. +.syntax unified
  3348. +
  3349. .section .init
  3350. pop {r0,lr}
  3351. - tst lr,#1
  3352. - moveq pc,lr
  3353. bx lr
  3354. .section .fini
  3355. pop {r0,lr}
  3356. - tst lr,#1
  3357. - moveq pc,lr
  3358. bx lr
  3359. --- a/include/complex.h
  3360. +++ b/include/complex.h
  3361. @@ -116,7 +116,7 @@ long double creall(long double complex);
  3362. #if __STDC_VERSION__ >= 201112L
  3363. #if defined(_Imaginary_I)
  3364. -#define __CMPLX(x, y, t) ((t)(x) + _Imaginary_I*(t)(y)))
  3365. +#define __CMPLX(x, y, t) ((t)(x) + _Imaginary_I*(t)(y))
  3366. #elif defined(__clang__)
  3367. #define __CMPLX(x, y, t) (+(_Complex t){ (t)(x), (t)(y) })
  3368. #else
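
The removed _Imaginary_I line had an unbalanced closing parenthesis, so any C11 translation unit expanding CMPLX() through that branch failed to parse. A minimal compile check of the repaired macro (plain C11, nothing musl-specific):

    #include <complex.h>
    #include <stdio.h>

    int main(void)
    {
        double complex z = CMPLX(1.0, -2.0);
        printf("%f%+fi\n", creal(z), cimag(z));
        return 0;
    }
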
  3369. --- a/include/netinet/tcp.h
  3370. +++ b/include/netinet/tcp.h
  3371. @@ -41,7 +41,20 @@
  3372. #define TCP_CLOSING 11
  3373. #if defined(_GNU_SOURCE) || defined(_BSD_SOURCE)
  3374. +#define TCPOPT_EOL 0
  3375. +#define TCPOPT_NOP 1
  3376. +#define TCPOPT_MAXSEG 2
  3377. +#define TCPOPT_WINDOW 3
  3378. +#define TCPOPT_SACK_PERMITTED 4
  3379. +#define TCPOPT_SACK 5
  3380. +#define TCPOPT_TIMESTAMP 8
  3381. +#define TCPOLEN_SACK_PERMITTED 2
  3382. +#define TCPOLEN_WINDOW 3
  3383. +#define TCPOLEN_MAXSEG 4
  3384. +#define TCPOLEN_TIMESTAMP 10
  3385. +
  3386. #define SOL_TCP 6
  3387. +
  3388. #include <sys/types.h>
  3389. #include <sys/socket.h>
  3390. #include <stdint.h>
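
The new TCPOPT_*/TCPOLEN_* constants match the BSD names for the TCP option kind and length bytes. A sketch of the kind/length walk they support, e.g. pulling the MSS out of an options block (input assumed already bounded to the header's data offset; _GNU_SOURCE assumed for the constants):

    #define _GNU_SOURCE
    #include <netinet/tcp.h>
    #include <stddef.h>

    static unsigned find_mss(const unsigned char *opt, size_t len)
    {
        for (size_t i = 0; i < len; ) {
            if (opt[i] == TCPOPT_EOL) break;
            if (opt[i] == TCPOPT_NOP) { i++; continue; }
            if (i+1 >= len || opt[i+1] < 2) break;  /* malformed option */
            if (opt[i] == TCPOPT_MAXSEG && opt[i+1] == TCPOLEN_MAXSEG
                && i + TCPOLEN_MAXSEG <= len)
                return opt[i+2] << 8 | opt[i+3];
            i += opt[i+1];
        }
        return 0;
    }
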
  3391. --- a/src/env/__init_tls.c
  3392. +++ b/src/env/__init_tls.c
  3393. @@ -8,9 +8,6 @@
  3394. #include "atomic.h"
  3395. #include "syscall.h"
  3396. -#ifndef SHARED
  3397. -static
  3398. -#endif
  3399. int __init_tp(void *p)
  3400. {
  3401. pthread_t td = p;
  3402. @@ -24,8 +21,6 @@ int __init_tp(void *p)
  3403. return 0;
  3404. }
  3405. -#ifndef SHARED
  3406. -
  3407. static struct builtin_tls {
  3408. char c;
  3409. struct pthread pt;
  3410. @@ -33,33 +28,40 @@ static struct builtin_tls {
  3411. } builtin_tls[1];
  3412. #define MIN_TLS_ALIGN offsetof(struct builtin_tls, pt)
  3413. -struct tls_image {
  3414. - void *image;
  3415. - size_t len, size, align;
  3416. -} __static_tls;
  3417. -
  3418. -#define T __static_tls
  3419. +static struct tls_module main_tls;
  3420. void *__copy_tls(unsigned char *mem)
  3421. {
  3422. pthread_t td;
  3423. - if (!T.image) return mem;
  3424. - void **dtv = (void *)mem;
  3425. - dtv[0] = (void *)1;
  3426. + struct tls_module *p;
  3427. + size_t i;
  3428. + void **dtv;
  3429. +
  3430. #ifdef TLS_ABOVE_TP
  3431. - mem += sizeof(void *) * 2;
  3432. - mem += -((uintptr_t)mem + sizeof(struct pthread)) & (T.align-1);
  3433. + dtv = (void **)(mem + libc.tls_size) - (libc.tls_cnt + 1);
  3434. +
  3435. + mem += -((uintptr_t)mem + sizeof(struct pthread)) & (libc.tls_align-1);
  3436. td = (pthread_t)mem;
  3437. mem += sizeof(struct pthread);
  3438. +
  3439. + for (i=1, p=libc.tls_head; p; i++, p=p->next) {
  3440. + dtv[i] = mem + p->offset;
  3441. + memcpy(dtv[i], p->image, p->len);
  3442. + }
  3443. #else
  3444. + dtv = (void **)mem;
  3445. +
  3446. mem += libc.tls_size - sizeof(struct pthread);
  3447. - mem -= (uintptr_t)mem & (T.align-1);
  3448. + mem -= (uintptr_t)mem & (libc.tls_align-1);
  3449. td = (pthread_t)mem;
  3450. - mem -= T.size;
  3451. +
  3452. + for (i=1, p=libc.tls_head; p; i++, p=p->next) {
  3453. + dtv[i] = mem - p->offset;
  3454. + memcpy(dtv[i], p->image, p->len);
  3455. + }
  3456. #endif
  3457. + dtv[0] = (void *)libc.tls_cnt;
  3458. td->dtv = td->dtv_copy = dtv;
  3459. - dtv[1] = mem;
  3460. - memcpy(mem, T.image, T.len);
  3461. return td;
  3462. }
  3463. @@ -69,7 +71,7 @@ typedef Elf32_Phdr Phdr;
  3464. typedef Elf64_Phdr Phdr;
  3465. #endif
  3466. -void __init_tls(size_t *aux)
  3467. +static void static_init_tls(size_t *aux)
  3468. {
  3469. unsigned char *p;
  3470. size_t n;
  3471. @@ -86,16 +88,24 @@ void __init_tls(size_t *aux)
  3472. }
  3473. if (tls_phdr) {
  3474. - T.image = (void *)(base + tls_phdr->p_vaddr);
  3475. - T.len = tls_phdr->p_filesz;
  3476. - T.size = tls_phdr->p_memsz;
  3477. - T.align = tls_phdr->p_align;
  3478. + main_tls.image = (void *)(base + tls_phdr->p_vaddr);
  3479. + main_tls.len = tls_phdr->p_filesz;
  3480. + main_tls.size = tls_phdr->p_memsz;
  3481. + main_tls.align = tls_phdr->p_align;
  3482. + libc.tls_cnt = 1;
  3483. + libc.tls_head = &main_tls;
  3484. }
  3485. - T.size += (-T.size - (uintptr_t)T.image) & (T.align-1);
  3486. - if (T.align < MIN_TLS_ALIGN) T.align = MIN_TLS_ALIGN;
  3487. + main_tls.size += (-main_tls.size - (uintptr_t)main_tls.image)
  3488. + & (main_tls.align-1);
  3489. + if (main_tls.align < MIN_TLS_ALIGN) main_tls.align = MIN_TLS_ALIGN;
  3490. +#ifndef TLS_ABOVE_TP
  3491. + main_tls.offset = main_tls.size;
  3492. +#endif
  3493. - libc.tls_size = 2*sizeof(void *)+T.size+T.align+sizeof(struct pthread)
  3494. + libc.tls_align = main_tls.align;
  3495. + libc.tls_size = 2*sizeof(void *) + sizeof(struct pthread)
  3496. + + main_tls.size + main_tls.align
  3497. + + MIN_TLS_ALIGN-1 & -MIN_TLS_ALIGN;
  3498. if (libc.tls_size > sizeof builtin_tls) {
  3499. @@ -117,6 +127,5 @@ void __init_tls(size_t *aux)
  3500. if (__init_tp(__copy_tls(mem)) < 0)
  3501. a_crash();
  3502. }
  3503. -#else
  3504. -void __init_tls(size_t *auxv) { }
  3505. -#endif
  3506. +
  3507. +weak_alias(static_init_tls, __init_tls);
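
The rewrite replaces the single static tls_image blob with the dynamic linker's model: libc.tls_head chains one tls_module per ELF module carrying PT_TLS, dtv[0] holds the module count, and dtv[i] points at the thread's copy of module i. A standalone sketch of walking such a chain (struct fields as in the patch; not musl code):

    #include <stddef.h>

    struct tls_module {
        struct tls_module *next;
        void *image;                 /* initialized template (the tdata part) */
        size_t len, size, align, offset;
    };

    static size_t total_tls(const struct tls_module *head)
    {
        size_t n = 0;
        for (const struct tls_module *p = head; p; p = p->next)
            n += p->size;            /* len template bytes + zeroed tbss tail */
        return n;
    }
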
  3508. --- a/src/env/__libc_start_main.c
  3509. +++ b/src/env/__libc_start_main.c
  3510. @@ -8,21 +8,17 @@
  3511. void __init_tls(size_t *);
  3512. -#ifndef SHARED
  3513. -static void dummy() {}
  3514. +static void dummy(void) {}
  3515. weak_alias(dummy, _init);
  3516. -extern void (*const __init_array_start)() __attribute__((weak));
  3517. -extern void (*const __init_array_end)() __attribute__((weak));
  3518. -#endif
  3519. +
  3520. +__attribute__((__weak__, __visibility__("hidden")))
  3521. +extern void (*const __init_array_start)(void), (*const __init_array_end)(void);
  3522. static void dummy1(void *p) {}
  3523. weak_alias(dummy1, __init_ssp);
  3524. #define AUX_CNT 38
  3525. -#ifndef SHARED
  3526. -static
  3527. -#endif
  3528. void __init_libc(char **envp, char *pn)
  3529. {
  3530. size_t i, *auxv, aux[AUX_CNT] = { 0 };
  3531. @@ -57,20 +53,22 @@ void __init_libc(char **envp, char *pn)
  3532. libc.secure = 1;
  3533. }
  3534. -int __libc_start_main(int (*main)(int,char **,char **), int argc, char **argv)
  3535. +static void libc_start_init(void)
  3536. {
  3537. - char **envp = argv+argc+1;
  3538. -
  3539. -#ifndef SHARED
  3540. - __init_libc(envp, argv[0]);
  3541. _init();
  3542. uintptr_t a = (uintptr_t)&__init_array_start;
  3543. for (; a<(uintptr_t)&__init_array_end; a+=sizeof(void(*)()))
  3544. (*(void (**)())a)();
  3545. -#else
  3546. - void __libc_start_init(void);
  3547. +}
  3548. +
  3549. +weak_alias(libc_start_init, __libc_start_init);
  3550. +
  3551. +int __libc_start_main(int (*main)(int,char **,char **), int argc, char **argv)
  3552. +{
  3553. + char **envp = argv+argc+1;
  3554. +
  3555. + __init_libc(envp, argv[0]);
  3556. __libc_start_init();
  3557. -#endif
  3558. /* Pass control to the application */
  3559. exit(main(argc, argv, envp));
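
The SHARED conditionals collapse into the weak-alias pattern: a static-linked binary gets libc_start_init (run _init, then the .init_array pointers), while libc.so supplies a strong __libc_start_init that replaces it. What an application observes is ordinary constructor ordering, e.g.:

    #include <stdio.h>

    /* placed in .init_array; runs after _init, before main */
    __attribute__((constructor))
    static void ctor(void) { puts("ctor"); }

    int main(void) { puts("main"); return 0; }
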
  3560. --- a/src/env/__reset_tls.c
  3561. +++ b/src/env/__reset_tls.c
  3562. @@ -1,21 +1,16 @@
  3563. -#ifndef SHARED
  3564. -
  3565. #include <string.h>
  3566. #include "pthread_impl.h"
  3567. -
  3568. -extern struct tls_image {
  3569. - void *image;
  3570. - size_t len, size, align;
  3571. -} __static_tls;
  3572. -
  3573. -#define T __static_tls
  3574. +#include "libc.h"
  3575. void __reset_tls()
  3576. {
  3577. - if (!T.size) return;
  3578. pthread_t self = __pthread_self();
  3579. - memcpy(self->dtv[1], T.image, T.len);
  3580. - memset((char *)self->dtv[1]+T.len, 0, T.size-T.len);
  3581. + struct tls_module *p;
  3582. + size_t i, n = (size_t)self->dtv[0];
  3583. + if (n) for (p=libc.tls_head, i=1; i<=n; i++, p=p->next) {
  3584. + if (!self->dtv[i]) continue;
  3585. + memcpy(self->dtv[i], p->image, p->len);
  3586. + memset((char *)self->dtv[i]+p->len, 0,
  3587. + p->size - p->len);
  3588. + }
  3589. }
  3590. -
  3591. -#endif
  3592. --- a/src/env/__stack_chk_fail.c
  3593. +++ b/src/env/__stack_chk_fail.c
  3594. @@ -17,16 +17,7 @@ void __stack_chk_fail(void)
  3595. a_crash();
  3596. }
  3597. -#ifdef SHARED
  3598. -
  3599. __attribute__((__visibility__("hidden")))
  3600. -void __stack_chk_fail_local(void)
  3601. -{
  3602. - a_crash();
  3603. -}
  3604. -
  3605. -#else
  3606. +void __stack_chk_fail_local(void);
  3607. weak_alias(__stack_chk_fail, __stack_chk_fail_local);
  3608. -
  3609. -#endif
  3610. --- /dev/null
  3611. +++ b/src/exit/arm/__aeabi_atexit.c
  3612. @@ -0,0 +1,6 @@
  3613. +int __cxa_atexit(void (*func)(void *), void *arg, void *dso);
  3614. +
  3615. +int __aeabi_atexit (void *obj, void (*func) (void *), void *d)
  3616. +{
  3617. + return __cxa_atexit (func, obj, d);
  3618. +}
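
On ARM EABI the C++ front end registers static-object destructors through __aeabi_atexit, which is __cxa_atexit with the object and function arguments swapped; this new shim just forwards. Roughly the call a compiler emits (hand-written sketch; obj and obj_dtor are illustrative):

    extern void *__dso_handle;
    int __aeabi_atexit(void *obj, void (*func)(void *), void *d);

    static char obj;                           /* stand-in for a static object */
    static void obj_dtor(void *p) { (void)p; }

    static void register_static_dtor(void)
    {
        __aeabi_atexit(&obj, obj_dtor, &__dso_handle);
    }
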
  3619. --- a/src/exit/exit.c
  3620. +++ b/src/exit/exit.c
  3621. @@ -10,25 +10,25 @@ static void dummy()
  3622. * as a consequence of linking either __toread.c or __towrite.c. */
  3623. weak_alias(dummy, __funcs_on_exit);
  3624. weak_alias(dummy, __stdio_exit);
  3625. -
  3626. -#ifndef SHARED
  3627. weak_alias(dummy, _fini);
  3628. -extern void (*const __fini_array_start)() __attribute__((weak));
  3629. -extern void (*const __fini_array_end)() __attribute__((weak));
  3630. -#endif
  3631. -_Noreturn void exit(int code)
  3632. -{
  3633. - __funcs_on_exit();
  3634. +__attribute__((__weak__, __visibility__("hidden")))
  3635. +extern void (*const __fini_array_start)(void), (*const __fini_array_end)(void);
  3636. -#ifndef SHARED
  3637. +static void libc_exit_fini(void)
  3638. +{
  3639. uintptr_t a = (uintptr_t)&__fini_array_end;
  3640. for (; a>(uintptr_t)&__fini_array_start; a-=sizeof(void(*)()))
  3641. (*(void (**)())(a-sizeof(void(*)())))();
  3642. _fini();
  3643. -#endif
  3644. +}
  3645. - __stdio_exit();
  3646. +weak_alias(libc_exit_fini, __libc_exit_fini);
  3647. +_Noreturn void exit(int code)
  3648. +{
  3649. + __funcs_on_exit();
  3650. + __libc_exit_fini();
  3651. + __stdio_exit();
  3652. _Exit(code);
  3653. }
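
exit() now has one unconditional sequence: atexit/__cxa_atexit handlers, then __libc_exit_fini (the .fini_array pointers in reverse, then _fini), then stdio teardown. The reverse walk is visible from plain C; with the usual toolchain placement, the later-declared destructor runs first:

    #include <stdio.h>

    __attribute__((destructor)) static void d1(void) { puts("d1: last"); }
    __attribute__((destructor)) static void d2(void) { puts("d2: first"); }

    int main(void) { return 0; }
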
  3654. --- /dev/null
  3655. +++ b/src/fenv/arm/fenv-hf.S
  3656. @@ -0,0 +1,69 @@
  3657. +#if __ARM_PCS_VFP
  3658. +
  3659. +.syntax unified
  3660. +.fpu vfp
  3661. +
  3662. +.global fegetround
  3663. +.type fegetround,%function
  3664. +fegetround:
  3665. + fmrx r0, fpscr
  3666. + and r0, r0, #0xc00000
  3667. + bx lr
  3668. +
  3669. +.global __fesetround
  3670. +.type __fesetround,%function
  3671. +__fesetround:
  3672. + fmrx r3, fpscr
  3673. + bic r3, r3, #0xc00000
  3674. + orr r3, r3, r0
  3675. + fmxr fpscr, r3
  3676. + mov r0, #0
  3677. + bx lr
  3678. +
  3679. +.global fetestexcept
  3680. +.type fetestexcept,%function
  3681. +fetestexcept:
  3682. + and r0, r0, #0x1f
  3683. + fmrx r3, fpscr
  3684. + and r0, r0, r3
  3685. + bx lr
  3686. +
  3687. +.global feclearexcept
  3688. +.type feclearexcept,%function
  3689. +feclearexcept:
  3690. + and r0, r0, #0x1f
  3691. + fmrx r3, fpscr
  3692. + bic r3, r3, r0
  3693. + fmxr fpscr, r3
  3694. + mov r0, #0
  3695. + bx lr
  3696. +
  3697. +.global feraiseexcept
  3698. +.type feraiseexcept,%function
  3699. +feraiseexcept:
  3700. + and r0, r0, #0x1f
  3701. + fmrx r3, fpscr
  3702. + orr r3, r3, r0
  3703. + fmxr fpscr, r3
  3704. + mov r0, #0
  3705. + bx lr
  3706. +
  3707. +.global fegetenv
  3708. +.type fegetenv,%function
  3709. +fegetenv:
  3710. + fmrx r3, fpscr
  3711. + str r3, [r0]
  3712. + mov r0, #0
  3713. + bx lr
  3714. +
  3715. +.global fesetenv
  3716. +.type fesetenv,%function
  3717. +fesetenv:
  3718. + cmn r0, #1
  3719. + moveq r3, #0
  3720. + ldrne r3, [r0]
  3721. + fmxr fpscr, r3
  3722. + mov r0, #0
  3723. + bx lr
  3724. +
  3725. +#endif
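
All the masking above is against two FPSCR fields: bits 22-23 (0xc00000) select the rounding mode and bits 0-4 (0x1f) are the cumulative exception flags. These routines are the backend of the standard <fenv.h> calls, e.g.:

    #include <fenv.h>
    #include <stdio.h>

    int main(void)
    {
        fesetround(FE_TOWARDZERO);
        feraiseexcept(FE_INEXACT);
        printf("round=%d inexact=%d\n",
               fegetround(), !!fetestexcept(FE_INEXACT));
        return 0;
    }
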
  3726. --- /dev/null
  3727. +++ b/src/fenv/arm/fenv.c
  3728. @@ -0,0 +1,3 @@
  3729. +#if !__ARM_PCS_VFP
  3730. +#include "../fenv.c"
  3731. +#endif
  3732. --- a/src/fenv/armebhf/fenv.sub
  3733. +++ /dev/null
  3734. @@ -1 +0,0 @@
  3735. -../armhf/fenv.s
  3736. --- a/src/fenv/armhf/fenv.s
  3737. +++ /dev/null
  3738. @@ -1,64 +0,0 @@
  3739. -.fpu vfp
  3740. -
  3741. -.global fegetround
  3742. -.type fegetround,%function
  3743. -fegetround:
  3744. - mrc p10, 7, r0, cr1, cr0, 0
  3745. - and r0, r0, #0xc00000
  3746. - bx lr
  3747. -
  3748. -.global __fesetround
  3749. -.type __fesetround,%function
  3750. -__fesetround:
  3751. - mrc p10, 7, r3, cr1, cr0, 0
  3752. - bic r3, r3, #0xc00000
  3753. - orr r3, r3, r0
  3754. - mcr p10, 7, r3, cr1, cr0, 0
  3755. - mov r0, #0
  3756. - bx lr
  3757. -
  3758. -.global fetestexcept
  3759. -.type fetestexcept,%function
  3760. -fetestexcept:
  3761. - and r0, r0, #0x1f
  3762. - mrc p10, 7, r3, cr1, cr0, 0
  3763. - and r0, r0, r3
  3764. - bx lr
  3765. -
  3766. -.global feclearexcept
  3767. -.type feclearexcept,%function
  3768. -feclearexcept:
  3769. - and r0, r0, #0x1f
  3770. - mrc p10, 7, r3, cr1, cr0, 0
  3771. - bic r3, r3, r0
  3772. - mcr p10, 7, r3, cr1, cr0, 0
  3773. - mov r0, #0
  3774. - bx lr
  3775. -
  3776. -.global feraiseexcept
  3777. -.type feraiseexcept,%function
  3778. -feraiseexcept:
  3779. - and r0, r0, #0x1f
  3780. - mrc p10, 7, r3, cr1, cr0, 0
  3781. - orr r3, r3, r0
  3782. - mcr p10, 7, r3, cr1, cr0, 0
  3783. - mov r0, #0
  3784. - bx lr
  3785. -
  3786. -.global fegetenv
  3787. -.type fegetenv,%function
  3788. -fegetenv:
  3789. - mrc p10, 7, r3, cr1, cr0, 0
  3790. - str r3, [r0]
  3791. - mov r0, #0
  3792. - bx lr
  3793. -
  3794. -.global fesetenv
  3795. -.type fesetenv,%function
  3796. -fesetenv:
  3797. - cmn r0, #1
  3798. - moveq r3, #0
  3799. - ldrne r3, [r0]
  3800. - mcr p10, 7, r3, cr1, cr0, 0
  3801. - mov r0, #0
  3802. - bx lr
  3803. --- a/src/fenv/armhf/fenv.sub
  3804. +++ /dev/null
  3805. @@ -1 +0,0 @@
  3806. -fenv.s
  3807. --- a/src/fenv/mips-sf/fenv.sub
  3808. +++ /dev/null
  3809. @@ -1 +0,0 @@
  3810. -../fenv.c
  3811. --- /dev/null
  3812. +++ b/src/fenv/mips/fenv-sf.c
  3813. @@ -0,0 +1,3 @@
  3814. +#ifdef __mips_soft_float
  3815. +#include "../fenv.c"
  3816. +#endif
  3817. --- /dev/null
  3818. +++ b/src/fenv/mips/fenv.S
  3819. @@ -0,0 +1,71 @@
  3820. +#ifndef __mips_soft_float
  3821. +
  3822. +.set noreorder
  3823. +
  3824. +.global feclearexcept
  3825. +.type feclearexcept,@function
  3826. +feclearexcept:
  3827. + and $4, $4, 0x7c
  3828. + cfc1 $5, $31
  3829. + or $5, $5, $4
  3830. + xor $5, $5, $4
  3831. + ctc1 $5, $31
  3832. + jr $ra
  3833. + li $2, 0
  3834. +
  3835. +.global feraiseexcept
  3836. +.type feraiseexcept,@function
  3837. +feraiseexcept:
  3838. + and $4, $4, 0x7c
  3839. + cfc1 $5, $31
  3840. + or $5, $5, $4
  3841. + ctc1 $5, $31
  3842. + jr $ra
  3843. + li $2, 0
  3844. +
  3845. +.global fetestexcept
  3846. +.type fetestexcept,@function
  3847. +fetestexcept:
  3848. + and $4, $4, 0x7c
  3849. + cfc1 $2, $31
  3850. + jr $ra
  3851. + and $2, $2, $4
  3852. +
  3853. +.global fegetround
  3854. +.type fegetround,@function
  3855. +fegetround:
  3856. + cfc1 $2, $31
  3857. + jr $ra
  3858. + andi $2, $2, 3
  3859. +
  3860. +.global __fesetround
  3861. +.type __fesetround,@function
  3862. +__fesetround:
  3863. + cfc1 $5, $31
  3864. + li $6, -4
  3865. + and $5, $5, $6
  3866. + or $5, $5, $4
  3867. + ctc1 $5, $31
  3868. + jr $ra
  3869. + li $2, 0
  3870. +
  3871. +.global fegetenv
  3872. +.type fegetenv,@function
  3873. +fegetenv:
  3874. + cfc1 $5, $31
  3875. + sw $5, 0($4)
  3876. + jr $ra
  3877. + li $2, 0
  3878. +
  3879. +.global fesetenv
  3880. +.type fesetenv,@function
  3881. +fesetenv:
  3882. + addiu $5, $4, 1
  3883. + beq $5, $0, 1f
  3884. + nop
  3885. + lw $5, 0($4)
  3886. +1: ctc1 $5, $31
  3887. + jr $ra
  3888. + li $2, 0
  3889. +
  3890. +#endif
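
feclearexcept above uses an or-then-xor pair: OR forces every bit in the mask on, so the following XOR turns exactly those bits off — x & ~m without needing an and-not instruction. The same identity in C:

    /* clears the bits of m in x: (x | m) ^ m == x & ~m */
    static unsigned clear_bits(unsigned x, unsigned m)
    {
        x |= m;
        x ^= m;
        return x;
    }
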
  3891. --- a/src/fenv/mips/fenv.s
  3892. +++ /dev/null
  3893. @@ -1,67 +0,0 @@
  3894. -.set noreorder
  3895. -
  3896. -.global feclearexcept
  3897. -.type feclearexcept,@function
  3898. -feclearexcept:
  3899. - and $4, $4, 0x7c
  3900. - cfc1 $5, $31
  3901. - or $5, $5, $4
  3902. - xor $5, $5, $4
  3903. - ctc1 $5, $31
  3904. - jr $ra
  3905. - li $2, 0
  3906. -
  3907. -.global feraiseexcept
  3908. -.type feraiseexcept,@function
  3909. -feraiseexcept:
  3910. - and $4, $4, 0x7c
  3911. - cfc1 $5, $31
  3912. - or $5, $5, $4
  3913. - ctc1 $5, $31
  3914. - jr $ra
  3915. - li $2, 0
  3916. -
  3917. -.global fetestexcept
  3918. -.type fetestexcept,@function
  3919. -fetestexcept:
  3920. - and $4, $4, 0x7c
  3921. - cfc1 $2, $31
  3922. - jr $ra
  3923. - and $2, $2, $4
  3924. -
  3925. -.global fegetround
  3926. -.type fegetround,@function
  3927. -fegetround:
  3928. - cfc1 $2, $31
  3929. - jr $ra
  3930. - andi $2, $2, 3
  3931. -
  3932. -.global __fesetround
  3933. -.type __fesetround,@function
  3934. -__fesetround:
  3935. - cfc1 $5, $31
  3936. - li $6, -4
  3937. - and $5, $5, $6
  3938. - or $5, $5, $4
  3939. - ctc1 $5, $31
  3940. - jr $ra
  3941. - li $2, 0
  3942. -
  3943. -.global fegetenv
  3944. -.type fegetenv,@function
  3945. -fegetenv:
  3946. - cfc1 $5, $31
  3947. - sw $5, 0($4)
  3948. - jr $ra
  3949. - li $2, 0
  3950. -
  3951. -.global fesetenv
  3952. -.type fesetenv,@function
  3953. -fesetenv:
  3954. - addiu $5, $4, 1
  3955. - beq $5, $0, 1f
  3956. - nop
  3957. - lw $5, 0($4)
  3958. -1: ctc1 $5, $31
  3959. - jr $ra
  3960. - li $2, 0
  3961. --- a/src/fenv/mipsel-sf/fenv.sub
  3962. +++ /dev/null
  3963. @@ -1 +0,0 @@
  3964. -../fenv.c
  3965. --- a/src/fenv/sh-nofpu/fenv.sub
  3966. +++ /dev/null
  3967. @@ -1 +0,0 @@
  3968. -../fenv.c
  3969. --- /dev/null
  3970. +++ b/src/fenv/sh/fenv-nofpu.c
  3971. @@ -0,0 +1,3 @@
  3972. +#if !__SH_FPU_ANY__ && !__SH4__
  3973. +#include "../fenv.c"
  3974. +#endif
  3975. --- /dev/null
  3976. +++ b/src/fenv/sh/fenv.S
  3977. @@ -0,0 +1,78 @@
  3978. +#if __SH_FPU_ANY__ || __SH4__
  3979. +
  3980. +.global fegetround
  3981. +.type fegetround, @function
  3982. +fegetround:
  3983. + sts fpscr, r0
  3984. + rts
  3985. + and #3, r0
  3986. +
  3987. +.global __fesetround
  3988. +.type __fesetround, @function
  3989. +__fesetround:
  3990. + sts fpscr, r0
  3991. + or r4, r0
  3992. + lds r0, fpscr
  3993. + rts
  3994. + mov #0, r0
  3995. +
  3996. +.global fetestexcept
  3997. +.type fetestexcept, @function
  3998. +fetestexcept:
  3999. + sts fpscr, r0
  4000. + and r4, r0
  4001. + rts
  4002. + and #0x7c, r0
  4003. +
  4004. +.global feclearexcept
  4005. +.type feclearexcept, @function
  4006. +feclearexcept:
  4007. + mov r4, r0
  4008. + and #0x7c, r0
  4009. + not r0, r4
  4010. + sts fpscr, r0
  4011. + and r4, r0
  4012. + lds r0, fpscr
  4013. + rts
  4014. + mov #0, r0
  4015. +
  4016. +.global feraiseexcept
  4017. +.type feraiseexcept, @function
  4018. +feraiseexcept:
  4019. + mov r4, r0
  4020. + and #0x7c, r0
  4021. + sts fpscr, r4
  4022. + or r4, r0
  4023. + lds r0, fpscr
  4024. + rts
  4025. + mov #0, r0
  4026. +
  4027. +.global fegetenv
  4028. +.type fegetenv, @function
  4029. +fegetenv:
  4030. + sts fpscr, r0
  4031. + mov.l r0, @r4
  4032. + rts
  4033. + mov #0, r0
  4034. +
  4035. +.global fesetenv
  4036. +.type fesetenv, @function
  4037. +fesetenv:
  4038. + mov r4, r0
  4039. + cmp/eq #-1, r0
  4040. + bf 1f
  4041. +
  4042. + ! the default environment is complicated by the fact that we need to
  4043. + ! preserve the current precision bit, which we do not know a priori
  4044. + sts fpscr, r0
  4045. + mov #8, r1
  4046. + swap.w r1, r1
  4047. + bra 2f
  4048. + and r1, r0
  4049. +
  4050. +1: mov.l @r4, r0 ! non-default environment
  4051. +2: lds r0, fpscr
  4052. + rts
  4053. + mov #0, r0
  4054. +
  4055. +#endif
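
In the default-environment path, mov #8,r1 followed by swap.w r1,r1 (which exchanges the two 16-bit halves) builds 8<<16 = 0x00080000 — per the SH-4 FPSCR layout this is the PR precision bit, the one piece of state the comment says must survive fesetenv(FE_DFL_ENV). The same construction in C:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t r1 = 8;
        r1 = (r1 << 16) | (r1 >> 16);   /* swap.w: exchange 16-bit halves */
        assert(r1 == 0x00080000);       /* assumed: FPSCR.PR on SH-4 */
        return 0;
    }
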
  4056. --- a/src/fenv/sh/fenv.s
  4057. +++ /dev/null
  4058. @@ -1,74 +0,0 @@
  4059. -.global fegetround
  4060. -.type fegetround, @function
  4061. -fegetround:
  4062. - sts fpscr, r0
  4063. - rts
  4064. - and #3, r0
  4065. -
  4066. -.global __fesetround
  4067. -.type __fesetround, @function
  4068. -__fesetround:
  4069. - sts fpscr, r0
  4070. - or r4, r0
  4071. - lds r0, fpscr
  4072. - rts
  4073. - mov #0, r0
  4074. -
  4075. -.global fetestexcept
  4076. -.type fetestexcept, @function
  4077. -fetestexcept:
  4078. - sts fpscr, r0
  4079. - and r4, r0
  4080. - rts
  4081. - and #0x7c, r0
  4082. -
  4083. -.global feclearexcept
  4084. -.type feclearexcept, @function
  4085. -feclearexcept:
  4086. - mov r4, r0
  4087. - and #0x7c, r0
  4088. - not r0, r4
  4089. - sts fpscr, r0
  4090. - and r4, r0
  4091. - lds r0, fpscr
  4092. - rts
  4093. - mov #0, r0
  4094. -
  4095. -.global feraiseexcept
  4096. -.type feraiseexcept, @function
  4097. -feraiseexcept:
  4098. - mov r4, r0
  4099. - and #0x7c, r0
  4100. - sts fpscr, r4
  4101. - or r4, r0
  4102. - lds r0, fpscr
  4103. - rts
  4104. - mov #0, r0
  4105. -
  4106. -.global fegetenv
  4107. -.type fegetenv, @function
  4108. -fegetenv:
  4109. - sts fpscr, r0
  4110. - mov.l r0, @r4
  4111. - rts
  4112. - mov #0, r0
  4113. -
  4114. -.global fesetenv
  4115. -.type fesetenv, @function
  4116. -fesetenv:
  4117. - mov r4, r0
  4118. - cmp/eq #-1, r0
  4119. - bf 1f
  4120. -
  4121. - ! the default environment is complicated by the fact that we need to
  4122. - ! preserve the current precision bit, which we do not know a priori
  4123. - sts fpscr, r0
  4124. - mov #8, r1
  4125. - swap.w r1, r1
  4126. - bra 2f
  4127. - and r1, r0
  4128. -
  4129. -1: mov.l @r4, r0 ! non-default environment
  4130. -2: lds r0, fpscr
  4131. - rts
  4132. - mov #0, r0
  4133. --- a/src/fenv/sheb-nofpu/fenv.sub
  4134. +++ /dev/null
  4135. @@ -1 +0,0 @@
  4136. -../fenv.c
  4137. --- a/src/internal/arm/syscall.s
  4138. +++ b/src/internal/arm/syscall.s
  4139. @@ -1,3 +1,4 @@
  4140. +.syntax unified
  4141. .global __syscall
  4142. .hidden __syscall
  4143. .type __syscall,%function
  4144. @@ -11,6 +12,4 @@ __syscall:
  4145. ldmfd ip,{r3,r4,r5,r6}
  4146. svc 0
  4147. ldmfd sp!,{r4,r5,r6,r7}
  4148. - tst lr,#1
  4149. - moveq pc,lr
  4150. bx lr
  4151. --- /dev/null
  4152. +++ b/src/internal/atomic.h
  4153. @@ -0,0 +1,275 @@
  4154. +#ifndef _ATOMIC_H
  4155. +#define _ATOMIC_H
  4156. +
  4157. +#include <stdint.h>
  4158. +
  4159. +#include "atomic_arch.h"
  4160. +
  4161. +#ifdef a_ll
  4162. +
  4163. +#ifndef a_pre_llsc
  4164. +#define a_pre_llsc()
  4165. +#endif
  4166. +
  4167. +#ifndef a_post_llsc
  4168. +#define a_post_llsc()
  4169. +#endif
  4170. +
  4171. +#ifndef a_cas
  4172. +#define a_cas a_cas
  4173. +static inline int a_cas(volatile int *p, int t, int s)
  4174. +{
  4175. + int old;
  4176. + a_pre_llsc();
  4177. + do old = a_ll(p);
  4178. + while (old==t && !a_sc(p, s));
  4179. + a_post_llsc();
  4180. + return old;
  4181. +}
  4182. +#endif
  4183. +
  4184. +#ifndef a_swap
  4185. +#define a_swap a_swap
  4186. +static inline int a_swap(volatile int *p, int v)
  4187. +{
  4188. + int old;
  4189. + a_pre_llsc();
  4190. + do old = a_ll(p);
  4191. + while (!a_sc(p, v));
  4192. + a_post_llsc();
  4193. + return old;
  4194. +}
  4195. +#endif
  4196. +
  4197. +#ifndef a_fetch_add
  4198. +#define a_fetch_add a_fetch_add
  4199. +static inline int a_fetch_add(volatile int *p, int v)
  4200. +{
  4201. + int old;
  4202. + a_pre_llsc();
  4203. + do old = a_ll(p);
  4204. + while (!a_sc(p, (unsigned)old + v));
  4205. + a_post_llsc();
  4206. + return old;
  4207. +}
  4208. +#endif
  4209. +
  4210. +#ifndef a_fetch_and
  4211. +#define a_fetch_and a_fetch_and
  4212. +static inline int a_fetch_and(volatile int *p, int v)
  4213. +{
  4214. + int old;
  4215. + a_pre_llsc();
  4216. + do old = a_ll(p);
  4217. + while (!a_sc(p, old & v));
  4218. + a_post_llsc();
  4219. + return old;
  4220. +}
  4221. +#endif
  4222. +
  4223. +#ifndef a_fetch_or
  4224. +#define a_fetch_or a_fetch_or
  4225. +static inline int a_fetch_or(volatile int *p, int v)
  4226. +{
  4227. + int old;
  4228. + a_pre_llsc();
  4229. + do old = a_ll(p);
  4230. + while (!a_sc(p, old | v));
  4231. + a_post_llsc();
  4232. + return old;
  4233. +}
  4234. +#endif
  4235. +
  4236. +#endif
  4237. +
  4238. +#ifndef a_cas
  4239. +#error missing definition of a_cas
  4240. +#endif
  4241. +
  4242. +#ifndef a_swap
  4243. +#define a_swap a_swap
  4244. +static inline int a_swap(volatile int *p, int v)
  4245. +{
  4246. + int old;
  4247. + do old = *p;
  4248. + while (a_cas(p, old, v) != old);
  4249. + return old;
  4250. +}
  4251. +#endif
  4252. +
  4253. +#ifndef a_fetch_add
  4254. +#define a_fetch_add a_fetch_add
  4255. +static inline int a_fetch_add(volatile int *p, int v)
  4256. +{
  4257. + int old;
  4258. + do old = *p;
  4259. + while (a_cas(p, old, (unsigned)old+v) != old);
  4260. + return old;
  4261. +}
  4262. +#endif
  4263. +
  4264. +#ifndef a_fetch_and
  4265. +#define a_fetch_and a_fetch_and
  4266. +static inline int a_fetch_and(volatile int *p, int v)
  4267. +{
  4268. + int old;
  4269. + do old = *p;
  4270. + while (a_cas(p, old, old&v) != old);
  4271. + return old;
  4272. +}
  4273. +#endif
  4274. +#ifndef a_fetch_or
  4275. +#define a_fetch_or a_fetch_or
  4276. +static inline int a_fetch_or(volatile int *p, int v)
  4277. +{
  4278. + int old;
  4279. + do old = *p;
  4280. + while (a_cas(p, old, old|v) != old);
  4281. + return old;
  4282. +}
  4283. +#endif
  4284. +
  4285. +#ifndef a_and
  4286. +#define a_and a_and
  4287. +static inline void a_and(volatile int *p, int v)
  4288. +{
  4289. + a_fetch_and(p, v);
  4290. +}
  4291. +#endif
  4292. +
  4293. +#ifndef a_or
  4294. +#define a_or a_or
  4295. +static inline void a_or(volatile int *p, int v)
  4296. +{
  4297. + a_fetch_or(p, v);
  4298. +}
  4299. +#endif
  4300. +
  4301. +#ifndef a_inc
  4302. +#define a_inc a_inc
  4303. +static inline void a_inc(volatile int *p)
  4304. +{
  4305. + a_fetch_add(p, 1);
  4306. +}
  4307. +#endif
  4308. +
  4309. +#ifndef a_dec
  4310. +#define a_dec a_dec
  4311. +static inline void a_dec(volatile int *p)
  4312. +{
  4313. + a_fetch_add(p, -1);
  4314. +}
  4315. +#endif
  4316. +
  4317. +#ifndef a_store
  4318. +#define a_store a_store
  4319. +static inline void a_store(volatile int *p, int v)
  4320. +{
  4321. +#ifdef a_barrier
  4322. + a_barrier();
  4323. + *p = v;
  4324. + a_barrier();
  4325. +#else
  4326. + a_swap(p, v);
  4327. +#endif
  4328. +}
  4329. +#endif
  4330. +
  4331. +#ifndef a_barrier
  4332. +#define a_barrier a_barrier
  4333. +static inline void a_barrier()
  4334. +{
  4335. + volatile int tmp = 0;
  4336. + a_cas(&tmp, 0, 0);
  4337. +}
  4338. +#endif
  4339. +
  4340. +#ifndef a_spin
  4341. +#define a_spin a_barrier
  4342. +#endif
  4343. +
  4344. +#ifndef a_and_64
  4345. +#define a_and_64 a_and_64
  4346. +static inline void a_and_64(volatile uint64_t *p, uint64_t v)
  4347. +{
  4348. + union { uint64_t v; uint32_t r[2]; } u = { v };
  4349. + if (u.r[0]+1) a_and((int *)p, u.r[0]);
  4350. + if (u.r[1]+1) a_and((int *)p+1, u.r[1]);
  4351. +}
  4352. +#endif
  4353. +
  4354. +#ifndef a_or_64
  4355. +#define a_or_64 a_or_64
  4356. +static inline void a_or_64(volatile uint64_t *p, uint64_t v)
  4357. +{
  4358. + union { uint64_t v; uint32_t r[2]; } u = { v };
  4359. + if (u.r[0]) a_or((int *)p, u.r[0]);
  4360. + if (u.r[1]) a_or((int *)p+1, u.r[1]);
  4361. +}
  4362. +#endif
  4363. +
  4364. +#ifndef a_cas_p
  4365. +#define a_cas_p a_cas_p
  4366. +static inline void *a_cas_p(volatile void *p, void *t, void *s)
  4367. +{
  4368. + return (void *)a_cas((volatile int *)p, (int)t, (int)s);
  4369. +}
  4370. +#endif
  4371. +
  4372. +#ifndef a_or_l
  4373. +#define a_or_l a_or_l
  4374. +static inline void a_or_l(volatile void *p, long v)
  4375. +{
  4376. + if (sizeof(long) == sizeof(int)) a_or(p, v);
  4377. + else a_or_64(p, v);
  4378. +}
  4379. +#endif
  4380. +
  4381. +#ifndef a_crash
  4382. +#define a_crash a_crash
  4383. +static inline void a_crash()
  4384. +{
  4385. + *(volatile char *)0=0;
  4386. +}
  4387. +#endif
  4388. +
  4389. +#ifndef a_ctz_64
  4390. +#define a_ctz_64 a_ctz_64
  4391. +static inline int a_ctz_64(uint64_t x)
  4392. +{
  4393. + static const char debruijn64[64] = {
  4394. + 0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
  4395. + 62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
  4396. + 63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
  4397. + 51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
  4398. + };
  4399. + static const char debruijn32[32] = {
  4400. + 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  4401. + 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  4402. + };
  4403. + if (sizeof(long) < 8) {
  4404. + uint32_t y = x;
  4405. + if (!y) {
  4406. + y = x>>32;
  4407. + return 32 + debruijn32[(y&-y)*0x076be629 >> 27];
  4408. + }
  4409. + return debruijn32[(y&-y)*0x076be629 >> 27];
  4410. + }
  4411. + return debruijn64[(x&-x)*0x022fdd63cc95386dull >> 58];
  4412. +}
  4413. +#endif
  4414. +
  4415. +#ifndef a_ctz_l
  4416. +#define a_ctz_l a_ctz_l
  4417. +static inline int a_ctz_l(unsigned long x)
  4418. +{
  4419. + static const char debruijn32[32] = {
  4420. + 0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
  4421. + 31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
  4422. + };
  4423. + if (sizeof(long) == 8) return a_ctz_64(x);
  4424. + return debruijn32[(x&-x)*0x076be629 >> 27];
  4425. +}
  4426. +#endif
  4427. +
  4428. +#endif
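
In the portable a_ctz_64 fallback, x & -x isolates the lowest set bit, and multiplying by the de Bruijn constant makes the top six bits a perfect hash of that bit's index, which the table inverts. A standalone cross-check of the 64-bit table (mirrors the code above; not musl itself):

    #include <assert.h>
    #include <stdint.h>

    static int ctz64(uint64_t x)
    {
        static const char t[64] = {
            0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
            62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
            63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
            51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
        };
        return t[(x & -x) * 0x022fdd63cc95386dull >> 58];
    }

    int main(void)
    {
        for (int i = 0; i < 64; i++)
            assert(ctz64((uint64_t)1 << i) == i);
        return 0;
    }
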
  4429. --- a/src/internal/dynlink.h
  4430. +++ b/src/internal/dynlink.h
  4431. @@ -64,6 +64,10 @@ struct fdpic_dummy_loadmap {
  4432. #define DL_FDPIC 0
  4433. #endif
  4434. +#ifndef DL_NOMMU_SUPPORT
  4435. +#define DL_NOMMU_SUPPORT 0
  4436. +#endif
  4437. +
  4438. #if !DL_FDPIC
  4439. #define IS_RELATIVE(x,s) ( \
  4440. (R_TYPE(x) == REL_RELATIVE) || \
  4441. --- a/src/internal/libc.h
  4442. +++ b/src/internal/libc.h
  4443. @@ -11,13 +11,20 @@ struct __locale_struct {
  4444. const struct __locale_map *volatile cat[6];
  4445. };
  4446. +struct tls_module {
  4447. + struct tls_module *next;
  4448. + void *image;
  4449. + size_t len, size, align, offset;
  4450. +};
  4451. +
  4452. struct __libc {
  4453. int can_do_threads;
  4454. int threaded;
  4455. int secure;
  4456. volatile int threads_minus_1;
  4457. size_t *auxv;
  4458. - size_t tls_size;
  4459. + struct tls_module *tls_head;
  4460. + size_t tls_size, tls_align, tls_cnt;
  4461. size_t page_size;
  4462. struct __locale_struct global_locale;
  4463. };
  4464. --- a/src/internal/syscall.h
  4465. +++ b/src/internal/syscall.h
  4466. @@ -17,9 +17,7 @@
  4467. typedef long syscall_arg_t;
  4468. #endif
  4469. -#ifdef SHARED
  4470. __attribute__((visibility("hidden")))
  4471. -#endif
  4472. long __syscall_ret(unsigned long), __syscall(syscall_arg_t, ...),
  4473. __syscall_cp(syscall_arg_t, syscall_arg_t, syscall_arg_t, syscall_arg_t,
  4474. syscall_arg_t, syscall_arg_t, syscall_arg_t);
  4475. --- a/src/internal/version.c
  4476. +++ b/src/internal/version.c
  4477. @@ -1,12 +1,9 @@
  4478. -#ifdef SHARED
  4479. -
  4480. #include "version.h"
  4481. static const char version[] = VERSION;
  4482. +__attribute__((__visibility__("hidden")))
  4483. const char *__libc_get_version()
  4484. {
  4485. return version;
  4486. }
  4487. -
  4488. -#endif
  4489. --- a/src/internal/vis.h
  4490. +++ b/src/internal/vis.h
  4491. @@ -4,10 +4,9 @@
  4492. * override default visibilities to reduce the size and performance costs
  4493. * of position-independent code. */
  4494. -#ifndef CRT
  4495. -#ifdef SHARED
  4496. +#if !defined(CRT) && !defined(__ASSEMBLER__)
  4497. -/* For shared libc.so, all symbols should be protected, but some toolchains
  4498. +/* Conceptually, all symbols should be protected, but some toolchains
  4499. * fail to support copy relocations for protected data, so exclude all
  4500. * exported data symbols. */
  4501. @@ -25,16 +24,4 @@ extern char *optarg, **environ, **__envi
  4502. #pragma GCC visibility push(protected)
  4503. -#elif defined(__PIC__)
  4504. -
  4505. -/* If building static libc.a as position-independent code, try to make
  4506. - * everything hidden except possibly-undefined weak references. */
  4507. -
  4508. -__attribute__((__visibility__("default")))
  4509. -extern void (*const __init_array_start)(), (*const __init_array_end)(),
  4510. - (*const __fini_array_start)(), (*const __fini_array_end)();
  4511. -
  4512. -#pragma GCC visibility push(hidden)
  4513. -
  4514. -#endif
  4515. #endif
  4516. --- a/src/ldso/arm/dlsym.s
  4517. +++ b/src/ldso/arm/dlsym.s
  4518. @@ -1,3 +1,4 @@
  4519. +.syntax unified
  4520. .text
  4521. .global dlsym
  4522. .hidden __dlsym
  4523. --- /dev/null
  4524. +++ b/src/ldso/arm/find_exidx.c
  4525. @@ -0,0 +1,42 @@
  4526. +#define _GNU_SOURCE
  4527. +#include <link.h>
  4528. +#include <stdint.h>
  4529. +
  4530. +struct find_exidx_data {
  4531. + uintptr_t pc, exidx_start;
  4532. + int exidx_len;
  4533. +};
  4534. +
  4535. +static int find_exidx(struct dl_phdr_info *info, size_t size, void *ptr)
  4536. +{
  4537. + struct find_exidx_data *data = ptr;
  4538. + const ElfW(Phdr) *phdr = info->dlpi_phdr;
  4539. + uintptr_t addr, exidx_start = 0;
  4540. + int i, match = 0, exidx_len = 0;
  4541. +
  4542. + for (i = info->dlpi_phnum; i > 0; i--, phdr++) {
  4543. + addr = info->dlpi_addr + phdr->p_vaddr;
  4544. + switch (phdr->p_type) {
  4545. + case PT_LOAD:
  4546. + match |= data->pc >= addr && data->pc < addr + phdr->p_memsz;
  4547. + break;
  4548. + case PT_ARM_EXIDX:
  4549. + exidx_start = addr;
  4550. + exidx_len = phdr->p_memsz;
  4551. + break;
  4552. + }
  4553. + }
  4554. + data->exidx_start = exidx_start;
  4555. + data->exidx_len = exidx_len;
  4556. + return match;
  4557. +}
  4558. +
  4559. +uintptr_t __gnu_Unwind_Find_exidx(uintptr_t pc, int *pcount)
  4560. +{
  4561. + struct find_exidx_data data;
  4562. + data.pc = pc;
  4563. + if (dl_iterate_phdr(find_exidx, &data) <= 0)
  4564. + return 0;
  4565. + *pcount = data.exidx_len / 8;
  4566. + return data.exidx_start;
  4567. +}
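
__gnu_Unwind_Find_exidx is the hook libgcc's ARM EHABI unwinder uses to find the .ARM.exidx table covering a PC; building it on dl_iterate_phdr makes unwinding work across dlopen'd modules too. The caller's side of the contract, roughly (illustrative, not libgcc source):

    #include <stdint.h>

    uintptr_t __gnu_Unwind_Find_exidx(uintptr_t pc, int *pcount);

    static const void *exidx_for(uintptr_t pc)
    {
        int count;
        uintptr_t base = __gnu_Unwind_Find_exidx(pc, &count);
        if (!base) return 0;        /* no loaded module covers pc */
        /* count = number of 8-byte exidx entries starting at base */
        return (const void *)base;
    }
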
  4568. --- a/src/ldso/dynlink.c
  4569. +++ b/src/ldso/dynlink.c
  4570. @@ -70,8 +70,8 @@ struct dso {
  4571. char kernel_mapped;
  4572. struct dso **deps, *needed_by;
  4573. char *rpath_orig, *rpath;
  4574. - void *tls_image;
  4575. - size_t tls_len, tls_size, tls_align, tls_id, tls_offset;
  4576. + struct tls_module tls;
  4577. + size_t tls_id;
  4578. size_t relro_start, relro_end;
  4579. void **new_dtv;
  4580. unsigned char *new_tls;
  4581. @@ -99,7 +99,9 @@ struct symdef {
  4582. int __init_tp(void *);
  4583. void __init_libc(char **, char *);
  4584. +void *__copy_tls(unsigned char *);
  4585. +__attribute__((__visibility__("hidden")))
  4586. const char *__libc_get_version(void);
  4587. static struct builtin_tls {
  4588. @@ -123,6 +125,7 @@ static int noload;
  4589. static jmp_buf *rtld_fail;
  4590. static pthread_rwlock_t lock;
  4591. static struct debug debug;
  4592. +static struct tls_module *tls_tail;
  4593. static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN;
  4594. static size_t static_tls_cnt;
  4595. static pthread_mutex_t init_fini_lock = { ._m_type = PTHREAD_MUTEX_RECURSIVE };
  4596. @@ -131,6 +134,15 @@ static struct fdpic_dummy_loadmap app_du
  4597. struct debug *_dl_debug_addr = &debug;
  4598. +__attribute__((__visibility__("hidden")))
  4599. +void (*const __init_array_start)(void)=0, (*const __fini_array_start)(void)=0;
  4600. +
  4601. +__attribute__((__visibility__("hidden")))
  4602. +extern void (*const __init_array_end)(void), (*const __fini_array_end)(void);
  4603. +
  4604. +weak_alias(__init_array_start, __init_array_end);
  4605. +weak_alias(__fini_array_start, __fini_array_end);
  4606. +
  4607. static int dl_strcmp(const char *l, const char *r)
  4608. {
  4609. for (; *l==*r && *l; l++, r++);
  4610. @@ -397,14 +409,14 @@ static void do_relocs(struct dso *dso, s
  4611. break;
  4612. #ifdef TLS_ABOVE_TP
  4613. case REL_TPOFF:
  4614. - *reloc_addr = tls_val + def.dso->tls_offset + TPOFF_K + addend;
  4615. + *reloc_addr = tls_val + def.dso->tls.offset + TPOFF_K + addend;
  4616. break;
  4617. #else
  4618. case REL_TPOFF:
  4619. - *reloc_addr = tls_val - def.dso->tls_offset + addend;
  4620. + *reloc_addr = tls_val - def.dso->tls.offset + addend;
  4621. break;
  4622. case REL_TPOFF_NEG:
  4623. - *reloc_addr = def.dso->tls_offset - tls_val + addend;
  4624. + *reloc_addr = def.dso->tls.offset - tls_val + addend;
  4625. break;
  4626. #endif
  4627. case REL_TLSDESC:
  4628. @@ -426,10 +438,10 @@ static void do_relocs(struct dso *dso, s
  4629. } else {
  4630. reloc_addr[0] = (size_t)__tlsdesc_static;
  4631. #ifdef TLS_ABOVE_TP
  4632. - reloc_addr[1] = tls_val + def.dso->tls_offset
  4633. + reloc_addr[1] = tls_val + def.dso->tls.offset
  4634. + TPOFF_K + addend;
  4635. #else
  4636. - reloc_addr[1] = tls_val - def.dso->tls_offset
  4637. + reloc_addr[1] = tls_val - def.dso->tls.offset
  4638. + addend;
  4639. #endif
  4640. }
  4641. @@ -482,8 +494,14 @@ static void reclaim_gaps(struct dso *dso
  4642. static void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off)
  4643. {
  4644. - char *q = mmap(p, n, prot, flags, fd, off);
  4645. - if (q != MAP_FAILED || errno != EINVAL) return q;
  4646. + static int no_map_fixed;
  4647. + char *q;
  4648. + if (!no_map_fixed) {
  4649. + q = mmap(p, n, prot, flags|MAP_FIXED, fd, off);
  4650. + if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL)
  4651. + return q;
  4652. + no_map_fixed = 1;
  4653. + }
  4654. /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */
  4655. if (flags & MAP_ANONYMOUS) {
  4656. memset(p, 0, n);
  4657. @@ -561,9 +579,9 @@ static void *map_library(int fd, struct
  4658. dyn = ph->p_vaddr;
  4659. } else if (ph->p_type == PT_TLS) {
  4660. tls_image = ph->p_vaddr;
  4661. - dso->tls_align = ph->p_align;
  4662. - dso->tls_len = ph->p_filesz;
  4663. - dso->tls_size = ph->p_memsz;
  4664. + dso->tls.align = ph->p_align;
  4665. + dso->tls.len = ph->p_filesz;
  4666. + dso->tls.size = ph->p_memsz;
  4667. } else if (ph->p_type == PT_GNU_RELRO) {
  4668. dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
  4669. dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
  4670. @@ -593,7 +611,7 @@ static void *map_library(int fd, struct
  4671. ((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
  4672. ((ph->p_flags&PF_X) ? PROT_EXEC : 0));
  4673. map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
  4674. - prot, (prot&PROT_WRITE) ? MAP_PRIVATE : MAP_SHARED,
  4675. + prot, MAP_PRIVATE,
  4676. fd, ph->p_offset & -PAGE_SIZE);
  4677. if (map == MAP_FAILED) {
  4678. unmap_library(dso);
  4679. @@ -604,6 +622,19 @@ static void *map_library(int fd, struct
  4680. dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
  4681. dso->loadmap->segs[i].p_memsz = ph->p_memsz;
  4682. i++;
  4683. + if (prot & PROT_WRITE) {
  4684. + size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
  4685. + + ph->p_filesz;
  4686. + size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
  4687. + size_t pgend = brk + ph->p_memsz - ph->p_filesz
  4688. + + PAGE_SIZE-1 & -PAGE_SIZE;
  4689. + if (pgend > pgbrk && mmap_fixed(map+pgbrk,
  4690. + pgend-pgbrk, prot,
  4691. + MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
  4692. + -1, off_start) == MAP_FAILED)
  4693. + goto error;
  4694. + memset(map + brk, 0, pgbrk-brk);
  4695. + }
  4696. }
  4697. map = (void *)dso->loadmap->segs[0].addr;
  4698. map_len = 0;
  4699. @@ -618,7 +649,11 @@ static void *map_library(int fd, struct
  4700. * the length of the file. This is okay because we will not
  4701. * use the invalid part; we just need to reserve the right
  4702. * amount of virtual address space to map over later. */
  4703. - map = mmap((void *)addr_min, map_len, prot, MAP_PRIVATE, fd, off_start);
  4704. + map = DL_NOMMU_SUPPORT
  4705. + ? mmap((void *)addr_min, map_len, PROT_READ|PROT_WRITE|PROT_EXEC,
  4706. + MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
  4707. + : mmap((void *)addr_min, map_len, prot,
  4708. + MAP_PRIVATE, fd, off_start);
  4709. if (map==MAP_FAILED) goto error;
  4710. dso->map = map;
  4711. dso->map_len = map_len;
  4712. @@ -643,7 +678,8 @@ static void *map_library(int fd, struct
  4713. dso->phentsize = eh->e_phentsize;
  4714. }
  4715. /* Reuse the existing mapping for the lowest-address LOAD */
  4716. - if ((ph->p_vaddr & -PAGE_SIZE) == addr_min) continue;
  4717. + if ((ph->p_vaddr & -PAGE_SIZE) == addr_min && !DL_NOMMU_SUPPORT)
  4718. + continue;
  4719. this_min = ph->p_vaddr & -PAGE_SIZE;
  4720. this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
  4721. off_start = ph->p_offset & -PAGE_SIZE;
  4722. @@ -670,7 +706,7 @@ static void *map_library(int fd, struct
  4723. done_mapping:
  4724. dso->base = base;
  4725. dso->dynv = laddr(dso, dyn);
  4726. - if (dso->tls_size) dso->tls_image = laddr(dso, tls_image);
  4727. + if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
  4728. if (!runtime) reclaim_gaps(dso);
  4729. free(allocated_buf);
  4730. return map;
  4731. @@ -987,8 +1023,8 @@ static struct dso *load_library(const ch
  4732. * extended DTV capable of storing an additional slot for
  4733. * the newly-loaded DSO. */
  4734. alloc_size = sizeof *p + strlen(pathname) + 1;
  4735. - if (runtime && temp_dso.tls_image) {
  4736. - size_t per_th = temp_dso.tls_size + temp_dso.tls_align
  4737. + if (runtime && temp_dso.tls.image) {
  4738. + size_t per_th = temp_dso.tls.size + temp_dso.tls.align
  4739. + sizeof(void *) * (tls_cnt+3);
  4740. n_th = libc.threads_minus_1 + 1;
  4741. if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
  4742. @@ -1009,22 +1045,25 @@ static struct dso *load_library(const ch
  4743. strcpy(p->name, pathname);
  4744. /* Add a shortname only if name arg was not an explicit pathname. */
  4745. if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
  4746. - if (p->tls_image) {
  4747. + if (p->tls.image) {
  4748. p->tls_id = ++tls_cnt;
  4749. - tls_align = MAXP2(tls_align, p->tls_align);
  4750. + tls_align = MAXP2(tls_align, p->tls.align);
  4751. #ifdef TLS_ABOVE_TP
  4752. - p->tls_offset = tls_offset + ( (tls_align-1) &
  4753. - -(tls_offset + (uintptr_t)p->tls_image) );
  4754. - tls_offset += p->tls_size;
  4755. + p->tls.offset = tls_offset + ( (tls_align-1) &
  4756. + -(tls_offset + (uintptr_t)p->tls.image) );
  4757. + tls_offset += p->tls.size;
  4758. #else
  4759. - tls_offset += p->tls_size + p->tls_align - 1;
  4760. - tls_offset -= (tls_offset + (uintptr_t)p->tls_image)
  4761. - & (p->tls_align-1);
  4762. - p->tls_offset = tls_offset;
  4763. + tls_offset += p->tls.size + p->tls.align - 1;
  4764. + tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
  4765. + & (p->tls.align-1);
  4766. + p->tls.offset = tls_offset;
  4767. #endif
  4768. p->new_dtv = (void *)(-sizeof(size_t) &
  4769. (uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
  4770. p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
  4771. + if (tls_tail) tls_tail->next = &p->tls;
  4772. + else libc.tls_head = &p->tls;
  4773. + tls_tail = &p->tls;
  4774. }
  4775. tail->next = p;
  4776. @@ -1151,7 +1190,7 @@ static void kernel_mapped_dso(struct dso
  4777. p->kernel_mapped = 1;
  4778. }
  4779. -static void do_fini()
  4780. +void __libc_exit_fini()
  4781. {
  4782. struct dso *p;
  4783. size_t dyn[DYN_CNT];
  4784. @@ -1214,53 +1253,8 @@ static void dl_debug_state(void)
  4785. weak_alias(dl_debug_state, _dl_debug_state);
  4786. -void __reset_tls()
  4787. +void __init_tls(size_t *auxv)
  4788. {
  4789. - pthread_t self = __pthread_self();
  4790. - struct dso *p;
  4791. - for (p=head; p; p=p->next) {
  4792. - if (!p->tls_id || !self->dtv[p->tls_id]) continue;
  4793. - memcpy(self->dtv[p->tls_id], p->tls_image, p->tls_len);
  4794. - memset((char *)self->dtv[p->tls_id]+p->tls_len, 0,
  4795. - p->tls_size - p->tls_len);
  4796. - if (p->tls_id == (size_t)self->dtv[0]) break;
  4797. - }
  4798. -}
  4799. -
  4800. -void *__copy_tls(unsigned char *mem)
  4801. -{
  4802. - pthread_t td;
  4803. - struct dso *p;
  4804. - void **dtv;
  4805. -
  4806. -#ifdef TLS_ABOVE_TP
  4807. - dtv = (void **)(mem + libc.tls_size) - (tls_cnt + 1);
  4808. -
  4809. - mem += -((uintptr_t)mem + sizeof(struct pthread)) & (tls_align-1);
  4810. - td = (pthread_t)mem;
  4811. - mem += sizeof(struct pthread);
  4812. -
  4813. - for (p=head; p; p=p->next) {
  4814. - if (!p->tls_id) continue;
  4815. - dtv[p->tls_id] = mem + p->tls_offset;
  4816. - memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
  4817. - }
  4818. -#else
  4819. - dtv = (void **)mem;
  4820. -
  4821. - mem += libc.tls_size - sizeof(struct pthread);
  4822. - mem -= (uintptr_t)mem & (tls_align-1);
  4823. - td = (pthread_t)mem;
  4824. -
  4825. - for (p=head; p; p=p->next) {
  4826. - if (!p->tls_id) continue;
  4827. - dtv[p->tls_id] = mem - p->tls_offset;
  4828. - memcpy(dtv[p->tls_id], p->tls_image, p->tls_len);
  4829. - }
  4830. -#endif
  4831. - dtv[0] = (void *)tls_cnt;
  4832. - td->dtv = td->dtv_copy = dtv;
  4833. - return td;
  4834. }
  4835. __attribute__((__visibility__("hidden")))
  4836. @@ -1286,7 +1280,7 @@ void *__tls_get_new(size_t *v)
  4837. /* Get new DTV space from new DSO if needed */
  4838. if (v[0] > (size_t)self->dtv[0]) {
  4839. void **newdtv = p->new_dtv +
  4840. - (v[0]+1)*sizeof(void *)*a_fetch_add(&p->new_dtv_idx,1);
  4841. + (v[0]+1)*a_fetch_add(&p->new_dtv_idx,1);
  4842. memcpy(newdtv, self->dtv,
  4843. ((size_t)self->dtv[0]+1) * sizeof(void *));
  4844. newdtv[0] = (void *)v[0];
  4845. @@ -1297,12 +1291,12 @@ void *__tls_get_new(size_t *v)
  4846. unsigned char *mem;
  4847. for (p=head; ; p=p->next) {
  4848. if (!p->tls_id || self->dtv[p->tls_id]) continue;
  4849. - mem = p->new_tls + (p->tls_size + p->tls_align)
  4850. + mem = p->new_tls + (p->tls.size + p->tls.align)
  4851. * a_fetch_add(&p->new_tls_idx,1);
  4852. - mem += ((uintptr_t)p->tls_image - (uintptr_t)mem)
  4853. - & (p->tls_align-1);
  4854. + mem += ((uintptr_t)p->tls.image - (uintptr_t)mem)
  4855. + & (p->tls.align-1);
  4856. self->dtv[p->tls_id] = mem;
  4857. - memcpy(mem, p->tls_image, p->tls_len);
  4858. + memcpy(mem, p->tls.image, p->tls.len);
  4859. if (p->tls_id == v[0]) break;
  4860. }
  4861. __restore_sigs(&set);
  4862. @@ -1311,6 +1305,8 @@ void *__tls_get_new(size_t *v)
  4863. static void update_tls_size()
  4864. {
  4865. + libc.tls_cnt = tls_cnt;
  4866. + libc.tls_align = tls_align;
  4867. libc.tls_size = ALIGN(
  4868. (1+tls_cnt) * sizeof(void *) +
  4869. tls_offset +
  4870. @@ -1421,6 +1417,7 @@ _Noreturn void __dls3(size_t *sp)
  4871. * use during dynamic linking. If possible it will also serve as the
  4872. * thread pointer at runtime. */
  4873. libc.tls_size = sizeof builtin_tls;
  4874. + libc.tls_align = tls_align;
  4875. if (__init_tp(__copy_tls((void *)builtin_tls)) < 0) {
  4876. a_crash();
  4877. }
  4878. @@ -1448,13 +1445,13 @@ _Noreturn void __dls3(size_t *sp)
  4879. interp_off = (size_t)phdr->p_vaddr;
  4880. else if (phdr->p_type == PT_TLS) {
  4881. tls_image = phdr->p_vaddr;
  4882. - app.tls_len = phdr->p_filesz;
  4883. - app.tls_size = phdr->p_memsz;
  4884. - app.tls_align = phdr->p_align;
  4885. + app.tls.len = phdr->p_filesz;
  4886. + app.tls.size = phdr->p_memsz;
  4887. + app.tls.align = phdr->p_align;
  4888. }
  4889. }
  4890. if (DL_FDPIC) app.loadmap = app_loadmap;
  4891. - if (app.tls_size) app.tls_image = laddr(&app, tls_image);
  4892. + if (app.tls.size) app.tls.image = laddr(&app, tls_image);
  4893. if (interp_off) ldso.name = laddr(&app, interp_off);
  4894. if ((aux[0] & (1UL<<AT_EXECFN))
  4895. && strncmp((char *)aux[AT_EXECFN], "/proc/", 6))
  4896. @@ -1523,19 +1520,20 @@ _Noreturn void __dls3(size_t *sp)
  4897. dprintf(1, "\t%s (%p)\n", ldso.name, ldso.base);
  4898. }
  4899. }
  4900. - if (app.tls_size) {
  4901. + if (app.tls.size) {
  4902. + libc.tls_head = &app.tls;
  4903. app.tls_id = tls_cnt = 1;
  4904. #ifdef TLS_ABOVE_TP
  4905. - app.tls_offset = 0;
  4906. - tls_offset = app.tls_size
  4907. - + ( -((uintptr_t)app.tls_image + app.tls_size)
  4908. - & (app.tls_align-1) );
  4909. + app.tls.offset = 0;
  4910. + tls_offset = app.tls.size
  4911. + + ( -((uintptr_t)app.tls.image + app.tls.size)
  4912. + & (app.tls.align-1) );
  4913. #else
  4914. - tls_offset = app.tls_offset = app.tls_size
  4915. - + ( -((uintptr_t)app.tls_image + app.tls_size)
  4916. - & (app.tls_align-1) );
  4917. + tls_offset = app.tls.offset = app.tls.size
  4918. + + ( -((uintptr_t)app.tls.image + app.tls.size)
  4919. + & (app.tls.align-1) );
  4920. #endif
  4921. - tls_align = MAXP2(tls_align, app.tls_align);
  4922. + tls_align = MAXP2(tls_align, app.tls.align);
  4923. }
  4924. app.global = 1;
  4925. decode_dyn(&app);
  4926. @@ -1635,8 +1633,6 @@ _Noreturn void __dls3(size_t *sp)
  4927. debug.state = 0;
  4928. _dl_debug_state();
  4929. - __init_libc(envp, argv[0]);
  4930. - atexit(do_fini);
  4931. errno = 0;
  4932. CRTJMP((void *)aux[AT_ENTRY], argv-1);
  4933. @@ -1646,6 +1642,7 @@ _Noreturn void __dls3(size_t *sp)
  4934. void *dlopen(const char *file, int mode)
  4935. {
  4936. struct dso *volatile p, *orig_tail, *next;
  4937. + struct tls_module *orig_tls_tail;
  4938. size_t orig_tls_cnt, orig_tls_offset, orig_tls_align;
  4939. size_t i;
  4940. int cs;
  4941. @@ -1658,6 +1655,7 @@ void *dlopen(const char *file, int mode)
  4942. __inhibit_ptc();
  4943. p = 0;
  4944. + orig_tls_tail = tls_tail;
  4945. orig_tls_cnt = tls_cnt;
  4946. orig_tls_offset = tls_offset;
  4947. orig_tls_align = tls_align;
  4948. @@ -1684,6 +1682,8 @@ void *dlopen(const char *file, int mode)
  4949. unmap_library(p);
  4950. free(p);
  4951. }
  4952. + if (!orig_tls_tail) libc.tls_head = 0;
  4953. + tls_tail = orig_tls_tail;
  4954. tls_cnt = orig_tls_cnt;
  4955. tls_offset = orig_tls_offset;
  4956. tls_align = orig_tls_align;
  4957. @@ -1900,7 +1900,7 @@ int dl_iterate_phdr(int(*callback)(struc
  4958. info.dlpi_adds = gencnt;
  4959. info.dlpi_subs = 0;
  4960. info.dlpi_tls_modid = current->tls_id;
  4961. - info.dlpi_tls_data = current->tls_image;
  4962. + info.dlpi_tls_data = current->tls.image;
  4963. ret = (callback)(&info, sizeof (info), data);
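
The recurring tls_image/tls_len/tls_size/tls_align/tls_offset renames throughout this file come from folding the per-module TLS bookkeeping into a single struct tls_module chained through libc.tls_head (with tls_tail marking the append point), so TLS setup code can walk modules without knowing about struct dso. A simplified sketch, assuming the field layout implied by the diff and ignoring the TLS variant I/II addressing differences the real __copy_tls handles:

#include <string.h>
#include <stddef.h>

struct tls_module {
        struct tls_module *next;
        void *image;
        size_t len, size, offset, align;
};

/* Walk the module list, installing each module's TLS image into a
 * freshly allocated block and recording its address in the DTV. */
static void install_tls_images(struct tls_module *head,
        unsigned char *mem, void **dtv)
{
        size_t id = 1;
        for (struct tls_module *p = head; p; p = p->next, id++) {
                unsigned char *dst = mem + p->offset; /* sign differs by variant */
                memcpy(dst, p->image, p->len);             /* .tdata */
                memset(dst + p->len, 0, p->size - p->len); /* .tbss */
                dtv[id] = dst;
        }
}
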
  4964. --- a/src/locale/langinfo.c
  4965. +++ b/src/locale/langinfo.c
  4966. @@ -37,23 +37,23 @@ char *__nl_langinfo_l(nl_item item, loca
  4967. switch (cat) {
  4968. case LC_NUMERIC:
  4969. - if (idx > 1) return NULL;
  4970. + if (idx > 1) return "";
  4971. str = c_numeric;
  4972. break;
  4973. case LC_TIME:
  4974. - if (idx > 0x31) return NULL;
  4975. + if (idx > 0x31) return "";
  4976. str = c_time;
  4977. break;
  4978. case LC_MONETARY:
  4979. - if (idx > 0) return NULL;
  4980. + if (idx > 0) return "";
  4981. str = "";
  4982. break;
  4983. case LC_MESSAGES:
  4984. - if (idx > 3) return NULL;
  4985. + if (idx > 3) return "";
  4986. str = c_messages;
  4987. break;
  4988. default:
  4989. - return NULL;
  4990. + return "";
  4991. }
  4992. for (; idx; idx--, str++) for (; *str; str++);
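
Returning "" instead of NULL matters because nl_langinfo is specified to return a string, and callers routinely pass the result straight to printf or strcmp without a null check. A small usage example of the guarantee:

#include <langinfo.h>
#include <stdio.h>

int main(void)
{
        /* Safe even for items the C locale leaves undefined: the
         * result is at worst an empty string, never a null pointer. */
        printf("codeset:   %s\n", nl_langinfo(CODESET));
        printf("currency: '%s'\n", nl_langinfo(CRNCYSTR));
        return 0;
}
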
  4993. --- a/src/malloc/lite_malloc.c
  4994. +++ b/src/malloc/lite_malloc.c
  4995. @@ -8,7 +8,7 @@
  4996. void *__expand_heap(size_t *);
  4997. -void *__simple_malloc(size_t n)
  4998. +static void *__simple_malloc(size_t n)
  4999. {
  5000. static char *cur, *end;
  5001. static volatile int lock[2];
  5002. --- a/src/math/__rem_pio2.c
  5003. +++ b/src/math/__rem_pio2.c
  5004. @@ -118,7 +118,7 @@ int __rem_pio2(double x, double *y)
  5005. if (ix < 0x413921fb) { /* |x| ~< 2^20*(pi/2), medium size */
  5006. medium:
  5007. /* rint(x/(pi/2)), Assume round-to-nearest. */
  5008. - fn = x*invpio2 + toint - toint;
  5009. + fn = (double_t)x*invpio2 + toint - toint;
  5010. n = (int32_t)fn;
  5011. r = x - fn*pio2_1;
  5012. w = fn*pio2_1t; /* 1st round, good to 85 bits */
  5013. --- a/src/math/__rem_pio2f.c
  5014. +++ b/src/math/__rem_pio2f.c
  5015. @@ -51,7 +51,7 @@ int __rem_pio2f(float x, double *y)
  5016. /* 25+53 bit pi is good enough for medium size */
  5017. if (ix < 0x4dc90fdb) { /* |x| ~< 2^28*(pi/2), medium size */
  5018. /* Use a specialized rint() to get fn. Assume round-to-nearest. */
  5019. - fn = x*invpio2 + toint - toint;
  5020. + fn = (double_t)x*invpio2 + toint - toint;
  5021. n = (int32_t)fn;
  5022. *y = x - fn*pio2_1 - fn*pio2_1t;
  5023. return n;
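
Both hunks touch the same add-and-subtract rounding idiom: toint is 1.5*2^52, large enough that after the addition a double has no fraction bits left, so "+ toint - toint" rounds the product to the nearest integer in the current rounding mode. The inserted (double_t) cast keeps the whole expression at the platform's evaluation precision (long double on x87), so the intermediate is not rounded twice. The idiom in isolation, as a sketch assuming FLT_EVAL_METHOD == 0 so that double_t is double (the real code sizes toint by the epsilon of the evaluation type):

#include <float.h>
#include <math.h>

/* Round to the nearest integer value without a function call; valid
 * only while |x| is well below 2^52, as for the reduced args here. */
static double round_nearest(double x)
{
        static const double toint = 1.5 / DBL_EPSILON; /* 1.5 * 2^52 */
        return (double_t)x + toint - toint;
}
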
  5024. --- /dev/null
  5025. +++ b/src/math/arm/fabs.c
  5026. @@ -0,0 +1,15 @@
  5027. +#include <math.h>
  5028. +
  5029. +#if __ARM_PCS_VFP
  5030. +
  5031. +double fabs(double x)
  5032. +{
  5033. + __asm__ ("vabs.f64 %P0, %P1" : "=w"(x) : "w"(x));
  5034. + return x;
  5035. +}
  5036. +
  5037. +#else
  5038. +
  5039. +#include "../fabs.c"
  5040. +
  5041. +#endif
  5042. --- /dev/null
  5043. +++ b/src/math/arm/fabsf.c
  5044. @@ -0,0 +1,15 @@
  5045. +#include <math.h>
  5046. +
  5047. +#if __ARM_PCS_VFP
  5048. +
  5049. +float fabsf(float x)
  5050. +{
  5051. + __asm__ ("vabs.f32 %0, %1" : "=t"(x) : "t"(x));
  5052. + return x;
  5053. +}
  5054. +
  5055. +#else
  5056. +
  5057. +#include "../fabsf.c"
  5058. +
  5059. +#endif
  5060. --- /dev/null
  5061. +++ b/src/math/arm/sqrt.c
  5062. @@ -0,0 +1,15 @@
  5063. +#include <math.h>
  5064. +
  5065. +#if __VFP_FP__ && !__SOFTFP__
  5066. +
  5067. +double sqrt(double x)
  5068. +{
  5069. + __asm__ ("vsqrt.f64 %P0, %P1" : "=w"(x) : "w"(x));
  5070. + return x;
  5071. +}
  5072. +
  5073. +#else
  5074. +
  5075. +#include "../sqrt.c"
  5076. +
  5077. +#endif
  5078. --- /dev/null
  5079. +++ b/src/math/arm/sqrtf.c
  5080. @@ -0,0 +1,15 @@
  5081. +#include <math.h>
  5082. +
  5083. +#if __VFP_FP__ && !__SOFTFP__
  5084. +
  5085. +float sqrtf(float x)
  5086. +{
  5087. + __asm__ ("vsqrt.f32 %0, %1" : "=t"(x) : "t"(x));
  5088. + return x;
  5089. +}
  5090. +
  5091. +#else
  5092. +
  5093. +#include "../sqrtf.c"
  5094. +
  5095. +#endif
  5096. --- a/src/math/armebhf/fabs.sub
  5097. +++ /dev/null
  5098. @@ -1 +0,0 @@
  5099. -../armhf/fabs.s
  5100. --- a/src/math/armebhf/fabsf.sub
  5101. +++ /dev/null
  5102. @@ -1 +0,0 @@
  5103. -../armhf/fabsf.s
  5104. --- a/src/math/armebhf/sqrt.sub
  5105. +++ /dev/null
  5106. @@ -1 +0,0 @@
  5107. -../armhf/sqrt.s
  5108. --- a/src/math/armebhf/sqrtf.sub
  5109. +++ /dev/null
  5110. @@ -1 +0,0 @@
  5111. -../armhf/sqrtf.s
  5112. --- a/src/math/armhf/fabs.s
  5113. +++ /dev/null
  5114. @@ -1,7 +0,0 @@
  5115. -.fpu vfp
  5116. -.text
  5117. -.global fabs
  5118. -.type fabs,%function
  5119. -fabs:
  5120. - vabs.f64 d0, d0
  5121. - bx lr
  5122. --- a/src/math/armhf/fabs.sub
  5123. +++ /dev/null
  5124. @@ -1 +0,0 @@
  5125. -fabs.s
  5126. --- a/src/math/armhf/fabsf.s
  5127. +++ /dev/null
  5128. @@ -1,7 +0,0 @@
  5129. -.fpu vfp
  5130. -.text
  5131. -.global fabsf
  5132. -.type fabsf,%function
  5133. -fabsf:
  5134. - vabs.f32 s0, s0
  5135. - bx lr
  5136. --- a/src/math/armhf/fabsf.sub
  5137. +++ /dev/null
  5138. @@ -1 +0,0 @@
  5139. -fabsf.s
  5140. --- a/src/math/armhf/sqrt.s
  5141. +++ /dev/null
  5142. @@ -1,7 +0,0 @@
  5143. -.fpu vfp
  5144. -.text
  5145. -.global sqrt
  5146. -.type sqrt,%function
  5147. -sqrt:
  5148. - vsqrt.f64 d0, d0
  5149. - bx lr
  5150. --- a/src/math/armhf/sqrt.sub
  5151. +++ /dev/null
  5152. @@ -1 +0,0 @@
  5153. -sqrt.s
  5154. --- a/src/math/armhf/sqrtf.s
  5155. +++ /dev/null
  5156. @@ -1,7 +0,0 @@
  5157. -.fpu vfp
  5158. -.text
  5159. -.global sqrtf
  5160. -.type sqrtf,%function
  5161. -sqrtf:
  5162. - vsqrt.f32 s0, s0
  5163. - bx lr
  5164. --- a/src/math/armhf/sqrtf.sub
  5165. +++ /dev/null
  5166. @@ -1 +0,0 @@
  5167. -sqrtf.s
  5168. --- a/src/math/hypot.c
  5169. +++ b/src/math/hypot.c
  5170. @@ -12,10 +12,10 @@ static void sq(double_t *hi, double_t *l
  5171. {
  5172. double_t xh, xl, xc;
  5173. - xc = x*SPLIT;
  5174. + xc = (double_t)x*SPLIT;
  5175. xh = x - xc + xc;
  5176. xl = x - xh;
  5177. - *hi = x*x;
  5178. + *hi = (double_t)x*x;
  5179. *lo = xh*xh - *hi + 2*xh*xl + xl*xl;
  5180. }
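
The casts make sq() a textbook instance of Dekker's exact product: SPLIT is 2^27+1, so xh and xl each fit in half the mantissa, every partial product below is exact, and hi + lo equals x*x with no rounding error. In isolation, ignoring the overflow pre-scaling hypot performs first and assuming the compiler does not contract the expressions into FMAs:

#include <stdio.h>

/* Dekker split-and-square: on return, hi + lo == x*x exactly. */
static void exact_square(double x, double *hi, double *lo)
{
        static const double SPLIT = 0x1p27 + 1;
        double xc = x * SPLIT;
        double xh = x - xc + xc;  /* top ~26 bits of x */
        double xl = x - xh;       /* remaining low bits */
        *hi = x * x;
        *lo = xh*xh - *hi + 2*xh*xl + xl*xl; /* rounding error of x*x */
}

int main(void)
{
        double hi, lo;
        exact_square(1.0 + 0x1p-30, &hi, &lo);
        printf("hi=%.17g lo=%.17g\n", hi, lo);
        return 0;
}
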
  5181. --- a/src/mman/mremap.c
  5182. +++ b/src/mman/mremap.c
  5183. @@ -1,17 +1,31 @@
  5184. +#define _GNU_SOURCE
  5185. #include <unistd.h>
  5186. #include <sys/mman.h>
  5187. +#include <errno.h>
  5188. +#include <stdint.h>
  5189. #include <stdarg.h>
  5190. #include "syscall.h"
  5191. #include "libc.h"
  5192. +static void dummy(void) { }
  5193. +weak_alias(dummy, __vm_wait);
  5194. +
  5195. void *__mremap(void *old_addr, size_t old_len, size_t new_len, int flags, ...)
  5196. {
  5197. va_list ap;
  5198. - void *new_addr;
  5199. -
  5200. - va_start(ap, flags);
  5201. - new_addr = va_arg(ap, void *);
  5202. - va_end(ap);
  5203. + void *new_addr = 0;
  5204. +
  5205. + if (new_len >= PTRDIFF_MAX) {
  5206. + errno = ENOMEM;
  5207. + return MAP_FAILED;
  5208. + }
  5209. +
  5210. + if (flags & MREMAP_FIXED) {
  5211. + __vm_wait();
  5212. + va_start(ap, flags);
  5213. + new_addr = va_arg(ap, void *);
  5214. + va_end(ap);
  5215. + }
  5216. return (void *)syscall(SYS_mremap, old_addr, old_len, new_len, flags, new_addr);
  5217. }
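
Two independent fixes meet in this hunk: the optional address argument is defined only when MREMAP_FIXED is passed, so the varargs must not be read otherwise, and lengths at or above PTRDIFF_MAX can never yield a valid object, so they now fail early with ENOMEM (the __vm_wait hook lets the dynamic linker quiesce concurrent mapping changes first). Typical caller-side usage these checks guard, as a sketch:

#define _GNU_SOURCE
#include <sys/mman.h>

/* Grow a mapping, letting the kernel relocate it if it cannot be
 * extended in place. No MREMAP_FIXED, hence no address argument. */
static void *grow_mapping(void *p, size_t old_len, size_t new_len)
{
        return mremap(p, old_len, new_len, MREMAP_MAYMOVE);
}
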
  5218. --- a/src/network/getifaddrs.c
  5219. +++ b/src/network/getifaddrs.c
  5220. @@ -162,13 +162,26 @@ static int netlink_msg_to_ifaddr(void *p
  5221. for (rta = NLMSG_RTA(h, sizeof(*ifa)); NLMSG_RTAOK(rta, h); rta = RTA_NEXT(rta)) {
  5222. switch (rta->rta_type) {
  5223. case IFA_ADDRESS:
  5224. - copy_addr(&ifs->ifa.ifa_addr, ifa->ifa_family, &ifs->addr, RTA_DATA(rta), RTA_DATALEN(rta), ifa->ifa_index);
+ /* If ifa_addr is already set, we received an IFA_LOCAL before,
+ * so treat this one as the destination address */
  5227. + if (ifs->ifa.ifa_addr)
  5228. + copy_addr(&ifs->ifa.ifa_dstaddr, ifa->ifa_family, &ifs->ifu, RTA_DATA(rta), RTA_DATALEN(rta), ifa->ifa_index);
  5229. + else
  5230. + copy_addr(&ifs->ifa.ifa_addr, ifa->ifa_family, &ifs->addr, RTA_DATA(rta), RTA_DATALEN(rta), ifa->ifa_index);
  5231. break;
  5232. case IFA_BROADCAST:
  5233. - /* For point-to-point links this is peer, but ifa_broadaddr
  5234. - * and ifa_dstaddr are union, so this works for both. */
  5235. copy_addr(&ifs->ifa.ifa_broadaddr, ifa->ifa_family, &ifs->ifu, RTA_DATA(rta), RTA_DATALEN(rta), ifa->ifa_index);
  5236. break;
  5237. + case IFA_LOCAL:
  5238. + /* If ifa_addr is set and we get IFA_LOCAL, assume we have
  5239. + * a point-to-point network. Move address to correct field. */
  5240. + if (ifs->ifa.ifa_addr) {
  5241. + ifs->ifu = ifs->addr;
  5242. + ifs->ifa.ifa_dstaddr = &ifs->ifu.sa;
  5243. + memset(&ifs->addr, 0, sizeof(ifs->addr));
  5244. + }
  5245. + copy_addr(&ifs->ifa.ifa_addr, ifa->ifa_family, &ifs->addr, RTA_DATA(rta), RTA_DATALEN(rta), ifa->ifa_index);
  5246. + break;
  5247. case IFA_LABEL:
  5248. if (RTA_DATALEN(rta) < sizeof(ifs->name)) {
  5249. memcpy(ifs->name, RTA_DATA(rta), RTA_DATALEN(rta));
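
Background for this hunk: for point-to-point interfaces the kernel reports the local address as IFA_LOCAL and the peer as IFA_ADDRESS, while ordinary interfaces send only IFA_ADDRESS; the shuffling above makes ifa_addr always hold the local address and ifa_dstaddr the peer. The caller-side view, as a sketch:

#include <ifaddrs.h>
#include <net/if.h>
#include <stdio.h>

int main(void)
{
        struct ifaddrs *list, *p;
        if (getifaddrs(&list)) return 1;
        for (p = list; p; p = p->ifa_next)
                if (p->ifa_addr && (p->ifa_flags & IFF_POINTOPOINT)
                    && p->ifa_dstaddr)
                        printf("%s: point-to-point, peer present\n",
                                p->ifa_name);
        freeifaddrs(list);
        return 0;
}
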
  5250. --- a/src/network/getnameinfo.c
  5251. +++ b/src/network/getnameinfo.c
  5252. @@ -135,13 +135,13 @@ int getnameinfo(const struct sockaddr *r
  5253. switch (af) {
  5254. case AF_INET:
  5255. a = (void *)&((struct sockaddr_in *)sa)->sin_addr;
  5256. - if (sl != sizeof(struct sockaddr_in)) return EAI_FAMILY;
  5257. + if (sl < sizeof(struct sockaddr_in)) return EAI_FAMILY;
  5258. mkptr4(ptr, a);
  5259. scopeid = 0;
  5260. break;
  5261. case AF_INET6:
  5262. a = (void *)&((struct sockaddr_in6 *)sa)->sin6_addr;
  5263. - if (sl != sizeof(struct sockaddr_in6)) return EAI_FAMILY;
  5264. + if (sl < sizeof(struct sockaddr_in6)) return EAI_FAMILY;
  5265. if (memcmp(a, "\0\0\0\0\0\0\0\0\0\0\xff\xff", 12))
  5266. mkptr6(ptr, a);
  5267. else
  5268. --- a/src/network/if_nametoindex.c
  5269. +++ b/src/network/if_nametoindex.c
  5270. @@ -10,7 +10,7 @@ unsigned if_nametoindex(const char *name
  5271. struct ifreq ifr;
  5272. int fd, r;
  5273. - if ((fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0)) < 0) return -1;
  5274. + if ((fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0)) < 0) return 0;
  5275. strncpy(ifr.ifr_name, name, sizeof ifr.ifr_name);
  5276. r = ioctl(fd, SIOCGIFINDEX, &ifr);
  5277. __syscall(SYS_close, fd);
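
Returning 0 rather than -1 matches POSIX: valid interface indices start at 1, and if_nametoindex returns unsigned, so -1 would come back as UINT_MAX and be indistinguishable from a nonzero index. Usage relying on the zero sentinel:

#include <net/if.h>
#include <stdio.h>

int main(void)
{
        unsigned idx = if_nametoindex("eth0"); /* name is illustrative */
        if (!idx) {
                perror("if_nametoindex");      /* 0 now reliably means failure */
                return 1;
        }
        printf("eth0 has index %u\n", idx);
        return 0;
}
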
  5278. --- a/src/network/lookup_name.c
  5279. +++ b/src/network/lookup_name.c
  5280. @@ -9,6 +9,7 @@
  5281. #include <fcntl.h>
  5282. #include <unistd.h>
  5283. #include <pthread.h>
  5284. +#include <errno.h>
  5285. #include "lookup.h"
  5286. #include "stdio_impl.h"
  5287. #include "syscall.h"
  5288. @@ -51,7 +52,14 @@ static int name_from_hosts(struct addres
  5289. int cnt = 0;
  5290. unsigned char _buf[1032];
  5291. FILE _f, *f = __fopen_rb_ca("/etc/hosts", &_f, _buf, sizeof _buf);
  5292. - if (!f) return 0;
  5293. + if (!f) switch (errno) {
  5294. + case ENOENT:
  5295. + case ENOTDIR:
  5296. + case EACCES:
  5297. + return 0;
  5298. + default:
  5299. + return EAI_SYSTEM;
  5300. + }
  5301. while (fgets(line, sizeof line, f) && cnt < MAXADDRS) {
  5302. char *p, *z;
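
The same errno triage is applied to /etc/services and /etc/resolv.conf below: a missing or unreadable configuration file is a normal setup and falls back to defaults, while any other open failure (fd or memory exhaustion, I/O errors) now surfaces as a hard error instead of being silently treated as an empty file. The idiom in isolation, as a sketch with a hypothetical open_config helper:

#include <errno.h>
#include <stdio.h>

/* Returns 0 with *f set (or NULL if the file is simply absent or
 * forbidden), -1 on failures that must be reported to the caller. */
static int open_config(const char *path, FILE **f)
{
        *f = fopen(path, "rbe"); /* 'e' = O_CLOEXEC, a common extension */
        if (*f) return 0;
        switch (errno) {
        case ENOENT:
        case ENOTDIR:
        case EACCES:
                return 0;   /* treat as empty configuration */
        default:
                return -1;  /* resource exhaustion, I/O error, ... */
        }
}
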
  5303. --- a/src/network/lookup_serv.c
  5304. +++ b/src/network/lookup_serv.c
  5305. @@ -4,6 +4,7 @@
  5306. #include <ctype.h>
  5307. #include <string.h>
  5308. #include <fcntl.h>
  5309. +#include <errno.h>
  5310. #include "lookup.h"
  5311. #include "stdio_impl.h"
  5312. @@ -69,7 +70,14 @@ int __lookup_serv(struct service buf[sta
  5313. unsigned char _buf[1032];
  5314. FILE _f, *f = __fopen_rb_ca("/etc/services", &_f, _buf, sizeof _buf);
  5315. - if (!f) return EAI_SERVICE;
  5316. + if (!f) switch (errno) {
  5317. + case ENOENT:
  5318. + case ENOTDIR:
  5319. + case EACCES:
  5320. + return EAI_SERVICE;
  5321. + default:
  5322. + return EAI_SYSTEM;
  5323. + }
  5324. while (fgets(line, sizeof line, f) && cnt < MAXSERVS) {
  5325. if ((p=strchr(line, '#'))) *p++='\n', *p=0;
  5326. --- a/src/network/proto.c
  5327. +++ b/src/network/proto.c
  5328. @@ -9,21 +9,36 @@ static const unsigned char protos[] = {
  5329. "\001icmp\0"
  5330. "\002igmp\0"
  5331. "\003ggp\0"
  5332. + "\004ipencap\0"
  5333. + "\005st\0"
  5334. "\006tcp\0"
  5335. + "\008egp\0"
  5336. "\014pup\0"
  5337. "\021udp\0"
  5338. - "\026idp\0"
  5339. + "\024hmp\0"
  5340. + "\026xns-idp\0"
  5341. + "\033rdp\0"
  5342. + "\035iso-tp4\0"
  5343. + "\044xtp\0"
  5344. + "\045ddp\0"
  5345. + "\046idpr-cmtp\0"
  5346. "\051ipv6\0"
  5347. "\053ipv6-route\0"
  5348. "\054ipv6-frag\0"
  5349. + "\055idrp\0"
  5350. + "\056rsvp\0"
  5351. "\057gre\0"
  5352. "\062esp\0"
  5353. "\063ah\0"
  5354. + "\071skip\0"
  5355. "\072ipv6-icmp\0"
  5356. "\073ipv6-nonxt\0"
  5357. "\074ipv6-opts\0"
  5358. + "\111rspf\0"
  5359. + "\121vmtp\0"
  5360. "\131ospf\0"
  5361. "\136ipip\0"
  5362. + "\142encap\0"
  5363. "\147pim\0"
  5364. "\377raw"
  5365. };
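
The table format: each entry is one byte holding the protocol number, written as an octal escape, followed by the NUL-terminated name, so "\010egp" is protocol 8, "egp" ("\008" would be ill-formed, since 8 is not an octal digit). A decoding sketch:

#include <stdio.h>

/* Walk a packed number+name table like the one above;
 * end is table + sizeof table. */
static void dump_protos(const unsigned char *p, const unsigned char *end)
{
        while (p < end) {
                unsigned num = *p++;
                printf("%3u %s\n", num, (const char *)p);
                while (*p++);  /* skip the NUL-terminated name */
        }
}
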
  5366. --- a/src/network/res_msend.c
  5367. +++ b/src/network/res_msend.c
  5368. @@ -54,7 +54,15 @@ int __res_msend(int nqueries, const unsi
  5369. /* Get nameservers from resolv.conf, fallback to localhost */
  5370. f = __fopen_rb_ca("/etc/resolv.conf", &_f, _buf, sizeof _buf);
  5371. - if (f) for (nns=0; nns<3 && fgets(line, sizeof line, f); ) {
  5372. + if (!f) switch (errno) {
  5373. + case ENOENT:
  5374. + case ENOTDIR:
  5375. + case EACCES:
  5376. + goto no_resolv_conf;
  5377. + default:
  5378. + return -1;
  5379. + }
  5380. + for (nns=0; nns<3 && fgets(line, sizeof line, f); ) {
  5381. if (!strncmp(line, "options", 7) && isspace(line[7])) {
  5382. unsigned long x;
  5383. char *p, *z;
  5384. @@ -92,7 +100,8 @@ int __res_msend(int nqueries, const unsi
  5385. }
  5386. }
  5387. }
  5388. - if (f) __fclose_ca(f);
  5389. + __fclose_ca(f);
  5390. +no_resolv_conf:
  5391. if (!nns) {
  5392. ns[0].sin.sin_family = AF_INET;
  5393. ns[0].sin.sin_port = htons(53);
  5394. --- a/src/search/tsearch_avl.c
  5395. +++ b/src/search/tsearch_avl.c
  5396. @@ -77,38 +77,45 @@ static struct node *find(struct node *n,
  5397. return find(n->right, k, cmp);
  5398. }
  5399. -static struct node *insert(struct node **n, const void *k,
  5400. - int (*cmp)(const void *, const void *), int *new)
  5401. +static struct node *insert(struct node *n, const void *k,
  5402. + int (*cmp)(const void *, const void *), struct node **found)
  5403. {
  5404. - struct node *r = *n;
  5405. + struct node *r;
  5406. int c;
  5407. - if (!r) {
  5408. - *n = r = malloc(sizeof **n);
  5409. - if (r) {
  5410. - r->key = k;
  5411. - r->left = r->right = 0;
  5412. - r->height = 1;
  5413. + if (!n) {
  5414. + n = malloc(sizeof *n);
  5415. + if (n) {
  5416. + n->key = k;
  5417. + n->left = n->right = 0;
  5418. + n->height = 1;
  5419. }
  5420. - *new = 1;
  5421. - return r;
  5422. + *found = n;
  5423. + return n;
  5424. + }
  5425. + c = cmp(k, n->key);
  5426. + if (c == 0) {
  5427. + *found = n;
  5428. + return 0;
  5429. + }
  5430. + r = insert(c < 0 ? n->left : n->right, k, cmp, found);
  5431. + if (r) {
  5432. + if (c < 0)
  5433. + n->left = r;
  5434. + else
  5435. + n->right = r;
  5436. + r = balance(n);
  5437. }
  5438. - c = cmp(k, r->key);
  5439. - if (c == 0)
  5440. - return r;
  5441. - if (c < 0)
  5442. - r = insert(&r->left, k, cmp, new);
  5443. - else
  5444. - r = insert(&r->right, k, cmp, new);
  5445. - if (*new)
  5446. - *n = balance(*n);
  5447. return r;
  5448. }
  5449. -static struct node *movr(struct node *n, struct node *r) {
  5450. - if (!n)
  5451. - return r;
  5452. - n->right = movr(n->right, r);
  5453. +static struct node *remove_rightmost(struct node *n, struct node **rightmost)
  5454. +{
  5455. + if (!n->right) {
  5456. + *rightmost = n;
  5457. + return n->left;
  5458. + }
  5459. + n->right = remove_rightmost(n->right, rightmost);
  5460. return balance(n);
  5461. }
  5462. @@ -122,7 +129,13 @@ static struct node *remove(struct node *
  5463. c = cmp(k, (*n)->key);
  5464. if (c == 0) {
  5465. struct node *r = *n;
  5466. - *n = movr(r->left, r->right);
  5467. + if (r->left) {
  5468. + r->left = remove_rightmost(r->left, n);
  5469. + (*n)->left = r->left;
  5470. + (*n)->right = r->right;
  5471. + *n = balance(*n);
  5472. + } else
  5473. + *n = r->right;
  5474. free(r);
  5475. return parent;
  5476. }
  5477. @@ -138,6 +151,8 @@ static struct node *remove(struct node *
  5478. void *tdelete(const void *restrict key, void **restrict rootp,
  5479. int(*compar)(const void *, const void *))
  5480. {
  5481. + if (!rootp)
  5482. + return 0;
  5483. struct node *n = *rootp;
  5484. struct node *ret;
  5485. /* last argument is arbitrary non-null pointer
  5486. @@ -150,17 +165,21 @@ void *tdelete(const void *restrict key,
  5487. void *tfind(const void *key, void *const *rootp,
  5488. int(*compar)(const void *, const void *))
  5489. {
  5490. + if (!rootp)
  5491. + return 0;
  5492. return find(*rootp, key, compar);
  5493. }
  5494. void *tsearch(const void *key, void **rootp,
  5495. int (*compar)(const void *, const void *))
  5496. {
  5497. - int new = 0;
  5498. - struct node *n = *rootp;
  5499. + struct node *update;
  5500. struct node *ret;
  5501. - ret = insert(&n, key, compar, &new);
  5502. - *rootp = n;
  5503. + if (!rootp)
  5504. + return 0;
  5505. + update = insert(*rootp, key, compar, &ret);
  5506. + if (update)
  5507. + *rootp = update;
  5508. return ret;
  5509. }
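
Summary of the reworked AVL code: insert now returns the new subtree root (or 0 when the key was already present) and reports the affected node through *found, and deletion splices in the in-order predecessor via remove_rightmost with rebalancing on the way up, where the old movr concatenation could degrade balance; tsearch, tfind, and tdelete also tolerate a null rootp now. Standard usage of the <search.h> interface these fixes harden, for reference:

#include <search.h>
#include <stdio.h>
#include <string.h>

static int cmp(const void *a, const void *b)
{
        return strcmp(a, b);
}

int main(void)
{
        void *root = 0;
        tsearch("beta", &root, cmp);
        tsearch("alpha", &root, cmp);
        void **hit = tfind("beta", &root, cmp);
        if (hit) printf("found %s\n", *(char **)hit);
        tdelete("beta", &root, cmp);
        tdelete("alpha", &root, cmp);
        return 0;
}
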
  5510. --- a/src/setjmp/arm/longjmp.s
  5511. +++ b/src/setjmp/arm/longjmp.s
  5512. @@ -1,3 +1,4 @@
  5513. +.syntax unified
  5514. .global _longjmp
  5515. .global longjmp
  5516. .type _longjmp,%function
  5517. @@ -20,7 +21,11 @@ longjmp:
  5518. ldc p2, cr4, [ip], #48
  5519. 2: tst r1,#0x40
  5520. beq 2f
  5521. - .word 0xecbc8b10 /* vldmia ip!, {d8-d15} */
  5522. + .fpu vfp
  5523. + vldmia ip!, {d8-d15}
  5524. + .fpu softvfp
  5525. + .eabi_attribute 10, 0
  5526. + .eabi_attribute 27, 0
  5527. 2: tst r1,#0x200
  5528. beq 3f
  5529. ldcl p1, cr10, [ip], #8
  5530. @@ -29,9 +34,7 @@ longjmp:
  5531. ldcl p1, cr13, [ip], #8
  5532. ldcl p1, cr14, [ip], #8
  5533. ldcl p1, cr15, [ip], #8
  5534. -3: tst lr,#1
  5535. - moveq pc,lr
  5536. - bx lr
  5537. +3: bx lr
  5538. .hidden __hwcap
  5539. 1: .word __hwcap-1b
  5540. --- a/src/setjmp/arm/setjmp.s
  5541. +++ b/src/setjmp/arm/setjmp.s
  5542. @@ -1,3 +1,4 @@
  5543. +.syntax unified
  5544. .global __setjmp
  5545. .global _setjmp
  5546. .global setjmp
  5547. @@ -22,7 +23,11 @@ setjmp:
  5548. stc p2, cr4, [ip], #48
  5549. 2: tst r1,#0x40
  5550. beq 2f
  5551. - .word 0xecac8b10 /* vstmia ip!, {d8-d15} */
  5552. + .fpu vfp
  5553. + vstmia ip!, {d8-d15}
  5554. + .fpu softvfp
  5555. + .eabi_attribute 10, 0
  5556. + .eabi_attribute 27, 0
  5557. 2: tst r1,#0x200
  5558. beq 3f
  5559. stcl p1, cr10, [ip], #8
  5560. @@ -31,9 +36,7 @@ setjmp:
  5561. stcl p1, cr13, [ip], #8
  5562. stcl p1, cr14, [ip], #8
  5563. stcl p1, cr15, [ip], #8
  5564. -3: tst lr,#1
  5565. - moveq pc,lr
  5566. - bx lr
  5567. +3: bx lr
  5568. .hidden __hwcap
  5569. 1: .word __hwcap-1b
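
Context for the hwcap tests around these hunks: bit 0x40 of AT_HWCAP is HWCAP_VFP and 0x200 is HWCAP_IWMMXT, so one soft-float binary saves and restores VFP or iWMMXt state only when the hardware has it; the .fpu vfp / .fpu softvfp pair replaces the hand-assembled .word opcodes, while the .eabi_attribute resets keep the object from being tagged as requiring VFP. The same runtime dispatch in C, as a sketch (constant values per the kernel's asm/hwcap.h):

#include <sys/auxv.h>

#define HWCAP_ARM_VFP    0x40   /* the bit tst r1,#0x40 checks */
#define HWCAP_ARM_IWMMXT 0x200  /* the bit tst r1,#0x200 checks */

static int have_vfp(void)
{
        return !!(getauxval(AT_HWCAP) & HWCAP_ARM_VFP);
}
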
  5570. --- a/src/setjmp/mips-sf/longjmp.s
  5571. +++ /dev/null
  5572. @@ -1,25 +0,0 @@
  5573. -.set noreorder
  5574. -
  5575. -.global _longjmp
  5576. -.global longjmp
  5577. -.type _longjmp,@function
  5578. -.type longjmp,@function
  5579. -_longjmp:
  5580. -longjmp:
  5581. - move $2, $5
  5582. - bne $2, $0, 1f
  5583. - nop
  5584. - addu $2, $2, 1
  5585. -1: lw $ra, 0($4)
  5586. - lw $sp, 4($4)
  5587. - lw $16, 8($4)
  5588. - lw $17, 12($4)
  5589. - lw $18, 16($4)
  5590. - lw $19, 20($4)
  5591. - lw $20, 24($4)
  5592. - lw $21, 28($4)
  5593. - lw $22, 32($4)
  5594. - lw $23, 36($4)
  5595. - lw $30, 40($4)
  5596. - jr $ra
  5597. - lw $28, 44($4)
  5598. --- a/src/setjmp/mips-sf/longjmp.sub
  5599. +++ /dev/null
  5600. @@ -1 +0,0 @@
  5601. -longjmp.s
  5602. --- a/src/setjmp/mips-sf/setjmp.s
  5603. +++ /dev/null
  5604. @@ -1,25 +0,0 @@
  5605. -.set noreorder
  5606. -
  5607. -.global __setjmp
  5608. -.global _setjmp
  5609. -.global setjmp
  5610. -.type __setjmp,@function
  5611. -.type _setjmp,@function
  5612. -.type setjmp,@function
  5613. -__setjmp:
  5614. -_setjmp:
  5615. -setjmp:
  5616. - sw $ra, 0($4)
  5617. - sw $sp, 4($4)
  5618. - sw $16, 8($4)
  5619. - sw $17, 12($4)
  5620. - sw $18, 16($4)
  5621. - sw $19, 20($4)
  5622. - sw $20, 24($4)
  5623. - sw $21, 28($4)
  5624. - sw $22, 32($4)
  5625. - sw $23, 36($4)
  5626. - sw $30, 40($4)
  5627. - sw $28, 44($4)
  5628. - jr $ra
  5629. - li $2, 0
  5630. --- a/src/setjmp/mips-sf/setjmp.sub
  5631. +++ /dev/null
  5632. @@ -1 +0,0 @@
  5633. -setjmp.s
  5634. --- /dev/null
  5635. +++ b/src/setjmp/mips/longjmp.S
  5636. @@ -0,0 +1,40 @@
  5637. +.set noreorder
  5638. +
  5639. +.global _longjmp
  5640. +.global longjmp
  5641. +.type _longjmp,@function
  5642. +.type longjmp,@function
  5643. +_longjmp:
  5644. +longjmp:
  5645. + move $2, $5
  5646. + bne $2, $0, 1f
  5647. + nop
  5648. + addu $2, $2, 1
  5649. +1:
  5650. +#ifndef __mips_soft_float
  5651. + lwc1 $20, 56($4)
  5652. + lwc1 $21, 60($4)
  5653. + lwc1 $22, 64($4)
  5654. + lwc1 $23, 68($4)
  5655. + lwc1 $24, 72($4)
  5656. + lwc1 $25, 76($4)
  5657. + lwc1 $26, 80($4)
  5658. + lwc1 $27, 84($4)
  5659. + lwc1 $28, 88($4)
  5660. + lwc1 $29, 92($4)
  5661. + lwc1 $30, 96($4)
  5662. + lwc1 $31, 100($4)
  5663. +#endif
  5664. + lw $ra, 0($4)
  5665. + lw $sp, 4($4)
  5666. + lw $16, 8($4)
  5667. + lw $17, 12($4)
  5668. + lw $18, 16($4)
  5669. + lw $19, 20($4)
  5670. + lw $20, 24($4)
  5671. + lw $21, 28($4)
  5672. + lw $22, 32($4)
  5673. + lw $23, 36($4)
  5674. + lw $30, 40($4)
  5675. + jr $ra
  5676. + lw $28, 44($4)
  5677. --- a/src/setjmp/mips/longjmp.s
  5678. +++ /dev/null
  5679. @@ -1,37 +0,0 @@
  5680. -.set noreorder
  5681. -
  5682. -.global _longjmp
  5683. -.global longjmp
  5684. -.type _longjmp,@function
  5685. -.type longjmp,@function
  5686. -_longjmp:
  5687. -longjmp:
  5688. - move $2, $5
  5689. - bne $2, $0, 1f
  5690. - nop
  5691. - addu $2, $2, 1
  5692. -1: lwc1 $20, 56($4)
  5693. - lwc1 $21, 60($4)
  5694. - lwc1 $22, 64($4)
  5695. - lwc1 $23, 68($4)
  5696. - lwc1 $24, 72($4)
  5697. - lwc1 $25, 76($4)
  5698. - lwc1 $26, 80($4)
  5699. - lwc1 $27, 84($4)
  5700. - lwc1 $28, 88($4)
  5701. - lwc1 $29, 92($4)
  5702. - lwc1 $30, 96($4)
  5703. - lwc1 $31, 100($4)
  5704. - lw $ra, 0($4)
  5705. - lw $sp, 4($4)
  5706. - lw $16, 8($4)
  5707. - lw $17, 12($4)
  5708. - lw $18, 16($4)
  5709. - lw $19, 20($4)
  5710. - lw $20, 24($4)
  5711. - lw $21, 28($4)
  5712. - lw $22, 32($4)
  5713. - lw $23, 36($4)
  5714. - lw $30, 40($4)
  5715. - jr $ra
  5716. - lw $28, 44($4)
  5717. --- /dev/null
  5718. +++ b/src/setjmp/mips/setjmp.S
  5719. @@ -0,0 +1,39 @@
  5720. +.set noreorder
  5721. +
  5722. +.global __setjmp
  5723. +.global _setjmp
  5724. +.global setjmp
  5725. +.type __setjmp,@function
  5726. +.type _setjmp,@function
  5727. +.type setjmp,@function
  5728. +__setjmp:
  5729. +_setjmp:
  5730. +setjmp:
  5731. + sw $ra, 0($4)
  5732. + sw $sp, 4($4)
  5733. + sw $16, 8($4)
  5734. + sw $17, 12($4)
  5735. + sw $18, 16($4)
  5736. + sw $19, 20($4)
  5737. + sw $20, 24($4)
  5738. + sw $21, 28($4)
  5739. + sw $22, 32($4)
  5740. + sw $23, 36($4)
  5741. + sw $30, 40($4)
  5742. + sw $28, 44($4)
  5743. +#ifndef __mips_soft_float
  5744. + swc1 $20, 56($4)
  5745. + swc1 $21, 60($4)
  5746. + swc1 $22, 64($4)
  5747. + swc1 $23, 68($4)
  5748. + swc1 $24, 72($4)
  5749. + swc1 $25, 76($4)
  5750. + swc1 $26, 80($4)
  5751. + swc1 $27, 84($4)
  5752. + swc1 $28, 88($4)
  5753. + swc1 $29, 92($4)
  5754. + swc1 $30, 96($4)
  5755. + swc1 $31, 100($4)
  5756. +#endif
  5757. + jr $ra
  5758. + li $2, 0
  5759. --- a/src/setjmp/mips/setjmp.s
  5760. +++ /dev/null
  5761. @@ -1,37 +0,0 @@
  5762. -.set noreorder
  5763. -
  5764. -.global __setjmp
  5765. -.global _setjmp
  5766. -.global setjmp
  5767. -.type __setjmp,@function
  5768. -.type _setjmp,@function
  5769. -.type setjmp,@function
  5770. -__setjmp:
  5771. -_setjmp:
  5772. -setjmp:
  5773. - sw $ra, 0($4)
  5774. - sw $sp, 4($4)
  5775. - sw $16, 8($4)
  5776. - sw $17, 12($4)
  5777. - sw $18, 16($4)
  5778. - sw $19, 20($4)
  5779. - sw $20, 24($4)
  5780. - sw $21, 28($4)
  5781. - sw $22, 32($4)
  5782. - sw $23, 36($4)
  5783. - sw $30, 40($4)
  5784. - sw $28, 44($4)
  5785. - swc1 $20, 56($4)
  5786. - swc1 $21, 60($4)
  5787. - swc1 $22, 64($4)
  5788. - swc1 $23, 68($4)
  5789. - swc1 $24, 72($4)
  5790. - swc1 $25, 76($4)
  5791. - swc1 $26, 80($4)
  5792. - swc1 $27, 84($4)
  5793. - swc1 $28, 88($4)
  5794. - swc1 $29, 92($4)
  5795. - swc1 $30, 96($4)
  5796. - swc1 $31, 100($4)
  5797. - jr $ra
  5798. - li $2, 0
  5799. --- a/src/setjmp/mipsel-sf/longjmp.sub
  5800. +++ /dev/null
  5801. @@ -1 +0,0 @@
  5802. -../mips-sf/longjmp.s
  5803. --- a/src/setjmp/mipsel-sf/setjmp.sub
  5804. +++ /dev/null
  5805. @@ -1 +0,0 @@
  5806. -../mips-sf/setjmp.s
  5807. --- a/src/setjmp/sh-nofpu/longjmp.s
  5808. +++ /dev/null
  5809. @@ -1,22 +0,0 @@
  5810. -.global _longjmp
  5811. -.global longjmp
  5812. -.type _longjmp, @function
  5813. -.type longjmp, @function
  5814. -_longjmp:
  5815. -longjmp:
  5816. - mov.l @r4+, r8
  5817. - mov.l @r4+, r9
  5818. - mov.l @r4+, r10
  5819. - mov.l @r4+, r11
  5820. - mov.l @r4+, r12
  5821. - mov.l @r4+, r13
  5822. - mov.l @r4+, r14
  5823. - mov.l @r4+, r15
  5824. - lds.l @r4+, pr
  5825. -
  5826. - tst r5, r5
  5827. - movt r0
  5828. - add r5, r0
  5829. -
  5830. - rts
  5831. - nop
  5832. --- a/src/setjmp/sh-nofpu/longjmp.sub
  5833. +++ /dev/null
  5834. @@ -1 +0,0 @@
  5835. -longjmp.s
  5836. --- a/src/setjmp/sh-nofpu/setjmp.s
  5837. +++ /dev/null
  5838. @@ -1,24 +0,0 @@
  5839. -.global ___setjmp
  5840. -.hidden ___setjmp
  5841. -.global __setjmp
  5842. -.global _setjmp
  5843. -.global setjmp
  5844. -.type __setjmp, @function
  5845. -.type _setjmp, @function
  5846. -.type setjmp, @function
  5847. -___setjmp:
  5848. -__setjmp:
  5849. -_setjmp:
  5850. -setjmp:
  5851. - add #36, r4
  5852. - sts.l pr, @-r4
- mov.l r15, @-r4
  5854. - mov.l r14, @-r4
  5855. - mov.l r13, @-r4
  5856. - mov.l r12, @-r4
  5857. - mov.l r11, @-r4
  5858. - mov.l r10, @-r4
  5859. - mov.l r9, @-r4
  5860. - mov.l r8, @-r4
  5861. - rts
  5862. - mov #0, r0
  5863. --- a/src/setjmp/sh-nofpu/setjmp.sub
  5864. +++ /dev/null
  5865. @@ -1 +0,0 @@
  5866. -setjmp.s
  5867. --- /dev/null
  5868. +++ b/src/setjmp/sh/longjmp.S
  5869. @@ -0,0 +1,28 @@
  5870. +.global _longjmp
  5871. +.global longjmp
  5872. +.type _longjmp, @function
  5873. +.type longjmp, @function
  5874. +_longjmp:
  5875. +longjmp:
  5876. + mov.l @r4+, r8
  5877. + mov.l @r4+, r9
  5878. + mov.l @r4+, r10
  5879. + mov.l @r4+, r11
  5880. + mov.l @r4+, r12
  5881. + mov.l @r4+, r13
  5882. + mov.l @r4+, r14
  5883. + mov.l @r4+, r15
  5884. + lds.l @r4+, pr
  5885. +#if __SH_FPU_ANY__ || __SH4__
  5886. + fmov.s @r4+, fr12
  5887. + fmov.s @r4+, fr13
  5888. + fmov.s @r4+, fr14
  5889. + fmov.s @r4+, fr15
  5890. +#endif
  5891. +
  5892. + tst r5, r5
  5893. + movt r0
  5894. + add r5, r0
  5895. +
  5896. + rts
  5897. + nop
  5898. --- a/src/setjmp/sh/longjmp.s
  5899. +++ /dev/null
  5900. @@ -1,26 +0,0 @@
  5901. -.global _longjmp
  5902. -.global longjmp
  5903. -.type _longjmp, @function
  5904. -.type longjmp, @function
  5905. -_longjmp:
  5906. -longjmp:
  5907. - mov.l @r4+, r8
  5908. - mov.l @r4+, r9
  5909. - mov.l @r4+, r10
  5910. - mov.l @r4+, r11
  5911. - mov.l @r4+, r12
  5912. - mov.l @r4+, r13
  5913. - mov.l @r4+, r14
  5914. - mov.l @r4+, r15
  5915. - lds.l @r4+, pr
  5916. - fmov.s @r4+, fr12
  5917. - fmov.s @r4+, fr13
  5918. - fmov.s @r4+, fr14
  5919. - fmov.s @r4+, fr15
  5920. -
  5921. - tst r5, r5
  5922. - movt r0
  5923. - add r5, r0
  5924. -
  5925. - rts
  5926. - nop
  5927. --- /dev/null
  5928. +++ b/src/setjmp/sh/setjmp.S
  5929. @@ -0,0 +1,32 @@
  5930. +.global ___setjmp
  5931. +.hidden ___setjmp
  5932. +.global __setjmp
  5933. +.global _setjmp
  5934. +.global setjmp
  5935. +.type __setjmp, @function
  5936. +.type _setjmp, @function
  5937. +.type setjmp, @function
  5938. +___setjmp:
  5939. +__setjmp:
  5940. +_setjmp:
  5941. +setjmp:
  5942. +#if __SH_FPU_ANY__ || __SH4__
  5943. + add #52, r4
  5944. + fmov.s fr15, @-r4
  5945. + fmov.s fr14, @-r4
  5946. + fmov.s fr13, @-r4
  5947. + fmov.s fr12, @-r4
  5948. +#else
  5949. + add #36, r4
  5950. +#endif
  5951. + sts.l pr, @-r4
  5952. + mov.l r15, @-r4
  5953. + mov.l r14, @-r4
  5954. + mov.l r13, @-r4
  5955. + mov.l r12, @-r4
  5956. + mov.l r11, @-r4
  5957. + mov.l r10, @-r4
  5958. + mov.l r9, @-r4
  5959. + mov.l r8, @-r4
  5960. + rts
  5961. + mov #0, r0
  5962. --- a/src/setjmp/sh/setjmp.s
  5963. +++ /dev/null
  5964. @@ -1,28 +0,0 @@
  5965. -.global ___setjmp
  5966. -.hidden ___setjmp
  5967. -.global __setjmp
  5968. -.global _setjmp
  5969. -.global setjmp
  5970. -.type __setjmp, @function
  5971. -.type _setjmp, @function
  5972. -.type setjmp, @function
  5973. -___setjmp:
  5974. -__setjmp:
  5975. -_setjmp:
  5976. -setjmp:
  5977. - add #52, r4
  5978. - fmov.s fr15, @-r4
  5979. - fmov.s fr14, @-r4
  5980. - fmov.s fr13, @-r4
  5981. - fmov.s fr12, @-r4
  5982. - sts.l pr, @-r4
  5983. - mov.l r15, @-r4
  5984. - mov.l r14, @-r4
  5985. - mov.l r13, @-r4
  5986. - mov.l r12, @-r4
  5987. - mov.l r11, @-r4
  5988. - mov.l r10, @-r4
  5989. - mov.l r9, @-r4
  5990. - mov.l r8, @-r4
  5991. - rts
  5992. - mov #0, r0
  5993. --- a/src/setjmp/sheb-nofpu/longjmp.sub
  5994. +++ /dev/null
  5995. @@ -1 +0,0 @@
  5996. -../sh-nofpu/longjmp.s
  5997. --- a/src/setjmp/sheb-nofpu/setjmp.sub
  5998. +++ /dev/null
  5999. @@ -1 +0,0 @@
  6000. -../sh-nofpu/setjmp.s
  6001. --- a/src/signal/arm/restore.s
  6002. +++ b/src/signal/arm/restore.s
  6003. @@ -1,3 +1,5 @@
  6004. +.syntax unified
  6005. +
  6006. .global __restore
  6007. .type __restore,%function
  6008. __restore:
  6009. --- a/src/signal/arm/sigsetjmp.s
  6010. +++ b/src/signal/arm/sigsetjmp.s
  6011. @@ -1,3 +1,4 @@
  6012. +.syntax unified
  6013. .global sigsetjmp
  6014. .global __sigsetjmp
  6015. .type sigsetjmp,%function
  6016. --- a/src/signal/sigaction.c
  6017. +++ b/src/signal/sigaction.c
  6018. @@ -17,10 +17,6 @@ void __get_handler_set(sigset_t *set)
  6019. int __libc_sigaction(int sig, const struct sigaction *restrict sa, struct sigaction *restrict old)
  6020. {
  6021. struct k_sigaction ksa, ksa_old;
  6022. - if (sig >= (unsigned)_NSIG) {
  6023. - errno = EINVAL;
  6024. - return -1;
  6025. - }
  6026. if (sa) {
  6027. if ((uintptr_t)sa->sa_handler > 1UL) {
  6028. a_or_l(handler_set+(sig-1)/(8*sizeof(long)),
  6029. @@ -57,7 +53,7 @@ int __libc_sigaction(int sig, const stru
  6030. int __sigaction(int sig, const struct sigaction *restrict sa, struct sigaction *restrict old)
  6031. {
  6032. - if (sig-32U < 3) {
  6033. + if (sig-32U < 3 || sig-1U >= _NSIG-1) {
  6034. errno = EINVAL;
  6035. return -1;
  6036. }
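
The combined check in __sigaction uses unsigned wraparound: sig-32U < 3 rejects the three signals reserved for the implementation, and sig-1U >= _NSIG-1 rejects both sig == 0 and out-of-range numbers in one comparison, because 0-1U wraps to UINT_MAX. The idiom in isolation:

/* One unsigned comparison covering both ends of a range:
 * true exactly when 1 <= sig <= nsig-1. */
static int sig_in_range(unsigned sig, unsigned nsig)
{
        return sig - 1u < nsig - 1u;
}
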
  6037. --- a/src/signal/sigsetjmp_tail.c
  6038. +++ b/src/signal/sigsetjmp_tail.c
  6039. @@ -2,9 +2,7 @@
  6040. #include <signal.h>
  6041. #include "syscall.h"
  6042. -#ifdef SHARED
  6043. __attribute__((__visibility__("hidden")))
  6044. -#endif
  6045. int __sigsetjmp_tail(sigjmp_buf jb, int ret)
  6046. {
  6047. void *p = jb->__ss;
  6048. --- a/src/stdio/getdelim.c
  6049. +++ b/src/stdio/getdelim.c
  6050. @@ -27,17 +27,18 @@ ssize_t getdelim(char **restrict s, size
  6051. for (;;) {
  6052. z = memchr(f->rpos, delim, f->rend - f->rpos);
  6053. k = z ? z - f->rpos + 1 : f->rend - f->rpos;
  6054. - if (i+k >= *n) {
  6055. + if (i+k+1 >= *n) {
  6056. if (k >= SIZE_MAX/2-i) goto oom;
  6057. - *n = i+k+2;
  6058. - if (*n < SIZE_MAX/4) *n *= 2;
  6059. - tmp = realloc(*s, *n);
  6060. + size_t m = i+k+2;
  6061. + if (!z && m < SIZE_MAX/4) m += m/2;
  6062. + tmp = realloc(*s, m);
  6063. if (!tmp) {
  6064. - *n = i+k+2;
  6065. - tmp = realloc(*s, *n);
  6066. + m = i+k+2;
  6067. + tmp = realloc(*s, m);
  6068. if (!tmp) goto oom;
  6069. }
  6070. *s = tmp;
  6071. + *n = m;
  6072. }
  6073. memcpy(*s+i, f->rpos, k);
  6074. f->rpos += k;
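
The getdelim rewrite changes the growth policy: the buffer is enlarged only when i+k+1 (content plus terminator) would not fit, grows by half only while the delimiter has not been found yet, falls back to an exact-size allocation when the larger one fails, and *n is updated only after a realloc succeeds. Ordinary use of the interface, for reference:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        char *line = 0;
        size_t cap = 0;  /* getline manages the allocation */
        ssize_t len;
        while ((len = getline(&line, &cap, stdin)) != -1)
                fwrite(line, 1, (size_t)len, stdout);
        free(line);
        return 0;
}
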
  6075. --- /dev/null
  6076. +++ b/src/string/arm/__aeabi_memclr.c
  6077. @@ -0,0 +1,9 @@
  6078. +#include <string.h>
  6079. +#include "libc.h"
  6080. +
  6081. +void __aeabi_memclr(void *dest, size_t n)
  6082. +{
  6083. + memset(dest, 0, n);
  6084. +}
  6085. +weak_alias(__aeabi_memclr, __aeabi_memclr4);
  6086. +weak_alias(__aeabi_memclr, __aeabi_memclr8);
  6087. --- /dev/null
  6088. +++ b/src/string/arm/__aeabi_memcpy.c
  6089. @@ -0,0 +1,9 @@
  6090. +#include <string.h>
  6091. +#include "libc.h"
  6092. +
  6093. +void __aeabi_memcpy(void *restrict dest, const void *restrict src, size_t n)
  6094. +{
  6095. + memcpy(dest, src, n);
  6096. +}
  6097. +weak_alias(__aeabi_memcpy, __aeabi_memcpy4);
  6098. +weak_alias(__aeabi_memcpy, __aeabi_memcpy8);
  6099. --- /dev/null
  6100. +++ b/src/string/arm/__aeabi_memmove.c
  6101. @@ -0,0 +1,9 @@
  6102. +#include <string.h>
  6103. +#include "libc.h"
  6104. +
  6105. +void __aeabi_memmove(void *dest, const void *src, size_t n)
  6106. +{
  6107. + memmove(dest, src, n);
  6108. +}
  6109. +weak_alias(__aeabi_memmove, __aeabi_memmove4);
  6110. +weak_alias(__aeabi_memmove, __aeabi_memmove8);
  6111. --- /dev/null
  6112. +++ b/src/string/arm/__aeabi_memset.c
  6113. @@ -0,0 +1,9 @@
  6114. +#include <string.h>
  6115. +#include "libc.h"
  6116. +
  6117. +void __aeabi_memset(void *dest, size_t n, int c)
  6118. +{
  6119. + memset(dest, c, n);
  6120. +}
  6121. +weak_alias(__aeabi_memset, __aeabi_memset4);
  6122. +weak_alias(__aeabi_memset, __aeabi_memset8);
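
These four files provide the run-time helpers the ARM EABI lets compilers emit for block moves; note that __aeabi_memset takes (dest, n, c), the opposite order of ISO memset(dest, c, n), which is why thin named wrappers are safer than plain aliases. The difference spelled out (the declaration is repeated here only for illustration; normally only the compiler generates these calls):

#include <string.h>
#include <stddef.h>

extern void __aeabi_memset(void *dest, size_t n, int c);

static void fill_both_ways(char buf[64])
{
        memset(buf, 'x', 64);         /* ISO order: dest, c, n */
        __aeabi_memset(buf, 64, 'x'); /* EABI order: dest, n, c */
}
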
  6123. --- /dev/null
  6124. +++ b/src/string/arm/memcpy.c
  6125. @@ -0,0 +1,3 @@
  6126. +#if __ARMEB__
  6127. +#include "../memcpy.c"
  6128. +#endif
  6129. --- /dev/null
  6130. +++ b/src/string/arm/memcpy_le.S
  6131. @@ -0,0 +1,383 @@
  6132. +#ifndef __ARMEB__
  6133. +
  6134. +/*
  6135. + * Copyright (C) 2008 The Android Open Source Project
  6136. + * All rights reserved.
  6137. + *
  6138. + * Redistribution and use in source and binary forms, with or without
  6139. + * modification, are permitted provided that the following conditions
  6140. + * are met:
  6141. + * * Redistributions of source code must retain the above copyright
  6142. + * notice, this list of conditions and the following disclaimer.
  6143. + * * Redistributions in binary form must reproduce the above copyright
  6144. + * notice, this list of conditions and the following disclaimer in
  6145. + * the documentation and/or other materials provided with the
  6146. + * distribution.
  6147. + *
  6148. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  6149. + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  6150. + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
  6151. + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
  6152. + * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
  6153. + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
  6154. + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
  6155. + * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
  6156. + * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  6157. + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
  6158. + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  6159. + * SUCH DAMAGE.
  6160. + */
  6161. +
  6162. +
  6163. +/*
  6164. + * Optimized memcpy() for ARM.
  6165. + *
  6166. + * note that memcpy() always returns the destination pointer,
  6167. + * so we have to preserve R0.
  6168. + */
  6169. +
  6170. +/*
  6171. + * This file has been modified from the original for use in musl libc.
  6172. + * The main changes are: addition of .type memcpy,%function to make the
  6173. + * code safely callable from thumb mode, adjusting the return
  6174. + * instructions to be compatible with pre-thumb ARM cpus, and removal
  6175. + * of prefetch code that is not compatible with older cpus.
  6176. + */
  6177. +
  6178. +.syntax unified
  6179. +
  6180. +.global memcpy
  6181. +.type memcpy,%function
  6182. +memcpy:
+ /* The stack must always be 64-bit aligned to be compliant with the
  6184. + * ARM ABI. Since we have to save R0, we might as well save R4
  6185. + * which we can use for better pipelining of the reads below
  6186. + */
  6187. + .fnstart
  6188. + .save {r0, r4, lr}
  6189. + stmfd sp!, {r0, r4, lr}
  6190. + /* Making room for r5-r11 which will be spilled later */
  6191. + .pad #28
  6192. + sub sp, sp, #28
  6193. +
  6194. + /* it simplifies things to take care of len<4 early */
  6195. + cmp r2, #4
  6196. + blo copy_last_3_and_return
  6197. +
  6198. + /* compute the offset to align the source
  6199. + * offset = (4-(src&3))&3 = -src & 3
  6200. + */
  6201. + rsb r3, r1, #0
  6202. + ands r3, r3, #3
  6203. + beq src_aligned
  6204. +
  6205. + /* align source to 32 bits. We need to insert 2 instructions between
  6206. + * a ldr[b|h] and str[b|h] because byte and half-word instructions
  6207. + * stall 2 cycles.
  6208. + */
  6209. + movs r12, r3, lsl #31
  6210. + sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
  6211. + ldrbmi r3, [r1], #1
  6212. + ldrbcs r4, [r1], #1
  6213. + ldrbcs r12,[r1], #1
  6214. + strbmi r3, [r0], #1
  6215. + strbcs r4, [r0], #1
  6216. + strbcs r12,[r0], #1
  6217. +
  6218. +src_aligned:
  6219. +
  6220. + /* see if src and dst are aligned together (congruent) */
  6221. + eor r12, r0, r1
  6222. + tst r12, #3
  6223. + bne non_congruent
  6224. +
+ /* Use post-increment mode for stm to spill r5-r11 to the reserved stack
  6226. + * frame. Don't update sp.
  6227. + */
  6228. + stmea sp, {r5-r11}
  6229. +
  6230. + /* align the destination to a cache-line */
  6231. + rsb r3, r0, #0
  6232. + ands r3, r3, #0x1C
  6233. + beq congruent_aligned32
  6234. + cmp r3, r2
  6235. + andhi r3, r2, #0x1C
  6236. +
+ /* conditionally copies 0 to 7 words (length in r3) */
  6238. + movs r12, r3, lsl #28
  6239. + ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */
  6240. + ldmmi r1!, {r8, r9} /* 8 bytes */
  6241. + stmcs r0!, {r4, r5, r6, r7}
  6242. + stmmi r0!, {r8, r9}
  6243. + tst r3, #0x4
  6244. + ldrne r10,[r1], #4 /* 4 bytes */
  6245. + strne r10,[r0], #4
  6246. + sub r2, r2, r3
  6247. +
  6248. +congruent_aligned32:
  6249. + /*
  6250. + * here source is aligned to 32 bytes.
  6251. + */
  6252. +
  6253. +cached_aligned32:
  6254. + subs r2, r2, #32
  6255. + blo less_than_32_left
  6256. +
  6257. + /*
  6258. + * We preload a cache-line up to 64 bytes ahead. On the 926, this will
+ * stall only until the requested word is fetched, but the linefill
+ * continues in the background.
  6261. + * While the linefill is going, we write our previous cache-line
  6262. + * into the write-buffer (which should have some free space).
  6263. + * When the linefill is done, the writebuffer will
  6264. + * start dumping its content into memory
  6265. + *
  6266. + * While all this is going, we then load a full cache line into
  6267. + * 8 registers, this cache line should be in the cache by now
  6268. + * (or partly in the cache).
  6269. + *
  6270. + * This code should work well regardless of the source/dest alignment.
  6271. + *
  6272. + */
  6273. +
  6274. + /* Align the preload register to a cache-line because the cpu does
  6275. + * "critical word first" (the first word requested is loaded first).
  6276. + */
  6277. + @ bic r12, r1, #0x1F
  6278. + @ add r12, r12, #64
  6279. +
  6280. +1: ldmia r1!, { r4-r11 }
  6281. + subs r2, r2, #32
  6282. +
  6283. + /*
  6284. + * NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
  6285. + * for ARM9 preload will not be safely guarded by the preceding subs.
  6286. + * When it is safely guarded the only possibility to have SIGSEGV here
  6287. + * is because the caller overstates the length.
  6288. + */
  6289. + @ ldrhi r3, [r12], #32 /* cheap ARM9 preload */
  6290. + stmia r0!, { r4-r11 }
  6291. + bhs 1b
  6292. +
  6293. + add r2, r2, #32
  6294. +
  6295. +less_than_32_left:
  6296. + /*
  6297. + * less than 32 bytes left at this point (length in r2)
  6298. + */
  6299. +
  6300. + /* skip all this if there is nothing to do, which should
  6301. + * be a common case (if not executed the code below takes
  6302. + * about 16 cycles)
  6303. + */
  6304. + tst r2, #0x1F
  6305. + beq 1f
  6306. +
+ /* conditionally copies 0 to 31 bytes */
  6308. + movs r12, r2, lsl #28
  6309. + ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */
  6310. + ldmmi r1!, {r8, r9} /* 8 bytes */
  6311. + stmcs r0!, {r4, r5, r6, r7}
  6312. + stmmi r0!, {r8, r9}
  6313. + movs r12, r2, lsl #30
  6314. + ldrcs r3, [r1], #4 /* 4 bytes */
  6315. + ldrhmi r4, [r1], #2 /* 2 bytes */
  6316. + strcs r3, [r0], #4
  6317. + strhmi r4, [r0], #2
  6318. + tst r2, #0x1
  6319. + ldrbne r3, [r1] /* last byte */
  6320. + strbne r3, [r0]
  6321. +
  6322. + /* we're done! restore everything and return */
  6323. +1: ldmfd sp!, {r5-r11}
  6324. + ldmfd sp!, {r0, r4, lr}
  6325. + bx lr
  6326. +
  6327. + /********************************************************************/
  6328. +
  6329. +non_congruent:
  6330. + /*
  6331. + * here source is aligned to 4 bytes
  6332. + * but destination is not.
  6333. + *
  6334. + * in the code below r2 is the number of bytes read
  6335. + * (the number of bytes written is always smaller, because we have
  6336. + * partial words in the shift queue)
  6337. + */
  6338. + cmp r2, #4
  6339. + blo copy_last_3_and_return
  6340. +
+ /* Use post-increment mode for stm to spill r5-r11 to the reserved stack
  6342. + * frame. Don't update sp.
  6343. + */
  6344. + stmea sp, {r5-r11}
  6345. +
  6346. + /* compute shifts needed to align src to dest */
  6347. + rsb r5, r0, #0
  6348. + and r5, r5, #3 /* r5 = # bytes in partial words */
  6349. + mov r12, r5, lsl #3 /* r12 = right */
  6350. + rsb lr, r12, #32 /* lr = left */
  6351. +
  6352. + /* read the first word */
  6353. + ldr r3, [r1], #4
  6354. + sub r2, r2, #4
  6355. +
  6356. + /* write a partial word (0 to 3 bytes), such that destination
  6357. + * becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
  6358. + */
  6359. + movs r5, r5, lsl #31
  6360. + strbmi r3, [r0], #1
  6361. + movmi r3, r3, lsr #8
  6362. + strbcs r3, [r0], #1
  6363. + movcs r3, r3, lsr #8
  6364. + strbcs r3, [r0], #1
  6365. + movcs r3, r3, lsr #8
  6366. +
  6367. + cmp r2, #4
  6368. + blo partial_word_tail
  6369. +
  6370. + /* Align destination to 32 bytes (cache line boundary) */
  6371. +1: tst r0, #0x1c
  6372. + beq 2f
  6373. + ldr r5, [r1], #4
  6374. + sub r2, r2, #4
  6375. + orr r4, r3, r5, lsl lr
  6376. + mov r3, r5, lsr r12
  6377. + str r4, [r0], #4
  6378. + cmp r2, #4
  6379. + bhs 1b
  6380. + blo partial_word_tail
  6381. +
  6382. + /* copy 32 bytes at a time */
  6383. +2: subs r2, r2, #32
  6384. + blo less_than_thirtytwo
  6385. +
  6386. + /* Use immediate mode for the shifts, because there is an extra cycle
  6387. + * for register shifts, which could account for up to 50% of
+ * the performance hit.
  6389. + */
  6390. +
  6391. + cmp r12, #24
  6392. + beq loop24
  6393. + cmp r12, #8
  6394. + beq loop8
+
+loop16:
+ ldr r12, [r1], #4
+1: mov r4, r12
+ ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
+ subs r2, r2, #32
+ ldrhs r12, [r1], #4
+ orr r3, r3, r4, lsl #16
+ mov r4, r4, lsr #16
+ orr r4, r4, r5, lsl #16
+ mov r5, r5, lsr #16
+ orr r5, r5, r6, lsl #16
+ mov r6, r6, lsr #16
+ orr r6, r6, r7, lsl #16
+ mov r7, r7, lsr #16
+ orr r7, r7, r8, lsl #16
+ mov r8, r8, lsr #16
+ orr r8, r8, r9, lsl #16
+ mov r9, r9, lsr #16
+ orr r9, r9, r10, lsl #16
+ mov r10, r10, lsr #16
+ orr r10, r10, r11, lsl #16
+ stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
+ mov r3, r11, lsr #16
+ bhs 1b
+ b less_than_thirtytwo
+
+loop8:
+ ldr r12, [r1], #4
+1: mov r4, r12
+ ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
+ subs r2, r2, #32
+ ldrhs r12, [r1], #4
+ orr r3, r3, r4, lsl #24
+ mov r4, r4, lsr #8
+ orr r4, r4, r5, lsl #24
+ mov r5, r5, lsr #8
+ orr r5, r5, r6, lsl #24
+ mov r6, r6, lsr #8
+ orr r6, r6, r7, lsl #24
+ mov r7, r7, lsr #8
+ orr r7, r7, r8, lsl #24
+ mov r8, r8, lsr #8
+ orr r8, r8, r9, lsl #24
+ mov r9, r9, lsr #8
+ orr r9, r9, r10, lsl #24
+ mov r10, r10, lsr #8
+ orr r10, r10, r11, lsl #24
+ stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
+ mov r3, r11, lsr #8
+ bhs 1b
+ b less_than_thirtytwo
+
+loop24:
+ ldr r12, [r1], #4
+1: mov r4, r12
+ ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
+ subs r2, r2, #32
+ ldrhs r12, [r1], #4
+ orr r3, r3, r4, lsl #8
+ mov r4, r4, lsr #24
+ orr r4, r4, r5, lsl #8
+ mov r5, r5, lsr #24
+ orr r5, r5, r6, lsl #8
+ mov r6, r6, lsr #24
+ orr r6, r6, r7, lsl #8
+ mov r7, r7, lsr #24
+ orr r7, r7, r8, lsl #8
+ mov r8, r8, lsr #24
+ orr r8, r8, r9, lsl #8
+ mov r9, r9, lsr #24
+ orr r9, r9, r10, lsl #8
+ mov r10, r10, lsr #24
+ orr r10, r10, r11, lsl #8
+ stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
+ mov r3, r11, lsr #24
+ bhs 1b
+
+less_than_thirtytwo:
+ /* copy the last 0 to 31 bytes of the source */
+ rsb r12, lr, #32 /* we corrupted r12, recompute it */
+ add r2, r2, #32
+ cmp r2, #4
+ blo partial_word_tail
+
+1: ldr r5, [r1], #4
+ sub r2, r2, #4
+ orr r4, r3, r5, lsl lr
+ mov r3, r5, lsr r12
+ str r4, [r0], #4
+ cmp r2, #4
+ bhs 1b
+
+partial_word_tail:
+ /* we have a partial word in the input buffer */
+ movs r5, lr, lsl #(31-3)
+ strbmi r3, [r0], #1
+ movmi r3, r3, lsr #8
+ strbcs r3, [r0], #1
+ movcs r3, r3, lsr #8
+ strbcs r3, [r0], #1
+
+ /* Refill spilled registers from the stack. Don't update sp. */
+ ldmfd sp, {r5-r11}
+
+copy_last_3_and_return:
+ movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
+ ldrbmi r2, [r1], #1
+ ldrbcs r3, [r1], #1
+ ldrbcs r12,[r1]
+ strbmi r2, [r0], #1
+ strbcs r3, [r0], #1
+ strbcs r12,[r0]
+
+ /* we're done! restore sp and spilled registers and return */
+ add sp, sp, #28
+ ldmfd sp!, {r0, r4, lr}
+ bx lr
+
+#endif
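
A quick host-side sanity check for a replacement memcpy like the one above would walk all source/destination alignment pairs and a range of lengths, also verifying the return-value contract (a sketch, not part of the patch; buffer sizes and the 0xAA fill are arbitrary):

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
        unsigned char src[96], dst[96];
        for (int i = 0; i < 96; i++) src[i] = (unsigned char)i;
        for (int sa = 0; sa < 4; sa++)
        for (int da = 0; da < 4; da++)
        for (int n = 0; n <= 64; n++) {
            memset(dst, 0xAA, sizeof dst);
            assert(memcpy(dst+da, src+sa, n) == dst+da); /* returns dest */
            assert(!memcmp(dst+da, src+sa, n));
            assert(dst[da+n] == 0xAA); /* no overrun */
        }
        return 0;
    }
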
--- a/src/string/armel/memcpy.s
+++ /dev/null
@@ -1,381 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-
-/*
- * Optimized memcpy() for ARM.
- *
- * note that memcpy() always returns the destination pointer,
- * so we have to preserve R0.
- */
-
-/*
- * This file has been modified from the original for use in musl libc.
- * The main changes are: addition of .type memcpy,%function to make the
- * code safely callable from thumb mode, adjusting the return
- * instructions to be compatible with pre-thumb ARM cpus, and removal
- * of prefetch code that is not compatible with older cpus.
- */
-
-.global memcpy
-.type memcpy,%function
-memcpy:
- /* The stack must always be 64-bits aligned to be compliant with the
- * ARM ABI. Since we have to save R0, we might as well save R4
- * which we can use for better pipelining of the reads below
- */
- .fnstart
- .save {r0, r4, lr}
- stmfd sp!, {r0, r4, lr}
- /* Making room for r5-r11 which will be spilled later */
- .pad #28
- sub sp, sp, #28
-
- /* it simplifies things to take care of len<4 early */
- cmp r2, #4
- blo copy_last_3_and_return
-
- /* compute the offset to align the source
- * offset = (4-(src&3))&3 = -src & 3
- */
- rsb r3, r1, #0
- ands r3, r3, #3
- beq src_aligned
-
- /* align source to 32 bits. We need to insert 2 instructions between
- * a ldr[b|h] and str[b|h] because byte and half-word instructions
- * stall 2 cycles.
- */
- movs r12, r3, lsl #31
- sub r2, r2, r3 /* we know that r3 <= r2 because r2 >= 4 */
- .word 0x44d13001 /* ldrbmi r3, [r1], #1 */
- .word 0x24d14001 /* ldrbcs r4, [r1], #1 */
- .word 0x24d1c001 /* ldrbcs r12,[r1], #1 */
- .word 0x44c03001 /* strbmi r3, [r0], #1 */
- .word 0x24c04001 /* strbcs r4, [r0], #1 */
- .word 0x24c0c001 /* strbcs r12,[r0], #1 */
-
-src_aligned:
-
- /* see if src and dst are aligned together (congruent) */
- eor r12, r0, r1
- tst r12, #3
- bne non_congruent
-
- /* Use post-incriment mode for stm to spill r5-r11 to reserved stack
- * frame. Don't update sp.
- */
- stmea sp, {r5-r11}
-
- /* align the destination to a cache-line */
- rsb r3, r0, #0
- ands r3, r3, #0x1C
- beq congruent_aligned32
- cmp r3, r2
- andhi r3, r2, #0x1C
-
- /* conditionnaly copies 0 to 7 words (length in r3) */
- movs r12, r3, lsl #28
- ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */
- ldmmi r1!, {r8, r9} /* 8 bytes */
- stmcs r0!, {r4, r5, r6, r7}
- stmmi r0!, {r8, r9}
- tst r3, #0x4
- ldrne r10,[r1], #4 /* 4 bytes */
- strne r10,[r0], #4
- sub r2, r2, r3
-
-congruent_aligned32:
- /*
- * here source is aligned to 32 bytes.
- */
-
-cached_aligned32:
- subs r2, r2, #32
- blo less_than_32_left
-
- /*
- * We preload a cache-line up to 64 bytes ahead. On the 926, this will
- * stall only until the requested world is fetched, but the linefill
- * continues in the the background.
- * While the linefill is going, we write our previous cache-line
- * into the write-buffer (which should have some free space).
- * When the linefill is done, the writebuffer will
- * start dumping its content into memory
- *
- * While all this is going, we then load a full cache line into
- * 8 registers, this cache line should be in the cache by now
- * (or partly in the cache).
- *
- * This code should work well regardless of the source/dest alignment.
- *
- */
-
- /* Align the preload register to a cache-line because the cpu does
- * "critical word first" (the first word requested is loaded first).
- */
- @ bic r12, r1, #0x1F
- @ add r12, r12, #64
-
-1: ldmia r1!, { r4-r11 }
- subs r2, r2, #32
-
- /*
- * NOTE: if r12 is more than 64 ahead of r1, the following ldrhi
- * for ARM9 preload will not be safely guarded by the preceding subs.
- * When it is safely guarded the only possibility to have SIGSEGV here
- * is because the caller overstates the length.
- */
- @ ldrhi r3, [r12], #32 /* cheap ARM9 preload */
- stmia r0!, { r4-r11 }
- bhs 1b
-
- add r2, r2, #32
-
-less_than_32_left:
- /*
- * less than 32 bytes left at this point (length in r2)
- */
-
- /* skip all this if there is nothing to do, which should
- * be a common case (if not executed the code below takes
- * about 16 cycles)
- */
- tst r2, #0x1F
- beq 1f
-
- /* conditionnaly copies 0 to 31 bytes */
- movs r12, r2, lsl #28
- ldmcs r1!, {r4, r5, r6, r7} /* 16 bytes */
- ldmmi r1!, {r8, r9} /* 8 bytes */
- stmcs r0!, {r4, r5, r6, r7}
- stmmi r0!, {r8, r9}
- movs r12, r2, lsl #30
- ldrcs r3, [r1], #4 /* 4 bytes */
- .word 0x40d140b2 /* ldrhmi r4, [r1], #2 */ /* 2 bytes */
- strcs r3, [r0], #4
- .word 0x40c040b2 /* strhmi r4, [r0], #2 */
- tst r2, #0x1
- .word 0x15d13000 /* ldrbne r3, [r1] */ /* last byte */
- .word 0x15c03000 /* strbne r3, [r0] */
-
- /* we're done! restore everything and return */
-1: ldmfd sp!, {r5-r11}
- ldmfd sp!, {r0, r4, lr}
- tst lr, #1
- moveq pc, lr
- bx lr
-
- /********************************************************************/
-
-non_congruent:
- /*
- * here source is aligned to 4 bytes
- * but destination is not.
- *
- * in the code below r2 is the number of bytes read
- * (the number of bytes written is always smaller, because we have
- * partial words in the shift queue)
- */
- cmp r2, #4
- blo copy_last_3_and_return
-
- /* Use post-incriment mode for stm to spill r5-r11 to reserved stack
- * frame. Don't update sp.
- */
- stmea sp, {r5-r11}
-
- /* compute shifts needed to align src to dest */
- rsb r5, r0, #0
- and r5, r5, #3 /* r5 = # bytes in partial words */
- mov r12, r5, lsl #3 /* r12 = right */
- rsb lr, r12, #32 /* lr = left */
-
- /* read the first word */
- ldr r3, [r1], #4
- sub r2, r2, #4
-
- /* write a partial word (0 to 3 bytes), such that destination
- * becomes aligned to 32 bits (r5 = nb of words to copy for alignment)
- */
- movs r5, r5, lsl #31
- .word 0x44c03001 /* strbmi r3, [r0], #1 */
- movmi r3, r3, lsr #8
- .word 0x24c03001 /* strbcs r3, [r0], #1 */
- movcs r3, r3, lsr #8
- .word 0x24c03001 /* strbcs r3, [r0], #1 */
- movcs r3, r3, lsr #8
-
- cmp r2, #4
- blo partial_word_tail
-
- /* Align destination to 32 bytes (cache line boundary) */
-1: tst r0, #0x1c
- beq 2f
- ldr r5, [r1], #4
- sub r2, r2, #4
- orr r4, r3, r5, lsl lr
- mov r3, r5, lsr r12
- str r4, [r0], #4
- cmp r2, #4
- bhs 1b
- blo partial_word_tail
-
- /* copy 32 bytes at a time */
-2: subs r2, r2, #32
- blo less_than_thirtytwo
-
- /* Use immediate mode for the shifts, because there is an extra cycle
- * for register shifts, which could account for up to 50% of
- * performance hit.
- */
-
- cmp r12, #24
- beq loop24
- cmp r12, #8
- beq loop8
-
-loop16:
- ldr r12, [r1], #4
-1: mov r4, r12
- ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
- subs r2, r2, #32
- ldrhs r12, [r1], #4
- orr r3, r3, r4, lsl #16
- mov r4, r4, lsr #16
- orr r4, r4, r5, lsl #16
- mov r5, r5, lsr #16
- orr r5, r5, r6, lsl #16
- mov r6, r6, lsr #16
- orr r6, r6, r7, lsl #16
- mov r7, r7, lsr #16
- orr r7, r7, r8, lsl #16
- mov r8, r8, lsr #16
- orr r8, r8, r9, lsl #16
- mov r9, r9, lsr #16
- orr r9, r9, r10, lsl #16
- mov r10, r10, lsr #16
- orr r10, r10, r11, lsl #16
- stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
- mov r3, r11, lsr #16
- bhs 1b
- b less_than_thirtytwo
-
-loop8:
- ldr r12, [r1], #4
-1: mov r4, r12
- ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
- subs r2, r2, #32
- ldrhs r12, [r1], #4
- orr r3, r3, r4, lsl #24
- mov r4, r4, lsr #8
- orr r4, r4, r5, lsl #24
- mov r5, r5, lsr #8
- orr r5, r5, r6, lsl #24
- mov r6, r6, lsr #8
- orr r6, r6, r7, lsl #24
- mov r7, r7, lsr #8
- orr r7, r7, r8, lsl #24
- mov r8, r8, lsr #8
- orr r8, r8, r9, lsl #24
- mov r9, r9, lsr #8
- orr r9, r9, r10, lsl #24
- mov r10, r10, lsr #8
- orr r10, r10, r11, lsl #24
- stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
- mov r3, r11, lsr #8
- bhs 1b
- b less_than_thirtytwo
-
-loop24:
- ldr r12, [r1], #4
-1: mov r4, r12
- ldmia r1!, { r5,r6,r7, r8,r9,r10,r11}
- subs r2, r2, #32
- ldrhs r12, [r1], #4
- orr r3, r3, r4, lsl #8
- mov r4, r4, lsr #24
- orr r4, r4, r5, lsl #8
- mov r5, r5, lsr #24
- orr r5, r5, r6, lsl #8
- mov r6, r6, lsr #24
- orr r6, r6, r7, lsl #8
- mov r7, r7, lsr #24
- orr r7, r7, r8, lsl #8
- mov r8, r8, lsr #24
- orr r8, r8, r9, lsl #8
- mov r9, r9, lsr #24
- orr r9, r9, r10, lsl #8
- mov r10, r10, lsr #24
- orr r10, r10, r11, lsl #8
- stmia r0!, {r3,r4,r5,r6, r7,r8,r9,r10}
- mov r3, r11, lsr #24
- bhs 1b
-
-less_than_thirtytwo:
- /* copy the last 0 to 31 bytes of the source */
- rsb r12, lr, #32 /* we corrupted r12, recompute it */
- add r2, r2, #32
- cmp r2, #4
- blo partial_word_tail
-
-1: ldr r5, [r1], #4
- sub r2, r2, #4
- orr r4, r3, r5, lsl lr
- mov r3, r5, lsr r12
- str r4, [r0], #4
- cmp r2, #4
- bhs 1b
-
-partial_word_tail:
- /* we have a partial word in the input buffer */
- movs r5, lr, lsl #(31-3)
- .word 0x44c03001 /* strbmi r3, [r0], #1 */
- movmi r3, r3, lsr #8
- .word 0x24c03001 /* strbcs r3, [r0], #1 */
- movcs r3, r3, lsr #8
- .word 0x24c03001 /* strbcs r3, [r0], #1 */
-
- /* Refill spilled registers from the stack. Don't update sp. */
- ldmfd sp, {r5-r11}
-
-copy_last_3_and_return:
- movs r2, r2, lsl #31 /* copy remaining 0, 1, 2 or 3 bytes */
- .word 0x44d12001 /* ldrbmi r2, [r1], #1 */
- .word 0x24d13001 /* ldrbcs r3, [r1], #1 */
- .word 0x25d1c000 /* ldrbcs r12,[r1] */
- .word 0x44c02001 /* strbmi r2, [r0], #1 */
- .word 0x24c03001 /* strbcs r3, [r0], #1 */
- .word 0x25c0c000 /* strbcs r12,[r0] */
-
- /* we're done! restore sp and spilled registers and return */
- add sp, sp, #28
- ldmfd sp!, {r0, r4, lr}
- tst lr, #1
- moveq pc, lr
- bx lr
--- a/src/string/armel/memcpy.sub
+++ /dev/null
@@ -1 +0,0 @@
-memcpy.s
--- a/src/string/armhf/memcpy.sub
+++ /dev/null
@@ -1 +0,0 @@
-../armel/memcpy.s
--- a/src/thread/__syscall_cp.c
+++ b/src/thread/__syscall_cp.c
@@ -1,9 +1,7 @@
#include "pthread_impl.h"
#include "syscall.h"
-#ifdef SHARED
__attribute__((__visibility__("hidden")))
-#endif
long __syscall_cp_c();
static long sccp(syscall_arg_t nr,
--- a/src/thread/__tls_get_addr.c
+++ b/src/thread/__tls_get_addr.c
@@ -1,16 +1,16 @@
#include <stddef.h>
#include "pthread_impl.h"
+#include "libc.h"
+
+__attribute__((__visibility__("hidden")))
+void *__tls_get_new(size_t *);
void *__tls_get_addr(size_t *v)
{
pthread_t self = __pthread_self();
-#ifdef SHARED
- __attribute__((__visibility__("hidden")))
- void *__tls_get_new(size_t *);
if (v[0]<=(size_t)self->dtv[0])
return (char *)self->dtv[v[0]]+v[1]+DTP_OFFSET;
return __tls_get_new(v);
-#else
- return (char *)self->dtv[1]+v[1]+DTP_OFFSET;
-#endif
}
+
+weak_alias(__tls_get_addr, __tls_get_new);
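
The weak alias is what makes this file self-contained for static linking: unless the dynamic linker's real __tls_get_new is pulled in, the symbol resolves back to __tls_get_addr itself, and since a static program's sole TLS module id never exceeds the DTV's count, the fallback branch should be unreachable there. musl's weak_alias macro (from libc.h) is essentially:

    /* declare `new` as a weak symbol aliasing `old` (GNU C attributes) */
    #define weak_alias(old, new) \
        extern __typeof(old) new __attribute__((__weak__, __alias__(#old)))
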
--- a/src/thread/aarch64/syscall_cp.s
+++ b/src/thread/aarch64/syscall_cp.s
@@ -17,7 +17,7 @@
__syscall_cp_asm:
__cp_begin:
ldr w0,[x0]
- cbnz w0,1f
+ cbnz w0,__cp_cancel
mov x8,x1
mov x0,x2
mov x1,x3
@@ -28,6 +28,5 @@ __cp_begin:
svc 0
__cp_end:
ret
-
- // cbnz might not be able to jump far enough
-1: b __cancel
+__cp_cancel:
+ b __cancel
--- /dev/null
+++ b/src/thread/arm/__set_thread_area.c
@@ -0,0 +1,49 @@
+#include <stdint.h>
+#include <elf.h>
+#include "pthread_impl.h"
+#include "libc.h"
+
+#define HWCAP_TLS (1 << 15)
+
+extern const unsigned char __attribute__((__visibility__("hidden")))
+ __a_barrier_dummy[], __a_barrier_oldkuser[],
+ __a_barrier_v6[], __a_barrier_v7[],
+ __a_cas_dummy[], __a_cas_v6[], __a_cas_v7[],
+ __a_gettp_dummy[];
+
+#define __a_barrier_kuser 0xffff0fa0
+#define __a_cas_kuser 0xffff0fc0
+#define __a_gettp_kuser 0xffff0fe0
+
+extern uintptr_t __attribute__((__visibility__("hidden")))
+ __a_barrier_ptr, __a_cas_ptr, __a_gettp_ptr;
+
+#define SET(op,ver) (__a_##op##_ptr = \
+ (uintptr_t)__a_##op##_##ver - (uintptr_t)__a_##op##_dummy)
+
+int __set_thread_area(void *p)
+{
+#if !__ARM_ARCH_7A__ && !__ARM_ARCH_7R__ && __ARM_ARCH < 7
+ if (__hwcap & HWCAP_TLS) {
+ size_t *aux;
+ SET(cas, v7);
+ SET(barrier, v7);
+ for (aux=libc.auxv; *aux; aux+=2) {
+ if (*aux != AT_PLATFORM) continue;
+ const char *s = (void *)aux[1];
+ if (s[0]!='v' || s[1]!='6' || s[2]-'0'<10u) break;
+ SET(cas, v6);
+ SET(barrier, v6);
+ break;
+ }
+ } else {
+ int ver = *(int *)0xffff0ffc;
+ SET(gettp, kuser);
+ SET(cas, kuser);
+ SET(barrier, kuser);
+ if (ver < 2) a_crash();
+ if (ver < 3) SET(barrier, oldkuser);
+ }
+#endif
+ return __syscall(0xf0005, p);
+}
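
Rather than keeping function pointers (which would need writable relocations and a memory-indirect call), SET() records the distance from each default routine to the chosen implementation, and the stubs in atomics.s (next hunk) add that distance to the PC. Note also how the __a_*_kuser macros above make SET(cas, kuser) expand with the fixed helper address in place of a symbol. A self-contained demo of the idea in C (a sketch only; converting integers to function pointers is implementation-defined, which is part of why the real dispatch is done in asm):

    #include <stdint.h>
    #include <stdio.h>

    static int cas_dummy(void) { return 0; }  /* stand-in for __a_cas_dummy */
    static int cas_v7(void)    { return 7; }  /* stand-in for __a_cas_v7 */

    static uintptr_t a_cas_ptr;               /* like __a_cas_ptr, starts 0 */

    #define SET(impl) (a_cas_ptr = (uintptr_t)(impl) - (uintptr_t)cas_dummy)

    static int call_cas(void)
    {
        /* what "ldr ip,...; add pc,pc,ip" amounts to: jump to dummy+offset */
        int (*fn)(void) = (int (*)(void))((uintptr_t)cas_dummy + a_cas_ptr);
        return fn();
    }

    int main(void)
    {
        printf("%d\n", call_cas());  /* 0: offset 0 selects the dummy */
        SET(cas_v7);
        printf("%d\n", call_cas());  /* 7 */
        return 0;
    }
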
--- a/src/thread/arm/__set_thread_area.s
+++ /dev/null
@@ -1 +0,0 @@
-/* Replaced by C code in arch/arm/src */
--- a/src/thread/arm/__unmapself.s
+++ b/src/thread/arm/__unmapself.s
@@ -1,3 +1,4 @@
+.syntax unified
.text
.global __unmapself
.type __unmapself,%function
--- /dev/null
+++ b/src/thread/arm/atomics.s
@@ -0,0 +1,111 @@
+.syntax unified
+.text
+
+.global __a_barrier
+.hidden __a_barrier
+.type __a_barrier,%function
+__a_barrier:
+ ldr ip,1f
+ ldr ip,[pc,ip]
+ add pc,pc,ip
+1: .word __a_barrier_ptr-1b
+.global __a_barrier_dummy
+.hidden __a_barrier_dummy
+__a_barrier_dummy:
+ bx lr
+.global __a_barrier_oldkuser
+.hidden __a_barrier_oldkuser
+__a_barrier_oldkuser:
+ push {r0,r1,r2,r3,ip,lr}
+ mov r1,r0
+ mov r2,sp
+ ldr ip,=0xffff0fc0
+ mov lr,pc
+ mov pc,ip
+ pop {r0,r1,r2,r3,ip,lr}
+ bx lr
+.global __a_barrier_v6
+.hidden __a_barrier_v6
+__a_barrier_v6:
+ mcr p15,0,r0,c7,c10,5
+ bx lr
+.global __a_barrier_v7
+.hidden __a_barrier_v7
+__a_barrier_v7:
+ .word 0xf57ff05b /* dmb ish */
+ bx lr
+
+.global __a_cas
+.hidden __a_cas
+.type __a_cas,%function
+__a_cas:
+ ldr ip,1f
+ ldr ip,[pc,ip]
+ add pc,pc,ip
+1: .word __a_cas_ptr-1b
+.global __a_cas_dummy
+.hidden __a_cas_dummy
+__a_cas_dummy:
+ mov r3,r0
+ ldr r0,[r2]
+ subs r0,r3,r0
+ streq r1,[r2]
+ bx lr
+.global __a_cas_v6
+.hidden __a_cas_v6
+__a_cas_v6:
+ mov r3,r0
+ mcr p15,0,r0,c7,c10,5
+1: .word 0xe1920f9f /* ldrex r0,[r2] */
+ subs r0,r3,r0
+ .word 0x01820f91 /* strexeq r0,r1,[r2] */
+ teqeq r0,#1
+ beq 1b
+ mcr p15,0,r0,c7,c10,5
+ bx lr
+.global __a_cas_v7
+.hidden __a_cas_v7
+__a_cas_v7:
+ mov r3,r0
+ .word 0xf57ff05b /* dmb ish */
+1: .word 0xe1920f9f /* ldrex r0,[r2] */
+ subs r0,r3,r0
+ .word 0x01820f91 /* strexeq r0,r1,[r2] */
+ teqeq r0,#1
+ beq 1b
+ .word 0xf57ff05b /* dmb ish */
+ bx lr
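
__a_cas_v6 and __a_cas_v7 are the usual load-exclusive/store-exclusive retry loop bracketed by barriers: ldrex reads the word, strexeq attempts the store only when the comparison matched, and teqeq/beq retries when the exclusive store was interrupted. Modelled with C11 atomics, which hide that retry internally (a sketch, not how musl itself is written):

    #include <stdatomic.h>

    /* atomically: val = *p; if (val == expected) *p = newval; return val */
    static int cas_model(_Atomic int *p, int expected, int newval)
    {
        int found = expected;
        atomic_compare_exchange_strong(p, &found, newval);
        return found;   /* == expected iff the swap happened */
    }
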
+
+.global __aeabi_read_tp
+.type __aeabi_read_tp,%function
+__aeabi_read_tp:
+
+.global __a_gettp
+.hidden __a_gettp
+.type __a_gettp,%function
+__a_gettp:
+ ldr r0,1f
+ ldr r0,[pc,r0]
+ add pc,pc,r0
+1: .word __a_gettp_ptr-1b
+.global __a_gettp_dummy
+.hidden __a_gettp_dummy
+__a_gettp_dummy:
+ mrc p15,0,r0,c13,c0,3
+ bx lr
+
+.data
+.global __a_barrier_ptr
+.hidden __a_barrier_ptr
+__a_barrier_ptr:
+ .word 0
+
+.global __a_cas_ptr
+.hidden __a_cas_ptr
+__a_cas_ptr:
+ .word 0
+
+.global __a_gettp_ptr
+.hidden __a_gettp_ptr
+__a_gettp_ptr:
+ .word 0
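
The kuser variants rely on the helpers the ARM Linux kernel maps at fixed addresses near the top of the address space: a version word at 0xffff0ffc, cmpxchg at 0xffff0fc0, get_tls at 0xffff0fe0 and, from helper version 3 on, a memory barrier at 0xffff0fa0 — which is why __set_thread_area falls back to __a_barrier_oldkuser, a barrier synthesized from a dummy cmpxchg on a stack word, when the version is below 3. From C the helpers would be reached like this (a sketch; meaningful only on ARM Linux):

    /* kernel-provided user helpers at fixed addresses (ARM Linux) */
    typedef int  (*kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
    typedef void (*kuser_dmb_t)(void);

    #define kuser_version (*(int *)0xffff0ffc)
    #define kuser_cmpxchg ((kuser_cmpxchg_t)0xffff0fc0)
    #define kuser_dmb     ((kuser_dmb_t)0xffff0fa0)

    /* cmpxchg returns 0 iff *ptr was changed from oldval to newval */
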
--- a/src/thread/arm/clone.s
+++ b/src/thread/arm/clone.s
@@ -1,3 +1,4 @@
+.syntax unified
.text
.global __clone
.type __clone,%function
@@ -15,8 +16,6 @@ __clone:
tst r0,r0
beq 1f
ldmfd sp!,{r4,r5,r6,r7}
- tst lr,#1
- moveq pc,lr
bx lr
1: mov r0,r6
--- a/src/thread/arm/syscall_cp.s
+++ b/src/thread/arm/syscall_cp.s
@@ -1,3 +1,4 @@
+.syntax unified
.global __cp_begin
.hidden __cp_begin
.global __cp_end
@@ -22,8 +23,6 @@ __cp_begin:
svc 0
__cp_end:
ldmfd sp!,{r4,r5,r6,r7,lr}
- tst lr,#1
- moveq pc,lr
bx lr
__cp_cancel:
ldmfd sp!,{r4,r5,r6,r7,lr}
--- a/src/thread/microblaze/syscall_cp.s
+++ b/src/thread/microblaze/syscall_cp.s
@@ -11,7 +11,7 @@
__syscall_cp_asm:
__cp_begin:
lwi r5, r5, 0
- bnei r5, __cancel
+ bnei r5, __cp_cancel
addi r12, r6, 0
add r5, r7, r0
add r6, r8, r0
@@ -23,3 +23,5 @@ __cp_begin:
__cp_end:
rtsd r15, 8
nop
+__cp_cancel:
+ bri __cancel
--- a/src/thread/or1k/syscall_cp.s
+++ b/src/thread/or1k/syscall_cp.s
@@ -12,7 +12,7 @@ __syscall_cp_asm:
__cp_begin:
l.lwz r3, 0(r3)
l.sfeqi r3, 0
- l.bnf __cancel
+ l.bnf __cp_cancel
l.ori r11, r4, 0
l.ori r3, r5, 0
l.ori r4, r6, 0
@@ -24,3 +24,6 @@ __cp_begin:
__cp_end:
l.jr r9
l.nop
+__cp_cancel:
+ l.j __cancel
+ l.nop
--- a/src/thread/powerpc/syscall_cp.s
+++ b/src/thread/powerpc/syscall_cp.s
@@ -38,7 +38,7 @@ __cp_begin:
cmpwi cr7, 0, 0 #compare r0 with 0, store result in cr7.
beq+ cr7, 1f #jump to label 1 if r0 was 0
- b __cancel #else call cancel
+ b __cp_cancel #else call cancel
1:
#ok, the cancel flag was not set
# syscall: number goes to r0, the rest 3-8
@@ -55,3 +55,5 @@ __cp_end:
#else negate result.
neg 3, 3
blr
+__cp_cancel:
+ b __cancel
--- a/src/thread/pthread_cancel.c
+++ b/src/thread/pthread_cancel.c
@@ -1,12 +1,11 @@
+#define _GNU_SOURCE
#include <string.h>
#include "pthread_impl.h"
#include "syscall.h"
#include "libc.h"
-#ifdef SHARED
__attribute__((__visibility__("hidden")))
-#endif
-long __cancel(), __cp_cancel(), __syscall_cp_asm(), __syscall_cp_c();
+long __cancel(), __syscall_cp_asm(), __syscall_cp_c();
long __cancel()
{
@@ -17,12 +16,6 @@ long __cancel()
return -ECANCELED;
}
-/* If __syscall_cp_asm has adjusted the stack pointer, it must provide a
- * definition of __cp_cancel to undo those adjustments and call __cancel.
- * Otherwise, __cancel provides a definition for __cp_cancel. */
-
-weak_alias(__cancel, __cp_cancel);
-
long __syscall_cp_asm(volatile void *, syscall_arg_t,
syscall_arg_t, syscall_arg_t, syscall_arg_t,
syscall_arg_t, syscall_arg_t, syscall_arg_t);
@@ -52,24 +45,22 @@ static void _sigaddset(sigset_t *set, in
set->__bits[s/8/sizeof *set->__bits] |= 1UL<<(s&8*sizeof *set->__bits-1);
}
-#ifdef SHARED
__attribute__((__visibility__("hidden")))
-#endif
-extern const char __cp_begin[1], __cp_end[1];
+extern const char __cp_begin[1], __cp_end[1], __cp_cancel[1];
static void cancel_handler(int sig, siginfo_t *si, void *ctx)
{
pthread_t self = __pthread_self();
ucontext_t *uc = ctx;
- const char *ip = ((char **)&uc->uc_mcontext)[CANCEL_REG_IP];
+ uintptr_t pc = uc->uc_mcontext.MC_PC;
a_barrier();
if (!self->cancel || self->canceldisable == PTHREAD_CANCEL_DISABLE) return;
_sigaddset(&uc->uc_sigmask, SIGCANCEL);
- if (self->cancelasync || ip >= __cp_begin && ip < __cp_end) {
- ((char **)&uc->uc_mcontext)[CANCEL_REG_IP] = (char *)__cp_cancel;
+ if (self->cancelasync || pc >= (uintptr_t)__cp_begin && pc < (uintptr_t)__cp_end) {
+ uc->uc_mcontext.MC_PC = (uintptr_t)__cp_cancel;
return;
}
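
MC_PC replaces the old CANCEL_REG_IP scheme of indexing into uc_mcontext through a char** cast; each arch's pthread_arch.h names the program-counter member of mcontext_t directly, and comparing it as uintptr_t also avoids the out-of-bounds pointer comparisons of the char* version. For example, on x86_64 the definition is along these lines (illustrative):

    /* in src/thread/x86_64/pthread_arch.h (sketch) */
    #define MC_PC gregs[REG_RIP]
    /* so uc->uc_mcontext.MC_PC reads uc->uc_mcontext.gregs[REG_RIP] */
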
--- /dev/null
+++ b/src/thread/sh/__set_thread_area.c
@@ -0,0 +1,40 @@
+#include "pthread_impl.h"
+#include "libc.h"
+#include <elf.h>
+
+/* Also perform sh-specific init */
+
+#define CPU_HAS_LLSC 0x0040
+#define CPU_HAS_CAS_L 0x0400
+
+__attribute__((__visibility__("hidden")))
+extern const char __sh_cas_gusa[], __sh_cas_llsc[], __sh_cas_imask[], __sh_cas_cas_l[];
+
+__attribute__((__visibility__("hidden")))
+const void *__sh_cas_ptr;
+
+__attribute__((__visibility__("hidden")))
+unsigned __sh_nommu;
+
+int __set_thread_area(void *p)
+{
+ size_t *aux;
+ __asm__ __volatile__ ( "ldc %0, gbr" : : "r"(p) : "memory" );
+#ifndef __SH4A__
+ __sh_cas_ptr = __sh_cas_gusa;
+#if !defined(__SH3__) && !defined(__SH4__)
+ for (aux=libc.auxv; *aux; aux+=2) {
+ if (*aux != AT_PLATFORM) continue;
+ const char *s = (void *)aux[1];
+ if (s[0]!='s' || s[1]!='h' || s[2]!='2' || s[3]-'0'<10u) break;
+ __sh_cas_ptr = __sh_cas_imask;
+ __sh_nommu = 1;
+ }
+#endif
+ if (__hwcap & CPU_HAS_CAS_L)
+ __sh_cas_ptr = __sh_cas_cas_l;
+ else if (__hwcap & CPU_HAS_LLSC)
+ __sh_cas_ptr = __sh_cas_llsc;
+#endif
+ return 0;
+}
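
Like the new ARM file, this scans the aux vector for AT_PLATFORM; auxv is a sequence of (key, value) pairs terminated by a zero key. The pattern in isolation (a sketch; libc.auxv is musl's saved pointer to the vector):

    #include <elf.h>
    #include <stddef.h>

    /* return the AT_PLATFORM string, or NULL if absent */
    static const char *platform_string(size_t *auxv)
    {
        for (size_t *aux = auxv; *aux; aux += 2)
            if (aux[0] == AT_PLATFORM)
                return (const char *)aux[1];
        return NULL;
    }
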
--- /dev/null
+++ b/src/thread/sh/atomics.s
@@ -0,0 +1,65 @@
+/* The contract for all versions is the same as cas.l r2,r3,@r0.
+ * pr and r1 are also clobbered (by jsr, and by use of r1 as a temp).
+ * r0,r2,r4-r15 must be preserved.
+ * r3 contains the result (==r2 iff the cas succeeded). */
+
+ .align 2
+.global __sh_cas_gusa
+.hidden __sh_cas_gusa
+__sh_cas_gusa:
+ mov.l r5,@-r15
+ mov.l r4,@-r15
+ mov r0,r4
+ mova 1f,r0
+ mov r15,r1
+ mov #(0f-1f),r15
+0: mov.l @r4,r5
+ cmp/eq r5,r2
+ bf 1f
+ mov.l r3,@r4
+1: mov r1,r15
+ mov r5,r3
+ mov r4,r0
+ mov.l @r15+,r4
+ rts
+ mov.l @r15+,r5
+
+.global __sh_cas_llsc
+.hidden __sh_cas_llsc
+__sh_cas_llsc:
+ mov r0,r1
+ synco
+0: movli.l @r1,r0
+ cmp/eq r0,r2
+ bf 1f
+ mov r3,r0
+ movco.l r0,@r1
+ bf 0b
+ mov r2,r0
+1: synco
+ mov r0,r3
+ rts
+ mov r1,r0
+
+.global __sh_cas_imask
+.hidden __sh_cas_imask
+__sh_cas_imask:
+ mov r0,r1
+ stc sr,r0
+ mov.l r0,@-r15
+ or #0xf0,r0
+ ldc r0,sr
+ mov.l @r1,r0
+ cmp/eq r0,r2
+ bf 1f
+ mov.l r3,@r1
+1: ldc.l @r15+,sr
+ mov r0,r3
+ rts
+ mov r1,r0
+
+.global __sh_cas_cas_l
+.hidden __sh_cas_cas_l
+__sh_cas_cas_l:
+ rts
+ .word 0x2323 /* cas.l r2,r3,@r0 */
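
All four routines implement the contract stated at the top of the file, differing only in how atomicity is obtained: gUSA rollback (the negative r15 marks the critical region), movli.l/movco.l linked/conditional stores, blanket interrupt masking for nommu sh2, or the single sh4a cas.l instruction. The contract itself, as a C model (not the implementation):

    /* r2=expected, r3=newval, @r0=target; result is returned in r3 */
    static int sh_cas_model(volatile int *p, int expected, int newval)
    {
        int old = *p;          /* each variant makes this sequence atomic */
        if (old == expected)
            *p = newval;
        return old;            /* == expected iff the cas succeeded */
    }
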
--- a/src/thread/sh/syscall_cp.s
+++ b/src/thread/sh/syscall_cp.s
@@ -14,17 +14,8 @@ __syscall_cp_asm:
__cp_begin:
mov.l @r4, r4
tst r4, r4
- bt 2f
-
- mov.l L1, r0
- braf r0
- nop
-1:
-
-.align 2
-L1: .long __cancel@PLT-(1b-.)
-
-2: mov r5, r3
+ bf __cp_cancel
+ mov r5, r3
mov r6, r4
mov r7, r5
mov.l @r15, r6
@@ -43,3 +34,12 @@ __cp_end:
rts
nop
+
+__cp_cancel:
+ mov.l 2f, r0
+ braf r0
+ nop
+1:
+
+.align 2
+2: .long __cancel@PCREL-(1b-.)
--- a/src/thread/x32/syscall_cp.s
+++ b/src/thread/x32/syscall_cp.s
@@ -14,7 +14,7 @@ __syscall_cp_internal:
__cp_begin:
mov (%rdi),%eax
test %eax,%eax
- jnz __cancel
+ jnz __cp_cancel
mov %rdi,%r11
mov %rsi,%rax
mov %rdx,%rdi
@@ -27,3 +27,5 @@ __cp_begin:
syscall
__cp_end:
ret
+__cp_cancel:
+ jmp __cancel
--- a/src/thread/x86_64/syscall_cp.s
+++ b/src/thread/x86_64/syscall_cp.s
@@ -14,7 +14,7 @@ __syscall_cp_asm:
__cp_begin:
mov (%rdi),%eax
test %eax,%eax
- jnz __cancel
+ jnz __cp_cancel
mov %rdi,%r11
mov %rsi,%rax
mov %rdx,%rdi
@@ -27,3 +27,5 @@ __cp_begin:
syscall
__cp_end:
ret
+__cp_cancel:
+ jmp __cancel