/* SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Copyright:
 *   2017-2020 Evan Nemerson <[email protected]>
 *   2015-2017 John W. Ratcliff <[email protected]>
 *   2015      Brandon Rowlett <[email protected]>
 *   2015      Ken Fast <[email protected]>
 *   2017      Hasindu Gamaarachchi <[email protected]>
 *   2018      Jeff Daily <[email protected]>
 */

#if !defined(SIMDE_X86_SSE2_H)
#define SIMDE_X86_SSE2_H

#include "sse.h"

HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_

typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
	SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
	SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
	SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 simde_float64 f64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
	SIMDE_ALIGN_TO_16 int8_t i8[16];
	SIMDE_ALIGN_TO_16 int16_t i16[8];
	SIMDE_ALIGN_TO_16 int32_t i32[4];
	SIMDE_ALIGN_TO_16 int64_t i64[2];
	SIMDE_ALIGN_TO_16 uint8_t u8[16];
	SIMDE_ALIGN_TO_16 uint16_t u16[8];
	SIMDE_ALIGN_TO_16 uint32_t u32[4];
	SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
	SIMDE_ALIGN_TO_16 simde_int128 i128[1];
	SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
	SIMDE_ALIGN_TO_16 simde_float32 f32[4];
	SIMDE_ALIGN_TO_16 simde_float64 f64[2];
	SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
	SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif

	SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
	SIMDE_ALIGN_TO_16 simde__m64 m64[2];

#if defined(SIMDE_X86_SSE2_NATIVE)
	SIMDE_ALIGN_TO_16 __m128i n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
	SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
	SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
	SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
	SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
	SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
	SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
	SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
	SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARCH_AARCH64)
	SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
#if defined(__INT_FAST32_TYPE__) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(__INT_FAST32_TYPE__) altivec_i32f;
#else
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32f;
#endif
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
#if defined(__UINT_FAST32_TYPE__) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(__UINT_FAST32_TYPE__) altivec_u32f;
#else
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32f;
#endif
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128i_private;

typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
	SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 simde_float64 f64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
	SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
	SIMDE_ALIGN_TO_16 int8_t i8[16];
	SIMDE_ALIGN_TO_16 int16_t i16[8];
	SIMDE_ALIGN_TO_16 int32_t i32[4];
	SIMDE_ALIGN_TO_16 int64_t i64[2];
	SIMDE_ALIGN_TO_16 uint8_t u8[16];
	SIMDE_ALIGN_TO_16 uint16_t u16[8];
	SIMDE_ALIGN_TO_16 uint32_t u32[4];
	SIMDE_ALIGN_TO_16 uint64_t u64[2];
	SIMDE_ALIGN_TO_16 simde_float32 f32[4];
	SIMDE_ALIGN_TO_16 simde_float64 f64[2];
	SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
	SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif

	SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
	SIMDE_ALIGN_TO_16 simde__m64 m64[2];

#if defined(SIMDE_X86_SSE2_NATIVE)
	SIMDE_ALIGN_TO_16 __m128d n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
	SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
	SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
	SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
	SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
	SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
	SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
	SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
	SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARCH_AARCH64)
	SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
#if defined(__INT_FAST32_TYPE__) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(__INT_FAST32_TYPE__) altivec_i32f;
#else
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32f;
#endif
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
#if defined(__UINT_FAST32_TYPE__) && defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(__UINT_FAST32_TYPE__) altivec_u32f;
#else
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32f;
#endif
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
	SIMDE_ALIGN_TO_16
	SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
	SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128d_private;

#if defined(SIMDE_X86_SSE2_NATIVE)
typedef __m128i simde__m128i;
typedef __m128d simde__m128d;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef int64x2_t simde__m128i;
#if defined(SIMDE_ARCH_AARCH64)
typedef float64x2_t simde__m128d;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float64 simde__m128d SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128d_private simde__m128d;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128i;
typedef v128_t simde__m128d;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128i;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(double) simde__m128d;
#else
typedef simde__m128d_private simde__m128d;
#endif
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef int64_t simde__m128i SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
typedef simde_float64
	simde__m128d SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128i_private simde__m128i;
typedef simde__m128d_private simde__m128d;
#endif

#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
typedef simde__m128i __m128i;
typedef simde__m128d __m128d;
#endif

HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128i), "simde__m128i size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128i_private),
		     "simde__m128i_private size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128d), "simde__m128d size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128d_private),
		     "simde__m128d_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128i) == 16,
		     "simde__m128i is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128i_private) == 16,
		     "simde__m128i_private is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128d) == 16,
		     "simde__m128d is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128d_private) == 16,
		     "simde__m128d_private is not 16-byte aligned");
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde__m128i_from_private(simde__m128i_private v)
{
	simde__m128i r;
	simde_memcpy(&r, &v, sizeof(r));
	return r;
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i_private simde__m128i_to_private(simde__m128i v)
{
	simde__m128i_private r;
	simde_memcpy(&r, &v, sizeof(r));
	return r;
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde__m128d_from_private(simde__m128d_private v)
{
	simde__m128d r;
	simde_memcpy(&r, &v, sizeof(r));
	return r;
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d_private simde__m128d_to_private(simde__m128d v)
{
	simde__m128d_private r;
	simde_memcpy(&r, &v, sizeof(r));
	return r;
}
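
/* Editor's note: the *_to_private/*_from_private pairs above are the core
 * pattern used by every portable fallback below: copy the opaque vector
 * into the private union, operate on whichever member matches the element
 * type, then copy back. A minimal sketch of that flow (illustrative only,
 * not part of SIMDe; negate_i32_lanes is a hypothetical helper):
 *
 *   static simde__m128i negate_i32_lanes(simde__m128i a)
 *   {
 *       simde__m128i_private a_ = simde__m128i_to_private(a);
 *       for (size_t i = 0; i < (sizeof(a_.i32) / sizeof(a_.i32[0])); i++)
 *           a_.i32[i] = -a_.i32[i];
 *       return simde__m128i_from_private(a_);
 *   }
 *
 * simde_memcpy is used rather than a pointer cast so the conversion stays
 * well-defined under strict aliasing. */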
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, float64x2_t, neon, f64)
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i,
				       SIMDE_POWER_ALTIVEC_VECTOR(signed char),
				       altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i,
				       SIMDE_POWER_ALTIVEC_VECTOR(signed short),
				       altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i,
				       SIMDE_POWER_ALTIVEC_VECTOR(signed int),
				       altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
	m128i, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
	m128i, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i,
				       SIMDE_POWER_ALTIVEC_VECTOR(unsigned int),
				       altivec, u32)
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
	m128i, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
	m128i, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */

#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, float64x2_t, neon, f64)
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d,
				       SIMDE_POWER_ALTIVEC_VECTOR(signed char),
				       altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d,
				       SIMDE_POWER_ALTIVEC_VECTOR(signed short),
				       altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d,
				       SIMDE_POWER_ALTIVEC_VECTOR(signed int),
				       altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
	m128d, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
	m128d, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d,
				       SIMDE_POWER_ALTIVEC_VECTOR(unsigned int),
				       altivec, u32)
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
	m128d, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(
	m128d, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(double)
simde__m128d_to_altivec_f64(simde__m128d value)
{
	simde__m128d_private r_ = simde__m128d_to_private(value);
	return r_.altivec_f64;
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde__m128d_from_altivec_f64(SIMDE_POWER_ALTIVEC_VECTOR(double)
						   value)
{
	simde__m128d_private r_;
	r_.altivec_f64 = value;
	return simde__m128d_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d,
				       SIMDE_POWER_ALTIVEC_VECTOR(double),
				       altivec, f64)
#endif
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128d, v128_t, wasm, v128);
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128i, v128_t, wasm, v128);
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_set_pd(simde_float64 e1, simde_float64 e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set_pd(e1, e0);
#else
	simde__m128d_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_make(e0, e1);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	SIMDE_ALIGN_TO_16 simde_float64 data[2] = {e0, e1};
	r_.neon_f64 = vld1q_f64(data);
#else
	r_.f64[0] = e0;
	r_.f64[1] = e1;
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_pd(e1, e0) simde_mm_set_pd(e1, e0)
#endif
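
/* Example (editor's sketch, not part of the original source): as with
 * Intel's _mm_set_pd, the first argument is the *high* lane and the last
 * is the *low* lane, so argument order is the reverse of memory order:
 *
 *   simde__m128d v = simde_mm_set_pd(1.0, 2.0);
 *   simde__m128d_private v_ = simde__m128d_to_private(v);
 *   // v_.f64[0] == 2.0 (e0, low lane); v_.f64[1] == 1.0 (e1, high lane)
 */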
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_set1_pd(simde_float64 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set1_pd(a);
#else
	simde__m128d_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_splat(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vdupq_n_f64(a);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	r_.altivec_f64 = vec_splats(HEDLEY_STATIC_CAST(double, a));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = a;
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#define simde_mm_set_pd1(a) simde_mm_set1_pd(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_pd(a) simde_mm_set1_pd(a)
#define _mm_set_pd1(a) simde_mm_set1_pd(a)
#endif
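
/* Functions named simde_x_* (such as simde_x_mm_abs_pd below) are SIMDe
 * extensions used internally by this header; they have no _mm_*
 * counterpart in Intel's SSE2 API and no native alias is defined. */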
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_x_mm_abs_pd(simde__m128d a)
{
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
	(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7, 4, 0))
	return _mm512_castpd512_pd128(_mm512_abs_pd(_mm512_castpd128_pd512(a)));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vabsq_f64(a_.neon_f64);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	r_.altivec_f64 = vec_abs(a_.altivec_f64);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = simde_math_fabs(a_.f64[i]);
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_x_mm_not_pd(simde__m128d a)
{
#if defined(SIMDE_X86_AVX512VL_NATIVE)
	__m128i ai = _mm_castpd_si128(a);
	return _mm_castsi128_pd(_mm_ternarylogic_epi64(ai, ai, ai, 0x55));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	r_.altivec_f64 = vec_nor(a_.altivec_f64, a_.altivec_f64);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32f = ~a_.i32f;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
		r_.i32f[i] = ~(a_.i32f[i]);
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_x_mm_select_pd(simde__m128d a, simde__m128d b,
				  simde__m128d mask)
{
	/* This function is for when you want to blend two elements together
	 * according to a mask. It is similar to _mm_blendv_pd, except that
	 * it is undefined whether the blend is based on the highest bit in
	 * each lane (like blendv) or just bitwise operations. This allows
	 * us to implement the function efficiently everywhere.
	 *
	 * Basically, you promise that all the lanes in mask are either 0 or
	 * ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
	return _mm_blendv_pd(a, b, mask);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b),
			     mask_ = simde__m128d_to_private(mask);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = a_.i64 ^ ((a_.i64 ^ b_.i64) & mask_.i64);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vbslq_s64(mask_.neon_u64, b_.neon_i64, a_.neon_i64);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		r_.i64[i] = a_.i64[i] ^
			    ((a_.i64[i] ^ b_.i64[i]) & mask_.i64[i]);
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
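
/* Usage sketch (editor's illustration, not from the original source): the
 * mask contract above is satisfied by any SSE2-style comparison, whose
 * lanes come back as all-zeros or all-ones. For example, a branch-free
 * per-lane maximum (lane_max is a hypothetical helper):
 *
 *   static simde__m128d lane_max(simde__m128d x, simde__m128d y)
 *   {
 *       simde__m128d mask = simde_mm_cmpgt_pd(y, x); // ~0 where y > x
 *       return simde_x_mm_select_pd(x, y, mask);     // picks y there
 *   }
 *
 * simde_mm_cmpgt_pd is defined later in this header. */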
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_add_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_add_epi8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i8 = vaddq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i8 = vec_add(a_.altivec_i8, b_.altivec_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i8x16_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i8 = a_.i8 + b_.i8;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i8) / sizeof(r_.i8[0])); i++) {
		r_.i8[i] = a_.i8[i] + b_.i8[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_epi8(a, b) simde_mm_add_epi8(a, b)
#endif
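
/* Example (editor's sketch): like the hardware instruction, the
 * simde_mm_add_epi{8,16,32,64} family wraps on overflow; contrast the
 * saturating simde_mm_adds_epi8 further down.
 *
 *   simde__m128i a = simde_mm_set1_epi8(127);
 *   simde__m128i b = simde_mm_set1_epi8(1);
 *   simde__m128i sum = simde_mm_add_epi8(a, b); // every lane wraps to -128
 */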
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_add_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_add_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vaddq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = vec_add(a_.altivec_i16, b_.altivec_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i16 = a_.i16 + b_.i16;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = a_.i16[i] + b_.i16[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_epi16(a, b) simde_mm_add_epi16(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_add_epi32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_add_epi32(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vaddq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = vec_add(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32 = a_.i32 + b_.i32;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = a_.i32[i] + b_.i32[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_epi32(a, b) simde_mm_add_epi32(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_add_epi64(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_add_epi64(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vaddq_s64(a_.neon_i64, b_.neon_i64);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	r_.altivec_i64 = vec_add(a_.altivec_i64, b_.altivec_i64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i64x2_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = a_.i64 + b_.i64;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		r_.i64[i] = a_.i64[i] + b_.i64[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_epi64(a, b) simde_mm_add_epi64(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_add_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_add_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vaddq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	r_.altivec_f64 = vec_add(a_.altivec_f64, b_.altivec_f64);
  630. #elif defined(SIMDE_WASM_SIMD128_NATIVE)
  631. r_.wasm_v128 = wasm_f64x2_add(a_.wasm_v128, b_.wasm_v128);
  632. #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
  633. r_.f64 = a_.f64 + b_.f64;
  634. #else
  635. SIMDE_VECTORIZE
  636. for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
  637. r_.f64[i] = a_.f64[i] + b_.f64[i];
  638. }
  639. #endif
  640. return simde__m128d_from_private(r_);
  641. #endif
  642. }
  643. #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
  644. #define _mm_add_pd(a, b) simde_mm_add_pd(a, b)
  645. #endif
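/* simde_mm_move_sd returns { b[0], a[1] }.  It is defined ahead of the
 * other scalar (*_sd) operations because their fallbacks are built on it. */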
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_move_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_move_sd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 =
		vsetq_lane_f64(vgetq_lane_f64(b_.neon_f64, 0), a_.neon_f64, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
#if defined(HEDLEY_IBM_VERSION)
	r_.altivec_f64 = vec_xxpermdi(a_.altivec_f64, b_.altivec_f64, 1);
#else
	r_.altivec_f64 = vec_xxpermdi(b_.altivec_f64, a_.altivec_f64, 1);
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_v64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 2, 1);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
	r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 2, 1);
#else
	r_.f64[0] = b_.f64[0];
	r_.f64[1] = a_.f64[1];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_move_sd(a, b) simde_mm_move_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_add_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_add_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_add_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	r_.f64[0] = a_.f64[0] + b_.f64[0];
	r_.f64[1] = a_.f64[1];
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_sd(a, b) simde_mm_add_sd(a, b)
#endif
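/* simde_mm_add_si64: a single 64-bit add on the MMX-style __m64 type. */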
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_add_si64(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
	return _mm_add_si64(a, b);
#else
	simde__m64_private r_, a_ = simde__m64_to_private(a),
			   b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vadd_s64(a_.neon_i64, b_.neon_i64);
#else
	r_.i64[0] = a_.i64[0] + b_.i64[0];
#endif
	return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_add_si64(a, b) simde_mm_add_si64(a, b)
#endif
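/* Saturating adds.  The portable fallbacks widen each element pair, add,
 * then clamp the sum to the element type's range instead of wrapping. */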
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_adds_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_adds_epi8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i8 = vqaddq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i8x16_add_saturate(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i8 = vec_adds(a_.altivec_i8, b_.altivec_i8);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i8) / sizeof(r_.i8[0])); i++) {
		const int_fast16_t tmp =
			HEDLEY_STATIC_CAST(int_fast16_t, a_.i8[i]) +
			HEDLEY_STATIC_CAST(int_fast16_t, b_.i8[i]);
		r_.i8[i] = HEDLEY_STATIC_CAST(
			int8_t,
			((tmp < INT8_MAX) ? ((tmp > INT8_MIN) ? tmp : INT8_MIN)
					  : INT8_MAX));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_adds_epi8(a, b) simde_mm_adds_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_adds_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_adds_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vqaddq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_add_saturate(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = vec_adds(a_.altivec_i16, b_.altivec_i16);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		const int_fast32_t tmp =
			HEDLEY_STATIC_CAST(int_fast32_t, a_.i16[i]) +
			HEDLEY_STATIC_CAST(int_fast32_t, b_.i16[i]);
		r_.i16[i] = HEDLEY_STATIC_CAST(
			int16_t,
			((tmp < INT16_MAX)
				 ? ((tmp > INT16_MIN) ? tmp : INT16_MIN)
				 : INT16_MAX));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_adds_epi16(a, b) simde_mm_adds_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_adds_epu8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_adds_epu8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vqaddq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u8x16_add_saturate(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	r_.altivec_u8 = vec_adds(a_.altivec_u8, b_.altivec_u8);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
		r_.u8[i] = ((UINT8_MAX - a_.u8[i]) > b_.u8[i])
				   ? (a_.u8[i] + b_.u8[i])
				   : UINT8_MAX;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_adds_epu8(a, b) simde_mm_adds_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_adds_epu16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_adds_epu16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u16 = vqaddq_u16(a_.neon_u16, b_.neon_u16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u16x8_add_saturate(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u16 = vec_adds(a_.altivec_u16, b_.altivec_u16);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) {
		r_.u16[i] = ((UINT16_MAX - a_.u16[i]) > b_.u16[i])
				    ? (a_.u16[i] + b_.u16[i])
				    : UINT16_MAX;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_adds_epu16(a, b) simde_mm_adds_epu16(a, b)
#endif
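/* Bitwise AND / ANDNOT.  These operate on raw bits, so the fallbacks use
 * whichever integer view of the register is convenient; note that the
 * _mm_andnot_* operations compute (~a) & b, not a & ~b. */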
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_and_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_and_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	r_.altivec_f64 = vec_and(a_.altivec_f64, b_.altivec_f64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32f = a_.i32f & b_.i32f;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
		r_.i32f[i] = a_.i32f[i] & b_.i32f[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_and_pd(a, b) simde_mm_and_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_and_si128(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_and_si128(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vandq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u32f = vec_and(a_.altivec_u32f, b_.altivec_u32f);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32f = a_.i32f & b_.i32f;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
		r_.i32f[i] = a_.i32f[i] & b_.i32f[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_and_si128(a, b) simde_mm_and_si128(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_andnot_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_andnot_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	r_.altivec_f64 = vec_andc(b_.altivec_f64, a_.altivec_f64);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32f = vec_andc(b_.altivec_i32f, a_.altivec_i32f);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32f = ~a_.i32f & b_.i32f;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u64) / sizeof(r_.u64[0])); i++) {
		r_.u64[i] = ~a_.u64[i] & b_.u64[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_andnot_pd(a, b) simde_mm_andnot_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_andnot_si128(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_andnot_si128(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = vec_andc(b_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32f = ~a_.i32f & b_.i32f;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
		r_.i32f[i] = ~(a_.i32f[i]) & b_.i32f[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_andnot_si128(a, b) simde_mm_andnot_si128(a, b)
#endif
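/* simde_mm_xor_pd is defined out of sequence here because the generic
 * fallback of simde_x_mm_copysign_pd further down relies on it. */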
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_xor_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_xor_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32f = a_.i32f ^ b_.i32f;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = veorq_s64(a_.neon_i64, b_.neon_i64);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
		r_.i32f[i] = a_.i32f[i] ^ b_.i32f[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_xor_pd(a, b) simde_mm_xor_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_avg_epu8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_avg_epu8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vrhaddq_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u8x16_avgr(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u8 = vec_avg(a_.altivec_u8, b_.altivec_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) &&   \
	defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && \
	defined(SIMDE_CONVERT_VECTOR_)
	uint16_t wa SIMDE_VECTOR(32);
	uint16_t wb SIMDE_VECTOR(32);
	uint16_t wr SIMDE_VECTOR(32);
	SIMDE_CONVERT_VECTOR_(wa, a_.u8);
	SIMDE_CONVERT_VECTOR_(wb, b_.u8);
	wr = (wa + wb + 1) >> 1;
	SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
		r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_avg_epu8(a, b) simde_mm_avg_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_avg_epu16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_avg_epu16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u16 = vrhaddq_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u16x8_avgr(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u16 = vec_avg(a_.altivec_u16, b_.altivec_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) &&   \
	defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && \
	defined(SIMDE_CONVERT_VECTOR_)
	uint32_t wa SIMDE_VECTOR(32);
	uint32_t wb SIMDE_VECTOR(32);
	uint32_t wr SIMDE_VECTOR(32);
	SIMDE_CONVERT_VECTOR_(wa, a_.u16);
	SIMDE_CONVERT_VECTOR_(wb, b_.u16);
	wr = (wa + wb + 1) >> 1;
	SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) {
		r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_avg_epu16(a, b) simde_mm_avg_epu16(a, b)
#endif
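/* simde_mm_setzero_si128 is defined here because the byte-shift helpers
 * below use it for their out-of-range and fallback paths. */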
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_setzero_si128(void)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_setzero_si128();
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vdupq_n_s32(0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = vec_splats(HEDLEY_STATIC_CAST(signed int, 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_splat(INT32_C(0));
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
	r_.i32 = __extension__(__typeof__(r_.i32)){0, 0, 0, 0};
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
		r_.i32f[i] = 0;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setzero_si128() (simde_mm_setzero_si128())
#endif
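/* simde_mm_bslli_si128 / simde_mm_bsrli_si128 shift the whole 128-bit
 * value left/right by imm8 *bytes* (not bits); any shift amount outside
 * 0..15 produces zero.  _mm_slli_si128 and _mm_srli_si128 are aliases of
 * the same operations. */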
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_bslli_si128(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
	if (HEDLEY_UNLIKELY((imm8 & ~15))) {
		return simde_mm_setzero_si128();
	}
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_ENDIAN_ORDER)
	r_.altivec_i8 =
#if (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
		vec_slo
#else /* SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG */
		vec_sro
#endif
		(a_.altivec_i8,
		 vec_splats(HEDLEY_STATIC_CAST(unsigned char, imm8 * 8)));
#elif defined(SIMDE_HAVE_INT128_) && (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
	r_.u128[0] = a_.u128[0] << (imm8 * 8);
#else
	r_ = simde__m128i_to_private(simde_mm_setzero_si128());
	for (int i = imm8;
	     i < HEDLEY_STATIC_CAST(int, sizeof(r_.i8) / sizeof(r_.i8[0]));
	     i++) {
		r_.i8[i] = a_.i8[i - imm8];
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
#define simde_mm_bslli_si128(a, imm8) _mm_slli_si128(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__)
#define simde_mm_bslli_si128(a, imm8)                              \
	simde__m128i_from_neon_i8(                                 \
		((imm8) <= 0)                                      \
			? simde__m128i_to_neon_i8(a)               \
			: (((imm8) > 15)                           \
				   ? (vdupq_n_s8(0))               \
				   : (vextq_s8(vdupq_n_s8(0),      \
					       simde__m128i_to_neon_i8(a), \
					       16 - (imm8)))))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#define simde_mm_bslli_si128(a, imm8)                                         \
	(__extension__({                                                      \
		const simde__m128i_private simde__tmp_a_ =                    \
			simde__m128i_to_private(a);                           \
		const simde__m128i_private simde__tmp_z_ =                    \
			simde__m128i_to_private(simde_mm_setzero_si128());    \
		simde__m128i_private simde__tmp_r_;                           \
		if (HEDLEY_UNLIKELY(imm8 > 15)) {                             \
			simde__tmp_r_ = simde__m128i_to_private(              \
				simde_mm_setzero_si128());                    \
		} else {                                                      \
			simde__tmp_r_.i8 = SIMDE_SHUFFLE_VECTOR_(             \
				8, 16, simde__tmp_z_.i8, (simde__tmp_a_).i8,  \
				HEDLEY_STATIC_CAST(int8_t, (16 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (17 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (18 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (19 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (20 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (21 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (22 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (23 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (24 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (25 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (26 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (27 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (28 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (29 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (30 - imm8) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (31 - imm8) & 31)); \
		}                                                             \
		simde__m128i_from_private(simde__tmp_r_);                     \
	}))
#endif
#define simde_mm_slli_si128(a, imm8) simde_mm_bslli_si128(a, imm8)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_bslli_si128(a, imm8) simde_mm_bslli_si128(a, imm8)
#define _mm_slli_si128(a, imm8) simde_mm_bslli_si128(a, imm8)
#endif
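/* Illustrative example (not from the original header): with byte lanes
 * a = {1, 2, 3, ..., 16}, simde_mm_bsrli_si128(a, 4) yields
 * {5, 6, ..., 16, 0, 0, 0, 0}. */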
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_bsrli_si128(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
	if (HEDLEY_UNLIKELY((imm8 & ~15))) {
		return simde_mm_setzero_si128();
	}
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_ENDIAN_ORDER)
	r_.altivec_i8 =
#if (SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
		vec_sro
#else /* SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG */
		vec_slo
#endif
		(a_.altivec_i8,
		 vec_splats(HEDLEY_STATIC_CAST(unsigned char, imm8 * 8)));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i8) / sizeof(r_.i8[0])); i++) {
		const int e = HEDLEY_STATIC_CAST(int, i) + imm8;
		r_.i8[i] = (e < 16) ? a_.i8[e] : 0;
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
#define simde_mm_bsrli_si128(a, imm8) _mm_srli_si128(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) && !defined(__clang__)
#define simde_mm_bsrli_si128(a, imm8)                               \
	simde__m128i_from_neon_i8(                                  \
		((imm8 < 0) || (imm8 > 15))                         \
			? vdupq_n_s8(0)                             \
			: (vextq_s8(simde__m128i_to_private(a).neon_i8, \
				    vdupq_n_s8(0),                  \
				    ((imm8 & 15) != 0) ? imm8 : (imm8 & 15))))
#elif defined(SIMDE_SHUFFLE_VECTOR_) && !defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#define simde_mm_bsrli_si128(a, imm8)                                         \
	(__extension__({                                                      \
		const simde__m128i_private simde__tmp_a_ =                    \
			simde__m128i_to_private(a);                           \
		const simde__m128i_private simde__tmp_z_ =                    \
			simde__m128i_to_private(simde_mm_setzero_si128());    \
		simde__m128i_private simde__tmp_r_ =                          \
			simde__m128i_to_private(a);                           \
		if (HEDLEY_UNLIKELY(imm8 > 15)) {                             \
			simde__tmp_r_ = simde__m128i_to_private(              \
				simde_mm_setzero_si128());                    \
		} else {                                                      \
			simde__tmp_r_.i8 = SIMDE_SHUFFLE_VECTOR_(             \
				8, 16, simde__tmp_z_.i8, (simde__tmp_a_).i8,  \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 16) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 17) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 18) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 19) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 20) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 21) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 22) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 23) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 24) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 25) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 26) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 27) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 28) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 29) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 30) & 31), \
				HEDLEY_STATIC_CAST(int8_t, (imm8 + 31) & 31)); \
		}                                                             \
		simde__m128i_from_private(simde__tmp_r_);                     \
	}))
#endif
#define simde_mm_srli_si128(a, imm8) simde_mm_bsrli_si128((a), (imm8))
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_bsrli_si128(a, imm8) simde_mm_bsrli_si128((a), (imm8))
#define _mm_srli_si128(a, imm8) simde_mm_bsrli_si128((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_clflush(void const *p)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_clflush(p);
#else
	(void)p;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
/* The alias takes the same single pointer argument as the native intrinsic
 * and forwards it (the original macro dropped the argument and declared a
 * spurious second parameter). */
#define _mm_clflush(p) simde_mm_clflush(p)
#endif
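/* _mm_comi*_sd: scalar comparisons of the low double-precision lane that
 * return 0 or 1 rather than a lane mask. */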
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comieq_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_comieq_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return !!vgetq_lane_u64(vceqq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) ==
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
	return a_.f64[0] == b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comieq_sd(a, b) simde_mm_comieq_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comige_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_comige_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return !!vgetq_lane_u64(vcgeq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) >=
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
	return a_.f64[0] >= b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comige_sd(a, b) simde_mm_comige_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comigt_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_comigt_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return !!vgetq_lane_u64(vcgtq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) >
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
	return a_.f64[0] > b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comigt_sd(a, b) simde_mm_comigt_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comile_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_comile_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return !!vgetq_lane_u64(vcleq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) <=
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
	return a_.f64[0] <= b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comile_sd(a, b) simde_mm_comile_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comilt_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_comilt_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return !!vgetq_lane_u64(vcltq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) <
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
	return a_.f64[0] < b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comilt_sd(a, b) simde_mm_comilt_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_comineq_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_comineq_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return !vgetq_lane_u64(vceqq_f64(a_.neon_f64, b_.neon_f64), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) !=
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#else
	return a_.f64[0] != b_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_comineq_sd(a, b) simde_mm_comineq_sd(a, b)
#endif
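/* The simde_x_ prefix marks SIMDE-only extensions with no native Intel
 * equivalent.  simde_x_mm_copysign_pd copies the sign of each lane of src
 * onto the corresponding lane of dest. */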
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_x_mm_copysign_pd(simde__m128d dest, simde__m128d src)
{
	simde__m128d_private r_, dest_ = simde__m128d_to_private(dest),
			     src_ = simde__m128d_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint64x2_t sign_pos =
		vreinterpretq_u64_f64(vdupq_n_f64(-SIMDE_FLOAT64_C(0.0)));
#else
	simde_float64 dbl_nz = -SIMDE_FLOAT64_C(0.0);
	uint64_t u64_nz;
	simde_memcpy(&u64_nz, &dbl_nz, sizeof(u64_nz));
	uint64x2_t sign_pos = vdupq_n_u64(u64_nz);
#endif
	r_.neon_u64 = vbslq_u64(sign_pos, src_.neon_u64, dest_.neon_u64);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
	r_.altivec_f64 = vec_cpsgn(dest_.altivec_f64, src_.altivec_f64);
#else
	r_.altivec_f64 = vec_cpsgn(src_.altivec_f64, dest_.altivec_f64);
#endif
#elif defined(simde_math_copysign)
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = simde_math_copysign(dest_.f64[i], src_.f64[i]);
	}
#else
	simde__m128d sgnbit = simde_mm_set1_pd(-SIMDE_FLOAT64_C(0.0));
	return simde_mm_xor_pd(simde_mm_and_pd(sgnbit, src),
			       simde_mm_andnot_pd(sgnbit, dest));
#endif
	return simde__m128d_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_x_mm_xorsign_pd(simde__m128d dest, simde__m128d src)
{
	return simde_mm_xor_pd(simde_mm_and_pd(simde_mm_set1_pd(-0.0), src),
			       dest);
}
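/* The _mm_cast* family reinterprets the bits of one vector type as another;
 * no value conversion takes place, so the generic fallback is a memcpy. */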
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_castpd_ps(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_castpd_ps(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return vreinterpretq_f32_f64(a);
#else
	simde__m128 r;
	simde_memcpy(&r, &a, sizeof(a));
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castpd_ps(a) simde_mm_castpd_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_castpd_si128(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_castpd_si128(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return vreinterpretq_s64_f64(a);
#else
	simde__m128i r;
	simde_memcpy(&r, &a, sizeof(a));
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castpd_si128(a) simde_mm_castpd_si128(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_castps_pd(simde__m128 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_castps_pd(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return vreinterpretq_f64_f32(a);
#else
	simde__m128d r;
	simde_memcpy(&r, &a, sizeof(a));
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castps_pd(a) simde_mm_castps_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_castps_si128(simde__m128 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_castps_si128(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	return simde__m128i_from_neon_i32(simde__m128_to_private(a).neon_i32);
#else
	simde__m128i r;
	simde_memcpy(&r, &a, sizeof(a));
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castps_si128(a) simde_mm_castps_si128(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_castsi128_pd(simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_castsi128_pd(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return vreinterpretq_f64_s64(a);
#else
	simde__m128d r;
	simde_memcpy(&r, &a, sizeof(a));
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castsi128_pd(a) simde_mm_castsi128_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_castsi128_ps(simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_castsi128_ps(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	return HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	return simde__m128_from_neon_i32(simde__m128i_to_private(a).neon_i32);
#else
	simde__m128 r;
	simde_memcpy(&r, &a, sizeof(a));
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_castsi128_ps(a) simde_mm_castsi128_ps(a)
#endif
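/* Packed comparisons: each result lane is all ones when the predicate holds
 * for that lane and all zeros otherwise. */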
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmpeq_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpeq_epi8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vceqq_s8(b_.neon_i8, a_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i8x16_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed char),
		vec_cmpeq(a_.altivec_i8, b_.altivec_i8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i8 = HEDLEY_STATIC_CAST(__typeof__(r_.i8), (a_.i8 == b_.i8));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i8) / sizeof(r_.i8[0])); i++) {
		r_.i8[i] = (a_.i8[i] == b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_epi8(a, b) simde_mm_cmpeq_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmpeq_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpeq_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u16 = vceqq_s16(b_.neon_i16, a_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed short),
		vec_cmpeq(a_.altivec_i16, b_.altivec_i16));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	/* Cast added for consistency with the epi8/epi32 variants. */
	r_.i16 = HEDLEY_STATIC_CAST(__typeof__(r_.i16), (a_.i16 == b_.i16));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = (a_.i16[i] == b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_epi16(a, b) simde_mm_cmpeq_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmpeq_epi32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpeq_epi32(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u32 = vceqq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed int),
		vec_cmpeq(a_.altivec_i32, b_.altivec_i32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.i32 == b_.i32));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = (a_.i32[i] == b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_epi32(a, b) simde_mm_cmpeq_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpeq_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpeq_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	/* Compare as doubles, not as integer bit patterns: a bitwise compare
	 * mishandles NaN (never equal) and -0.0 == +0.0. */
	r_.neon_u64 = vceqq_f64(b_.neon_f64, a_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(double),
		vec_cmpeq(a_.altivec_f64, b_.altivec_f64));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 == b_.f64));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.u64[i] = (a_.f64[i] == b_.f64[i]) ? ~UINT64_C(0)
						     : UINT64_C(0);
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_pd(a, b) simde_mm_cmpeq_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpeq_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpeq_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_cmpeq_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	/* Floating-point equality, not bit equality (see simde_mm_cmpeq_pd). */
	r_.u64[0] = (a_.f64[0] == b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
	r_.u64[1] = a_.u64[1];
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpeq_sd(a, b) simde_mm_cmpeq_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpneq_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpneq_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_u32 = vmvnq_u32(
		vreinterpretq_u32_u64(vceqq_f64(b_.neon_f64, a_.neon_f64)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 != b_.f64));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.u64[i] = (a_.f64[i] != b_.f64[i]) ? ~UINT64_C(0)
						     : UINT64_C(0);
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpneq_pd(a, b) simde_mm_cmpneq_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpneq_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpneq_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_cmpneq_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	r_.u64[0] = (a_.f64[0] != b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
	r_.u64[1] = a_.u64[1];
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpneq_sd(a, b) simde_mm_cmpneq_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmplt_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmplt_epi8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vcltq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed char),
		vec_cmplt(a_.altivec_i8, b_.altivec_i8));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i8x16_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i8 = HEDLEY_STATIC_CAST(__typeof__(r_.i8), (a_.i8 < b_.i8));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i8) / sizeof(r_.i8[0])); i++) {
		r_.i8[i] = (a_.i8[i] < b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_epi8(a, b) simde_mm_cmplt_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmplt_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmplt_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u16 = vcltq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed short),
		vec_cmplt(a_.altivec_i16, b_.altivec_i16));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i16 = HEDLEY_STATIC_CAST(__typeof__(r_.i16), (a_.i16 < b_.i16));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_epi16(a, b) simde_mm_cmplt_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmplt_epi32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmplt_epi32(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u32 = vcltq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed int),
		vec_cmplt(a_.altivec_i32, b_.altivec_i32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.i32 < b_.i32));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = (a_.i32[i] < b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_epi32(a, b) simde_mm_cmplt_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmplt_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmplt_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 < b_.f64));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_u64 = vcltq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_lt(a_.wasm_v128, b_.wasm_v128);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.u64[i] = (a_.f64[i] < b_.f64[i]) ? ~UINT64_C(0)
						    : UINT64_C(0);
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_pd(a, b) simde_mm_cmplt_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmplt_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmplt_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_cmplt_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	r_.u64[0] = (a_.f64[0] < b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
	r_.u64[1] = a_.u64[1];
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmplt_sd(a, b) simde_mm_cmplt_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmple_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmple_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 <= b_.f64));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_u64 = vcleq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	/* Guarded by P7 (VSX), which is where the double-precision vector
	 * type becomes available. */
	r_.altivec_f64 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(double),
		vec_cmple(a_.altivec_f64, b_.altivec_f64));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.u64[i] = (a_.f64[i] <= b_.f64[i]) ? ~UINT64_C(0)
						     : UINT64_C(0);
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmple_pd(a, b) simde_mm_cmple_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmple_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmple_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_cmple_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	r_.u64[0] = (a_.f64[0] <= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
	r_.u64[1] = a_.u64[1];
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmple_sd(a, b) simde_mm_cmple_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmpgt_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpgt_epi8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vcgtq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i8x16_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i8 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed char),
		vec_cmpgt(a_.altivec_i8, b_.altivec_i8));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i8 = HEDLEY_STATIC_CAST(__typeof__(r_.i8), (a_.i8 > b_.i8));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i8) / sizeof(r_.i8[0])); i++) {
		r_.i8[i] = (a_.i8[i] > b_.i8[i]) ? ~INT8_C(0) : INT8_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_epi8(a, b) simde_mm_cmpgt_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmpgt_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpgt_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u16 = vcgtq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed short),
		vec_cmpgt(a_.altivec_i16, b_.altivec_i16));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i16 = HEDLEY_STATIC_CAST(__typeof__(r_.i16), (a_.i16 > b_.i16));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? ~INT16_C(0) : INT16_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_epi16(a, b) simde_mm_cmpgt_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cmpgt_epi32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpgt_epi32(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u32 = vcgtq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = HEDLEY_REINTERPRET_CAST(
		SIMDE_POWER_ALTIVEC_VECTOR(signed int),
		vec_cmpgt(a_.altivec_i32, b_.altivec_i32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.i32 > b_.i32));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = (a_.i32[i] > b_.i32[i]) ? ~INT32_C(0) : INT32_C(0);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpgt_epi32(a, b) simde_mm_cmpgt_epi32(a, b)
#endif
  1961. SIMDE_FUNCTION_ATTRIBUTES
  1962. simde__m128d simde_mm_cmpgt_pd(simde__m128d a, simde__m128d b)
  1963. {
  1964. #if defined(SIMDE_X86_SSE2_NATIVE)
  1965. return _mm_cmpgt_pd(a, b);
  1966. #else
  1967. simde__m128d_private r_, a_ = simde__m128d_to_private(a),
  1968. b_ = simde__m128d_to_private(b);
  1969. #if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
  1970. r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 > b_.f64));
  1971. #elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  1972. r_.neon_u64 = vcgtq_f64(a_.neon_f64, b_.neon_f64);
  1973. #elif defined(SIMDE_WASM_SIMD128_NATIVE)
  1974. r_.wasm_v128 = wasm_f64x2_gt(a_.wasm_v128, b_.wasm_v128);
  1975. #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
  1976. r_.altivec_f64 =
  1977. HEDLEY_STATIC_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double),
  1978. vec_cmpgt(a_.altivec_f64, b_.altivec_f64));
  1979. #else
  1980. SIMDE_VECTORIZE
  1981. for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
  1982. r_.u64[i] = (a_.f64[i] > b_.f64[i]) ? ~UINT64_C(0)
  1983. : UINT64_C(0);
  1984. }
  1985. #endif
  1986. return simde__m128d_from_private(r_);
  1987. #endif
  1988. }
  1989. #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
  1990. #define _mm_cmpgt_pd(a, b) simde_mm_cmpgt_pd(a, b)
  1991. #endif
  1992. SIMDE_FUNCTION_ATTRIBUTES
  1993. simde__m128d simde_mm_cmpgt_sd(simde__m128d a, simde__m128d b)
  1994. {
  1995. #if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
  1996. return _mm_cmpgt_sd(a, b);
  1997. #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
  1998. return simde_mm_move_sd(a, simde_mm_cmpgt_pd(a, b));
  1999. #else
  2000. simde__m128d_private r_, a_ = simde__m128d_to_private(a),
  2001. b_ = simde__m128d_to_private(b);
  2002. r_.u64[0] = (a_.f64[0] > b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
  2003. r_.u64[1] = a_.u64[1];
  2004. return simde__m128d_from_private(r_);
  2005. #endif
  2006. }
  2007. #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
  2008. #define _mm_cmpgt_sd(a, b) simde_mm_cmpgt_sd(a, b)
  2009. #endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpge_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpge_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = HEDLEY_STATIC_CAST(__typeof__(r_.i64), (a_.f64 >= b_.f64));
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_u64 = vcgeq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_f64 =
		HEDLEY_STATIC_CAST(SIMDE_POWER_ALTIVEC_VECTOR(double),
				   vec_cmpge(a_.altivec_f64, b_.altivec_f64));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.u64[i] = (a_.f64[i] >= b_.f64[i]) ? ~UINT64_C(0)
						     : UINT64_C(0);
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpge_pd(a, b) simde_mm_cmpge_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpge_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
	return _mm_cmpge_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_cmpge_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			b_ = simde__m128d_to_private(b);
	r_.u64[0] = (a_.f64[0] >= b_.f64[0]) ? ~UINT64_C(0) : UINT64_C(0);
	r_.u64[1] = a_.u64[1];
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpge_sd(a, b) simde_mm_cmpge_sd(a, b)
#endif
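
/* Caveat for the cmpngt/cmpnge/cmpnlt/cmpnle fallbacks below: they map
 * "not greater than" to "less than or equal" and so on, which is only
 * equivalent for ordered inputs. On x86 the N* compares return all-ones
 * when either operand is NaN, while the positive compares return zero,
 * so results differ for NaN lanes. */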
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpngt_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpngt_pd(a, b);
#else
	return simde_mm_cmple_pd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpngt_pd(a, b) simde_mm_cmpngt_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpngt_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
	return _mm_cmpngt_sd(a, b);
#else
	return simde_mm_cmple_sd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpngt_sd(a, b) simde_mm_cmpngt_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpnge_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpnge_pd(a, b);
#else
	return simde_mm_cmplt_pd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnge_pd(a, b) simde_mm_cmpnge_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpnge_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
	return _mm_cmpnge_sd(a, b);
#else
	return simde_mm_cmplt_sd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnge_sd(a, b) simde_mm_cmpnge_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpnlt_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpnlt_pd(a, b);
#else
	return simde_mm_cmpge_pd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnlt_pd(a, b) simde_mm_cmpnlt_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpnlt_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpnlt_sd(a, b);
#else
	return simde_mm_cmpge_sd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnlt_sd(a, b) simde_mm_cmpnlt_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpnle_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpnle_pd(a, b);
#else
	return simde_mm_cmpgt_pd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnle_pd(a, b) simde_mm_cmpnle_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpnle_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpnle_sd(a, b);
#else
	return simde_mm_cmpgt_sd(a, b);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpnle_sd(a, b) simde_mm_cmpnle_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpord_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpord_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	/* Note: NEON has no ordered-compare builtin, so compare a == a and
	 * b == b (each false only for NaN lanes) and AND the results to
	 * build the final mask. */
	uint64x2_t ceqaa = vceqq_f64(a_.neon_f64, a_.neon_f64);
	uint64x2_t ceqbb = vceqq_f64(b_.neon_f64, b_.neon_f64);
	r_.neon_u64 = vandq_u64(ceqaa, ceqbb);
#elif defined(simde_math_isnan)
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.u64[i] = (!simde_math_isnan(a_.f64[i]) &&
			     !simde_math_isnan(b_.f64[i]))
				    ? ~UINT64_C(0)
				    : UINT64_C(0);
	}
#else
	HEDLEY_UNREACHABLE();
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpord_pd(a, b) simde_mm_cmpord_pd(a, b)
#endif
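
/* Illustrative usage (not part of the original header): the mask from
 * simde_mm_cmpord_pd is commonly used to zero out NaN lanes, e.g.
 *
 *   simde__m128d cleaned = simde_mm_and_pd(x, simde_mm_cmpord_pd(x, x));
 *
 * Ordered lanes pass through unchanged; NaN lanes become +0.0. */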
SIMDE_FUNCTION_ATTRIBUTES
simde_float64 simde_mm_cvtsd_f64(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
	return _mm_cvtsd_f64(a);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return HEDLEY_STATIC_CAST(simde_float64,
				  vgetq_lane_f64(a_.neon_f64, 0));
#else
	return a_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsd_f64(a) simde_mm_cvtsd_f64(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpord_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpord_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_cmpord_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			b_ = simde__m128d_to_private(b);
#if defined(simde_math_isnan)
	r_.u64[0] =
		(!simde_math_isnan(a_.f64[0]) && !simde_math_isnan(b_.f64[0]))
			? ~UINT64_C(0)
			: UINT64_C(0);
	r_.u64[1] = a_.u64[1];
#else
	HEDLEY_UNREACHABLE();
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpord_sd(a, b) simde_mm_cmpord_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpunord_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpunord_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint64x2_t ceqaa = vceqq_f64(a_.neon_f64, a_.neon_f64);
	uint64x2_t ceqbb = vceqq_f64(b_.neon_f64, b_.neon_f64);
	r_.neon_u64 = vreinterpretq_u64_u32(
		vmvnq_u32(vreinterpretq_u32_u64(vandq_u64(ceqaa, ceqbb))));
#elif defined(simde_math_isnan)
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.u64[i] = (simde_math_isnan(a_.f64[i]) ||
			     simde_math_isnan(b_.f64[i]))
				    ? ~UINT64_C(0)
				    : UINT64_C(0);
	}
#else
	HEDLEY_UNREACHABLE();
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpunord_pd(a, b) simde_mm_cmpunord_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cmpunord_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cmpunord_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_cmpunord_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			b_ = simde__m128d_to_private(b);
#if defined(simde_math_isnan)
	r_.u64[0] = (simde_math_isnan(a_.f64[0]) || simde_math_isnan(b_.f64[0]))
			    ? ~UINT64_C(0)
			    : UINT64_C(0);
	r_.u64[1] = a_.u64[1];
#else
	HEDLEY_UNREACHABLE();
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cmpunord_sd(a, b) simde_mm_cmpunord_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cvtepi32_pd(simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtepi32_pd(a);
#else
	simde__m128d_private r_;
	simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
	SIMDE_CONVERT_VECTOR_(r_.f64, a_.m64_private[0].i32);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = (simde_float64)a_.i32[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtepi32_pd(a) simde_mm_cvtepi32_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtepi32_ps(simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtepi32_ps(a);
#else
	simde__m128_private r_;
	simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_f32 = vcvtq_f32_s32(a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f32x4_convert_i32x4(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wc11-extensions")
#pragma clang diagnostic ignored "-Wc11-extensions"
#endif
	r_.altivec_f32 = vec_ctf(a_.altivec_i32, 0);
	HEDLEY_DIAGNOSTIC_POP
#elif defined(SIMDE_CONVERT_VECTOR_)
	SIMDE_CONVERT_VECTOR_(r_.f32, a_.i32);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f32) / sizeof(r_.f32[0])); i++) {
		r_.f32[i] = (simde_float32)a_.i32[i];
	}
#endif
	return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtepi32_ps(a) simde_mm_cvtepi32_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_cvtpd_pi32(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
	return _mm_cvtpd_pi32(a);
#else
	simde__m64_private r_;
	simde__m128d_private a_ = simde__m128d_to_private(a);
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		simde_float64 v = simde_math_round(a_.f64[i]);
#if defined(SIMDE_FAST_CONVERSION_RANGE)
		r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#else
		r_.i32[i] =
			((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) &&
			 (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX)))
				? SIMDE_CONVERT_FTOI(int32_t, v)
				: INT32_MIN;
#endif
	}
	return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpd_pi32(a) simde_mm_cvtpd_pi32(a)
#endif
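
/* Worked example (illustrative, not part of the original source):
 * simde_mm_cvtpd_pi32 on {1.6, 1e100} yields {2, INT32_MIN}: values are
 * rounded to the nearest integer, and anything outside the int32_t
 * range collapses to the x86 "integer indefinite" value 0x80000000
 * (INT32_MIN) unless SIMDE_FAST_CONVERSION_RANGE is defined. */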
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cvtpd_epi32(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtpd_epi32(a);
#else
	simde__m128i_private r_;
	r_.m64[0] = simde_mm_cvtpd_pi32(a);
	r_.m64[1] = simde_mm_setzero_si64();
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpd_epi32(a) simde_mm_cvtpd_epi32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtpd_ps(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtpd_ps(a);
#else
	simde__m128_private r_;
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
	SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.f64);
	r_.m64_private[1] = simde__m64_to_private(simde_mm_setzero_si64());
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f32 = vreinterpretq_f32_f64(
		vcombine_f64(vreinterpret_f64_f32(vcvtx_f32_f64(a_.neon_f64)),
			     vdup_n_f64(0)));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(a_.f64) / sizeof(a_.f64[0])); i++) {
		r_.f32[i] = (simde_float32)a_.f64[i];
	}
	simde_memset(&(r_.m64_private[1]), 0, sizeof(r_.m64_private[1]));
#endif
	return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpd_ps(a) simde_mm_cvtpd_ps(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cvtpi32_pd(simde__m64 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
	return _mm_cvtpi32_pd(a);
#else
	simde__m128d_private r_;
	simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
	SIMDE_CONVERT_VECTOR_(r_.f64, a_.i32);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = (simde_float64)a_.i32[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtpi32_pd(a) simde_mm_cvtpi32_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cvtps_epi32(simde__m128 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtps_epi32(a);
#else
	simde__m128i_private r_;
	simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
	r_.neon_i32 = vcvtnq_s32_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && \
	defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES)
	r_.neon_i32 = vcvtnq_s32_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && \
	defined(SIMDE_FAST_CONVERSION_RANGE) && defined(SIMDE_FAST_ROUND_TIES)
	HEDLEY_DIAGNOSTIC_PUSH
	SIMDE_DIAGNOSTIC_DISABLE_C11_EXTENSIONS_
	SIMDE_DIAGNOSTIC_DISABLE_VECTOR_CONVERSION_
	r_.altivec_i32 = vec_cts(a_.altivec_f32, 1);
	HEDLEY_DIAGNOSTIC_POP
#else
	a_ = simde__m128_to_private(
		simde_x_mm_round_ps(a, SIMDE_MM_FROUND_TO_NEAREST_INT, 1));
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		simde_float32 v = simde_math_roundf(a_.f32[i]);
#if defined(SIMDE_FAST_CONVERSION_RANGE)
		r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#else
		r_.i32[i] =
			((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
			 (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX)))
				? SIMDE_CONVERT_FTOI(int32_t, v)
				: INT32_MIN;
#endif
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtps_epi32(a) simde_mm_cvtps_epi32(a)
#endif
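
/* Note: _mm_cvtps_epi32 rounds according to the current x86 rounding
 * mode, which defaults to round-to-nearest-even (2.5 -> 2, 3.5 -> 4).
 * The generic fallback therefore rounds first via
 * simde_x_mm_round_ps(..., SIMDE_MM_FROUND_TO_NEAREST_INT, ...); the
 * branches that cannot guarantee that tie behaviour are additionally
 * gated on SIMDE_FAST_ROUND_TIES. */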
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cvtps_pd(simde__m128 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtps_pd(a);
#else
	simde__m128d_private r_;
	simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_)
	SIMDE_CONVERT_VECTOR_(r_.f64, a_.m64_private[0].f32);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vcvt_f64_f32(vget_low_f32(a_.neon_f32));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = a_.f32[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtps_pd(a) simde_mm_cvtps_pd(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_cvtsd_si32(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtsd_si32(a);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
	simde_float64 v = simde_math_round(a_.f64[0]);
#if defined(SIMDE_FAST_CONVERSION_RANGE)
	return SIMDE_CONVERT_FTOI(int32_t, v);
#else
	return ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) &&
		(v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX)))
		       ? SIMDE_CONVERT_FTOI(int32_t, v)
		       : INT32_MIN;
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsd_si32(a) simde_mm_cvtsd_si32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
int64_t simde_mm_cvtsd_si64(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if defined(__PGI)
	return _mm_cvtsd_si64x(a);
#else
	return _mm_cvtsd_si64(a);
#endif
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
	return SIMDE_CONVERT_FTOI(int64_t, simde_math_round(a_.f64[0]));
#endif
}
#define simde_mm_cvtsd_si64x(a) simde_mm_cvtsd_si64(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsd_si64(a) simde_mm_cvtsd_si64(a)
#define _mm_cvtsd_si64x(a) simde_mm_cvtsd_si64x(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128 simde_mm_cvtsd_ss(simde__m128 a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtsd_ss(a, b);
#else
	simde__m128_private r_, a_ = simde__m128_to_private(a);
	simde__m128d_private b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f32 = vsetq_lane_f32(
		vcvtxd_f32_f64(vgetq_lane_f64(b_.neon_f64, 0)), a_.neon_f32, 0);
#else
	r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b_.f64[0]);
	SIMDE_VECTORIZE
	for (size_t i = 1; i < (sizeof(r_) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = a_.i32[i];
	}
#endif
	return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsd_ss(a, b) simde_mm_cvtsd_ss(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
int16_t simde_x_mm_cvtsi128_si16(simde__m128i a)
{
	simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	return vgetq_lane_s16(a_.neon_i16, 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return HEDLEY_STATIC_CAST(int16_t,
				  wasm_i16x8_extract_lane(a_.wasm_v128, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#if defined(SIMDE_BUG_GCC_95227)
	(void)a_;
#endif
	return vec_extract(a_.altivec_i16, 0);
#else
	return a_.i16[0];
#endif
}

SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_cvtsi128_si32(simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtsi128_si32(a);
#else
	simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	return vgetq_lane_s32(a_.neon_i32, 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return HEDLEY_STATIC_CAST(int32_t,
				  wasm_i32x4_extract_lane(a_.wasm_v128, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#if defined(SIMDE_BUG_GCC_95227)
	(void)a_;
#endif
	return vec_extract(a_.altivec_i32, 0);
#else
	return a_.i32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi128_si32(a) simde_mm_cvtsi128_si32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
int64_t simde_mm_cvtsi128_si64(simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if defined(__PGI)
	return _mm_cvtsi128_si64x(a);
#else
	return _mm_cvtsi128_si64(a);
#endif
#else
	simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && !defined(HEDLEY_IBM_VERSION)
	return vec_extract(HEDLEY_REINTERPRET_CAST(
				   SIMDE_POWER_ALTIVEC_VECTOR(signed long long),
				   a_.i64),
			   0);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	return vgetq_lane_s64(a_.neon_i64, 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return HEDLEY_STATIC_CAST(int64_t,
				  wasm_i64x2_extract_lane(a_.wasm_v128, 0));
#endif
	return a_.i64[0];
#endif
}
#define simde_mm_cvtsi128_si64x(a) simde_mm_cvtsi128_si64(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi128_si64(a) simde_mm_cvtsi128_si64(a)
#define _mm_cvtsi128_si64x(a) simde_mm_cvtsi128_si64x(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cvtsi32_sd(simde__m128d a, int32_t b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtsi32_sd(a, b);
#else
	simde__m128d_private r_;
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vsetq_lane_f64(HEDLEY_STATIC_CAST(float64_t, b),
				     a_.neon_f64, 0);
#else
	r_.f64[0] = HEDLEY_STATIC_CAST(simde_float64, b);
	r_.i64[1] = a_.i64[1];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi32_sd(a, b) simde_mm_cvtsi32_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_cvtsi16_si128(int16_t a)
{
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vsetq_lane_s16(a, vdupq_n_s16(0), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_make(a, 0, 0, 0, 0, 0, 0, 0);
#else
	r_.i16[0] = a;
	r_.i16[1] = 0;
	r_.i16[2] = 0;
	r_.i16[3] = 0;
	r_.i16[4] = 0;
	r_.i16[5] = 0;
	r_.i16[6] = 0;
	r_.i16[7] = 0;
#endif
	return simde__m128i_from_private(r_);
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cvtsi32_si128(int32_t a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtsi32_si128(a);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vsetq_lane_s32(a, vdupq_n_s32(0), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_make(a, 0, 0, 0);
#else
	r_.i32[0] = a;
	r_.i32[1] = 0;
	r_.i32[2] = 0;
	r_.i32[3] = 0;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi32_si128(a) simde_mm_cvtsi32_si128(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cvtsi64_sd(simde__m128d a, int64_t b)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
	return _mm_cvtsi64_sd(a, b);
#else
	return _mm_cvtsi64x_sd(a, b);
#endif
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vsetq_lane_f64(HEDLEY_STATIC_CAST(float64_t, b),
				     a_.neon_f64, 0);
#else
	r_.f64[0] = HEDLEY_STATIC_CAST(simde_float64, b);
	r_.f64[1] = a_.f64[1];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#define simde_mm_cvtsi64x_sd(a, b) simde_mm_cvtsi64_sd(a, b)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi64_sd(a, b) simde_mm_cvtsi64_sd(a, b)
#define _mm_cvtsi64x_sd(a, b) simde_mm_cvtsi64x_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cvtsi64_si128(int64_t a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
	return _mm_cvtsi64_si128(a);
#else
	return _mm_cvtsi64x_si128(a);
#endif
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vsetq_lane_s64(a, vdupq_n_s64(0), 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i64x2_make(a, 0);
#else
	r_.i64[0] = a;
	r_.i64[1] = 0;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#define simde_mm_cvtsi64x_si128(a) simde_mm_cvtsi64_si128(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtsi64_si128(a) simde_mm_cvtsi64_si128(a)
#define _mm_cvtsi64x_si128(a) simde_mm_cvtsi64x_si128(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_cvtss_sd(simde__m128d a, simde__m128 b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvtss_sd(a, b);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	float64x2_t temp = vcvt_f64_f32(vset_lane_f32(
		vgetq_lane_f32(simde__m128_to_private(b).neon_f32, 0),
		vdup_n_f32(0), 0));
	return vsetq_lane_f64(
		vgetq_lane_f64(simde__m128d_to_private(a).neon_f64, 1), temp,
		1);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
	simde__m128_private b_ = simde__m128_to_private(b);
	a_.f64[0] = HEDLEY_STATIC_CAST(simde_float64, b_.f32[0]);
	return simde__m128d_from_private(a_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvtss_sd(a, b) simde_mm_cvtss_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_cvttpd_pi32(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
	return _mm_cvttpd_pi32(a);
#else
	simde__m64_private r_;
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE)
	SIMDE_CONVERT_VECTOR_(r_.i32, a_.f64);
#else
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		simde_float64 v = a_.f64[i];
#if defined(SIMDE_FAST_CONVERSION_RANGE)
		r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#else
		r_.i32[i] =
			((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) &&
			 (v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX)))
				? SIMDE_CONVERT_FTOI(int32_t, v)
				: INT32_MIN;
#endif
	}
#endif
	return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttpd_pi32(a) simde_mm_cvttpd_pi32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cvttpd_epi32(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvttpd_epi32(a);
#else
	simde__m128i_private r_;
	r_.m64[0] = simde_mm_cvttpd_pi32(a);
	r_.m64[1] = simde_mm_setzero_si64();
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttpd_epi32(a) simde_mm_cvttpd_epi32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_cvttps_epi32(simde__m128 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvttps_epi32(a);
#else
	simde__m128i_private r_;
	simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_CONVERSION_RANGE)
	r_.neon_i32 = vcvtq_s32_f32(a_.neon_f32);
#elif defined(SIMDE_CONVERT_VECTOR_) && defined(SIMDE_FAST_CONVERSION_RANGE)
	SIMDE_CONVERT_VECTOR_(r_.i32, a_.f32);
#else
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		simde_float32 v = a_.f32[i];
#if defined(SIMDE_FAST_CONVERSION_RANGE)
		r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, v);
#else
		r_.i32[i] =
			((v > HEDLEY_STATIC_CAST(simde_float32, INT32_MIN)) &&
			 (v < HEDLEY_STATIC_CAST(simde_float32, INT32_MAX)))
				? SIMDE_CONVERT_FTOI(int32_t, v)
				: INT32_MIN;
#endif
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttps_epi32(a) simde_mm_cvttps_epi32(a)
#endif
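
/* Note: the cvtt* ("convert with truncation") family always rounds
 * toward zero regardless of the current rounding mode, so e.g.
 * 1.9 -> 1 and -1.9 -> -1, whereas the cvt* forms above round to the
 * nearest integer. */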
SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_cvttsd_si32(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_cvttsd_si32(a);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
	simde_float64 v = a_.f64[0];
#if defined(SIMDE_FAST_CONVERSION_RANGE)
	return SIMDE_CONVERT_FTOI(int32_t, v);
#else
	return ((v > HEDLEY_STATIC_CAST(simde_float64, INT32_MIN)) &&
		(v < HEDLEY_STATIC_CAST(simde_float64, INT32_MAX)))
		       ? SIMDE_CONVERT_FTOI(int32_t, v)
		       : INT32_MIN;
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttsd_si32(a) simde_mm_cvttsd_si32(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
int64_t simde_mm_cvttsd_si64(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
	return _mm_cvttsd_si64(a);
#else
	return _mm_cvttsd_si64x(a);
#endif
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
	return SIMDE_CONVERT_FTOI(int64_t, a_.f64[0]);
#endif
}
#define simde_mm_cvttsd_si64x(a) simde_mm_cvttsd_si64(a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_cvttsd_si64(a) simde_mm_cvttsd_si64(a)
#define _mm_cvttsd_si64x(a) simde_mm_cvttsd_si64x(a)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_div_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_div_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.f64 = a_.f64 / b_.f64;
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vdivq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_div(a_.wasm_v128, b_.wasm_v128);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = a_.f64[i] / b_.f64[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_div_pd(a, b) simde_mm_div_pd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_div_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_div_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_div_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	float64x2_t temp = vdivq_f64(a_.neon_f64, b_.neon_f64);
	r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(a_.neon_f64, 1), temp, 1);
#else
	r_.f64[0] = a_.f64[0] / b_.f64[0];
	r_.f64[1] = a_.f64[1];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_div_sd(a, b) simde_mm_div_sd(a, b)
#endif

SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_extract_epi16(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7)
{
	uint16_t r;
	simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
#if defined(SIMDE_BUG_GCC_95227)
	(void)a_;
	(void)imm8;
#endif
	r = HEDLEY_STATIC_CAST(uint16_t, vec_extract(a_.altivec_i16, imm8));
#else
	r = a_.u16[imm8 & 7];
#endif
	return HEDLEY_STATIC_CAST(int32_t, r);
}
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(4, 6, 0))
#define simde_mm_extract_epi16(a, imm8) _mm_extract_epi16(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_extract_epi16(a, imm8)                                       \
	(HEDLEY_STATIC_CAST(                                                  \
		 int32_t, vgetq_lane_s16(simde__m128i_to_private(a).neon_i16, \
					 (imm8))) &                           \
	 (INT32_C(0x0000ffff)))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_extract_epi16(a, imm8) simde_mm_extract_epi16(a, imm8)
#endif
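
/* Usage note: imm8 must be an integer constant expression in 0..7. The
 * selected 16-bit lane is zero-extended into the int32_t result, so
 * extracting a lane holding -1 yields 0x0000ffff (65535), matching the
 * x86 PEXTRW behaviour. */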
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_insert_epi16(simde__m128i a, int16_t i, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 7)
{
	simde__m128i_private a_ = simde__m128i_to_private(a);
	a_.i16[imm8 & 7] = i;
	return simde__m128i_from_private(a_);
}
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
#define simde_mm_insert_epi16(a, i, imm8) _mm_insert_epi16((a), (i), (imm8))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_insert_epi16(a, i, imm8)                         \
	simde__m128i_from_neon_i16(                               \
		vsetq_lane_s16((i), simde__m128i_to_neon_i16(a), (imm8)))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_insert_epi16(a, i, imm8) simde_mm_insert_epi16(a, i, imm8)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_load_pd(simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)])
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_load_pd(mem_addr);
#else
	simde__m128d_private r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vld1q_f64(mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u32 =
		vld1q_u32(HEDLEY_REINTERPRET_CAST(uint32_t const *, mem_addr));
#else
	simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128d),
		     sizeof(r_));
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_load_pd(mem_addr) simde_mm_load_pd(mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_load1_pd(simde_float64 const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_load1_pd(mem_addr);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return simde__m128d_from_neon_f64(vld1q_dup_f64(mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return simde__m128d_from_wasm_v128(wasm_v64x2_load_splat(mem_addr));
#else
	return simde_mm_set1_pd(*mem_addr);
#endif
}
#define simde_mm_load_pd1(mem_addr) simde_mm_load1_pd(mem_addr)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_load_pd1(mem_addr) simde_mm_load1_pd(mem_addr)
#define _mm_load1_pd(mem_addr) simde_mm_load1_pd(mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_load_sd(simde_float64 const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_load_sd(mem_addr);
#else
	simde__m128d_private r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vsetq_lane_f64(*mem_addr, vdupq_n_f64(0), 0);
#else
	r_.f64[0] = *mem_addr;
	r_.u64[1] = UINT64_C(0);
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_load_sd(mem_addr) simde_mm_load_sd(mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_load_si128(simde__m128i const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_load_si128(
		HEDLEY_REINTERPRET_CAST(__m128i const *, mem_addr));
#else
	simde__m128i_private r_;
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = vec_ld(
		0, HEDLEY_REINTERPRET_CAST(
			   SIMDE_POWER_ALTIVEC_VECTOR(int) const *, mem_addr));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 =
		vld1q_s32(HEDLEY_REINTERPRET_CAST(int32_t const *, mem_addr));
#else
	simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128i),
		     sizeof(simde__m128i));
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_load_si128(mem_addr) simde_mm_load_si128(mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_loadh_pd(simde__m128d a, simde_float64 const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadh_pd(a, mem_addr);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vcombine_f64(
		vget_low_f64(a_.neon_f64),
		vld1_f64(HEDLEY_REINTERPRET_CAST(const float64_t *, mem_addr)));
#else
	simde_float64 t;
	simde_memcpy(&t, mem_addr, sizeof(t));
	r_.f64[0] = a_.f64[0];
	r_.f64[1] = t;
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadh_pd(a, mem_addr) simde_mm_loadh_pd(a, mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_loadl_epi64(simde__m128i const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadl_epi64(mem_addr);
#else
	simde__m128i_private r_;
	int64_t value;
	simde_memcpy(&value, mem_addr, sizeof(value));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vcombine_s64(
		vld1_s64(HEDLEY_REINTERPRET_CAST(int64_t const *, mem_addr)),
		vdup_n_s64(0));
#else
	r_.i64[0] = value;
	r_.i64[1] = 0;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadl_epi64(mem_addr) simde_mm_loadl_epi64(mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_loadl_pd(simde__m128d a, simde_float64 const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadl_pd(a, mem_addr);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vcombine_f64(
		vld1_f64(HEDLEY_REINTERPRET_CAST(const float64_t *, mem_addr)),
		vget_high_f64(a_.neon_f64));
#else
	r_.f64[0] = *mem_addr;
	r_.u64[1] = a_.u64[1];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadl_pd(a, mem_addr) simde_mm_loadl_pd(a, mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_loadr_pd(simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)])
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadr_pd(mem_addr);
#else
	simde__m128d_private r_;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vld1q_f64(mem_addr);
	r_.neon_f64 = vextq_f64(r_.neon_f64, r_.neon_f64, 1);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 =
		vld1q_s64(HEDLEY_REINTERPRET_CAST(int64_t const *, mem_addr));
	r_.neon_i64 = vextq_s64(r_.neon_i64, r_.neon_i64, 1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	v128_t tmp = wasm_v128_load(mem_addr);
	r_.wasm_v128 = wasm_v64x2_shuffle(tmp, tmp, 1, 0);
#else
	r_.f64[0] = mem_addr[1];
	r_.f64[1] = mem_addr[0];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadr_pd(mem_addr) simde_mm_loadr_pd(mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128d
simde_mm_loadu_pd(simde_float64 const mem_addr[HEDLEY_ARRAY_PARAM(2)])
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadu_pd(mem_addr);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return vld1q_f64(mem_addr);
#else
	simde__m128d_private r_;
	simde_memcpy(&r_, mem_addr, sizeof(r_));
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_pd(mem_addr) simde_mm_loadu_pd(mem_addr)
#endif

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_loadu_epi8(int8_t const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadu_si128(
		SIMDE_ALIGN_CAST(simde__m128i const *, mem_addr));
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i8 =
		vld1q_s8(HEDLEY_REINTERPRET_CAST(int8_t const *, mem_addr));
#else
	simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
	return simde__m128i_from_private(r_);
#endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_loadu_epi16(int16_t const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadu_si128(
		SIMDE_ALIGN_CAST(simde__m128i const *, mem_addr));
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 =
		vld1q_s16(HEDLEY_REINTERPRET_CAST(int16_t const *, mem_addr));
#else
	simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
	return simde__m128i_from_private(r_);
#endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_loadu_epi32(int32_t const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadu_si128(
		SIMDE_ALIGN_CAST(simde__m128i const *, mem_addr));
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 =
		vld1q_s32(HEDLEY_REINTERPRET_CAST(int32_t const *, mem_addr));
#else
	simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
	return simde__m128i_from_private(r_);
#endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_loadu_epi64(int64_t const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadu_si128(
		SIMDE_ALIGN_CAST(simde__m128i const *, mem_addr));
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 =
		vld1q_s64(HEDLEY_REINTERPRET_CAST(int64_t const *, mem_addr));
#else
	simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
	return simde__m128i_from_private(r_);
#endif
}

SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_loadu_si128(void const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_loadu_si128(HEDLEY_STATIC_CAST(__m128i const *, mem_addr));
#else
	simde__m128i_private r_;
#if HEDLEY_GNUC_HAS_ATTRIBUTE(may_alias, 3, 3, 0)
	HEDLEY_DIAGNOSTIC_PUSH
	SIMDE_DIAGNOSTIC_DISABLE_PACKED_
	struct simde_mm_loadu_si128_s {
		__typeof__(r_) v;
	} __attribute__((__packed__, __may_alias__));
	r_ = HEDLEY_REINTERPRET_CAST(const struct simde_mm_loadu_si128_s *,
				     mem_addr)
		     ->v;
	HEDLEY_DIAGNOSTIC_POP
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	/* Note that this is a lower priority than the struct above since
	 * clang assumes mem_addr is aligned (since it is a __m128i*). */
	r_.neon_i32 =
		vld1q_s32(HEDLEY_REINTERPRET_CAST(int32_t const *, mem_addr));
#else
	simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_si128(mem_addr) simde_mm_loadu_si128(mem_addr)
#endif
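
/* Note: unlike Intel's _mm_loadu_si128, which takes a __m128i const *,
 * this portable version takes void const * so any sufficiently large
 * buffer can be loaded without a cast; the native path casts back to
 * __m128i const * internally. */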
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_madd_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_madd_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	int32x4_t pl =
		vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16));
	int32x4_t ph = vmull_high_s16(a_.neon_i16, b_.neon_i16);
	r_.neon_i32 = vpaddq_s32(pl, ph);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	int32x4_t pl =
		vmull_s16(vget_low_s16(a_.neon_i16), vget_low_s16(b_.neon_i16));
	int32x4_t ph = vmull_s16(vget_high_s16(a_.neon_i16),
				 vget_high_s16(b_.neon_i16));
	int32x2_t rl = vpadd_s32(vget_low_s32(pl), vget_high_s32(pl));
	int32x2_t rh = vpadd_s32(vget_low_s32(ph), vget_high_s32(ph));
	r_.neon_i32 = vcombine_s32(rl, rh);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	static const SIMDE_POWER_ALTIVEC_VECTOR(int) tz = {0, 0, 0, 0};
	r_.altivec_i32 = vec_msum(a_.altivec_i16, b_.altivec_i16, tz);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_) / sizeof(r_.i16[0])); i += 2) {
		r_.i32[i / 2] = (a_.i16[i] * b_.i16[i]) +
				(a_.i16[i + 1] * b_.i16[i + 1]);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_madd_epi16(a, b) simde_mm_madd_epi16(a, b)
#endif
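
/* Worked example (illustrative): madd_epi16 multiplies corresponding
 * signed 16-bit lanes into 32-bit products and sums each adjacent pair,
 * so with a = {1, 2, 3, 4, ...} and b = {5, 6, 7, 8, ...} the first two
 * result lanes are 1*5 + 2*6 = 17 and 3*7 + 4*8 = 53. This is the usual
 * building block for fixed-point dot products. */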
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_maskmoveu_si128(simde__m128i a, simde__m128i mask,
			      int8_t mem_addr[HEDLEY_ARRAY_PARAM(16)])
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_maskmoveu_si128(a, mask, HEDLEY_REINTERPRET_CAST(char *, mem_addr));
#else
	simde__m128i_private a_ = simde__m128i_to_private(a),
			mask_ = simde__m128i_to_private(mask);
	for (size_t i = 0; i < (sizeof(a_.i8) / sizeof(a_.i8[0])); i++) {
		if (mask_.u8[i] & 0x80) {
			mem_addr[i] = a_.i8[i];
		}
	}
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_maskmoveu_si128(a, mask, mem_addr)                        \
	simde_mm_maskmoveu_si128(                                     \
		(a), (mask),                                          \
		SIMDE_CHECKED_REINTERPRET_CAST(int8_t *, char *, (mem_addr)))
#endif
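
/* Note: each byte of `a` is stored only where the corresponding mask
 * byte has its high bit set. On x86, MASKMOVDQU is also a non-temporal
 * (cache-bypassing) store; the portable fallback makes no such
 * guarantee and simply performs ordinary byte stores. */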
SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_movemask_epi8(simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__INTEL_COMPILER)
	/* ICC has trouble with _mm_movemask_epi8 at -O2 and above: */
	return _mm_movemask_epi8(a);
#else
	int32_t r = 0;
	simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint8x16_t input = a_.neon_u8;
	const int8_t xr[16] = {-7, -6, -5, -4, -3, -2, -1, 0,
			       -7, -6, -5, -4, -3, -2, -1, 0};
	const uint8x16_t mask_and = vdupq_n_u8(0x80);
	const int8x16_t mask_shift = vld1q_s8(xr);
	const uint8x16_t mask_result =
		vshlq_u8(vandq_u8(input, mask_and), mask_shift);
	uint8x8_t lo = vget_low_u8(mask_result);
	uint8x8_t hi = vget_high_u8(mask_result);
	r = vaddv_u8(lo) + (vaddv_u8(hi) << 8);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	// Use increasingly wide shifts+adds to collect the sign bits
	// together.
	// Since the widening shifts would be rather confusing to follow in
	// little endian, everything will be illustrated in big endian order
	// instead. This has a different result - the bits would actually be
	// reversed on a big endian machine.
	// Starting input (only half the elements are shown):
	// 89 ff 1d c0 00 10 99 33
	uint8x16_t input = a_.neon_u8;
	// Shift out everything but the sign bits with an unsigned shift
	// right.
	//
	// Bytes of the vector:
	// 89 ff 1d c0 00 10 99 33
	//  \  \  \  \  \  \  \  \   high_bits = (uint16x4_t)(input >> 7)
	//  |  |  |  |  |  |  |  |
	// 01 01 00 01 00 00 01 00
	//
	// Bits of first important lane(s):
	// 10001001 (89)
	// \______
	//        |
	// 00000001 (01)
	uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
	// Merge the even lanes together with a 16-bit unsigned shift right +
	// add. 'xx' represents garbage data which will be ignored in the
	// final result. In the important bytes, the add functions like a
	// binary OR.
	//
	// 01 01 00 01 00 00 01 00
	//  \_ |  \_ |  \_ |  \_ |  paired16 = (uint32x4_t)(input + (input >> 7))
	//    \|    \|    \|    \|
	// xx 03 xx 01 xx 00 xx 02
	//
	// 00000001 00000001 (01 01)
	//        \_______ |
	//                \|
	// xxxxxxxx xxxxxx11 (xx 03)
	uint32x4_t paired16 =
		vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
	// Repeat with a wider 32-bit shift + add.
	// xx 03 xx 01 xx 00 xx 02
	//     \____ |     \____ |  paired32 = (uint64x1_t)(paired16 + (paired16 >> 14))
	//          \|          \|
	// xx xx xx 0d xx xx xx 02
	//
	// 00000011 00000001 (03 01)
	//        \\_____ ||
	//         '----.\||
	// xxxxxxxx xxxx1101 (xx 0d)
	uint64x2_t paired32 =
		vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
	// Last, an even wider 64-bit shift + add to get our result in the
	// low 8 bit lanes.
	// xx xx xx 0d xx xx xx 02
	//            \_________ |  paired64 = (uint8x8_t)(paired32 + (paired32 >> 28))
	//                      \|
	// xx xx xx xx xx xx xx d2
	//
	// 00001101 00000010 (0d 02)
	//     \   \___ |  |
	//      '---.  \|  |
	// xxxxxxxx 11010010 (xx d2)
	uint8x16_t paired64 =
		vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
	// Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts.
	// xx xx xx xx xx xx xx d2
	//                      ||  return paired64[0]
	//                      d2
	// Note: Little endian would return the correct value 4b (01001011)
	// instead.
	r = vgetq_lane_u8(paired64, 0) |
	    (HEDLEY_STATIC_CAST(int32_t, vgetq_lane_u8(paired64, 8)) << 8);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
	!defined(HEDLEY_IBM_VERSION) &&         \
	(SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
	static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char)
		perm = {120, 112, 104, 96, 88, 80, 72, 64,
			56,  48,  40,  32, 24, 16, 8,  0};
	r = HEDLEY_STATIC_CAST(
		int32_t, vec_extract(vec_vbpermq(a_.altivec_u8, perm), 1));
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
	!defined(HEDLEY_IBM_VERSION) &&         \
	(SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_BIG)
	static const SIMDE_POWER_ALTIVEC_VECTOR(unsigned char)
		perm = {120, 112, 104, 96, 88, 80, 72, 64,
			56,  48,  40,  32, 24, 16, 8,  0};
	r = HEDLEY_STATIC_CAST(
		int32_t, vec_extract(vec_vbpermq(a_.altivec_u8, perm), 14));
#else
	SIMDE_VECTORIZE_REDUCTION(| : r)
	for (size_t i = 0; i < (sizeof(a_.u8) / sizeof(a_.u8[0])); i++) {
		r |= (a_.u8[15 - i] >> 7) << (15 - i);
	}
#endif
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_movemask_epi8(a) simde_mm_movemask_epi8(a)
#endif
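
/* Usage idiom (illustrative): movemask_epi8 packs the sign bit of each
 * byte into bits 0-15 of the result, so a common pattern is
 *
 *   simde__m128i eq = simde_mm_cmpeq_epi8(haystack, needle);
 *   if (simde_mm_movemask_epi8(eq) != 0) { ...at least one match... }
 */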
SIMDE_FUNCTION_ATTRIBUTES
int32_t simde_mm_movemask_pd(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_movemask_pd(a);
#else
	int32_t r = 0;
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	static const int64_t shift_amount[] = {0, 1};
	const int64x2_t shift = vld1q_s64(shift_amount);
	uint64x2_t tmp = vshrq_n_u64(a_.neon_u64, 63);
	return HEDLEY_STATIC_CAST(int32_t, vaddvq_u64(vshlq_u64(tmp, shift)));
#else
	SIMDE_VECTORIZE_REDUCTION(| : r)
	for (size_t i = 0; i < (sizeof(a_.u64) / sizeof(a_.u64[0])); i++) {
		r |= (a_.u64[i] >> 63) << i;
	}
#endif
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_movemask_pd(a) simde_mm_movemask_pd(a)
#endif
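
/* Note: only the two low result bits are meaningful here (one sign bit
 * per double lane), so e.g. a result of 3 means both lanes of a compare
 * mask were set -- handy for branching on "all lanes true". */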
  3485. SIMDE_FUNCTION_ATTRIBUTES
  3486. simde__m64 simde_mm_movepi64_pi64(simde__m128i a)
  3487. {
  3488. #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  3489. return _mm_movepi64_pi64(a);
  3490. #else
  3491. simde__m64_private r_;
  3492. simde__m128i_private a_ = simde__m128i_to_private(a);
  3493. #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
  3494. r_.neon_i64 = vget_low_s64(a_.neon_i64);
  3495. #else
  3496. r_.i64[0] = a_.i64[0];
  3497. #endif
  3498. return simde__m64_from_private(r_);
  3499. #endif
  3500. }
  3501. #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
  3502. #define _mm_movepi64_pi64(a) simde_mm_movepi64_pi64(a)
  3503. #endif
  3504. SIMDE_FUNCTION_ATTRIBUTES
  3505. simde__m128i simde_mm_movpi64_epi64(simde__m64 a)
  3506. {
  3507. #if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  3508. return _mm_movpi64_epi64(a);
  3509. #else
  3510. simde__m128i_private r_;
  3511. simde__m64_private a_ = simde__m64_to_private(a);
  3512. #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
  3513. r_.neon_i64 = vcombine_s64(a_.neon_i64, vdup_n_s64(0));
  3514. #else
  3515. r_.i64[0] = a_.i64[0];
  3516. r_.i64[1] = 0;
  3517. #endif
  3518. return simde__m128i_from_private(r_);
  3519. #endif
  3520. }
  3521. #if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
  3522. #define _mm_movpi64_epi64(a) simde_mm_movpi64_epi64(a)
  3523. #endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_min_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_min_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vminq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_min(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = vec_min(a_.altivec_i16, b_.altivec_i16);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_min_epi16(a, b) simde_mm_min_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_min_epu8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_min_epu8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vminq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u8x16_min(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u8 = vec_min(a_.altivec_u8, b_.altivec_u8);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
		r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_min_epu8(a, b) simde_mm_min_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_min_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_min_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
		b_ = simde__m128d_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	r_.altivec_f64 = vec_min(a_.altivec_f64, b_.altivec_f64);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vminq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_min(a_.wasm_v128, b_.wasm_v128);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = (a_.f64[i] < b_.f64[i]) ? a_.f64[i] : b_.f64[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_min_pd(a, b) simde_mm_min_pd(a, b)
#endif
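/* simde_mm_min_sd: scalar minimum. Lane 0 of the result is min(a[0], b[0]);
 * lane 1 is copied unchanged from `a`, matching the SSE2 scalar-double
 * convention. */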
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_min_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_min_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_min_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
		b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	float64x2_t temp = vminq_f64(a_.neon_f64, b_.neon_f64);
	r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(a_.neon_f64, 1), temp, 1);
#else
	r_.f64[0] = (a_.f64[0] < b_.f64[0]) ? a_.f64[0] : b_.f64[0];
	r_.f64[1] = a_.f64[1];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_min_sd(a, b) simde_mm_min_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_max_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_max_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vmaxq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = vec_max(a_.altivec_i16, b_.altivec_i16);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_max_epi16(a, b) simde_mm_max_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_max_epu8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_max_epu8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vmaxq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u8x16_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u8 = vec_max(a_.altivec_u8, b_.altivec_u8);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u8) / sizeof(r_.u8[0])); i++) {
		r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_max_epu8(a, b) simde_mm_max_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_max_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_max_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
		b_ = simde__m128d_to_private(b);
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
	r_.altivec_f64 = vec_max(a_.altivec_f64, b_.altivec_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vmaxq_f64(a_.neon_f64, b_.neon_f64);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = (a_.f64[i] > b_.f64[i]) ? a_.f64[i] : b_.f64[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_max_pd(a, b) simde_mm_max_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_max_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_max_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_max_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
		b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	float64x2_t temp = vmaxq_f64(a_.neon_f64, b_.neon_f64);
	r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(a_.neon_f64, 1), temp, 1);
#else
	r_.f64[0] = (a_.f64[0] > b_.f64[0]) ? a_.f64[0] : b_.f64[0];
	r_.f64[1] = a_.f64[1];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_max_sd(a, b) simde_mm_max_sd(a, b)
#endif
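/* simde_mm_move_epi64: copies the low 64-bit integer lane of `a` and zeroes
 * the upper lane. */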
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_move_epi64(simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_move_epi64(a);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vsetq_lane_s64(0, a_.neon_i64, 1);
#else
	r_.i64[0] = a_.i64[0];
	r_.i64[1] = 0;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_move_epi64(a) simde_mm_move_epi64(a)
#endif
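/* simde_mm_mul_epu32: multiplies the even-indexed unsigned 32-bit lanes
 * (elements 0 and 2) of `a` and `b`, producing two full 64-bit products.
 * The shuffle-based branch first compacts lanes 0 and 2 into the low half
 * of each 64-bit lane, then lets the compiler widen the multiply. */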
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_mul_epu32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_mul_epu32(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	uint32x2_t a_lo = vmovn_u64(a_.neon_u64);
	uint32x2_t b_lo = vmovn_u64(b_.neon_u64);
	r_.neon_u64 = vmull_u32(a_lo, b_lo);
#elif defined(SIMDE_SHUFFLE_VECTOR_) && \
	(SIMDE_ENDIAN_ORDER == SIMDE_ENDIAN_LITTLE)
	__typeof__(a_.u32) z = {0};
	a_.u32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.u32, z, 0, 4, 2, 6);
	b_.u32 = SIMDE_SHUFFLE_VECTOR_(32, 16, b_.u32, z, 0, 4, 2, 6);
	r_.u64 = HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), a_.u32) *
		 HEDLEY_REINTERPRET_CAST(__typeof__(r_.u64), b_.u32);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u64) / sizeof(r_.u64[0])); i++) {
		r_.u64[i] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[i * 2]) *
			    HEDLEY_STATIC_CAST(uint64_t, b_.u32[i * 2]);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mul_epu32(a, b) simde_mm_mul_epu32(a, b)
#endif
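/* simde_x_mm_mul_epi64 (SIMDe extension, no SSE2 equivalent): lane-wise
 * multiplication of two signed 64-bit integers. NEON has no 64-bit integer
 * multiply, so when the compiler lacks vector-subscript support this falls
 * back to the scalar loop. */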
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_mul_epi64(simde__m128i a, simde__m128i b)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = a_.i64 * b_.i64;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		r_.i64[i] = a_.i64[i] * b_.i64[i];
	}
#endif
	return simde__m128i_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_mod_epi64(simde__m128i a, simde__m128i b)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = a_.i64 % b_.i64;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		r_.i64[i] = a_.i64[i] % b_.i64[i];
	}
#endif
	return simde__m128i_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_mul_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_mul_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
		b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.f64 = a_.f64 * b_.f64;
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vmulq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_mul(a_.wasm_v128, b_.wasm_v128);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = a_.f64[i] * b_.f64[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mul_pd(a, b) simde_mm_mul_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_mul_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_mul_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_mul_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
		b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	float64x2_t temp = vmulq_f64(a_.neon_f64, b_.neon_f64);
	r_.neon_f64 = vsetq_lane_f64(vgetq_lane_f64(a_.neon_f64, 1), temp, 1);
#else
	r_.f64[0] = a_.f64[0] * b_.f64[0];
	r_.f64[1] = a_.f64[1];
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mul_sd(a, b) simde_mm_mul_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_mul_su32(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && \
	!defined(__PGI)
	return _mm_mul_su32(a, b);
#else
	simde__m64_private r_, a_ = simde__m64_to_private(a),
		b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.u64[0] = vget_lane_u64(
		vget_low_u64(vmull_u32(vreinterpret_u32_s64(a_.neon_i64),
				       vreinterpret_u32_s64(b_.neon_i64))),
		0);
#else
	r_.u64[0] = HEDLEY_STATIC_CAST(uint64_t, a_.u32[0]) *
		    HEDLEY_STATIC_CAST(uint64_t, b_.u32[0]);
#endif
	return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mul_su32(a, b) simde_mm_mul_su32(a, b)
#endif
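/* simde_mm_mulhi_epi16: keeps the high 16 bits of each signed 16x16->32
 * multiply. The NEON path widens the low and high halves with vmull_s16,
 * then extracts the odd (high) 16-bit halves of the 32-bit products with
 * an unzip. The scalar fallback shifts in uint32_t to avoid
 * implementation-defined right shifts of negative values. */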
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_mulhi_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_mulhi_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	int16x4_t a3210 = vget_low_s16(a_.neon_i16);
	int16x4_t b3210 = vget_low_s16(b_.neon_i16);
	int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	int32x4_t ab7654 = vmull_high_s16(a_.neon_i16, b_.neon_i16);
	r_.neon_i16 = vuzp2q_s16(vreinterpretq_s16_s32(ab3210),
				 vreinterpretq_s16_s32(ab7654));
#else
	int16x4_t a7654 = vget_high_s16(a_.neon_i16);
	int16x4_t b7654 = vget_high_s16(b_.neon_i16);
	int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
	uint16x8x2_t rv = vuzpq_u16(vreinterpretq_u16_s32(ab3210),
				    vreinterpretq_u16_s32(ab7654));
	r_.neon_u16 = rv.val[1];
#endif
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.u16[i] = HEDLEY_STATIC_CAST(
			uint16_t,
			(HEDLEY_STATIC_CAST(
				 uint32_t,
				 HEDLEY_STATIC_CAST(int32_t, a_.i16[i]) *
					 HEDLEY_STATIC_CAST(int32_t,
							    b_.i16[i])) >>
			 16));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mulhi_epi16(a, b) simde_mm_mulhi_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_mulhi_epu16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
	return _mm_mulhi_epu16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	uint16x4_t a3210 = vget_low_u16(a_.neon_u16);
	uint16x4_t b3210 = vget_low_u16(b_.neon_u16);
	uint32x4_t ab3210 = vmull_u16(a3210, b3210); /* 3333222211110000 */
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint32x4_t ab7654 = vmull_high_u16(a_.neon_u16, b_.neon_u16);
	r_.neon_u16 = vuzp2q_u16(vreinterpretq_u16_u32(ab3210),
				 vreinterpretq_u16_u32(ab7654));
#else
	uint16x4_t a7654 = vget_high_u16(a_.neon_u16);
	uint16x4_t b7654 = vget_high_u16(b_.neon_u16);
	uint32x4_t ab7654 = vmull_u16(a7654, b7654); /* 7777666655554444 */
	uint16x8x2_t neon_r = vuzpq_u16(vreinterpretq_u16_u32(ab3210),
					vreinterpretq_u16_u32(ab7654));
	r_.neon_u16 = neon_r.val[1];
#endif
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) {
		r_.u16[i] = HEDLEY_STATIC_CAST(
			uint16_t,
			HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) *
					HEDLEY_STATIC_CAST(uint32_t,
							   b_.u16[i]) >>
				16);
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mulhi_epu16(a, b) simde_mm_mulhi_epu16(a, b)
#endif
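/* simde_mm_mullo_epi16: keeps the low 16 bits of each 16x16 multiply. The
 * portable loop multiplies as unsigned so that signed overflow (undefined
 * behaviour in C) cannot occur; truncation to 16 bits gives the same
 * result either way. */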
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_mullo_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_mullo_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vmulq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = vec_mul(a_.altivec_i16, b_.altivec_i16);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.u16[i] = HEDLEY_STATIC_CAST(
			uint16_t,
			HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) *
				HEDLEY_STATIC_CAST(uint32_t, b_.u16[i]));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mullo_epi16(a, b) simde_mm_mullo_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_or_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_or_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
		b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32f = a_.i32f | b_.i32f;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vorrq_s64(a_.neon_i64, b_.neon_i64);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
		r_.i32f[i] = a_.i32f[i] | b_.i32f[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_or_pd(a, b) simde_mm_or_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_or_si128(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_or_si128(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32f = a_.i32f | b_.i32f;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
		r_.i32f[i] = a_.i32f[i] | b_.i32f[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_or_si128(a, b) simde_mm_or_si128(a, b)
#endif
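/* simde_mm_packs_epi16: narrows sixteen signed 16-bit values (eight from
 * `a`, then eight from `b`) to signed 8-bit with saturation: each value is
 * clamped to [INT8_MIN, INT8_MAX] before truncation. packs_epi32 below
 * does the same from 32-bit to 16-bit. */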
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_packs_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_packs_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i8 =
		vcombine_s8(vqmovn_s16(a_.neon_i16), vqmovn_s16(b_.neon_i16));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i8[i] = (a_.i16[i] > INT8_MAX)
				? INT8_MAX
				: ((a_.i16[i] < INT8_MIN)
					? INT8_MIN
					: HEDLEY_STATIC_CAST(int8_t,
							     a_.i16[i]));
		r_.i8[i + 8] = (b_.i16[i] > INT8_MAX)
				? INT8_MAX
				: ((b_.i16[i] < INT8_MIN)
					? INT8_MIN
					: HEDLEY_STATIC_CAST(int8_t,
							     b_.i16[i]));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_packs_epi16(a, b) simde_mm_packs_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_packs_epi32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_packs_epi32(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 =
		vcombine_s16(vqmovn_s32(a_.neon_i32), vqmovn_s32(b_.neon_i32));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_i16 = vec_packs(a_.altivec_i32, b_.altivec_i32);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i16[i] = (a_.i32[i] > INT16_MAX)
				? INT16_MAX
				: ((a_.i32[i] < INT16_MIN)
					? INT16_MIN
					: HEDLEY_STATIC_CAST(int16_t,
							     a_.i32[i]));
		r_.i16[i + 4] = (b_.i32[i] > INT16_MAX)
				? INT16_MAX
				: ((b_.i32[i] < INT16_MIN)
					? INT16_MIN
					: HEDLEY_STATIC_CAST(int16_t,
							     b_.i32[i]));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_packs_epi32(a, b) simde_mm_packs_epi32(a, b)
#endif
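/* simde_mm_packus_epi16: narrows signed 16-bit values to unsigned 8-bit
 * with saturation; negative inputs clamp to 0 and values above 255 clamp
 * to UINT8_MAX. */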
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_packus_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_packus_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vcombine_u8(vqmovun_s16(a_.neon_i16),
				 vqmovun_s16(b_.neon_i16));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u8 = vec_packsu(a_.altivec_i16, b_.altivec_i16);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.u8[i] = (a_.i16[i] > UINT8_MAX)
				? UINT8_MAX
				: ((a_.i16[i] < 0)
					? UINT8_C(0)
					: HEDLEY_STATIC_CAST(uint8_t,
							     a_.i16[i]));
		r_.u8[i + 8] = (b_.i16[i] > UINT8_MAX)
				? UINT8_MAX
				: ((b_.i16[i] < 0)
					? UINT8_C(0)
					: HEDLEY_STATIC_CAST(uint8_t,
							     b_.i16[i]));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_packus_epi16(a, b) simde_mm_packus_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_pause(void)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_pause();
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_pause() (simde_mm_pause())
#endif
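/* simde_mm_sad_epu8: sum of absolute differences. For each 8-byte half,
 * accumulates |a[j] - b[j]| over the eight byte pairs into the
 * corresponding 64-bit lane. The NEON path uses vabdq_u8 followed by a
 * chain of pairwise widening adds. */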
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sad_epu8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sad_epu8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
		b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	const uint16x8_t t = vpaddlq_u8(vabdq_u8(a_.neon_u8, b_.neon_u8));
	r_.neon_u64 = vcombine_u64(vpaddl_u32(vpaddl_u16(vget_low_u16(t))),
				   vpaddl_u32(vpaddl_u16(vget_high_u16(t))));
#else
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		uint16_t tmp = 0;
		SIMDE_VECTORIZE_REDUCTION(+ : tmp)
		for (size_t j = 0; j < ((sizeof(r_.u8) / sizeof(r_.u8[0])) / 2);
		     j++) {
			const size_t e = j + (i * 8);
			tmp += (a_.u8[e] > b_.u8[e]) ? (a_.u8[e] - b_.u8[e])
						     : (b_.u8[e] - a_.u8[e]);
		}
		r_.i64[i] = tmp;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sad_epu8(a, b) simde_mm_sad_epu8(a, b)
#endif
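/* simde_mm_set_epi8: builds a vector from sixteen bytes given in
 * most-significant-first order, so e0 ends up in lane 0 (the lowest byte),
 * matching the Intel intrinsic. */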
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set_epi8(int8_t e15, int8_t e14, int8_t e13, int8_t e12,
			       int8_t e11, int8_t e10, int8_t e9, int8_t e8,
			       int8_t e7, int8_t e6, int8_t e5, int8_t e4,
			       int8_t e3, int8_t e2, int8_t e1, int8_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5,
			    e4, e3, e2, e1, e0);
#else
	simde__m128i_private r_;
#if defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i8x16_make(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9,
				       e10, e11, e12, e13, e14, e15);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_LIKE_16(int8x16_t)
	int8_t data[16] = {e0, e1, e2, e3, e4, e5, e6, e7,
			   e8, e9, e10, e11, e12, e13, e14, e15};
	r_.neon_i8 = vld1q_s8(data);
#else
	r_.i8[0] = e0;
	r_.i8[1] = e1;
	r_.i8[2] = e2;
	r_.i8[3] = e3;
	r_.i8[4] = e4;
	r_.i8[5] = e5;
	r_.i8[6] = e6;
	r_.i8[7] = e7;
	r_.i8[8] = e8;
	r_.i8[9] = e9;
	r_.i8[10] = e10;
	r_.i8[11] = e11;
	r_.i8[12] = e12;
	r_.i8[13] = e13;
	r_.i8[14] = e14;
	r_.i8[15] = e15;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4, e3, \
		     e2, e1, e0)                                               \
	simde_mm_set_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5,    \
			  e4, e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set_epi16(int16_t e7, int16_t e6, int16_t e5, int16_t e4,
				int16_t e3, int16_t e2, int16_t e1, int16_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_LIKE_16(int16x8_t)
	int16_t data[8] = {e0, e1, e2, e3, e4, e5, e6, e7};
	r_.neon_i16 = vld1q_s16(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_make(e0, e1, e2, e3, e4, e5, e6, e7);
#else
	r_.i16[0] = e0;
	r_.i16[1] = e1;
	r_.i16[2] = e2;
	r_.i16[3] = e3;
	r_.i16[4] = e4;
	r_.i16[5] = e5;
	r_.i16[6] = e6;
	r_.i16[7] = e7;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0) \
	simde_mm_set_epi16(e7, e6, e5, e4, e3, e2, e1, e0)
#endif
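/* simde_mm_loadu_si16: loads one unaligned 16-bit value into lane 0,
 * zeroing the rest. The fallback goes through simde_memcpy, the portable
 * way to do an unaligned, aliasing-safe load. _mm_loadu_si16 itself only
 * exists on fairly recent compilers, hence the version checks; the si32
 * and si64 variants below follow the same pattern. */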
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_loadu_si16(void const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(SIMDE_DETECT_CLANG_VERSION_CHECK(8, 0, 0) || \
	 HEDLEY_GCC_VERSION_CHECK(11, 0, 0) || \
	 HEDLEY_INTEL_VERSION_CHECK(20, 21, 1))
	return _mm_loadu_si16(mem_addr);
#else
	int16_t val;
	simde_memcpy(&val, mem_addr, sizeof(val));
	return simde_x_mm_cvtsi16_si128(val);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_si16(mem_addr) simde_mm_loadu_si16(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set_epi32(int32_t e3, int32_t e2, int32_t e1, int32_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set_epi32(e3, e2, e1, e0);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_LIKE_16(int32x4_t) int32_t data[4] = {e0, e1, e2, e3};
	r_.neon_i32 = vld1q_s32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_make(e0, e1, e2, e3);
#else
	r_.i32[0] = e0;
	r_.i32[1] = e1;
	r_.i32[2] = e2;
	r_.i32[3] = e3;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi32(e3, e2, e1, e0) simde_mm_set_epi32(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_loadu_si32(void const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(SIMDE_DETECT_CLANG_VERSION_CHECK(8, 0, 0) || \
	 HEDLEY_GCC_VERSION_CHECK(11, 0, 0) || \
	 HEDLEY_INTEL_VERSION_CHECK(20, 21, 1))
	return _mm_loadu_si32(mem_addr);
#else
	int32_t val;
	simde_memcpy(&val, mem_addr, sizeof(val));
	return simde_mm_cvtsi32_si128(val);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_si32(mem_addr) simde_mm_loadu_si32(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set_epi64(simde__m64 e1, simde__m64 e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
	return _mm_set_epi64(e1, e0);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vcombine_s64(simde__m64_to_neon_i64(e0),
				   simde__m64_to_neon_i64(e1));
#else
	r_.m64[0] = e0;
	r_.m64[1] = e1;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi64(e1, e0) (simde_mm_set_epi64((e1), (e0)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set_epi64x(int64_t e1, int64_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19, 0, 0))
	return _mm_set_epi64x(e1, e0);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_LIKE_16(int64x2_t) int64_t data[2] = {e0, e1};
	r_.neon_i64 = vld1q_s64(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i64x2_make(e0, e1);
#else
	r_.i64[0] = e0;
	r_.i64[1] = e1;
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_epi64x(e1, e0) simde_mm_set_epi64x(e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_loadu_si64(void const *mem_addr)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(SIMDE_DETECT_CLANG_VERSION_CHECK(8, 0, 0) || \
	 HEDLEY_GCC_VERSION_CHECK(11, 0, 0) || \
	 HEDLEY_INTEL_VERSION_CHECK(20, 21, 1))
	return _mm_loadu_si64(mem_addr);
#else
	int64_t val;
	simde_memcpy(&val, mem_addr, sizeof(val));
	return simde_mm_cvtsi64_si128(val);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_loadu_si64(mem_addr) simde_mm_loadu_si64(mem_addr)
#endif
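/* simde_x_mm_set_epu8/epu16/epu32/epu64x (SIMDe extensions): unsigned
 * counterparts of the _mm_set_* constructors. On x86 they cast each
 * element and defer to the signed intrinsics, which store the same bit
 * patterns. */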
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_set_epu8(uint8_t e15, uint8_t e14, uint8_t e13,
				 uint8_t e12, uint8_t e11, uint8_t e10,
				 uint8_t e9, uint8_t e8, uint8_t e7, uint8_t e6,
				 uint8_t e5, uint8_t e4, uint8_t e3, uint8_t e2,
				 uint8_t e1, uint8_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set_epi8(
		HEDLEY_STATIC_CAST(char, e15), HEDLEY_STATIC_CAST(char, e14),
		HEDLEY_STATIC_CAST(char, e13), HEDLEY_STATIC_CAST(char, e12),
		HEDLEY_STATIC_CAST(char, e11), HEDLEY_STATIC_CAST(char, e10),
		HEDLEY_STATIC_CAST(char, e9), HEDLEY_STATIC_CAST(char, e8),
		HEDLEY_STATIC_CAST(char, e7), HEDLEY_STATIC_CAST(char, e6),
		HEDLEY_STATIC_CAST(char, e5), HEDLEY_STATIC_CAST(char, e4),
		HEDLEY_STATIC_CAST(char, e3), HEDLEY_STATIC_CAST(char, e2),
		HEDLEY_STATIC_CAST(char, e1), HEDLEY_STATIC_CAST(char, e0));
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_LIKE_16(uint8x16_t)
	uint8_t data[16] = {e0, e1, e2, e3, e4, e5, e6, e7,
			    e8, e9, e10, e11, e12, e13, e14, e15};
	r_.neon_u8 = vld1q_u8(data);
#else
	r_.u8[0] = e0;
	r_.u8[1] = e1;
	r_.u8[2] = e2;
	r_.u8[3] = e3;
	r_.u8[4] = e4;
	r_.u8[5] = e5;
	r_.u8[6] = e6;
	r_.u8[7] = e7;
	r_.u8[8] = e8;
	r_.u8[9] = e9;
	r_.u8[10] = e10;
	r_.u8[11] = e11;
	r_.u8[12] = e12;
	r_.u8[13] = e13;
	r_.u8[14] = e14;
	r_.u8[15] = e15;
#endif
	return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_set_epu16(uint16_t e7, uint16_t e6, uint16_t e5,
				  uint16_t e4, uint16_t e3, uint16_t e2,
				  uint16_t e1, uint16_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set_epi16(
		HEDLEY_STATIC_CAST(short, e7), HEDLEY_STATIC_CAST(short, e6),
		HEDLEY_STATIC_CAST(short, e5), HEDLEY_STATIC_CAST(short, e4),
		HEDLEY_STATIC_CAST(short, e3), HEDLEY_STATIC_CAST(short, e2),
		HEDLEY_STATIC_CAST(short, e1), HEDLEY_STATIC_CAST(short, e0));
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_LIKE_16(uint16x8_t)
	uint16_t data[8] = {e0, e1, e2, e3, e4, e5, e6, e7};
	r_.neon_u16 = vld1q_u16(data);
#else
	r_.u16[0] = e0;
	r_.u16[1] = e1;
	r_.u16[2] = e2;
	r_.u16[3] = e3;
	r_.u16[4] = e4;
	r_.u16[5] = e5;
	r_.u16[6] = e6;
	r_.u16[7] = e7;
#endif
	return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_set_epu32(uint32_t e3, uint32_t e2, uint32_t e1,
				  uint32_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set_epi32(HEDLEY_STATIC_CAST(int, e3),
			     HEDLEY_STATIC_CAST(int, e2),
			     HEDLEY_STATIC_CAST(int, e1),
			     HEDLEY_STATIC_CAST(int, e0));
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_LIKE_16(uint32x4_t) uint32_t data[4] = {e0, e1, e2, e3};
	r_.neon_u32 = vld1q_u32(data);
#else
	r_.u32[0] = e0;
	r_.u32[1] = e1;
	r_.u32[2] = e2;
	r_.u32[3] = e3;
#endif
	return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_set_epu64x(uint64_t e1, uint64_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19, 0, 0))
	return _mm_set_epi64x(HEDLEY_STATIC_CAST(int64_t, e1),
			      HEDLEY_STATIC_CAST(int64_t, e0));
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	SIMDE_ALIGN_LIKE_16(uint64x2_t) uint64_t data[2] = {e0, e1};
	r_.neon_u64 = vld1q_u64(data);
#else
	r_.u64[0] = e0;
	r_.u64[1] = e1;
#endif
	return simde__m128i_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_set_sd(simde_float64 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set_sd(a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	return vsetq_lane_f64(a, vdupq_n_f64(SIMDE_FLOAT64_C(0.0)), 0);
#else
	return simde_mm_set_pd(SIMDE_FLOAT64_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set_sd(a) simde_mm_set_sd(a)
#endif
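/* simde_mm_set1_epi8: broadcasts one byte to all sixteen lanes. NEON,
 * WASM, and AltiVec each have a native splat; the loop is the portable
 * fallback. The set1_epi16/epi32/epi64x variants below follow the same
 * pattern at wider lane sizes. */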
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set1_epi8(int8_t a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set1_epi8(a);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i8 = vdupq_n_s8(a);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i8x16_splat(a);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	r_.altivec_i8 = vec_splats(HEDLEY_STATIC_CAST(signed char, a));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i8) / sizeof(r_.i8[0])); i++) {
		r_.i8[i] = a;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi8(a) simde_mm_set1_epi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set1_epi16(int16_t a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set1_epi16(a);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vdupq_n_s16(a);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_splat(a);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	r_.altivec_i16 = vec_splats(HEDLEY_STATIC_CAST(signed short, a));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = a;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi16(a) simde_mm_set1_epi16(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set1_epi32(int32_t a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_set1_epi32(a);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vdupq_n_s32(a);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_splat(a);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	r_.altivec_i32 = vec_splats(HEDLEY_STATIC_CAST(signed int, a));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = a;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi32(a) simde_mm_set1_epi32(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set1_epi64x(int64_t a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(!defined(HEDLEY_MSVC_VERSION) || HEDLEY_MSVC_VERSION_CHECK(19, 0, 0))
	return _mm_set1_epi64x(a);
#else
	simde__m128i_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vdupq_n_s64(a);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i64x2_splat(a);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	r_.altivec_i64 = vec_splats(HEDLEY_STATIC_CAST(signed long long, a));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		r_.i64[i] = a;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi64x(a) simde_mm_set1_epi64x(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_set1_epi64(simde__m64 a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
	return _mm_set1_epi64(a);
#else
	simde__m64_private a_ = simde__m64_to_private(a);
	return simde_mm_set1_epi64x(a_.i64[0]);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_set1_epi64(a) simde_mm_set1_epi64(a)
#endif
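/* simde_x_mm_set1_epu8/epu16/epu32/epu64 (SIMDe extensions): unsigned
 * splats. AltiVec can splat unsigned types directly; elsewhere the value
 * is cast to the signed type of the same width and the signed splat is
 * reused. */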
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_set1_epu8(uint8_t value)
{
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	return simde__m128i_from_altivec_u8(
		vec_splats(HEDLEY_STATIC_CAST(unsigned char, value)));
#else
	return simde_mm_set1_epi8(HEDLEY_STATIC_CAST(int8_t, value));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_set1_epu16(uint16_t value)
{
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	return simde__m128i_from_altivec_u16(
		vec_splats(HEDLEY_STATIC_CAST(unsigned short, value)));
#else
	return simde_mm_set1_epi16(HEDLEY_STATIC_CAST(int16_t, value));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_set1_epu32(uint32_t value)
{
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	return simde__m128i_from_altivec_u32(
		vec_splats(HEDLEY_STATIC_CAST(unsigned int, value)));
#else
	return simde_mm_set1_epi32(HEDLEY_STATIC_CAST(int32_t, value));
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_set1_epu64(uint64_t value)
{
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
	return simde__m128i_from_altivec_u64(
		vec_splats(HEDLEY_STATIC_CAST(unsigned long long, value)));
#else
	return simde_mm_set1_epi64x(HEDLEY_STATIC_CAST(int64_t, value));
#endif
}
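/* The _mm_setr_* constructors take their arguments in memory order (lowest
 * lane first), so each one simply forwards to the corresponding _mm_set_*
 * with the argument list reversed. */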
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_setr_epi8(int8_t e15, int8_t e14, int8_t e13, int8_t e12,
				int8_t e11, int8_t e10, int8_t e9, int8_t e8,
				int8_t e7, int8_t e6, int8_t e5, int8_t e4,
				int8_t e3, int8_t e2, int8_t e1, int8_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_setr_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5,
			     e4, e3, e2, e1, e0);
#else
	return simde_mm_set_epi8(e0, e1, e2, e3, e4, e5, e6, e7, e8, e9, e10,
				 e11, e12, e13, e14, e15);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, e4,  \
		      e3, e2, e1, e0)                                        \
	simde_mm_setr_epi8(e15, e14, e13, e12, e11, e10, e9, e8, e7, e6, e5, \
			   e4, e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_setr_epi16(int16_t e7, int16_t e6, int16_t e5, int16_t e4,
				 int16_t e3, int16_t e2, int16_t e1, int16_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_setr_epi16(e7, e6, e5, e4, e3, e2, e1, e0);
#else
	return simde_mm_set_epi16(e0, e1, e2, e3, e4, e5, e6, e7);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_epi16(e7, e6, e5, e4, e3, e2, e1, e0) \
	simde_mm_setr_epi16(e7, e6, e5, e4, e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_setr_epi32(int32_t e3, int32_t e2, int32_t e1, int32_t e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_setr_epi32(e3, e2, e1, e0);
#else
	return simde_mm_set_epi32(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_epi32(e3, e2, e1, e0) simde_mm_setr_epi32(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_setr_epi64(simde__m64 e1, simde__m64 e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
	return _mm_setr_epi64(e1, e0);
#else
	return simde_mm_set_epi64(e0, e1);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_epi64(e1, e0) (simde_mm_setr_epi64((e1), (e0)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_setr_pd(simde_float64 e1, simde_float64 e0)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_setr_pd(e1, e0);
#else
	return simde_mm_set_pd(e0, e1);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setr_pd(e1, e0) simde_mm_setr_pd(e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_setzero_pd(void)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_setzero_pd();
#else
	return simde_mm_castsi128_pd(simde_mm_setzero_si128());
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_setzero_pd() simde_mm_setzero_pd()
#endif
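/* _mm_undefined_pd/_mm_undefined_si128 return a vector with unspecified
 * contents. When the native intrinsic is unavailable, the result is
 * zero-initialized instead, unless SIMDe is configured to suppress
 * uninitialized-variable diagnostics and permit a truly uninitialized
 * value. */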
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_undefined_pd(void)
{
	simde__m128d_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE__HAVE_UNDEFINED128)
	r_.n = _mm_undefined_pd();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
	r_ = simde__m128d_to_private(simde_mm_setzero_pd());
#endif
	return simde__m128d_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_undefined_pd() simde_mm_undefined_pd()
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_undefined_si128(void)
{
	simde__m128i_private r_;
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE__HAVE_UNDEFINED128)
	r_.n = _mm_undefined_si128();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
	r_ = simde__m128i_to_private(simde_mm_setzero_si128());
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_undefined_si128() (simde_mm_undefined_si128())
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
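/* simde_x_mm_setone_pd/si128 (SIMDe extensions): all-ones bit patterns,
 * produced by reinterpreting the all-ones single-precision vector. Note
 * that for doubles the all-ones pattern is a NaN; these are typically used
 * as masks rather than as arithmetic values. */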
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_x_mm_setone_pd(void)
{
	return simde_mm_castps_pd(simde_x_mm_setone_ps());
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_setone_si128(void)
{
	return simde_mm_castps_si128(simde_x_mm_setone_ps());
}
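/* simde_mm_shuffle_epi32: each 2-bit field of imm8 selects which source
 * lane feeds the corresponding destination lane (bits 1:0 pick lane 0,
 * bits 3:2 pick lane 1, and so on). The macro forms below require imm8 to
 * be a compile-time constant, matching the native intrinsic. */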
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_shuffle_epi32(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = a_.i32[(imm8 >> (i * 2)) & 3];
	}
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_shuffle_epi32(a, imm8) _mm_shuffle_epi32((a), (imm8))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_shuffle_epi32(a, imm8)                                       \
	__extension__({                                                       \
		int32x4_t ret;                                                \
		ret = vmovq_n_s32(vgetq_lane_s32(vreinterpretq_s32_s64(a),    \
						 (imm8) & (0x3)));            \
		ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_s64(a), \
						    ((imm8) >> 2) & 0x3),     \
				     ret, 1);                                 \
		ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_s64(a), \
						    ((imm8) >> 4) & 0x3),     \
				     ret, 2);                                 \
		ret = vsetq_lane_s32(vgetq_lane_s32(vreinterpretq_s32_s64(a), \
						    ((imm8) >> 6) & 0x3),     \
				     ret, 3);                                 \
		vreinterpretq_s64_s32(ret);                                   \
	})
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_epi32(a, imm8)                               \
	(__extension__({                                              \
		const simde__m128i_private simde__tmp_a_ =            \
			simde__m128i_to_private(a);                   \
		simde__m128i_from_private((simde__m128i_private){    \
			.i32 = SIMDE_SHUFFLE_VECTOR_(                 \
				32, 16, (simde__tmp_a_).i32,          \
				(simde__tmp_a_).i32, ((imm8)) & 3,    \
				((imm8) >> 2) & 3, ((imm8) >> 4) & 3, \
				((imm8) >> 6) & 3)});                 \
	}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_shuffle_epi32(a, imm8) simde_mm_shuffle_epi32(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_shuffle_pd(simde__m128d a, simde__m128d b, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3)
{
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
		b_ = simde__m128d_to_private(b);
	r_.f64[0] = ((imm8 & 1) == 0) ? a_.f64[0] : a_.f64[1];
	r_.f64[1] = ((imm8 & 2) == 0) ? b_.f64[0] : b_.f64[1];
	return simde__m128d_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(__PGI)
#define simde_mm_shuffle_pd(a, b, imm8) _mm_shuffle_pd((a), (b), (imm8))
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shuffle_pd(a, b, imm8)                                     \
	(__extension__({                                                    \
		simde__m128d_from_private((simde__m128d_private){          \
			.f64 = SIMDE_SHUFFLE_VECTOR_(                       \
				64, 16, simde__m128d_to_private(a).f64,     \
				simde__m128d_to_private(b).f64,             \
				(((imm8)) & 1), (((imm8) >> 1) & 1) + 2)}); \
	}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_shuffle_pd(a, b, imm8) simde_mm_shuffle_pd(a, b, imm8)
#endif
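/* simde_mm_shufflehi_epi16: copies the low four 16-bit lanes unchanged and
 * shuffles the high four among themselves, two imm8 bits per destination
 * lane. */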
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_shufflehi_epi16(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
	SIMDE_VECTORIZE
	for (size_t i = 0; i < ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2);
	     i++) {
		r_.i16[i] = a_.i16[i];
	}
	for (size_t i = ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2);
	     i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = a_.i16[((imm8 >> ((i - 4) * 2)) & 3) + 4];
	}
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_shufflehi_epi16(a, imm8) _mm_shufflehi_epi16((a), (imm8))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_shufflehi_epi16(a, imm8)                                     \
	__extension__({                                                       \
		int16x8_t ret = vreinterpretq_s16_s64(a);                     \
		int16x4_t highBits = vget_high_s16(ret);                      \
		ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm8) & (0x3)), \
				     ret, 4);                                 \
		ret = vsetq_lane_s16(                                         \
			vget_lane_s16(highBits, ((imm8) >> 2) & 0x3), ret, 5); \
		ret = vsetq_lane_s16(                                         \
			vget_lane_s16(highBits, ((imm8) >> 4) & 0x3), ret, 6); \
		ret = vsetq_lane_s16(                                         \
			vget_lane_s16(highBits, ((imm8) >> 6) & 0x3), ret, 7); \
		vreinterpretq_s64_s16(ret);                                   \
	})
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shufflehi_epi16(a, imm8)                                    \
	(__extension__({                                                     \
		const simde__m128i_private simde__tmp_a_ =                   \
			simde__m128i_to_private(a);                          \
		simde__m128i_from_private((simde__m128i_private){           \
			.i16 = SIMDE_SHUFFLE_VECTOR_(                        \
				16, 16, (simde__tmp_a_).i16,                 \
				(simde__tmp_a_).i16, 0, 1, 2, 3,             \
				(((imm8)) & 3) + 4, (((imm8) >> 2) & 3) + 4, \
				(((imm8) >> 4) & 3) + 4,                     \
				(((imm8) >> 6) & 3) + 4)});                  \
	}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_shufflehi_epi16(a, imm8) simde_mm_shufflehi_epi16(a, imm8)
#endif
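/* simde_mm_shufflelo_epi16: the mirror image of shufflehi: it shuffles the
 * low four 16-bit lanes and passes the high four through unchanged. */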
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_shufflelo_epi16(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
	for (size_t i = 0; i < ((sizeof(r_.i16) / sizeof(r_.i16[0])) / 2);
	     i++) {
		r_.i16[i] = a_.i16[((imm8 >> (i * 2)) & 3)];
	}
	SIMDE_VECTORIZE
	for (size_t i = ((sizeof(a_.i16) / sizeof(a_.i16[0])) / 2);
	     i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = a_.i16[i];
	}
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_shufflelo_epi16(a, imm8) _mm_shufflelo_epi16((a), (imm8))
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_shufflelo_epi16(a, imm8)                                    \
	__extension__({                                                      \
		int16x8_t ret = vreinterpretq_s16_s64(a);                    \
		int16x4_t lowBits = vget_low_s16(ret);                       \
		ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm8) & (0x3)), \
				     ret, 0);                                \
		ret = vsetq_lane_s16(                                        \
			vget_lane_s16(lowBits, ((imm8) >> 2) & 0x3), ret, 1); \
		ret = vsetq_lane_s16(                                        \
			vget_lane_s16(lowBits, ((imm8) >> 4) & 0x3), ret, 2); \
		ret = vsetq_lane_s16(                                        \
			vget_lane_s16(lowBits, ((imm8) >> 6) & 0x3), ret, 3); \
		vreinterpretq_s64_s16(ret);                                  \
	})
#elif defined(SIMDE_SHUFFLE_VECTOR_)
#define simde_mm_shufflelo_epi16(a, imm8)                             \
	(__extension__({                                              \
		const simde__m128i_private simde__tmp_a_ =            \
			simde__m128i_to_private(a);                   \
		simde__m128i_from_private((simde__m128i_private){    \
			.i16 = SIMDE_SHUFFLE_VECTOR_(                 \
				16, 16, (simde__tmp_a_).i16,          \
				(simde__tmp_a_).i16, (((imm8)) & 3),  \
				(((imm8) >> 2) & 3), (((imm8) >> 4) & 3), \
				(((imm8) >> 6) & 3), 4, 5, 6, 7)});   \
	}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_shufflelo_epi16(a, imm8) simde_mm_shufflelo_epi16(a, imm8)
#endif
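/* simde_mm_sll_epi16/epi32/epi64: logical left shift, where the shift
 * count is the low 64 bits of `count`. Counts larger than the lane width
 * minus one zero the whole vector, so the early return below is required
 * for correctness, not just speed. */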
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sll_epi16(simde__m128i a, simde__m128i count)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sll_epi16(a, count);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     count_ = simde__m128i_to_private(count);
	if (count_.u64[0] > 15)
		return simde_mm_setzero_si128();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
	r_.u16 = (a_.u16 << count_.u64[0]);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u16 = vshlq_u16(a_.neon_u16, vdupq_n_s16(HEDLEY_STATIC_CAST(
						     int16_t, count_.u64[0])));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 =
		((wasm_i64x2_extract_lane(count_.wasm_v128, 0) < 16)
			 ? wasm_i16x8_shl(a_.wasm_v128,
					  HEDLEY_STATIC_CAST(
						  int32_t,
						  wasm_i64x2_extract_lane(
							  count_.wasm_v128, 0)))
			 : wasm_i16x8_const(0, 0, 0, 0, 0, 0, 0, 0));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) {
		r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t,
					       (a_.u16[i] << count_.u64[0]));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sll_epi16(a, count) simde_mm_sll_epi16((a), (count))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sll_epi32(simde__m128i a, simde__m128i count)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sll_epi32(a, count);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     count_ = simde__m128i_to_private(count);
	if (count_.u64[0] > 31)
		return simde_mm_setzero_si128();
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
	r_.u32 = (a_.u32 << count_.u64[0]);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u32 = vshlq_u32(a_.neon_u32, vdupq_n_s32(HEDLEY_STATIC_CAST(
						     int32_t, count_.u64[0])));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 =
		((wasm_i64x2_extract_lane(count_.wasm_v128, 0) < 32)
			 ? wasm_i32x4_shl(a_.wasm_v128,
					  HEDLEY_STATIC_CAST(
						  int32_t,
						  wasm_i64x2_extract_lane(
							  count_.wasm_v128, 0)))
			 : wasm_i32x4_const(0, 0, 0, 0));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
		r_.u32[i] = HEDLEY_STATIC_CAST(uint32_t,
					       (a_.u32[i] << count_.u64[0]));
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sll_epi32(a, count) (simde_mm_sll_epi32(a, (count)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sll_epi64(simde__m128i a, simde__m128i count)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sll_epi64(a, count);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     count_ = simde__m128i_to_private(count);
	if (count_.u64[0] > 63)
		return simde_mm_setzero_si128();
	const int_fast16_t s = HEDLEY_STATIC_CAST(int_fast16_t, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u64 = vshlq_u64(a_.neon_u64,
				vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, s)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = (s < 64) ? wasm_i64x2_shl(a_.wasm_v128, s)
				: wasm_i64x2_const(0, 0);
#else
#if !defined(SIMDE_BUG_GCC_94488)
	SIMDE_VECTORIZE
#endif
	for (size_t i = 0; i < (sizeof(r_.u64) / sizeof(r_.u64[0])); i++) {
		r_.u64[i] = a_.u64[i] << s;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sll_epi64(a, count) (simde_mm_sll_epi64(a, (count)))
#endif
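/* simde_mm_sqrt_pd / simde_mm_sqrt_sd: double-precision square root.
 * The _sd form takes the square root of the low lane of b and passes
 * the high lane of a through. */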
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_sqrt_pd(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sqrt_pd(a);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vsqrtq_f64(a_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_sqrt(a_.wasm_v128);
#elif defined(simde_math_sqrt)
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = simde_math_sqrt(a_.f64[i]);
	}
#else
	HEDLEY_UNREACHABLE();
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sqrt_pd(a) simde_mm_sqrt_pd(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_sqrt_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sqrt_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_sqrt_pd(b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(simde_math_sqrt)
	r_.f64[0] = simde_math_sqrt(b_.f64[0]);
	r_.f64[1] = a_.f64[1];
#else
	HEDLEY_UNREACHABLE();
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sqrt_sd(a, b) simde_mm_sqrt_sd(a, b)
#endif
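/* simde_mm_srl_epi{16,32,64}: logical (zero-filling) right shift by
 * the count in the low 64 bits of count. The count is read as an
 * unsigned 64-bit value, so anything wider than the lane zeroes the
 * result; the early return also keeps the scalar shift strictly below
 * the lane width, since shifting by the full width is undefined in C. */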
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_srl_epi16(simde__m128i a, simde__m128i count)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_srl_epi16(a, count);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     count_ = simde__m128i_to_private(count);
	/* Counts of 16 or more (including negative i64 values, which read
	 * as huge unsigned counts) zero every lane. */
	if (count_.u64[0] > 15)
		return simde_mm_setzero_si128();
	const int cnt = HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u16 = vshlq_u16(a_.neon_u16,
				vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -cnt)));
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u16) / sizeof(r_.u16[0])); i++) {
		r_.u16[i] = a_.u16[i] >> cnt;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srl_epi16(a, count) (simde_mm_srl_epi16(a, (count)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_srl_epi32(simde__m128i a, simde__m128i count)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_srl_epi32(a, count);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     count_ = simde__m128i_to_private(count);
	/* Early return keeps cnt in [0, 31]; shifting a 32-bit lane by 32
	 * would be undefined, and WASM shifts are taken modulo 32. */
	if (count_.u64[0] > 31)
		return simde_mm_setzero_si128();
	const int cnt = HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u32 = vshlq_u32(a_.neon_u32,
				vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u32x4_shr(a_.wasm_v128, cnt);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
		r_.u32[i] = a_.u32[i] >> cnt;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srl_epi32(a, count) (simde_mm_srl_epi32(a, (count)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_srl_epi64(simde__m128i a, simde__m128i count)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_srl_epi64(a, count);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     count_ = simde__m128i_to_private(count);
	/* Early return keeps cnt in [0, 63]; shifting a 64-bit lane by 64
	 * would be undefined, and WASM shifts are taken modulo 64. */
	if (count_.u64[0] > 63)
		return simde_mm_setzero_si128();
	const int cnt = HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u64 = vshlq_u64(a_.neon_u64,
				vdupq_n_s64(HEDLEY_STATIC_CAST(int64_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u64x2_shr(a_.wasm_v128, cnt);
#else
#if !defined(SIMDE_BUG_GCC_94488)
	SIMDE_VECTORIZE
#endif
	for (size_t i = 0; i < (sizeof(r_.u64) / sizeof(r_.u64[0])); i++) {
		r_.u64[i] = a_.u64[i] >> cnt;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srl_epi64(a, count) (simde_mm_srl_epi64(a, (count)))
#endif
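/* simde_mm_srai_epi{16,32} and simde_mm_sra_epi{16,32}: arithmetic
 * (sign-filling) right shift. Oversized counts saturate to the lane
 * width minus one instead of zeroing, matching Intel's semantics. */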
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_srai_epi16(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	/* MSVC requires a range of (0, 255). */
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
	const int cnt = (imm8 & ~15) ? 15 : imm8;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vshlq_s16(a_.neon_i16,
				vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_shr(a_.wasm_v128, cnt);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = a_.i16[i] >> cnt;
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srai_epi16(a, imm8) _mm_srai_epi16((a), (imm8))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srai_epi16(a, imm8) simde_mm_srai_epi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_srai_epi32(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	/* MSVC requires a range of (0, 255). */
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
	const int cnt = (imm8 & ~31) ? 31 : imm8;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vshlq_s32(a_.neon_i32, vdupq_n_s32(-cnt));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_shr(a_.wasm_v128, cnt);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = a_.i32[i] >> cnt;
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srai_epi32(a, imm8) _mm_srai_epi32((a), (imm8))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srai_epi32(a, imm8) simde_mm_srai_epi32(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sra_epi16(simde__m128i a, simde__m128i count)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sra_epi16(a, count);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     count_ = simde__m128i_to_private(count);
	/* The count is unsigned, so negative i64 values saturate to 15. */
	const int cnt = HEDLEY_STATIC_CAST(
		int, (count_.u64[0] > 15 ? 15 : count_.u64[0]));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vshlq_s16(a_.neon_i16,
				vdupq_n_s16(HEDLEY_STATIC_CAST(int16_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_shr(a_.wasm_v128, cnt);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = a_.i16[i] >> cnt;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sra_epi16(a, count) (simde_mm_sra_epi16(a, count))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sra_epi32(simde__m128i a, simde__m128i count)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && !defined(SIMDE_BUG_GCC_BAD_MM_SRA_EPI32)
	return _mm_sra_epi32(a, count);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     count_ = simde__m128i_to_private(count);
	const int cnt = count_.u64[0] > 31
				? 31
				: HEDLEY_STATIC_CAST(int, count_.u64[0]);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vshlq_s32(a_.neon_i32,
				vdupq_n_s32(HEDLEY_STATIC_CAST(int32_t, -cnt)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i32x4_shr(a_.wasm_v128, cnt);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = a_.i32[i] >> cnt;
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sra_epi32(a, count) (simde_mm_sra_epi32(a, (count)))
#endif
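/* simde_mm_slli_epi{16,32,64}: left shift by an immediate. imm8 values
 * at or beyond the lane width produce zero. */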
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_slli_epi16(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	if (HEDLEY_UNLIKELY((imm8 > 15))) {
		return simde_mm_setzero_si128();
	}
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
	r_.i16 = a_.i16 << (imm8 & 0xff);
#else
	const int s =
		(imm8 >
		 HEDLEY_STATIC_CAST(int, sizeof(r_.i16[0]) * CHAR_BIT) - 1)
			? 0
			: imm8;
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = HEDLEY_STATIC_CAST(int16_t, a_.i16[i] << s);
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_slli_epi16(a, imm8) _mm_slli_epi16(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_slli_epi16(a, imm8) \
	(__extension__({ \
		simde__m128i ret; \
		if ((imm8) <= 0) { \
			ret = a; \
		} else if ((imm8) > 15) { \
			ret = simde_mm_setzero_si128(); \
		} else { \
			ret = simde__m128i_from_neon_i16(vshlq_n_s16( \
				simde__m128i_to_neon_i16(a), ((imm8)&15))); \
		} \
		ret; \
	}))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_slli_epi16(a, imm8) \
	((imm8 < 16) \
		 ? wasm_i16x8_shl(simde__m128i_to_private(a).wasm_v128, imm8) \
		 : wasm_i16x8_const(0, 0, 0, 0, 0, 0, 0, 0))
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
#define simde_mm_slli_epi16(a, imm8) \
	((imm8 & ~15) ? simde_mm_setzero_si128() \
		      : simde__m128i_from_altivec_i16( \
				vec_sl(simde__m128i_to_altivec_i16(a), \
				       vec_splat_u16(HEDLEY_STATIC_CAST( \
					       unsigned short, imm8)))))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_slli_epi16(a, imm8) simde_mm_slli_epi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_slli_epi32(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	if (HEDLEY_UNLIKELY((imm8 > 31))) {
		return simde_mm_setzero_si128();
	}
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
	r_.i32 = a_.i32 << imm8;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = a_.i32[i] << (imm8 & 0xff);
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_slli_epi32(a, imm8) _mm_slli_epi32(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_slli_epi32(a, imm8) \
	(__extension__({ \
		simde__m128i ret; \
		if ((imm8) <= 0) { \
			ret = a; \
		} else if ((imm8) > 31) { \
			ret = simde_mm_setzero_si128(); \
		} else { \
			ret = simde__m128i_from_neon_i32(vshlq_n_s32( \
				simde__m128i_to_neon_i32(a), ((imm8)&31))); \
		} \
		ret; \
	}))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_slli_epi32(a, imm8) \
	((imm8 < 32) \
		 ? wasm_i32x4_shl(simde__m128i_to_private(a).wasm_v128, imm8) \
		 : wasm_i32x4_const(0, 0, 0, 0))
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
#define simde_mm_slli_epi32(a, imm8) \
	(__extension__({ \
		simde__m128i ret; \
		if ((imm8) <= 0) { \
			ret = a; \
		} else if ((imm8) > 31) { \
			ret = simde_mm_setzero_si128(); \
		} else { \
			ret = simde__m128i_from_altivec_i32( \
				vec_sl(simde__m128i_to_altivec_i32(a), \
				       vec_splats(HEDLEY_STATIC_CAST( \
					       unsigned int, (imm8)&31)))); \
		} \
		ret; \
	}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_slli_epi32(a, imm8) simde_mm_slli_epi32(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_slli_epi64(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	if (HEDLEY_UNLIKELY((imm8 > 63))) {
		return simde_mm_setzero_si128();
	}
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
	r_.i64 = a_.i64 << imm8;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		r_.i64[i] = a_.i64[i] << (imm8 & 0xff);
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_slli_epi64(a, imm8) _mm_slli_epi64(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_slli_epi64(a, imm8) \
	(__extension__({ \
		simde__m128i ret; \
		if ((imm8) <= 0) { \
			ret = a; \
		} else if ((imm8) > 63) { \
			ret = simde_mm_setzero_si128(); \
		} else { \
			ret = simde__m128i_from_neon_i64(vshlq_n_s64( \
				simde__m128i_to_neon_i64(a), ((imm8)&63))); \
		} \
		ret; \
	}))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_slli_epi64(a, imm8) \
	((imm8 < 64) \
		 ? wasm_i64x2_shl(simde__m128i_to_private(a).wasm_v128, imm8) \
		 : wasm_i64x2_const(0, 0))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_slli_epi64(a, imm8) simde_mm_slli_epi64(a, imm8)
#endif
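/* simde_mm_srli_epi{16,32,64}: logical right shift by an immediate;
 * imm8 values at or beyond the lane width produce zero. In the NEON
 * macros below, the "| (((imm8)&N) == 0)" term only keeps the shift
 * amount inside vshrq_n's 1..N immediate range on the branch that is
 * never taken for a shift of zero (both branches must still compile). */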
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_srli_epi16(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	if (HEDLEY_UNLIKELY((imm8 > 15))) {
		return simde_mm_setzero_si128();
	}
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
	r_.u16 = a_.u16 >> imm8;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.u16[i] = a_.u16[i] >> (imm8 & 0xff);
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srli_epi16(a, imm8) _mm_srli_epi16(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_srli_epi16(a, imm8) \
	(__extension__({ \
		simde__m128i ret; \
		if ((imm8) <= 0) { \
			ret = a; \
		} else if ((imm8) > 15) { \
			ret = simde_mm_setzero_si128(); \
		} else { \
			ret = simde__m128i_from_neon_u16(vshrq_n_u16( \
				simde__m128i_to_neon_u16(a), \
				(((imm8)&15) | (((imm8)&15) == 0)))); \
		} \
		ret; \
	}))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_srli_epi16(a, imm8) \
	((imm8 < 16) \
		 ? wasm_u16x8_shr(simde__m128i_to_private(a).wasm_v128, imm8) \
		 : wasm_i16x8_const(0, 0, 0, 0, 0, 0, 0, 0))
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
#define simde_mm_srli_epi16(a, imm8) \
	((imm8 & ~15) ? simde_mm_setzero_si128() \
		      : simde__m128i_from_altivec_i16( \
				vec_sr(simde__m128i_to_altivec_i16(a), \
				       vec_splat_u16(HEDLEY_STATIC_CAST( \
					       unsigned short, imm8)))))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srli_epi16(a, imm8) simde_mm_srli_epi16(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_srli_epi32(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	if (HEDLEY_UNLIKELY((imm8 > 31))) {
		return simde_mm_setzero_si128();
	}
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
	r_.u32 = a_.u32 >> (imm8 & 0xff);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.u32[i] = a_.u32[i] >> (imm8 & 0xff);
	}
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srli_epi32(a, imm8) _mm_srli_epi32(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_srli_epi32(a, imm8) \
	(__extension__({ \
		simde__m128i ret; \
		if ((imm8) <= 0) { \
			ret = a; \
		} else if ((imm8) > 31) { \
			ret = simde_mm_setzero_si128(); \
		} else { \
			ret = simde__m128i_from_neon_u32(vshrq_n_u32( \
				simde__m128i_to_neon_u32(a), \
				(((imm8)&31) | (((imm8)&31) == 0)))); \
		} \
		ret; \
	}))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_srli_epi32(a, imm8) \
	((imm8 < 32) \
		 ? wasm_u32x4_shr(simde__m128i_to_private(a).wasm_v128, imm8) \
		 : wasm_i32x4_const(0, 0, 0, 0))
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
#define simde_mm_srli_epi32(a, imm8) \
	(__extension__({ \
		simde__m128i ret; \
		if ((imm8) <= 0) { \
			ret = a; \
		} else if ((imm8) > 31) { \
			ret = simde_mm_setzero_si128(); \
		} else { \
			ret = simde__m128i_from_altivec_i32( \
				vec_sr(simde__m128i_to_altivec_i32(a), \
				       vec_splats(HEDLEY_STATIC_CAST( \
					       unsigned int, (imm8)&31)))); \
		} \
		ret; \
	}))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srli_epi32(a, imm8) simde_mm_srli_epi32(a, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_srli_epi64(simde__m128i a, const int imm8)
	SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a);
	if (HEDLEY_UNLIKELY((imm8 & 63) != imm8))
		return simde_mm_setzero_si128();
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u64 = vshlq_u64(a_.neon_u64, vdupq_n_s64(-imm8));
#else
#if defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && !defined(SIMDE_BUG_GCC_94488)
	r_.u64 = a_.u64 >> imm8;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		r_.u64[i] = a_.u64[i] >> imm8;
	}
#endif
#endif
	return simde__m128i_from_private(r_);
}
#if defined(SIMDE_X86_SSE2_NATIVE)
#define simde_mm_srli_epi64(a, imm8) _mm_srli_epi64(a, imm8)
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define simde_mm_srli_epi64(a, imm8) \
	(__extension__({ \
		simde__m128i ret; \
		if ((imm8) <= 0) { \
			ret = a; \
		} else if ((imm8) > 63) { \
			ret = simde_mm_setzero_si128(); \
		} else { \
			ret = simde__m128i_from_neon_u64(vshrq_n_u64( \
				simde__m128i_to_neon_u64(a), \
				(((imm8)&63) | (((imm8)&63) == 0)))); \
		} \
		ret; \
	}))
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
#define simde_mm_srli_epi64(a, imm8) \
	((imm8 < 64) \
		 ? wasm_u64x2_shr(simde__m128i_to_private(a).wasm_v128, imm8) \
		 : wasm_i64x2_const(0, 0))
#endif
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_srli_epi64(a, imm8) simde_mm_srli_epi64(a, imm8)
#endif
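/* Store family: write vectors to memory. _mm_store_* expects suitably
 * aligned addresses while _mm_storeu_* is unaligned; store1/_pd1 splat
 * the low double to both slots, storer stores the lanes reversed, and
 * storel/storeh store only the low or high element. */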
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_store_pd(simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)],
		       simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_store_pd(mem_addr, a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	vst1q_f64(mem_addr, simde__m128d_to_private(a).neon_f64);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	vst1q_s64(HEDLEY_REINTERPRET_CAST(int64_t *, mem_addr),
		  simde__m128d_to_private(a).neon_i64);
#else
	simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128d), &a,
		     sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_store_pd(mem_addr, a) \
	simde_mm_store_pd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_store1_pd(simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)],
			simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_store1_pd(mem_addr, a);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	vst1q_f64(mem_addr, vdupq_laneq_f64(a_.neon_f64, 0));
#else
	mem_addr[0] = a_.f64[0];
	mem_addr[1] = a_.f64[0];
#endif
#endif
}
#define simde_mm_store_pd1(mem_addr, a) \
	simde_mm_store1_pd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_store1_pd(mem_addr, a) \
	simde_mm_store1_pd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#define _mm_store_pd1(mem_addr, a) \
	simde_mm_store_pd1(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_store_sd(simde_float64 *mem_addr, simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_store_sd(mem_addr, a);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	const simde_float64 v = vgetq_lane_f64(a_.neon_f64, 0);
	simde_memcpy(mem_addr, &v, sizeof(v));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	const int64_t v = vgetq_lane_s64(a_.neon_i64, 0);
	simde_memcpy(HEDLEY_REINTERPRET_CAST(int64_t *, mem_addr), &v,
		     sizeof(v));
#else
	simde_float64 v = a_.f64[0];
	simde_memcpy(mem_addr, &v, sizeof(simde_float64));
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_store_sd(mem_addr, a) \
	simde_mm_store_sd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_store_si128(simde__m128i *mem_addr, simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_store_si128(HEDLEY_STATIC_CAST(__m128i *, mem_addr), a);
#else
	simde__m128i_private a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	vst1q_s32(HEDLEY_REINTERPRET_CAST(int32_t *, mem_addr), a_.neon_i32);
#else
	simde_memcpy(SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128i), &a_,
		     sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_store_si128(mem_addr, a) simde_mm_store_si128(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storeh_pd(simde_float64 *mem_addr, simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_storeh_pd(mem_addr, a);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	*mem_addr = vgetq_lane_f64(a_.neon_f64, 1);
#else
	*mem_addr = a_.f64[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeh_pd(mem_addr, a) \
	simde_mm_storeh_pd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storel_epi64(simde__m128i *mem_addr, simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_storel_epi64(HEDLEY_STATIC_CAST(__m128i *, mem_addr), a);
#else
	simde__m128i_private a_ = simde__m128i_to_private(a);
	int64_t tmp;
	/* memcpy to prevent aliasing, tmp because we can't take the
	 * address of a vector element. */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	tmp = vgetq_lane_s64(a_.neon_i64, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
#if defined(SIMDE_BUG_GCC_95227)
	(void)a_;
#endif
	tmp = vec_extract(a_.altivec_i64, 0);
#else
	tmp = a_.i64[0];
#endif
	simde_memcpy(mem_addr, &tmp, sizeof(tmp));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storel_epi64(mem_addr, a) simde_mm_storel_epi64(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storel_pd(simde_float64 *mem_addr, simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_storel_pd(mem_addr, a);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
	simde_float64 tmp;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	tmp = vgetq_lane_f64(a_.neon_f64, 0);
#else
	tmp = a_.f64[0];
#endif
	simde_memcpy(mem_addr, &tmp, sizeof(tmp));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storel_pd(mem_addr, a) \
	simde_mm_storel_pd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storer_pd(simde_float64 mem_addr[2], simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_storer_pd(mem_addr, a);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	vst1q_s64(HEDLEY_REINTERPRET_CAST(int64_t *, mem_addr),
		  vextq_s64(a_.neon_i64, a_.neon_i64, 1));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
	a_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, a_.f64, 1, 0);
	simde_mm_store_pd(mem_addr, simde__m128d_from_private(a_));
#else
	mem_addr[0] = a_.f64[1];
	mem_addr[1] = a_.f64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storer_pd(mem_addr, a) \
	simde_mm_storer_pd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storeu_pd(simde_float64 *mem_addr, simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_storeu_pd(mem_addr, a);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	vst1q_f64(mem_addr, simde__m128d_to_private(a).neon_f64);
#else
	simde_memcpy(mem_addr, &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_pd(mem_addr, a) \
	simde_mm_storeu_pd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storeu_si128(simde__m128i *mem_addr, simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_storeu_si128(HEDLEY_STATIC_CAST(__m128i *, mem_addr), a);
#else
	simde_memcpy(mem_addr, &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_si128(mem_addr, a) simde_mm_storeu_si128(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storeu_si16(void *mem_addr, simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(SIMDE_DETECT_CLANG_VERSION_CHECK(8, 0, 0) || \
	 HEDLEY_GCC_VERSION_CHECK(11, 0, 0) || \
	 HEDLEY_INTEL_VERSION_CHECK(20, 21, 1))
	_mm_storeu_si16(mem_addr, a);
#else
	int16_t val = simde_x_mm_cvtsi128_si16(a);
	simde_memcpy(mem_addr, &val, sizeof(val));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_si16(mem_addr, a) simde_mm_storeu_si16(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storeu_si32(void *mem_addr, simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(SIMDE_DETECT_CLANG_VERSION_CHECK(8, 0, 0) || \
	 HEDLEY_GCC_VERSION_CHECK(11, 0, 0) || \
	 HEDLEY_INTEL_VERSION_CHECK(20, 21, 1))
	_mm_storeu_si32(mem_addr, a);
#else
	int32_t val = simde_mm_cvtsi128_si32(a);
	simde_memcpy(mem_addr, &val, sizeof(val));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_si32(mem_addr, a) simde_mm_storeu_si32(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_storeu_si64(void *mem_addr, simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && \
	(SIMDE_DETECT_CLANG_VERSION_CHECK(8, 0, 0) || \
	 HEDLEY_GCC_VERSION_CHECK(11, 0, 0) || \
	 HEDLEY_INTEL_VERSION_CHECK(20, 21, 1))
	_mm_storeu_si64(mem_addr, a);
#else
	int64_t val = simde_mm_cvtsi128_si64(a);
	simde_memcpy(mem_addr, &val, sizeof(val));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_storeu_si64(mem_addr, a) simde_mm_storeu_si64(mem_addr, a)
#endif
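/* Stream family: non-temporal (cache-bypassing) stores. The fallbacks
 * degrade to ordinary stores, which are functionally identical; only
 * the cache hint is lost. */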
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_stream_pd(simde_float64 mem_addr[HEDLEY_ARRAY_PARAM(2)],
			simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_stream_pd(mem_addr, a);
#else
	simde_memcpy(mem_addr, &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_stream_pd(mem_addr, a) \
	simde_mm_stream_pd(HEDLEY_REINTERPRET_CAST(double *, mem_addr), a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_stream_si128(simde__m128i *mem_addr, simde__m128i a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64)
	_mm_stream_si128(HEDLEY_STATIC_CAST(__m128i *, mem_addr), a);
#else
	simde_memcpy(mem_addr, &a, sizeof(a));
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_stream_si128(mem_addr, a) simde_mm_stream_si128(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_stream_si32(int32_t *mem_addr, int32_t a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_stream_si32(mem_addr, a);
#else
	*mem_addr = a;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_stream_si32(mem_addr, a) simde_mm_stream_si32(mem_addr, a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_stream_si64(int64_t *mem_addr, int64_t a)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_ARCH_AMD64) && \
	!defined(HEDLEY_MSVC_VERSION)
	_mm_stream_si64(SIMDE_CHECKED_REINTERPRET_CAST(long long int *,
						       int64_t *, mem_addr),
			a);
#else
	*mem_addr = a;
#endif
}
#define simde_mm_stream_si64x(mem_addr, a) simde_mm_stream_si64(mem_addr, a)
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_stream_si64(mem_addr, a) \
	simde_mm_stream_si64(SIMDE_CHECKED_REINTERPRET_CAST( \
				     int64_t *, __int64 *, mem_addr), \
			     a)
#define _mm_stream_si64x(mem_addr, a) \
	simde_mm_stream_si64(SIMDE_CHECKED_REINTERPRET_CAST( \
				     int64_t *, __int64 *, mem_addr), \
			     a)
#endif
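/* simde_mm_sub_*: lane-wise wrapping subtraction, plus the simde-only
 * simde_x_mm_sub_epu32 helper for unsigned 32-bit lanes. */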
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sub_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sub_epi8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i8 = vsubq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i8 = a_.i8 - b_.i8;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i8) / sizeof(r_.i8[0])); i++) {
		r_.i8[i] = a_.i8[i] - b_.i8[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_epi8(a, b) simde_mm_sub_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sub_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sub_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vsubq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i16 = a_.i16 - b_.i16;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i16) / sizeof(r_.i16[0])); i++) {
		r_.i16[i] = a_.i16[i] - b_.i16[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_epi16(a, b) simde_mm_sub_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sub_epi32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sub_epi32(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i32 = vsubq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i32 = a_.i32 - b_.i32;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i32) / sizeof(r_.i32[0])); i++) {
		r_.i32[i] = a_.i32[i] - b_.i32[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_epi32(a, b) simde_mm_sub_epi32(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_sub_epi64(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sub_epi64(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vsubq_s64(a_.neon_i64, b_.neon_i64);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = a_.i64 - b_.i64;
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.i64) / sizeof(r_.i64[0])); i++) {
		r_.i64[i] = a_.i64[i] - b_.i64[i];
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_epi64(a, b) simde_mm_sub_epi64(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_sub_epu32(simde__m128i a, simde__m128i b)
{
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.u32 = a_.u32 - b_.u32;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u32 = vsubq_u32(a_.neon_u32, b_.neon_u32);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.u32) / sizeof(r_.u32[0])); i++) {
		r_.u32[i] = a_.u32[i] - b_.u32[i];
	}
#endif
	return simde__m128i_from_private(r_);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_sub_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sub_pd(a, b);
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.f64 = a_.f64 - b_.f64;
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	r_.neon_f64 = vsubq_f64(a_.neon_f64, b_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_f64x2_sub(a_.wasm_v128, b_.wasm_v128);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
		r_.f64[i] = a_.f64[i] - b_.f64[i];
	}
#endif
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_pd(a, b) simde_mm_sub_pd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_sub_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_sub_sd(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
	return simde_mm_move_sd(a, simde_mm_sub_pd(a, b));
#else
	simde__m128d_private r_, a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	r_.f64[0] = a_.f64[0] - b_.f64[0];
	r_.f64[1] = a_.f64[1];
	return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_sd(a, b) simde_mm_sub_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64 simde_mm_sub_si64(simde__m64 a, simde__m64 b)
{
#if defined(SIMDE_X86_SSE2_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
	return _mm_sub_si64(a, b);
#else
	simde__m64_private r_, a_ = simde__m64_to_private(a),
			   b_ = simde__m64_to_private(b);
#if defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
	r_.i64 = a_.i64 - b_.i64;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i64 = vsub_s64(a_.neon_i64, b_.neon_i64);
#else
	r_.i64[0] = a_.i64[0] - b_.i64[0];
#endif
	return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_sub_si64(a, b) simde_mm_sub_si64(a, b)
#endif
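/* simde_mm_subs_*: saturating subtraction; results clamp to the lane
 * type's representable range instead of wrapping. */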
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_subs_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_subs_epi8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i8 = vqsubq_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i8x16_sub_saturate(a_.wasm_v128, b_.wasm_v128);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_) / sizeof(r_.i8[0])); i++) {
		if (((b_.i8[i]) > 0 && (a_.i8[i]) < INT8_MIN + (b_.i8[i]))) {
			r_.i8[i] = INT8_MIN;
		} else if ((b_.i8[i]) < 0 &&
			   (a_.i8[i]) > INT8_MAX + (b_.i8[i])) {
			r_.i8[i] = INT8_MAX;
		} else {
			r_.i8[i] = (a_.i8[i]) - (b_.i8[i]);
		}
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_subs_epi8(a, b) simde_mm_subs_epi8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_subs_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_subs_epi16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_i16 = vqsubq_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_i16x8_sub_saturate(a_.wasm_v128, b_.wasm_v128);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_) / sizeof(r_.i16[0])); i++) {
		if (((b_.i16[i]) > 0 &&
		     (a_.i16[i]) < INT16_MIN + (b_.i16[i]))) {
			r_.i16[i] = INT16_MIN;
		} else if ((b_.i16[i]) < 0 &&
			   (a_.i16[i]) > INT16_MAX + (b_.i16[i])) {
			r_.i16[i] = INT16_MAX;
		} else {
			r_.i16[i] = (a_.i16[i]) - (b_.i16[i]);
		}
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_subs_epi16(a, b) simde_mm_subs_epi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_subs_epu8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_subs_epu8(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u8 = vqsubq_u8(a_.neon_u8, b_.neon_u8);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u8x16_sub_saturate(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u8 = vec_subs(a_.altivec_u8, b_.altivec_u8);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_) / sizeof(r_.i8[0])); i++) {
		const int32_t x = a_.u8[i] - b_.u8[i];
		if (x < 0) {
			r_.u8[i] = 0;
		} else if (x > UINT8_MAX) {
			r_.u8[i] = UINT8_MAX;
		} else {
			r_.u8[i] = HEDLEY_STATIC_CAST(uint8_t, x);
		}
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_subs_epu8(a, b) simde_mm_subs_epu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_subs_epu16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_subs_epu16(a, b);
#else
	simde__m128i_private r_, a_ = simde__m128i_to_private(a),
			     b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
	r_.neon_u16 = vqsubq_u16(a_.neon_u16, b_.neon_u16);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	r_.wasm_v128 = wasm_u16x8_sub_saturate(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
	r_.altivec_u16 = vec_subs(a_.altivec_u16, b_.altivec_u16);
#else
	SIMDE_VECTORIZE
	for (size_t i = 0; i < (sizeof(r_) / sizeof(r_.i16[0])); i++) {
		const int32_t x = a_.u16[i] - b_.u16[i];
		if (x < 0) {
			r_.u16[i] = 0;
		} else if (x > UINT16_MAX) {
			r_.u16[i] = UINT16_MAX;
		} else {
			r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, x);
		}
	}
#endif
	return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_subs_epu16(a, b) simde_mm_subs_epu16(a, b)
#endif
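/* simde_mm_ucomi*_sd: unordered comparisons of the low doubles. On the
 * scalar fallback, feholdexcept()/fesetenv() save and restore the
 * floating-point environment so NaN operands do not leave exception
 * flags behind, mimicking the non-signaling ucomi* behavior. */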
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomieq_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_ucomieq_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
	uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
	uint64x2_t a_or_b_nan = vreinterpretq_u64_u32(vmvnq_u32(
		vreinterpretq_u32_u64(vandq_u64(a_not_nan, b_not_nan))));
	uint64x2_t a_eq_b = vceqq_f64(a_.neon_f64, b_.neon_f64);
	r = !!(vgetq_lane_u64(vorrq_u64(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) ==
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
	fenv_t envp;
	int x = feholdexcept(&envp);
	r = a_.f64[0] == b_.f64[0];
	if (HEDLEY_LIKELY(x == 0))
		fesetenv(&envp);
#else
	r = a_.f64[0] == b_.f64[0];
#endif
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomieq_sd(a, b) simde_mm_ucomieq_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomige_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_ucomige_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
	uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
	uint64x2_t a_and_b_not_nan = vandq_u64(a_not_nan, b_not_nan);
	uint64x2_t a_ge_b = vcgeq_f64(a_.neon_f64, b_.neon_f64);
	r = !!(vgetq_lane_u64(vandq_u64(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) >=
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
	fenv_t envp;
	int x = feholdexcept(&envp);
	r = a_.f64[0] >= b_.f64[0];
	if (HEDLEY_LIKELY(x == 0))
		fesetenv(&envp);
#else
	r = a_.f64[0] >= b_.f64[0];
#endif
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomige_sd(a, b) simde_mm_ucomige_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomigt_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_ucomigt_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
	uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
	uint64x2_t a_and_b_not_nan = vandq_u64(a_not_nan, b_not_nan);
	uint64x2_t a_gt_b = vcgtq_f64(a_.neon_f64, b_.neon_f64);
	r = !!(vgetq_lane_u64(vandq_u64(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) >
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
	fenv_t envp;
	int x = feholdexcept(&envp);
	r = a_.f64[0] > b_.f64[0];
	if (HEDLEY_LIKELY(x == 0))
		fesetenv(&envp);
#else
	r = a_.f64[0] > b_.f64[0];
#endif
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomigt_sd(a, b) simde_mm_ucomigt_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomile_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_ucomile_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
	uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
	uint64x2_t a_or_b_nan = vreinterpretq_u64_u32(vmvnq_u32(
		vreinterpretq_u32_u64(vandq_u64(a_not_nan, b_not_nan))));
	uint64x2_t a_le_b = vcleq_f64(a_.neon_f64, b_.neon_f64);
	r = !!(vgetq_lane_u64(vorrq_u64(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) <=
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
	fenv_t envp;
	int x = feholdexcept(&envp);
	r = a_.f64[0] <= b_.f64[0];
	if (HEDLEY_LIKELY(x == 0))
		fesetenv(&envp);
#else
	r = a_.f64[0] <= b_.f64[0];
#endif
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomile_sd(a, b) simde_mm_ucomile_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomilt_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_ucomilt_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
	uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
	uint64x2_t a_or_b_nan = vreinterpretq_u64_u32(vmvnq_u32(
		vreinterpretq_u32_u64(vandq_u64(a_not_nan, b_not_nan))));
	uint64x2_t a_lt_b = vcltq_f64(a_.neon_f64, b_.neon_f64);
	r = !!(vgetq_lane_u64(vorrq_u64(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) <
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
	fenv_t envp;
	int x = feholdexcept(&envp);
	r = a_.f64[0] < b_.f64[0];
	if (HEDLEY_LIKELY(x == 0))
		fesetenv(&envp);
#else
	r = a_.f64[0] < b_.f64[0];
#endif
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomilt_sd(a, b) simde_mm_ucomilt_sd(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int simde_mm_ucomineq_sd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	return _mm_ucomineq_sd(a, b);
#else
	simde__m128d_private a_ = simde__m128d_to_private(a),
			     b_ = simde__m128d_to_private(b);
	int r;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
	uint64x2_t a_not_nan = vceqq_f64(a_.neon_f64, a_.neon_f64);
	uint64x2_t b_not_nan = vceqq_f64(b_.neon_f64, b_.neon_f64);
	uint64x2_t a_and_b_not_nan = vandq_u64(a_not_nan, b_not_nan);
	uint64x2_t a_neq_b = vreinterpretq_u64_u32(vmvnq_u32(
		vreinterpretq_u32_u64(vceqq_f64(a_.neon_f64, b_.neon_f64))));
	r = !!(vgetq_lane_u64(vandq_u64(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
	return wasm_f64x2_extract_lane(a_.wasm_v128, 0) !=
	       wasm_f64x2_extract_lane(b_.wasm_v128, 0);
#elif defined(SIMDE_HAVE_FENV_H)
	fenv_t envp;
	int x = feholdexcept(&envp);
	r = a_.f64[0] != b_.f64[0];
	if (HEDLEY_LIKELY(x == 0))
		fesetenv(&envp);
#else
	r = a_.f64[0] != b_.f64[0];
#endif
	return r;
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_ucomineq_sd(a, b) simde_mm_ucomineq_sd(a, b)
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
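/* simde_mm_lfence / simde_mm_mfence: load and full memory fences. The
 * fallback reuses simde_mm_sfence(), which this library implements as
 * a full thread fence when no native store fence exists, so ordering
 * is preserved (possibly more strongly than strictly required). */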
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_lfence(void)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_lfence();
#else
	simde_mm_sfence();
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_lfence() simde_mm_lfence()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void simde_mm_mfence(void)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
	_mm_mfence();
#else
	simde_mm_sfence();
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_mfence() simde_mm_mfence()
#endif
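/* simde_mm_unpackhi_epi{8,16,32}: interleave the high halves of a and
 * b; for 32-bit lanes the result is { a2, b2, a3, b3 }. */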
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_unpackhi_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpackhi_epi8(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        r_.neon_i8 = vzip2q_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        int8x8_t a1 = vreinterpret_s8_s16(vget_high_s16(a_.neon_i16));
        int8x8_t b1 = vreinterpret_s8_s16(vget_high_s16(b_.neon_i16));
        int8x8x2_t result = vzip_s8(a1, b1);
        r_.neon_i8 = vcombine_s8(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, b_.i8, 8, 24, 9, 25,
                                      10, 26, 11, 27, 12, 28, 13, 29,
                                      14, 30, 15, 31);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.i8[0])) / 2); i++) {
                r_.i8[(i * 2)] =
                        a_.i8[i + ((sizeof(r_) / sizeof(r_.i8[0])) / 2)];
                r_.i8[(i * 2) + 1] =
                        b_.i8[i + ((sizeof(r_) / sizeof(r_.i8[0])) / 2)];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_epi8(a, b) simde_mm_unpackhi_epi8(a, b)
#endif
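/* simde_mm_unpackhi_epi16: interleave the four 16-bit elements from the
 * high halves of a and b: { a4, b4, a5, b5, a6, b6, a7, b7 }. */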
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_unpackhi_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpackhi_epi16(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        r_.neon_i16 = vzip2q_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        int16x4_t a1 = vget_high_s16(a_.neon_i16);
        int16x4_t b1 = vget_high_s16(b_.neon_i16);
        int16x4x2_t result = vzip_s16(a1, b1);
        r_.neon_i16 = vcombine_s16(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.i16, b_.i16, 4, 12, 5, 13,
                                       6, 14, 7, 15);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.i16[0])) / 2); i++) {
                r_.i16[(i * 2)] =
                        a_.i16[i + ((sizeof(r_) / sizeof(r_.i16[0])) / 2)];
                r_.i16[(i * 2) + 1] =
                        b_.i16[i + ((sizeof(r_) / sizeof(r_.i16[0])) / 2)];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_epi16(a, b) simde_mm_unpackhi_epi16(a, b)
#endif
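/* simde_mm_unpackhi_epi32: interleave the two 32-bit elements from the
 * high halves of a and b: { a2, b2, a3, b3 }. */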
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_unpackhi_epi32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpackhi_epi32(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        r_.neon_i32 = vzip2q_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        int32x2_t a1 = vget_high_s32(a_.neon_i32);
        int32x2_t b1 = vget_high_s32(b_.neon_i32);
        int32x2x2_t result = vzip_s32(a1, b1);
        r_.neon_i32 = vcombine_s32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.i32, b_.i32, 2, 6, 3, 7);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.i32[0])) / 2); i++) {
                r_.i32[(i * 2)] =
                        a_.i32[i + ((sizeof(r_) / sizeof(r_.i32[0])) / 2)];
                r_.i32[(i * 2) + 1] =
                        b_.i32[i + ((sizeof(r_) / sizeof(r_.i32[0])) / 2)];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_epi32(a, b) simde_mm_unpackhi_epi32(a, b)
#endif
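/* simde_mm_unpackhi_epi64: concatenate the high 64-bit elements of a
 * and b: { a1, b1 }. */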
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_unpackhi_epi64(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpackhi_epi64(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        int64x1_t a_h = vget_high_s64(a_.neon_i64);
        int64x1_t b_h = vget_high_s64(b_.neon_i64);
        r_.neon_i64 = vcombine_s64(a_h, b_h);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.i64, b_.i64, 1, 3);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.i64[0])) / 2); i++) {
                r_.i64[(i * 2)] =
                        a_.i64[i + ((sizeof(r_) / sizeof(r_.i64[0])) / 2)];
                r_.i64[(i * 2) + 1] =
                        b_.i64[i + ((sizeof(r_) / sizeof(r_.i64[0])) / 2)];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_epi64(a, b) simde_mm_unpackhi_epi64(a, b)
#endif
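/* simde_mm_unpackhi_pd: concatenate the high double-precision lanes of
 * a and b: { a1, b1 }. */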
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_unpackhi_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpackhi_pd(a, b);
#else
        simde__m128d_private r_, a_ = simde__m128d_to_private(a),
                             b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        /* Take the high half of each operand via the float64x2_t view
         * (neon_f64); the portable f64 member is not a NEON vector. */
        float64x1_t a_h = vget_high_f64(a_.neon_f64);
        float64x1_t b_h = vget_high_f64(b_.neon_f64);
        r_.neon_f64 = vcombine_f64(a_h, b_h);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
        r_.wasm_v128 = wasm_v64x2_shuffle(a_.wasm_v128, b_.wasm_v128, 1, 3);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 1, 3);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.f64[0])) / 2); i++) {
                r_.f64[(i * 2)] =
                        a_.f64[i + ((sizeof(r_) / sizeof(r_.f64[0])) / 2)];
                r_.f64[(i * 2) + 1] =
                        b_.f64[i + ((sizeof(r_) / sizeof(r_.f64[0])) / 2)];
        }
#endif
        return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpackhi_pd(a, b) simde_mm_unpackhi_pd(a, b)
#endif
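/* simde_mm_unpacklo_epi8: interleave the eight bytes from the low
 * halves of a and b: { a0, b0, a1, b1, ..., a7, b7 }. */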
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_unpacklo_epi8(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpacklo_epi8(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        r_.neon_i8 = vzip1q_s8(a_.neon_i8, b_.neon_i8);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        int8x8_t a1 = vreinterpret_s8_s16(vget_low_s16(a_.neon_i16));
        int8x8_t b1 = vreinterpret_s8_s16(vget_low_s16(b_.neon_i16));
        int8x8x2_t result = vzip_s8(a1, b1);
        r_.neon_i8 = vcombine_s8(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.i8 = SIMDE_SHUFFLE_VECTOR_(8, 16, a_.i8, b_.i8, 0, 16, 1, 17,
                                      2, 18, 3, 19, 4, 20, 5, 21, 6, 22,
                                      7, 23);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.i8[0])) / 2); i++) {
                r_.i8[(i * 2)] = a_.i8[i];
                r_.i8[(i * 2) + 1] = b_.i8[i];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_epi8(a, b) simde_mm_unpacklo_epi8(a, b)
#endif
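/* simde_mm_unpacklo_epi16: interleave the four 16-bit elements from the
 * low halves of a and b: { a0, b0, a1, b1, a2, b2, a3, b3 }. */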
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_unpacklo_epi16(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpacklo_epi16(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        r_.neon_i16 = vzip1q_s16(a_.neon_i16, b_.neon_i16);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        int16x4_t a1 = vget_low_s16(a_.neon_i16);
        int16x4_t b1 = vget_low_s16(b_.neon_i16);
        int16x4x2_t result = vzip_s16(a1, b1);
        r_.neon_i16 = vcombine_s16(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.i16 = SIMDE_SHUFFLE_VECTOR_(16, 16, a_.i16, b_.i16, 0, 8, 1, 9,
                                       2, 10, 3, 11);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.i16[0])) / 2); i++) {
                r_.i16[(i * 2)] = a_.i16[i];
                r_.i16[(i * 2) + 1] = b_.i16[i];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_epi16(a, b) simde_mm_unpacklo_epi16(a, b)
#endif
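/* simde_mm_unpacklo_epi32: interleave the two 32-bit elements from the
 * low halves of a and b: { a0, b0, a1, b1 }. */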
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_unpacklo_epi32(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpacklo_epi32(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        r_.neon_i32 = vzip1q_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        int32x2_t a1 = vget_low_s32(a_.neon_i32);
        int32x2_t b1 = vget_low_s32(b_.neon_i32);
        int32x2x2_t result = vzip_s32(a1, b1);
        r_.neon_i32 = vcombine_s32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.i32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.i32, b_.i32, 0, 4, 1, 5);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.i32[0])) / 2); i++) {
                r_.i32[(i * 2)] = a_.i32[i];
                r_.i32[(i * 2) + 1] = b_.i32[i];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_epi32(a, b) simde_mm_unpacklo_epi32(a, b)
#endif
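/* simde_mm_unpacklo_epi64: concatenate the low 64-bit elements of a and
 * b: { a0, b0 }. */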
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_unpacklo_epi64(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpacklo_epi64(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        /* Use the int64x2_t view (neon_i64); the portable i64 member is
         * not a NEON vector type. */
        int64x1_t a_l = vget_low_s64(a_.neon_i64);
        int64x1_t b_l = vget_low_s64(b_.neon_i64);
        r_.neon_i64 = vcombine_s64(a_l, b_l);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.i64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.i64, b_.i64, 0, 2);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.i64[0])) / 2); i++) {
                r_.i64[(i * 2)] = a_.i64[i];
                r_.i64[(i * 2) + 1] = b_.i64[i];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_epi64(a, b) simde_mm_unpacklo_epi64(a, b)
#endif
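/* simde_mm_unpacklo_pd: concatenate the low double-precision lanes of a
 * and b: { a0, b0 }. */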
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_mm_unpacklo_pd(simde__m128d a, simde__m128d b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_unpacklo_pd(a, b);
#else
        simde__m128d_private r_, a_ = simde__m128d_to_private(a),
                             b_ = simde__m128d_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        /* As above, neon_f64 is the float64x2_t view of the private type. */
        float64x1_t a_l = vget_low_f64(a_.neon_f64);
        float64x1_t b_l = vget_low_f64(b_.neon_f64);
        r_.neon_f64 = vcombine_f64(a_l, b_l);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
        r_.f64 = SIMDE_SHUFFLE_VECTOR_(64, 16, a_.f64, b_.f64, 0, 2);
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < ((sizeof(r_) / sizeof(r_.f64[0])) / 2); i++) {
                r_.f64[(i * 2)] = a_.f64[i];
                r_.f64[(i * 2) + 1] = b_.f64[i];
        }
#endif
        return simde__m128d_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_unpacklo_pd(a, b) simde_mm_unpacklo_pd(a, b)
#endif
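/* simde_x_mm_negate_pd: SIMDe-internal helper (hence no native-alias
 * macro) that negates both lanes; on x86 this is a single XOR with the
 * sign-bit mask (-0.0). */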
SIMDE_FUNCTION_ATTRIBUTES
simde__m128d simde_x_mm_negate_pd(simde__m128d a)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        /* _mm_set1_pd and _mm_xor_pd are SSE2 intrinsics, so this path
         * must be guarded by SSE2 (not merely SSE) support. */
        return simde_mm_xor_pd(a, _mm_set1_pd(SIMDE_FLOAT64_C(-0.0)));
#else
        simde__m128d_private r_, a_ = simde__m128d_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
        (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8, 1, 0))
        r_.altivec_f64 = vec_neg(a_.altivec_f64);
#elif defined(SIMDE_ARM_NEON_A64V8_NATIVE)
        r_.neon_f64 = vnegq_f64(a_.neon_f64);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
        r_.wasm_v128 = wasm_f64x2_neg(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_NEGATE)
        r_.f64 = -a_.f64;
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < (sizeof(r_.f64) / sizeof(r_.f64[0])); i++) {
                r_.f64[i] = -a_.f64[i];
        }
#endif
        return simde__m128d_from_private(r_);
#endif
}
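/* simde_mm_xor_si128: bitwise XOR of two 128-bit registers. */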
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_mm_xor_si128(simde__m128i a, simde__m128i b)
{
#if defined(SIMDE_X86_SSE2_NATIVE)
        return _mm_xor_si128(a, b);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a),
                             b_ = simde__m128i_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
        r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
        r_.i32f = a_.i32f ^ b_.i32f;
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
                r_.i32f[i] = a_.i32f[i] ^ b_.i32f[i];
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _mm_xor_si128(a, b) simde_mm_xor_si128(a, b)
#endif
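/* simde_x_mm_not_si128: SIMDe-internal bitwise NOT.  With AVX-512VL a
 * single ternary-logic instruction suffices: immediate 0x55 is the
 * truth table of ~a. */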
SIMDE_FUNCTION_ATTRIBUTES
simde__m128i simde_x_mm_not_si128(simde__m128i a)
{
#if defined(SIMDE_X86_AVX512VL_NATIVE)
        return _mm_ternarylogic_epi32(a, a, a, 0x55);
#else
        simde__m128i_private r_, a_ = simde__m128i_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
        r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
        r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
        r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
        r_.i32f = ~a_.i32f;
#else
        SIMDE_VECTORIZE
        for (size_t i = 0; i < (sizeof(r_.i32f) / sizeof(r_.i32f[0])); i++) {
                r_.i32f[i] = ~(a_.i32f[i]);
        }
#endif
        return simde__m128i_from_private(r_);
#endif
}
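/* SIMDE_MM_SHUFFLE2: pack two 1-bit lane selectors into the immediate
 * used by the 64-bit-lane shuffles; bit 0 (y) selects the lane taken
 * from the first operand, bit 1 (x) the lane from the second. */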
#define SIMDE_MM_SHUFFLE2(x, y) (((x) << 1) | (y))
#if defined(SIMDE_X86_SSE2_ENABLE_NATIVE_ALIASES)
#define _MM_SHUFFLE2(x, y) SIMDE_MM_SHUFFLE2(x, y)
#endif
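/* Illustrative use, assuming simde_mm_shuffle_pd() as declared elsewhere
 * in this header: SIMDE_MM_SHUFFLE2(1, 1) selects the high lane of each
 * operand, so
 *
 *     simde__m128d hi = simde_mm_shuffle_pd(a, b, SIMDE_MM_SHUFFLE2(1, 1));
 *
 * is equivalent to simde_mm_unpackhi_pd(a, b). */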
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE2_H) */