#ifndef SSE2NEON_H
#define SSE2NEON_H

// This header file provides a simple API translation layer
// between SSE intrinsics and their corresponding Arm/Aarch64 NEON versions
//
// This header file does not yet translate all of the SSE intrinsics.
//
// Contributors to this work are:
//   John W. Ratcliff <[email protected]>
//   Brandon Rowlett <[email protected]>
//   Ken Fast <[email protected]>
//   Eric van Beurden <[email protected]>
//   Alexander Potylitsin <[email protected]>
//   Hasindu Gamaarachchi <[email protected]>
//   Jim Huang <[email protected]>
//   Mark Cheng <[email protected]>
//   Malcolm James MacLeod <[email protected]>
//   Devin Hussey (easyaspi314) <[email protected]>
//   Sebastian Pop <[email protected]>
//   Developer Ecosystem Engineering <[email protected]>
//   Danila Kutenin <[email protected]>

/*
 * sse2neon is freely redistributable under the MIT License.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#if defined(__GNUC__) || defined(__clang__)
#pragma push_macro("FORCE_INLINE")
#pragma push_macro("ALIGN_STRUCT")
#define FORCE_INLINE static inline __attribute__((always_inline))
#define ALIGN_STRUCT(x) __attribute__((aligned(x)))
#else
#error "Macro name collisions may happen with unsupported compiler."
#ifdef FORCE_INLINE
#undef FORCE_INLINE
#endif
#define FORCE_INLINE static inline
#ifndef ALIGN_STRUCT
#define ALIGN_STRUCT(x) __declspec(align(x))
#endif
#endif

#include <stdint.h>
#include <stdlib.h>

#include <arm_neon.h>

/* "__has_builtin" can be used to query support for built-in functions
 * provided by gcc/clang and other compilers that support it.
 */
#ifndef __has_builtin /* GCC prior to 10 or non-clang compilers */
/* Compatibility with gcc <= 9 */
#if __GNUC__ <= 9
#define __has_builtin(x) HAS##x
#define HAS__builtin_popcount 1
#define HAS__builtin_popcountll 1
#else
#define __has_builtin(x) 0
#endif
#endif

/**
 * Macro for the shuffle parameter of _mm_shuffle_ps().
 * Argument fp3 is a digit [0123] that represents the fp from argument "b"
 * of mm_shuffle_ps that will be placed in fp3 of the result. fp2 is the same
 * for fp2 in the result. fp1 is a digit [0123] that represents the fp from
 * argument "a" of mm_shuffle_ps that will be placed in fp1 of the result.
 * fp0 is the same for fp0 of the result.
 */
#define _MM_SHUFFLE(fp3, fp2, fp1, fp0) \
    (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | ((fp0)))
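// For illustration, the selector is an 8-bit immediate built from four 2-bit
// lane indices, e.g.:
//   _MM_SHUFFLE(3, 2, 1, 0) == 0xE4  (binary 11 10 01 00, identity order)
//   _MM_SHUFFLE(0, 1, 2, 3) == 0x1B  (reverses the lane order)
// so _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0)) yields {a0, a1, b2, b3}.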
/* indicate immediate constant argument in a given range */
#define __constrange(a, b) const

/* A few intrinsics accept traditional data types like ints or floats, but
 * most operate on data types that are specific to SSE.
 * If a vector type ends in d, it contains doubles, and if it does not have
 * a suffix, it contains floats. An integer vector type can contain any type
 * of integer, from chars to shorts to unsigned long longs.
 */
typedef float32x2_t __m64;
typedef float32x4_t __m128; /* 128-bit vector containing 4 floats */
// On the ARM 32-bit architecture, the float64x2_t type is not supported.
// The data type __m128d should be represented in a different way for related
// intrinsic conversion.
#if defined(__aarch64__)
typedef float64x2_t __m128d; /* 128-bit vector containing 2 doubles */
#else
typedef float32x4_t __m128d;
#endif
typedef int64x1_t __m64i;
typedef int64x2_t __m128i; /* 128-bit vector containing integers */

/* type-safe casting between types */

#define vreinterpretq_m128_f16(x) vreinterpretq_f32_f16(x)
#define vreinterpretq_m128_f32(x) (x)
#define vreinterpretq_m128_f64(x) vreinterpretq_f32_f64(x)

#define vreinterpretq_m128_u8(x) vreinterpretq_f32_u8(x)
#define vreinterpretq_m128_u16(x) vreinterpretq_f32_u16(x)
#define vreinterpretq_m128_u32(x) vreinterpretq_f32_u32(x)
#define vreinterpretq_m128_u64(x) vreinterpretq_f32_u64(x)

#define vreinterpretq_m128_s8(x) vreinterpretq_f32_s8(x)
#define vreinterpretq_m128_s16(x) vreinterpretq_f32_s16(x)
#define vreinterpretq_m128_s32(x) vreinterpretq_f32_s32(x)
#define vreinterpretq_m128_s64(x) vreinterpretq_f32_s64(x)

#define vreinterpretq_f16_m128(x) vreinterpretq_f16_f32(x)
#define vreinterpretq_f32_m128(x) (x)
#define vreinterpretq_f64_m128(x) vreinterpretq_f64_f32(x)

#define vreinterpretq_u8_m128(x) vreinterpretq_u8_f32(x)
#define vreinterpretq_u16_m128(x) vreinterpretq_u16_f32(x)
#define vreinterpretq_u32_m128(x) vreinterpretq_u32_f32(x)
#define vreinterpretq_u64_m128(x) vreinterpretq_u64_f32(x)

#define vreinterpretq_s8_m128(x) vreinterpretq_s8_f32(x)
#define vreinterpretq_s16_m128(x) vreinterpretq_s16_f32(x)
#define vreinterpretq_s32_m128(x) vreinterpretq_s32_f32(x)
#define vreinterpretq_s64_m128(x) vreinterpretq_s64_f32(x)

#define vreinterpretq_m128i_s8(x) vreinterpretq_s64_s8(x)
#define vreinterpretq_m128i_s16(x) vreinterpretq_s64_s16(x)
#define vreinterpretq_m128i_s32(x) vreinterpretq_s64_s32(x)
#define vreinterpretq_m128i_s64(x) (x)

#define vreinterpretq_m128i_u8(x) vreinterpretq_s64_u8(x)
#define vreinterpretq_m128i_u16(x) vreinterpretq_s64_u16(x)
#define vreinterpretq_m128i_u32(x) vreinterpretq_s64_u32(x)
#define vreinterpretq_m128i_u64(x) vreinterpretq_s64_u64(x)

#define vreinterpretq_s8_m128i(x) vreinterpretq_s8_s64(x)
#define vreinterpretq_s16_m128i(x) vreinterpretq_s16_s64(x)
#define vreinterpretq_s32_m128i(x) vreinterpretq_s32_s64(x)
#define vreinterpretq_s64_m128i(x) (x)

#define vreinterpretq_u8_m128i(x) vreinterpretq_u8_s64(x)
#define vreinterpretq_u16_m128i(x) vreinterpretq_u16_s64(x)
#define vreinterpretq_u32_m128i(x) vreinterpretq_u32_s64(x)
#define vreinterpretq_u64_m128i(x) vreinterpretq_u64_s64(x)

#define vreinterpret_m64i_s8(x) vreinterpret_s64_s8(x)
#define vreinterpret_m64i_s16(x) vreinterpret_s64_s16(x)
#define vreinterpret_m64i_s32(x) vreinterpret_s64_s32(x)
#define vreinterpret_m64i_s64(x) (x)

#define vreinterpret_m64i_u8(x) vreinterpret_s64_u8(x)
#define vreinterpret_m64i_u16(x) vreinterpret_s64_u16(x)
#define vreinterpret_m64i_u32(x) vreinterpret_s64_u32(x)
#define vreinterpret_m64i_u64(x) vreinterpret_s64_u64(x)

#define vreinterpret_u8_m64i(x) vreinterpret_u8_s64(x)
#define vreinterpret_u16_m64i(x) vreinterpret_u16_s64(x)
#define vreinterpret_u32_m64i(x) vreinterpret_u32_s64(x)
#define vreinterpret_u64_m64i(x) vreinterpret_u64_s64(x)

#define vreinterpret_s8_m64i(x) vreinterpret_s8_s64(x)
#define vreinterpret_s16_m64i(x) vreinterpret_s16_s64(x)
#define vreinterpret_s32_m64i(x) vreinterpret_s32_s64(x)
#define vreinterpret_s64_m64i(x) (x)

// A union called 'SIMDVec' is defined in this header file, which can be used
// by applications that attempt to access the contents of an __m128 struct
// directly. Note that accessing the __m128 struct directly is considered bad
// coding practice by Microsoft: @see:
// https://msdn.microsoft.com/en-us/library/ayeb3ayc.aspx
//
// However, some legacy source code may try to access the contents of an __m128
// struct directly, so the developer can use SIMDVec as an alias for it. Any
// casting must be done manually by the developer, as you cannot cast or
// otherwise alias the base NEON data type for intrinsic operations.
//
// This union is intended to allow direct access to an __m128 variable using
// the names that the MSVC compiler provides. It should really only be used
// when trying to access the members of the vector as integer values. GCC/clang
// allow native access to the float members through a simple array access
// operator (in C since 4.6, in C++ since 4.8).
//
// Ideally, direct accesses to SIMD vectors should not be used since they can
// cause a performance hit. If they really are needed, however, the original
// __m128 variable can be aliased with a pointer to this union and used to
// access individual components. The use of this union should be hidden behind
// a macro that is used throughout the codebase to access the members instead
// of always declaring this type of variable.
typedef union ALIGN_STRUCT(16) SIMDVec {
    float m128_f32[4];     // as floats - DON'T USE. Added for convenience.
    int8_t m128_i8[16];    // as signed 8-bit integers.
    int16_t m128_i16[8];   // as signed 16-bit integers.
    int32_t m128_i32[4];   // as signed 32-bit integers.
    int64_t m128_i64[2];   // as signed 64-bit integers.
    uint8_t m128_u8[16];   // as unsigned 8-bit integers.
    uint16_t m128_u16[8];  // as unsigned 16-bit integers.
    uint32_t m128_u32[4];  // as unsigned 32-bit integers.
    uint64_t m128_u64[2];  // as unsigned 64-bit integers.
} SIMDVec;

// casting using SIMDVec
#define vreinterpretq_nth_u64_m128i(x, n) (((SIMDVec *)&x)->m128_u64[n])
#define vreinterpretq_nth_u32_m128i(x, n) (((SIMDVec *)&x)->m128_u32[n])
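// Illustrative usage: pulling an integer lane out of an __m128i through the
// SIMDVec-based macros above, e.g. after v = _mm_set_epi32(3, 2, 1, 0),
//   uint32_t lane2 = vreinterpretq_nth_u32_m128i(v, 2);  /* == 2 */
// Prefer the NEON lane intrinsics (vgetq_lane_*) where possible, for the
// performance reasons noted above.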
/* Backwards compatibility for compilers with lack of specific type support */

// Older gcc does not define the vld1q_u8_x4 intrinsic
#if defined(__GNUC__) && !defined(__clang__)
#if __GNUC__ <= 9
FORCE_INLINE uint8x16x4_t vld1q_u8_x4(const uint8_t *p)
{
    uint8x16x4_t ret;
    ret.val[0] = vld1q_u8(p + 0);
    ret.val[1] = vld1q_u8(p + 16);
    ret.val[2] = vld1q_u8(p + 32);
    ret.val[3] = vld1q_u8(p + 48);
    return ret;
}
#endif
#endif

/* Function Naming Conventions
 * The naming convention of SSE intrinsics is straightforward. A generic SSE
 * intrinsic function is given as follows:
 *   _mm_<name>_<data_type>
 *
 * The parts of this format are given as follows:
 * 1. <name> describes the operation performed by the intrinsic
 * 2. <data_type> identifies the data type of the function's primary arguments
 *
 * This last part, <data_type>, is a little complicated. It identifies the
 * content of the input values, and can be set to any of the following values:
 * + ps - vectors contain floats (ps stands for packed single-precision)
 * + pd - vectors contain doubles (pd stands for packed double-precision)
 * + epi8/epi16/epi32/epi64 - vectors contain 8-bit/16-bit/32-bit/64-bit
 *   signed integers
 * + epu8/epu16/epu32/epu64 - vectors contain 8-bit/16-bit/32-bit/64-bit
 *   unsigned integers
 * + si128 - unspecified 128-bit vector or 256-bit vector
 * + m128/m128i/m128d - identifies input vector types when they are different
 *   than the type of the returned vector
 *
 * For example, _mm_setzero_ps. The _mm implies that the function returns
 * a 128-bit vector. The _ps at the end implies that the argument vectors
 * contain floats.
 *
 * A complete example: Byte Shuffle - pshufb (_mm_shuffle_epi8)
 *   // Set packed 16-bit integers: 128 bits, 8 shorts, 16 bits each
 *   __m128i v_in = _mm_setr_epi16(1, 2, 3, 4, 5, 6, 7, 8);
 *   // Set packed 8-bit integers: 128 bits, 16 chars, 8 bits each
 *   __m128i v_perm = _mm_setr_epi8(1, 0, 2, 3, 8, 9, 10, 11,
 *                                  4, 5, 12, 13, 6, 7, 14, 15);
 *   // Shuffle packed 8-bit integers
 *   __m128i v_out = _mm_shuffle_epi8(v_in, v_perm); // pshufb
 *
 * Data (Number, Binary, Byte Index):
   +------+------+------+------+------+------+------+------+
   |      1      |      2      |      3      |      4      | Number
   +------+------+------+------+------+------+------+------+
   | 0000 | 0001 | 0000 | 0010 | 0000 | 0011 | 0000 | 0100 | Binary
   +------+------+------+------+------+------+------+------+
   |  0   |  1   |  2   |  3   |  4   |  5   |  6   |  7   | Index
   +------+------+------+------+------+------+------+------+
   +------+------+------+------+------+------+------+------+
   |      5      |      6      |      7      |      8      | Number
   +------+------+------+------+------+------+------+------+
   | 0000 | 0101 | 0000 | 0110 | 0000 | 0111 | 0000 | 1000 | Binary
   +------+------+------+------+------+------+------+------+
   |  8   |  9   |  10  |  11  |  12  |  13  |  14  |  15  | Index
   +------+------+------+------+------+------+------+------+
 * Index (Byte Index):
   +------+------+------+------+------+------+------+------+
   |  1   |  0   |  2   |  3   |  8   |  9   |  10  |  11  |
   +------+------+------+------+------+------+------+------+
   +------+------+------+------+------+------+------+------+
   |  4   |  5   |  12  |  13  |  6   |  7   |  14  |  15  |
   +------+------+------+------+------+------+------+------+
 * Result:
   +------+------+------+------+------+------+------+------+
   |  1   |  0   |  2   |  3   |  8   |  9   |  10  |  11  | Index
   +------+------+------+------+------+------+------+------+
   | 0001 | 0000 | 0000 | 0010 | 0000 | 0101 | 0000 | 0110 | Binary
   +------+------+------+------+------+------+------+------+
   |     256     |      2      |      5      |      6      | Number
   +------+------+------+------+------+------+------+------+
   +------+------+------+------+------+------+------+------+
   |  4   |  5   |  12  |  13  |  6   |  7   |  14  |  15  | Index
   +------+------+------+------+------+------+------+------+
   | 0000 | 0011 | 0000 | 0111 | 0000 | 0100 | 0000 | 1000 | Binary
   +------+------+------+------+------+------+------+------+
   |      3      |      7      |      4      |      8      | Number
   +------+------+------+------+------+------+------+------+
 */

/* Set/get methods */

/* Constants for use with _mm_prefetch. */
enum _mm_hint {
    _MM_HINT_NTA = 0,  /* load data to L1 and L2 cache, mark it as NTA */
    _MM_HINT_T0 = 1,   /* load data to L1 and L2 cache */
    _MM_HINT_T1 = 2,   /* load data to L2 cache only */
    _MM_HINT_T2 = 3,   /* load data to L2 cache only, mark it as NTA */
    _MM_HINT_ENTA = 4, /* exclusive version of _MM_HINT_NTA */
    _MM_HINT_ET0 = 5,  /* exclusive version of _MM_HINT_T0 */
    _MM_HINT_ET1 = 6,  /* exclusive version of _MM_HINT_T1 */
    _MM_HINT_ET2 = 7   /* exclusive version of _MM_HINT_T2 */
};

// Loads one cache line of data from address p to a location closer to the
// processor. https://msdn.microsoft.com/en-us/library/84szxsww(v=vs.100).aspx
FORCE_INLINE void _mm_prefetch(const void *p, int i)
{
    (void)i;
    __builtin_prefetch(p);
}

// Extracts the lower-order floating-point value from the parameter:
// https://msdn.microsoft.com/en-us/library/bb514059%28v=vs.120%29.aspx?f=255&MSPPError=-2147217396
FORCE_INLINE float _mm_cvtss_f32(__m128 a)
{
    return vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
}

// Sets the 128-bit value to zero
// https://msdn.microsoft.com/en-us/library/vstudio/ys7dw0kh(v=vs.100).aspx
FORCE_INLINE __m128i _mm_setzero_si128(void)
{
    return vreinterpretq_m128i_s32(vdupq_n_s32(0));
}

// Clears the four single-precision, floating-point values.
// https://msdn.microsoft.com/en-us/library/vstudio/tk1t2tbz(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setzero_ps(void)
{
    return vreinterpretq_m128_f32(vdupq_n_f32(0));
}

// Sets the four single-precision, floating-point values to w.
//
// r0 := r1 := r2 := r3 := w
//
// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set1_ps(float _w)
{
    return vreinterpretq_m128_f32(vdupq_n_f32(_w));
}

// Sets the four single-precision, floating-point values to w.
// https://msdn.microsoft.com/en-us/library/vstudio/2x1se8ha(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps1(float _w)
{
    return vreinterpretq_m128_f32(vdupq_n_f32(_w));
}

// Sets the four single-precision, floating-point values to the four inputs.
// https://msdn.microsoft.com/en-us/library/vstudio/afh0zf75(v=vs.100).aspx
FORCE_INLINE __m128 _mm_set_ps(float w, float z, float y, float x)
{
    float ALIGN_STRUCT(16) data[4] = {x, y, z, w};
    return vreinterpretq_m128_f32(vld1q_f32(data));
}
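// For illustration: _mm_set_ps lists its arguments from the highest lane to
// the lowest, so _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f) produces lanes {1, 2, 3,
// 4} (x in lane 0, w in lane 3), while _mm_setr_ps below takes them in memory
// order (first argument in lane 0).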
// Copy single-precision (32-bit) floating-point element a to the lower element
// of dst, and zero the upper 3 elements.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set_ss&expand=4901,4895,4901
FORCE_INLINE __m128 _mm_set_ss(float a)
{
    float ALIGN_STRUCT(16) data[4] = {a, 0, 0, 0};
    return vreinterpretq_m128_f32(vld1q_f32(data));
}

// Sets the four single-precision, floating-point values to the four inputs in
// reverse order.
// https://msdn.microsoft.com/en-us/library/vstudio/d2172ct3(v=vs.100).aspx
FORCE_INLINE __m128 _mm_setr_ps(float w, float z, float y, float x)
{
    float ALIGN_STRUCT(16) data[4] = {w, z, y, x};
    return vreinterpretq_m128_f32(vld1q_f32(data));
}

// Sets the 8 signed 16-bit integer values in reverse order.
//
// Return Value
// r0 := w0
// r1 := w1
// ...
// r7 := w7
FORCE_INLINE __m128i _mm_setr_epi16(short w0, short w1, short w2, short w3,
                                    short w4, short w5, short w6, short w7)
{
    int16_t ALIGN_STRUCT(16) data[8] = {w0, w1, w2, w3, w4, w5, w6, w7};
    return vreinterpretq_m128i_s16(vld1q_s16((int16_t *)data));
}

// Sets the 4 signed 32-bit integer values in reverse order.
// https://technet.microsoft.com/en-us/library/security/27yb3ee5(v=vs.90).aspx
FORCE_INLINE __m128i _mm_setr_epi32(int i3, int i2, int i1, int i0)
{
    int32_t ALIGN_STRUCT(16) data[4] = {i3, i2, i1, i0};
    return vreinterpretq_m128i_s32(vld1q_s32(data));
}

// Sets the 16 signed 8-bit integer values to b.
//
// r0 := b
// r1 := b
// ...
// r15 := b
//
// https://msdn.microsoft.com/en-us/library/6e14xhyf(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set1_epi8(signed char w)
{
    return vreinterpretq_m128i_s8(vdupq_n_s8(w));
}

// Sets the 8 signed 16-bit integer values to w.
//
// r0 := w
// r1 := w
// ...
// r7 := w
//
// https://msdn.microsoft.com/en-us/library/k0ya3x0e(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set1_epi16(short w)
{
    return vreinterpretq_m128i_s16(vdupq_n_s16(w));
}

// Sets the 16 signed 8-bit integer values.
// https://msdn.microsoft.com/en-us/library/x0cx8zd3(v=vs.90).aspx
FORCE_INLINE __m128i
_mm_set_epi8(signed char b15, signed char b14, signed char b13, signed char b12,
             signed char b11, signed char b10, signed char b9, signed char b8,
             signed char b7, signed char b6, signed char b5, signed char b4,
             signed char b3, signed char b2, signed char b1, signed char b0)
{
    int8_t ALIGN_STRUCT(16)
        data[16] = {(int8_t)b0,  (int8_t)b1,  (int8_t)b2,  (int8_t)b3,
                    (int8_t)b4,  (int8_t)b5,  (int8_t)b6,  (int8_t)b7,
                    (int8_t)b8,  (int8_t)b9,  (int8_t)b10, (int8_t)b11,
                    (int8_t)b12, (int8_t)b13, (int8_t)b14, (int8_t)b15};
    return (__m128i)vld1q_s8(data);
}

// Sets the 8 signed 16-bit integer values.
// https://msdn.microsoft.com/en-au/library/3e0fek84(v=vs.90).aspx
FORCE_INLINE __m128i _mm_set_epi16(short i7, short i6, short i5, short i4,
                                   short i3, short i2, short i1, short i0)
{
    int16_t ALIGN_STRUCT(16) data[8] = {i0, i1, i2, i3, i4, i5, i6, i7};
    return vreinterpretq_m128i_s16(vld1q_s16(data));
}

// Sets the 16 signed 8-bit integer values in reverse order.
// https://msdn.microsoft.com/en-us/library/2khb9c7k(v=vs.90).aspx
FORCE_INLINE __m128i _mm_setr_epi8(
    signed char b0, signed char b1, signed char b2, signed char b3,
    signed char b4, signed char b5, signed char b6, signed char b7,
    signed char b8, signed char b9, signed char b10, signed char b11,
    signed char b12, signed char b13, signed char b14, signed char b15)
{
    int8_t ALIGN_STRUCT(16)
        data[16] = {(int8_t)b0,  (int8_t)b1,  (int8_t)b2,  (int8_t)b3,
                    (int8_t)b4,  (int8_t)b5,  (int8_t)b6,  (int8_t)b7,
                    (int8_t)b8,  (int8_t)b9,  (int8_t)b10, (int8_t)b11,
                    (int8_t)b12, (int8_t)b13, (int8_t)b14, (int8_t)b15};
    return (__m128i)vld1q_s8(data);
}

// Sets the 4 signed 32-bit integer values to i.
//
// r0 := i
// r1 := i
// r2 := i
// r3 := i
//
// https://msdn.microsoft.com/en-us/library/vstudio/h4xscxat(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set1_epi32(int _i)
{
    return vreinterpretq_m128i_s32(vdupq_n_s32(_i));
}

// Sets the 2 signed 64-bit integer values to i.
// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/whtfzhzk(v=vs.100)
FORCE_INLINE __m128i _mm_set1_epi64(int64_t _i)
{
    return vreinterpretq_m128i_s64(vdupq_n_s64(_i));
}

// Sets the 2 signed 64-bit integer values to i.
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_set1_epi64x&expand=4961
FORCE_INLINE __m128i _mm_set1_epi64x(int64_t _i)
{
    return vreinterpretq_m128i_s64(vdupq_n_s64(_i));
}

// Sets the 4 signed 32-bit integer values.
// https://msdn.microsoft.com/en-us/library/vstudio/019beekt(v=vs.100).aspx
FORCE_INLINE __m128i _mm_set_epi32(int i3, int i2, int i1, int i0)
{
    int32_t ALIGN_STRUCT(16) data[4] = {i0, i1, i2, i3};
    return vreinterpretq_m128i_s32(vld1q_s32(data));
}

// Returns the __m128i structure with its two 64-bit integer values
// initialized to the values of the two 64-bit integers passed in.
// https://msdn.microsoft.com/en-us/library/dk2sdw0h(v=vs.120).aspx
FORCE_INLINE __m128i _mm_set_epi64x(int64_t i1, int64_t i2)
{
    int64_t ALIGN_STRUCT(16) data[2] = {i2, i1};
    return vreinterpretq_m128i_s64(vld1q_s64(data));
}
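// For illustration: with this definition the second argument lands in the low
// 64-bit lane, so _mm_set_epi64x(hi, lo) gives lane 0 == lo and lane 1 == hi,
// matching Intel's _mm_set_epi64x(e1, e0) convention.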
// Stores four single-precision, floating-point values.
// https://msdn.microsoft.com/en-us/library/vstudio/s3h4ay6y(v=vs.100).aspx
FORCE_INLINE void _mm_store_ps(float *p, __m128 a)
{
    vst1q_f32(p, vreinterpretq_f32_m128(a));
}

// Stores four single-precision, floating-point values.
// https://msdn.microsoft.com/en-us/library/44e30x22(v=vs.100).aspx
FORCE_INLINE void _mm_storeu_ps(float *p, __m128 a)
{
    vst1q_f32(p, vreinterpretq_f32_m128(a));
}

// Stores four 32-bit integer values (as a __m128i value) at the address p.
// https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
FORCE_INLINE void _mm_store_si128(__m128i *p, __m128i a)
{
    vst1q_s32((int32_t *)p, vreinterpretq_s32_m128i(a));
}

// Stores four 32-bit integer values (as a __m128i value) at the address p.
// https://msdn.microsoft.com/en-us/library/vstudio/edk11s13(v=vs.100).aspx
FORCE_INLINE void _mm_storeu_si128(__m128i *p, __m128i a)
{
    vst1q_s32((int32_t *)p, vreinterpretq_s32_m128i(a));
}

// Stores the lower single-precision, floating-point value.
// https://msdn.microsoft.com/en-us/library/tzz10fbx(v=vs.100).aspx
FORCE_INLINE void _mm_store_ss(float *p, __m128 a)
{
    vst1q_lane_f32(p, vreinterpretq_f32_m128(a), 0);
}

// Reads the lower 64 bits of b and stores them into the lower 64 bits of a.
// https://msdn.microsoft.com/en-us/library/hhwf428f%28v=vs.90%29.aspx
FORCE_INLINE void _mm_storel_epi64(__m128i *a, __m128i b)
{
    uint64x1_t hi = vget_high_u64(vreinterpretq_u64_m128i(*a));
    uint64x1_t lo = vget_low_u64(vreinterpretq_u64_m128i(b));
    *a = vreinterpretq_m128i_u64(vcombine_u64(lo, hi));
}

// Stores the lower two single-precision floating-point values of a to the
// address p.
//
// *p0 := a0
// *p1 := a1
//
// https://msdn.microsoft.com/en-us/library/h54t98ks(v=vs.90).aspx
FORCE_INLINE void _mm_storel_pi(__m64 *p, __m128 a)
{
    *p = vget_low_f32(a);
}

// Stores the upper two single-precision, floating-point values of a to the
// address p.
//
// *p0 := a2
// *p1 := a3
//
// https://msdn.microsoft.com/en-us/library/a7525fs8(v%3dvs.90).aspx
FORCE_INLINE void _mm_storeh_pi(__m64 *p, __m128 a)
{
    *p = vget_high_f32(a);
}

// Loads a single single-precision, floating-point value, copying it into all
// four words.
// https://msdn.microsoft.com/en-us/library/vstudio/5cdkf716(v=vs.100).aspx
FORCE_INLINE __m128 _mm_load1_ps(const float *p)
{
    return vreinterpretq_m128_f32(vld1q_dup_f32(p));
}
#define _mm_load_ps1 _mm_load1_ps

// Sets the lower two single-precision, floating-point values with 64
// bits of data loaded from the address p; the upper two values are passed
// through from a.
//
// Return Value
// r0 := *p0
// r1 := *p1
// r2 := a2
// r3 := a3
//
// https://msdn.microsoft.com/en-us/library/s57cyak2(v=vs.100).aspx
FORCE_INLINE __m128 _mm_loadl_pi(__m128 a, __m64 const *p)
{
    return vreinterpretq_m128_f32(
        vcombine_f32(vld1_f32((const float32_t *)p), vget_high_f32(a)));
}

// Sets the upper two single-precision, floating-point values with 64
// bits of data loaded from the address p; the lower two values are passed
// through from a.
//
// r0 := a0
// r1 := a1
// r2 := *p0
// r3 := *p1
//
// https://msdn.microsoft.com/en-us/library/w92wta0x(v%3dvs.100).aspx
FORCE_INLINE __m128 _mm_loadh_pi(__m128 a, __m64 const *p)
{
    return vreinterpretq_m128_f32(
        vcombine_f32(vget_low_f32(a), vld1_f32((const float32_t *)p)));
}

// Loads four single-precision, floating-point values.
// https://msdn.microsoft.com/en-us/library/vstudio/zzd50xxt(v=vs.100).aspx
FORCE_INLINE __m128 _mm_load_ps(const float *p)
{
    return vreinterpretq_m128_f32(vld1q_f32(p));
}

// Loads four single-precision, floating-point values.
// https://msdn.microsoft.com/en-us/library/x1b16s7z%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_loadu_ps(const float *p)
{
    // for NEON, alignment doesn't matter, so _mm_load_ps and _mm_loadu_ps are
    // equivalent
    return vreinterpretq_m128_f32(vld1q_f32(p));
}

// Loads a double-precision, floating-point value.
// The upper double-precision, floating-point value is set to zero. The address
// p does not need to be 16-byte aligned.
// https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/574w9fdd(v%3dvs.100)
FORCE_INLINE __m128d _mm_load_sd(const double *p)
{
#if defined(__aarch64__)
    return vsetq_lane_f64(*p, vdupq_n_f64(0), 0);
#else
    const float *fp = (const float *)p;
    float ALIGN_STRUCT(16) data[4] = {fp[0], fp[1], 0, 0};
    return vld1q_f32(data);
#endif
}

// Loads a single-precision, floating-point value into the low word and
// clears the upper three words.
// https://msdn.microsoft.com/en-us/library/548bb9h4%28v=vs.90%29.aspx
FORCE_INLINE __m128 _mm_load_ss(const float *p)
{
    return vreinterpretq_m128_f32(vsetq_lane_f32(*p, vdupq_n_f32(0), 0));
}

FORCE_INLINE __m128i _mm_loadl_epi64(__m128i const *p)
{
    /* Load the lower 64 bits of the value pointed to by p into the
     * lower 64 bits of the result, zeroing the upper 64 bits of the result.
     */
    return vreinterpretq_m128i_s32(
        vcombine_s32(vld1_s32((int32_t const *)p), vcreate_s32(0)));
}
/* Logic/Binary operations */

// Compares for inequality.
// https://msdn.microsoft.com/en-us/library/sf44thbx(v=vs.100).aspx
FORCE_INLINE __m128 _mm_cmpneq_ps(__m128 a, __m128 b)
{
    return vreinterpretq_m128_u32(vmvnq_u32(
        vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b))));
}

// Computes the bitwise AND-NOT of the four single-precision, floating-point
// values of a and b.
//
// r0 := ~a0 & b0
// r1 := ~a1 & b1
// r2 := ~a2 & b2
// r3 := ~a3 & b3
//
// https://msdn.microsoft.com/en-us/library/vstudio/68h7wd02(v=vs.100).aspx
FORCE_INLINE __m128 _mm_andnot_ps(__m128 a, __m128 b)
{
    return vreinterpretq_m128_s32(
        vbicq_s32(vreinterpretq_s32_m128(b),
                  vreinterpretq_s32_m128(a)));  // *NOTE* argument swap
}

// Computes the bitwise AND of the 128-bit value in b and the bitwise NOT of the
// 128-bit value in a.
//
// r := (~a) & b
//
// https://msdn.microsoft.com/en-us/library/vstudio/1beaceh8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_andnot_si128(__m128i a, __m128i b)
{
    return vreinterpretq_m128i_s32(
        vbicq_s32(vreinterpretq_s32_m128i(b),
                  vreinterpretq_s32_m128i(a)));  // *NOTE* argument swap
}

// Computes the bitwise AND of the 128-bit value in a and the 128-bit value in
// b.
//
// r := a & b
//
// https://msdn.microsoft.com/en-us/library/vstudio/6d1txsa8(v=vs.100).aspx
FORCE_INLINE __m128i _mm_and_si128(__m128i a, __m128i b)
{
    return vreinterpretq_m128i_s32(
        vandq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Computes the bitwise AND of the four single-precision, floating-point values
// of a and b.
//
// r0 := a0 & b0
// r1 := a1 & b1
// r2 := a2 & b2
// r3 := a3 & b3
//
// https://msdn.microsoft.com/en-us/library/vstudio/73ck1xc5(v=vs.100).aspx
FORCE_INLINE __m128 _mm_and_ps(__m128 a, __m128 b)
{
    return vreinterpretq_m128_s32(
        vandq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
}

// Computes the bitwise OR of the four single-precision, floating-point values
// of a and b.
// https://msdn.microsoft.com/en-us/library/vstudio/7ctdsyy0(v=vs.100).aspx
FORCE_INLINE __m128 _mm_or_ps(__m128 a, __m128 b)
{
    return vreinterpretq_m128_s32(
        vorrq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
}

// Computes the bitwise XOR (exclusive-or) of the four single-precision,
// floating-point values of a and b.
// https://msdn.microsoft.com/en-us/library/ss6k3wk8(v=vs.100).aspx
FORCE_INLINE __m128 _mm_xor_ps(__m128 a, __m128 b)
{
    return vreinterpretq_m128_s32(
        veorq_s32(vreinterpretq_s32_m128(a), vreinterpretq_s32_m128(b)));
}

// Computes the bitwise OR of the 128-bit value in a and the 128-bit value in b.
//
// r := a | b
//
// https://msdn.microsoft.com/en-us/library/vstudio/ew8ty0db(v=vs.100).aspx
FORCE_INLINE __m128i _mm_or_si128(__m128i a, __m128i b)
{
    return vreinterpretq_m128i_s32(
        vorrq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Computes the bitwise XOR of the 128-bit value in a and the 128-bit value in
// b. https://msdn.microsoft.com/en-us/library/fzt08www(v=vs.100).aspx
FORCE_INLINE __m128i _mm_xor_si128(__m128i a, __m128i b)
{
    return vreinterpretq_m128i_s32(
        veorq_s32(vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(b)));
}

// Moves the upper two values of B into the lower two values of the result, and
// keeps the upper two values of A in the upper two values of the result.
//
// r3 := a3
// r2 := a2
// r1 := b3
// r0 := b2
FORCE_INLINE __m128 _mm_movehl_ps(__m128 __A, __m128 __B)
{
    float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(__A));
    float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(__B));
    return vreinterpretq_m128_f32(vcombine_f32(b32, a32));
}

// Moves the lower two values of B into the upper two values of the result, and
// keeps the lower two values of A in the lower two values of the result.
//
// r3 := b1
// r2 := b0
// r1 := a1
// r0 := a0
FORCE_INLINE __m128 _mm_movelh_ps(__m128 __A, __m128 __B)
{
    float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(__A));
    float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(__B));
    return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
}

FORCE_INLINE __m128i _mm_abs_epi32(__m128i a)
{
    return vreinterpretq_m128i_s32(vabsq_s32(vreinterpretq_s32_m128i(a)));
}

FORCE_INLINE __m128i _mm_abs_epi16(__m128i a)
{
    return vreinterpretq_m128i_s16(vabsq_s16(vreinterpretq_s16_m128i(a)));
}

FORCE_INLINE __m128i _mm_abs_epi8(__m128i a)
{
    return vreinterpretq_m128i_s8(vabsq_s8(vreinterpretq_s8_m128i(a)));
}
// Takes the upper 64 bits of a and places them in the low end of the result;
// takes the lower 64 bits of b and places them in the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_1032(__m128 a, __m128 b)
{
    float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
    float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
    return vreinterpretq_m128_f32(vcombine_f32(a32, b10));
}

// Takes the lower two 32-bit values from a, swaps them, and places them in the
// low end of the result; takes the higher two 32-bit values from b, swaps
// them, and places them in the high end of the result.
FORCE_INLINE __m128 _mm_shuffle_ps_2301(__m128 a, __m128 b)
{
    float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
    float32x2_t b23 = vrev64_f32(vget_high_f32(vreinterpretq_f32_m128(b)));
    return vreinterpretq_m128_f32(vcombine_f32(a01, b23));
}

FORCE_INLINE __m128 _mm_shuffle_ps_0321(__m128 a, __m128 b)
{
    float32x2_t a21 = vget_high_f32(
        vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
    float32x2_t b03 = vget_low_f32(
        vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
    return vreinterpretq_m128_f32(vcombine_f32(a21, b03));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2103(__m128 a, __m128 b)
{
    float32x2_t a03 = vget_low_f32(
        vextq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a), 3));
    float32x2_t b21 = vget_high_f32(
        vextq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b), 3));
    return vreinterpretq_m128_f32(vcombine_f32(a03, b21));
}

FORCE_INLINE __m128 _mm_shuffle_ps_1010(__m128 a, __m128 b)
{
    float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
    float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
    return vreinterpretq_m128_f32(vcombine_f32(a10, b10));
}

FORCE_INLINE __m128 _mm_shuffle_ps_1001(__m128 a, __m128 b)
{
    float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
    float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
    return vreinterpretq_m128_f32(vcombine_f32(a01, b10));
}

FORCE_INLINE __m128 _mm_shuffle_ps_0101(__m128 a, __m128 b)
{
    float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
    float32x2_t b01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(b)));
    return vreinterpretq_m128_f32(vcombine_f32(a01, b01));
}

// Keeps the low 64 bits of a in the low end and puts the high 64 bits of b in
// the high end.
FORCE_INLINE __m128 _mm_shuffle_ps_3210(__m128 a, __m128 b)
{
    float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
    float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
    return vreinterpretq_m128_f32(vcombine_f32(a10, b32));
}

FORCE_INLINE __m128 _mm_shuffle_ps_0011(__m128 a, __m128 b)
{
    float32x2_t a11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 1);
    float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
    return vreinterpretq_m128_f32(vcombine_f32(a11, b00));
}

FORCE_INLINE __m128 _mm_shuffle_ps_0022(__m128 a, __m128 b)
{
    float32x2_t a22 =
        vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
    float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
    return vreinterpretq_m128_f32(vcombine_f32(a22, b00));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2200(__m128 a, __m128 b)
{
    float32x2_t a00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(a)), 0);
    float32x2_t b22 =
        vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(b)), 0);
    return vreinterpretq_m128_f32(vcombine_f32(a00, b22));
}

FORCE_INLINE __m128 _mm_shuffle_ps_3202(__m128 a, __m128 b)
{
    float32_t a0 = vgetq_lane_f32(vreinterpretq_f32_m128(a), 0);
    float32x2_t a22 =
        vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 0);
    float32x2_t a02 = vset_lane_f32(a0, a22, 1); /* TODO: use vzip? */
    float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
    return vreinterpretq_m128_f32(vcombine_f32(a02, b32));
}

FORCE_INLINE __m128 _mm_shuffle_ps_1133(__m128 a, __m128 b)
{
    float32x2_t a33 =
        vdup_lane_f32(vget_high_f32(vreinterpretq_f32_m128(a)), 1);
    float32x2_t b11 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 1);
    return vreinterpretq_m128_f32(vcombine_f32(a33, b11));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2010(__m128 a, __m128 b)
{
    float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
    float32_t b2 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 2);
    float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
    float32x2_t b20 = vset_lane_f32(b2, b00, 1);
    return vreinterpretq_m128_f32(vcombine_f32(a10, b20));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2001(__m128 a, __m128 b)
{
    float32x2_t a01 = vrev64_f32(vget_low_f32(vreinterpretq_f32_m128(a)));
    float32_t b2 = vgetq_lane_f32(b, 2);
    float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
    float32x2_t b20 = vset_lane_f32(b2, b00, 1);
    return vreinterpretq_m128_f32(vcombine_f32(a01, b20));
}

FORCE_INLINE __m128 _mm_shuffle_ps_2032(__m128 a, __m128 b)
{
    float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
    float32_t b2 = vgetq_lane_f32(b, 2);
    float32x2_t b00 = vdup_lane_f32(vget_low_f32(vreinterpretq_f32_m128(b)), 0);
    float32x2_t b20 = vset_lane_f32(b2, b00, 1);
    return vreinterpretq_m128_f32(vcombine_f32(a32, b20));
}
// NEON does not support a general purpose permute intrinsic.
// Selects four specific single-precision, floating-point values from a and b,
// based on the mask i.
// https://msdn.microsoft.com/en-us/library/vstudio/5f0858x0(v=vs.100).aspx
#if 0 /* C version */
FORCE_INLINE __m128 _mm_shuffle_ps_default(__m128 a,
                                           __m128 b,
                                           __constrange(0, 255) int imm)
{
    __m128 ret;
    ret[0] = a[imm & 0x3];
    ret[1] = a[(imm >> 2) & 0x3];
    ret[2] = b[(imm >> 4) & 0x03];
    ret[3] = b[(imm >> 6) & 0x03];
    return ret;
}
#endif
#define _mm_shuffle_ps_default(a, b, imm)                                  \
    __extension__({                                                        \
        float32x4_t ret;                                                   \
        ret = vmovq_n_f32(                                                 \
            vgetq_lane_f32(vreinterpretq_f32_m128(a), (imm) & (0x3)));     \
        ret = vsetq_lane_f32(                                              \
            vgetq_lane_f32(vreinterpretq_f32_m128(a), ((imm) >> 2) & 0x3), \
            ret, 1);                                                       \
        ret = vsetq_lane_f32(                                              \
            vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 4) & 0x3), \
            ret, 2);                                                       \
        ret = vsetq_lane_f32(                                              \
            vgetq_lane_f32(vreinterpretq_f32_m128(b), ((imm) >> 6) & 0x3), \
            ret, 3);                                                       \
        vreinterpretq_m128_f32(ret);                                       \
    })

// FORCE_INLINE __m128 _mm_shuffle_ps(__m128 a, __m128 b, __constrange(0,255)
// int imm)
#if __has_builtin(__builtin_shufflevector)
#define _mm_shuffle_ps(a, b, imm)                                \
    __extension__({                                              \
        float32x4_t _input1 = vreinterpretq_f32_m128(a);         \
        float32x4_t _input2 = vreinterpretq_f32_m128(b);         \
        float32x4_t _shuf = __builtin_shufflevector(             \
            _input1, _input2, (imm) & (0x3), ((imm) >> 2) & 0x3, \
            (((imm) >> 4) & 0x3) + 4, (((imm) >> 6) & 0x3) + 4); \
        vreinterpretq_m128_f32(_shuf);                           \
    })
#else  // generic
#define _mm_shuffle_ps(a, b, imm)                \
    __extension__({                              \
        __m128 ret;                              \
        switch (imm) {                           \
        case _MM_SHUFFLE(1, 0, 3, 2):            \
            ret = _mm_shuffle_ps_1032((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(2, 3, 0, 1):            \
            ret = _mm_shuffle_ps_2301((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(0, 3, 2, 1):            \
            ret = _mm_shuffle_ps_0321((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(2, 1, 0, 3):            \
            ret = _mm_shuffle_ps_2103((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(1, 0, 1, 0):            \
            ret = _mm_movelh_ps((a), (b));       \
            break;                               \
        case _MM_SHUFFLE(1, 0, 0, 1):            \
            ret = _mm_shuffle_ps_1001((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(0, 1, 0, 1):            \
            ret = _mm_shuffle_ps_0101((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(3, 2, 1, 0):            \
            ret = _mm_shuffle_ps_3210((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(0, 0, 1, 1):            \
            ret = _mm_shuffle_ps_0011((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(0, 0, 2, 2):            \
            ret = _mm_shuffle_ps_0022((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(2, 2, 0, 0):            \
            ret = _mm_shuffle_ps_2200((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(3, 2, 0, 2):            \
            ret = _mm_shuffle_ps_3202((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(3, 2, 3, 2):            \
            ret = _mm_movehl_ps((b), (a));       \
            break;                               \
        case _MM_SHUFFLE(1, 1, 3, 3):            \
            ret = _mm_shuffle_ps_1133((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(2, 0, 1, 0):            \
            ret = _mm_shuffle_ps_2010((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(2, 0, 0, 1):            \
            ret = _mm_shuffle_ps_2001((a), (b)); \
            break;                               \
        case _MM_SHUFFLE(2, 0, 3, 2):            \
            ret = _mm_shuffle_ps_2032((a), (b)); \
            break;                               \
  979. default: \
  980. ret = _mm_shuffle_ps_default((a), (b), (imm)); \
  981. break; \
  982. } \
  983. ret; \
  984. })
  985. #endif
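// Illustrative usage (added note, not part of the original sse2neon.h; the
// helper name is hypothetical): lane k of the result comes from a for k = 0,1
// and from b for k = 2,3, selected by successive 2-bit fields of imm, least
// significant field first.
#if 0 /* usage sketch */
static inline __m128 reverse_lanes_ps(__m128 a)
{
    // dst = { a[3], a[2], a[1], a[0] }
    return _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 2, 3));
}
#endif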
  986. // Takes the upper 64 bits of a and places it in the low end of the result
  987. // Takes the lower 64 bits of a and places it into the high end of the result.
  988. FORCE_INLINE __m128i _mm_shuffle_epi_1032(__m128i a)
  989. {
  990. int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
  991. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  992. return vreinterpretq_m128i_s32(vcombine_s32(a32, a10));
  993. }
994. // Takes the lower two 32-bit values from a, swaps them, and places them in the
995. // low end of the result; takes the upper two 32-bit values from a, swaps them,
996. // and places them in the high end of the result.
  997. FORCE_INLINE __m128i _mm_shuffle_epi_2301(__m128i a)
  998. {
  999. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  1000. int32x2_t a23 = vrev64_s32(vget_high_s32(vreinterpretq_s32_m128i(a)));
  1001. return vreinterpretq_m128i_s32(vcombine_s32(a01, a23));
  1002. }
1003. // Rotates the least significant 32 bits into the most significant 32 bits, and
  1004. // shifts the rest down
  1005. FORCE_INLINE __m128i _mm_shuffle_epi_0321(__m128i a)
  1006. {
  1007. return vreinterpretq_m128i_s32(vextq_s32(
  1008. vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 1));
  1009. }
1010. // Rotates the most significant 32 bits into the least significant 32 bits, and
  1011. // shifts the rest up
  1012. FORCE_INLINE __m128i _mm_shuffle_epi_2103(__m128i a)
  1013. {
  1014. return vreinterpretq_m128i_s32(vextq_s32(
  1015. vreinterpretq_s32_m128i(a), vreinterpretq_s32_m128i(a), 3));
  1016. }
1017. // Gets the lower 64 bits of a and places it in the upper 64 bits;
1018. // gets the lower 64 bits of a and places it in the lower 64 bits.
  1019. FORCE_INLINE __m128i _mm_shuffle_epi_1010(__m128i a)
  1020. {
  1021. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  1022. return vreinterpretq_m128i_s32(vcombine_s32(a10, a10));
  1023. }
1024. // Gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in
1025. // the lower 64 bits; gets the lower 64 bits of a and places it in the upper 64 bits.
  1026. FORCE_INLINE __m128i _mm_shuffle_epi_1001(__m128i a)
  1027. {
  1028. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  1029. int32x2_t a10 = vget_low_s32(vreinterpretq_s32_m128i(a));
  1030. return vreinterpretq_m128i_s32(vcombine_s32(a01, a10));
  1031. }
1032. // Gets the lower 64 bits of a, swaps the 0 and 1 elements, and places them in
1033. // the upper 64 bits; gets the lower 64 bits of a, swaps the 0 and 1 elements,
1034. // and places them in the lower 64 bits.
  1035. FORCE_INLINE __m128i _mm_shuffle_epi_0101(__m128i a)
  1036. {
  1037. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  1038. return vreinterpretq_m128i_s32(vcombine_s32(a01, a01));
  1039. }
  1040. FORCE_INLINE __m128i _mm_shuffle_epi_2211(__m128i a)
  1041. {
  1042. int32x2_t a11 =
  1043. vdup_lane_s32(vget_low_s32(vreinterpretq_s32_m128i(a)), 1);
  1044. int32x2_t a22 =
  1045. vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
  1046. return vreinterpretq_m128i_s32(vcombine_s32(a11, a22));
  1047. }
  1048. FORCE_INLINE __m128i _mm_shuffle_epi_0122(__m128i a)
  1049. {
  1050. int32x2_t a22 =
  1051. vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 0);
  1052. int32x2_t a01 = vrev64_s32(vget_low_s32(vreinterpretq_s32_m128i(a)));
  1053. return vreinterpretq_m128i_s32(vcombine_s32(a22, a01));
  1054. }
  1055. FORCE_INLINE __m128i _mm_shuffle_epi_3332(__m128i a)
  1056. {
  1057. int32x2_t a32 = vget_high_s32(vreinterpretq_s32_m128i(a));
  1058. int32x2_t a33 =
  1059. vdup_lane_s32(vget_high_s32(vreinterpretq_s32_m128i(a)), 1);
  1060. return vreinterpretq_m128i_s32(vcombine_s32(a32, a33));
  1061. }
  1062. // Shuffle packed 8-bit integers in a according to shuffle control mask in the
  1063. // corresponding 8-bit element of b, and store the results in dst.
  1064. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_shuffle_epi8&expand=5146
  1065. FORCE_INLINE __m128i _mm_shuffle_epi8(__m128i a, __m128i b)
  1066. {
  1067. int8x16_t tbl = vreinterpretq_s8_m128i(a); // input a
  1068. uint8x16_t idx = vreinterpretq_u8_m128i(b); // input b
  1069. uint8x16_t idx_masked =
  1070. vandq_u8(idx, vdupq_n_u8(0x8F)); // avoid using meaningless bits
  1071. #if defined(__aarch64__)
  1072. return vreinterpretq_m128i_s8(vqtbl1q_s8(tbl, idx_masked));
  1073. #elif defined(__GNUC__)
  1074. int8x16_t ret;
  1075. // %e and %f represent the even and odd D registers
  1076. // respectively.
  1077. __asm__ __volatile__("vtbl.8 %e[ret], {%e[tbl], %f[tbl]}, %e[idx]\n"
  1078. "vtbl.8 %f[ret], {%e[tbl], %f[tbl]}, %f[idx]\n"
  1079. : [ret] "=&w"(ret)
  1080. : [tbl] "w"(tbl), [idx] "w"(idx_masked));
  1081. return vreinterpretq_m128i_s8(ret);
  1082. #else
1083. // Generic fallback: a two-table vtbl2 lookup (this path also works on AArch64 for testing).
  1084. int8x8x2_t a_split = {vget_low_s8(tbl), vget_high_s8(tbl)};
  1085. return vreinterpretq_m128i_s8(
  1086. vcombine_s8(vtbl2_s8(a_split, vget_low_u8(idx_masked)),
  1087. vtbl2_s8(a_split, vget_high_u8(idx_masked))));
  1088. #endif
  1089. }
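// Illustrative usage (not part of the original sse2neon.h; the helper name and
// the use of _mm_set_epi8 are assumptions): each destination byte is either a
// table lookup into a or zero when the index byte has its top bit set, so a
// descending index vector reverses the byte order of the register.
#if 0 /* usage sketch */
static inline __m128i reverse_bytes_epi8(__m128i v)
{
    const __m128i idx = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7,
                                     8, 9, 10, 11, 12, 13, 14, 15);
    return _mm_shuffle_epi8(v, idx); // byte i of the result is v[15 - i]
}
#endif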
  1090. #if 0 /* C version */
  1091. FORCE_INLINE __m128i _mm_shuffle_epi32_default(__m128i a,
  1092. __constrange(0, 255) int imm)
  1093. {
  1094. __m128i ret;
  1095. ret[0] = a[imm & 0x3];
  1096. ret[1] = a[(imm >> 2) & 0x3];
  1097. ret[2] = a[(imm >> 4) & 0x03];
  1098. ret[3] = a[(imm >> 6) & 0x03];
  1099. return ret;
  1100. }
  1101. #endif
  1102. #define _mm_shuffle_epi32_default(a, imm) \
  1103. __extension__({ \
  1104. int32x4_t ret; \
  1105. ret = vmovq_n_s32(vgetq_lane_s32(vreinterpretq_s32_m128i(a), \
  1106. (imm) & (0x3))); \
  1107. ret = vsetq_lane_s32( \
  1108. vgetq_lane_s32(vreinterpretq_s32_m128i(a), \
  1109. ((imm) >> 2) & 0x3), \
  1110. ret, 1); \
  1111. ret = vsetq_lane_s32( \
  1112. vgetq_lane_s32(vreinterpretq_s32_m128i(a), \
  1113. ((imm) >> 4) & 0x3), \
  1114. ret, 2); \
  1115. ret = vsetq_lane_s32( \
  1116. vgetq_lane_s32(vreinterpretq_s32_m128i(a), \
  1117. ((imm) >> 6) & 0x3), \
  1118. ret, 3); \
  1119. vreinterpretq_m128i_s32(ret); \
  1120. })
  1121. // FORCE_INLINE __m128i _mm_shuffle_epi32_splat(__m128i a, __constrange(0,255)
  1122. // int imm)
  1123. #if defined(__aarch64__)
  1124. #define _mm_shuffle_epi32_splat(a, imm) \
  1125. __extension__({ \
  1126. vreinterpretq_m128i_s32( \
  1127. vdupq_laneq_s32(vreinterpretq_s32_m128i(a), (imm))); \
  1128. })
  1129. #else
  1130. #define _mm_shuffle_epi32_splat(a, imm) \
  1131. __extension__({ \
  1132. vreinterpretq_m128i_s32(vdupq_n_s32( \
  1133. vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm)))); \
  1134. })
  1135. #endif
  1136. // Shuffles the 4 signed or unsigned 32-bit integers in a as specified by imm.
  1137. // https://msdn.microsoft.com/en-us/library/56f67xbk%28v=vs.90%29.aspx
  1138. // FORCE_INLINE __m128i _mm_shuffle_epi32(__m128i a,
  1139. // __constrange(0,255) int imm)
  1140. #if __has_builtin(__builtin_shufflevector)
  1141. #define _mm_shuffle_epi32(a, imm) \
  1142. __extension__({ \
  1143. int32x4_t _input = vreinterpretq_s32_m128i(a); \
  1144. int32x4_t _shuf = __builtin_shufflevector( \
  1145. _input, _input, (imm) & (0x3), ((imm) >> 2) & 0x3, \
  1146. ((imm) >> 4) & 0x3, ((imm) >> 6) & 0x3); \
  1147. vreinterpretq_m128i_s32(_shuf); \
  1148. })
  1149. #else // generic
  1150. #define _mm_shuffle_epi32(a, imm) \
  1151. __extension__({ \
  1152. __m128i ret; \
  1153. switch (imm) { \
  1154. case _MM_SHUFFLE(1, 0, 3, 2): \
  1155. ret = _mm_shuffle_epi_1032((a)); \
  1156. break; \
  1157. case _MM_SHUFFLE(2, 3, 0, 1): \
  1158. ret = _mm_shuffle_epi_2301((a)); \
  1159. break; \
  1160. case _MM_SHUFFLE(0, 3, 2, 1): \
  1161. ret = _mm_shuffle_epi_0321((a)); \
  1162. break; \
  1163. case _MM_SHUFFLE(2, 1, 0, 3): \
  1164. ret = _mm_shuffle_epi_2103((a)); \
  1165. break; \
  1166. case _MM_SHUFFLE(1, 0, 1, 0): \
  1167. ret = _mm_shuffle_epi_1010((a)); \
  1168. break; \
  1169. case _MM_SHUFFLE(1, 0, 0, 1): \
  1170. ret = _mm_shuffle_epi_1001((a)); \
  1171. break; \
  1172. case _MM_SHUFFLE(0, 1, 0, 1): \
  1173. ret = _mm_shuffle_epi_0101((a)); \
  1174. break; \
  1175. case _MM_SHUFFLE(2, 2, 1, 1): \
  1176. ret = _mm_shuffle_epi_2211((a)); \
  1177. break; \
  1178. case _MM_SHUFFLE(0, 1, 2, 2): \
  1179. ret = _mm_shuffle_epi_0122((a)); \
  1180. break; \
  1181. case _MM_SHUFFLE(3, 3, 3, 2): \
  1182. ret = _mm_shuffle_epi_3332((a)); \
  1183. break; \
  1184. case _MM_SHUFFLE(0, 0, 0, 0): \
  1185. ret = _mm_shuffle_epi32_splat((a), 0); \
  1186. break; \
  1187. case _MM_SHUFFLE(1, 1, 1, 1): \
  1188. ret = _mm_shuffle_epi32_splat((a), 1); \
  1189. break; \
  1190. case _MM_SHUFFLE(2, 2, 2, 2): \
  1191. ret = _mm_shuffle_epi32_splat((a), 2); \
  1192. break; \
  1193. case _MM_SHUFFLE(3, 3, 3, 3): \
  1194. ret = _mm_shuffle_epi32_splat((a), 3); \
  1195. break; \
  1196. default: \
  1197. ret = _mm_shuffle_epi32_default((a), (imm)); \
  1198. break; \
  1199. } \
  1200. ret; \
  1201. })
  1202. #endif
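// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical): an immediate of _MM_SHUFFLE(0, 0, 0, 0) broadcasts lane 0,
// which takes the _mm_shuffle_epi32_splat fast path above.
#if 0 /* usage sketch */
static inline __m128i broadcast_lane0_epi32(__m128i v)
{
    return _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 0, 0, 0));
}
#endif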
  1203. // Shuffles the lower 4 signed or unsigned 16-bit integers in a as specified
  1204. // by imm.
  1205. // https://docs.microsoft.com/en-us/previous-versions/visualstudio/visual-studio-2010/y41dkk37(v=vs.100)
  1206. // FORCE_INLINE __m128i _mm_shufflelo_epi16_function(__m128i a,
  1207. // __constrange(0,255) int
  1208. // imm)
  1209. #define _mm_shufflelo_epi16_function(a, imm) \
  1210. __extension__({ \
  1211. int16x8_t ret = vreinterpretq_s16_m128i(a); \
  1212. int16x4_t lowBits = vget_low_s16(ret); \
  1213. ret = vsetq_lane_s16(vget_lane_s16(lowBits, (imm) & (0x3)), \
  1214. ret, 0); \
  1215. ret = vsetq_lane_s16( \
  1216. vget_lane_s16(lowBits, ((imm) >> 2) & 0x3), ret, 1); \
  1217. ret = vsetq_lane_s16( \
  1218. vget_lane_s16(lowBits, ((imm) >> 4) & 0x3), ret, 2); \
  1219. ret = vsetq_lane_s16( \
  1220. vget_lane_s16(lowBits, ((imm) >> 6) & 0x3), ret, 3); \
  1221. vreinterpretq_m128i_s16(ret); \
  1222. })
  1223. // FORCE_INLINE __m128i _mm_shufflelo_epi16(__m128i a,
  1224. // __constrange(0,255) int imm)
  1225. #if __has_builtin(__builtin_shufflevector)
  1226. #define _mm_shufflelo_epi16(a, imm) \
  1227. __extension__({ \
  1228. int16x8_t _input = vreinterpretq_s16_m128i(a); \
  1229. int16x8_t _shuf = __builtin_shufflevector( \
  1230. _input, _input, ((imm) & (0x3)), (((imm) >> 2) & 0x3), \
  1231. (((imm) >> 4) & 0x3), (((imm) >> 6) & 0x3), 4, 5, 6, \
  1232. 7); \
  1233. vreinterpretq_m128i_s16(_shuf); \
  1234. })
  1235. #else // generic
  1236. #define _mm_shufflelo_epi16(a, imm) _mm_shufflelo_epi16_function((a), (imm))
  1237. #endif
  1238. // Shuffles the upper 4 signed or unsigned 16-bit integers in a as specified
  1239. // by imm.
  1240. // https://msdn.microsoft.com/en-us/library/13ywktbs(v=vs.100).aspx
  1241. // FORCE_INLINE __m128i _mm_shufflehi_epi16_function(__m128i a,
  1242. // __constrange(0,255) int
  1243. // imm)
  1244. #define _mm_shufflehi_epi16_function(a, imm) \
  1245. __extension__({ \
  1246. int16x8_t ret = vreinterpretq_s16_m128i(a); \
  1247. int16x4_t highBits = vget_high_s16(ret); \
  1248. ret = vsetq_lane_s16(vget_lane_s16(highBits, (imm) & (0x3)), \
  1249. ret, 4); \
  1250. ret = vsetq_lane_s16( \
  1251. vget_lane_s16(highBits, ((imm) >> 2) & 0x3), ret, 5); \
  1252. ret = vsetq_lane_s16( \
  1253. vget_lane_s16(highBits, ((imm) >> 4) & 0x3), ret, 6); \
  1254. ret = vsetq_lane_s16( \
  1255. vget_lane_s16(highBits, ((imm) >> 6) & 0x3), ret, 7); \
  1256. vreinterpretq_m128i_s16(ret); \
  1257. })
  1258. // FORCE_INLINE __m128i _mm_shufflehi_epi16(__m128i a,
  1259. // __constrange(0,255) int imm)
  1260. #if __has_builtin(__builtin_shufflevector)
  1261. #define _mm_shufflehi_epi16(a, imm) \
  1262. __extension__({ \
  1263. int16x8_t _input = vreinterpretq_s16_m128i(a); \
  1264. int16x8_t _shuf = __builtin_shufflevector( \
  1265. _input, _input, 0, 1, 2, 3, ((imm) & (0x3)) + 4, \
  1266. (((imm) >> 2) & 0x3) + 4, (((imm) >> 4) & 0x3) + 4, \
  1267. (((imm) >> 6) & 0x3) + 4); \
  1268. vreinterpretq_m128i_s16(_shuf); \
  1269. })
  1270. #else // generic
  1271. #define _mm_shufflehi_epi16(a, imm) _mm_shufflehi_epi16_function((a), (imm))
  1272. #endif
  1273. // Blend packed 16-bit integers from a and b using control mask imm8, and store
  1274. // the results in dst.
  1275. //
  1276. // FOR j := 0 to 7
  1277. // i := j*16
  1278. // IF imm8[j]
  1279. // dst[i+15:i] := b[i+15:i]
  1280. // ELSE
  1281. // dst[i+15:i] := a[i+15:i]
  1282. // FI
  1283. // ENDFOR
  1284. // FORCE_INLINE __m128i _mm_blend_epi16(__m128i a, __m128i b,
  1285. // __constrange(0,255) int imm)
  1286. #define _mm_blend_epi16(a, b, imm) \
  1287. __extension__({ \
  1288. const uint16_t _mask[8] = { \
  1289. ((imm) & (1 << 0)) ? 0xFFFF : 0x0000, \
  1290. ((imm) & (1 << 1)) ? 0xFFFF : 0x0000, \
  1291. ((imm) & (1 << 2)) ? 0xFFFF : 0x0000, \
  1292. ((imm) & (1 << 3)) ? 0xFFFF : 0x0000, \
  1293. ((imm) & (1 << 4)) ? 0xFFFF : 0x0000, \
  1294. ((imm) & (1 << 5)) ? 0xFFFF : 0x0000, \
  1295. ((imm) & (1 << 6)) ? 0xFFFF : 0x0000, \
  1296. ((imm) & (1 << 7)) ? 0xFFFF : 0x0000}; \
  1297. uint16x8_t _mask_vec = vld1q_u16(_mask); \
  1298. uint16x8_t _a = vreinterpretq_u16_m128i(a); \
  1299. uint16x8_t _b = vreinterpretq_u16_m128i(b); \
  1300. vreinterpretq_m128i_u16(vbslq_u16(_mask_vec, _b, _a)); \
  1301. })
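// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical): bit j of the immediate selects 16-bit lane j from b, otherwise
// from a, so 0x0F takes the low four lanes from b and the high four from a.
#if 0 /* usage sketch */
static inline __m128i blend_low_half_epi16(__m128i a, __m128i b)
{
    return _mm_blend_epi16(a, b, 0x0F);
}
#endif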
  1302. // Blend packed 8-bit integers from a and b using mask, and store the results in
  1303. // dst.
  1304. //
  1305. // FOR j := 0 to 15
  1306. // i := j*8
  1307. // IF mask[i+7]
  1308. // dst[i+7:i] := b[i+7:i]
  1309. // ELSE
  1310. // dst[i+7:i] := a[i+7:i]
  1311. // FI
  1312. // ENDFOR
  1313. FORCE_INLINE __m128i _mm_blendv_epi8(__m128i _a, __m128i _b, __m128i _mask)
  1314. {
  1315. // Use a signed shift right to create a mask with the sign bit
  1316. uint8x16_t mask = vreinterpretq_u8_s8(
  1317. vshrq_n_s8(vreinterpretq_s8_m128i(_mask), 7));
  1318. uint8x16_t a = vreinterpretq_u8_m128i(_a);
  1319. uint8x16_t b = vreinterpretq_u8_m128i(_b);
  1320. return vreinterpretq_m128i_u8(vbslq_u8(mask, b, a));
  1321. }
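// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical and _mm_cmpgt_epi8 is assumed available, as in standard SSE2):
// only the sign bit of each mask byte matters, so any byte-compare result can
// drive the blend, e.g. a per-byte signed maximum.
#if 0 /* usage sketch */
static inline __m128i max_epi8_via_blendv(__m128i a, __m128i b)
{
    __m128i gt = _mm_cmpgt_epi8(b, a); // 0xFF where b > a, 0x00 otherwise
    return _mm_blendv_epi8(a, b, gt);  // take b where the mask byte is set
}
#endif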
  1322. /* Shifts */
  1323. // Shifts the 4 signed 32-bit integers in a right by count bits while shifting
  1324. // in the sign bit.
  1325. //
  1326. // r0 := a0 >> count
  1327. // r1 := a1 >> count
  1328. // r2 := a2 >> count
1329. // r3 := a3 >> count
  1330. FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, int count)
  1331. {
  1332. return (__m128i)vshlq_s32((int32x4_t)a, vdupq_n_s32(-count));
  1333. }
  1334. // Shifts the 8 signed 16-bit integers in a right by count bits while shifting
  1335. // in the sign bit.
  1336. //
  1337. // r0 := a0 >> count
  1338. // r1 := a1 >> count
  1339. // ...
  1340. // r7 := a7 >> count
  1341. FORCE_INLINE __m128i _mm_srai_epi16(__m128i a, int count)
  1342. {
  1343. return (__m128i)vshlq_s16((int16x8_t)a, vdupq_n_s16(-count));
  1344. }
  1345. // Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while
  1346. // shifting in zeros.
  1347. //
  1348. // r0 := a0 << count
  1349. // r1 := a1 << count
  1350. // ...
  1351. // r7 := a7 << count
  1352. //
  1353. // https://msdn.microsoft.com/en-us/library/es73bcsy(v=vs.90).aspx
  1354. #define _mm_slli_epi16(a, imm) \
  1355. __extension__({ \
  1356. __m128i ret; \
  1357. if ((imm) <= 0) { \
  1358. ret = a; \
1359. } else if ((imm) > 15) { \
  1360. ret = _mm_setzero_si128(); \
  1361. } else { \
  1362. ret = vreinterpretq_m128i_s16(vshlq_n_s16( \
  1363. vreinterpretq_s16_m128i(a), (imm))); \
  1364. } \
  1365. ret; \
  1366. })
  1367. // Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while
1368. // shifting in zeros.
  1369. // https://msdn.microsoft.com/en-us/library/z2k3bbtb%28v=vs.90%29.aspx
  1370. // FORCE_INLINE __m128i _mm_slli_epi32(__m128i a, __constrange(0,255) int imm)
  1371. #define _mm_slli_epi32(a, imm) \
  1372. __extension__({ \
  1373. __m128i ret; \
  1374. if ((imm) <= 0) { \
  1375. ret = a; \
  1376. } else if ((imm) > 31) { \
  1377. ret = _mm_setzero_si128(); \
  1378. } else { \
  1379. ret = vreinterpretq_m128i_s32(vshlq_n_s32( \
  1380. vreinterpretq_s32_m128i(a), (imm))); \
  1381. } \
  1382. ret; \
  1383. })
  1384. // Shift packed 64-bit integers in a left by imm8 while shifting in zeros, and
  1385. // store the results in dst.
  1386. #define _mm_slli_epi64(a, imm) \
  1387. __extension__({ \
  1388. __m128i ret; \
  1389. if ((imm) <= 0) { \
  1390. ret = a; \
  1391. } else if ((imm) > 63) { \
  1392. ret = _mm_setzero_si128(); \
  1393. } else { \
  1394. ret = vreinterpretq_m128i_s64(vshlq_n_s64( \
  1395. vreinterpretq_s64_m128i(a), (imm))); \
  1396. } \
  1397. ret; \
  1398. })
  1399. // Shifts the 8 signed or unsigned 16-bit integers in a right by count bits
  1400. // while shifting in zeros.
  1401. //
  1402. // r0 := srl(a0, count)
  1403. // r1 := srl(a1, count)
  1404. // ...
  1405. // r7 := srl(a7, count)
  1406. //
  1407. // https://msdn.microsoft.com/en-us/library/6tcwd38t(v=vs.90).aspx
  1408. #define _mm_srli_epi16(a, imm) \
  1409. __extension__({ \
  1410. __m128i ret; \
  1411. if ((imm) <= 0) { \
  1412. ret = a; \
1413. } else if ((imm) > 15) { \
  1414. ret = _mm_setzero_si128(); \
  1415. } else { \
  1416. ret = vreinterpretq_m128i_u16(vshrq_n_u16( \
  1417. vreinterpretq_u16_m128i(a), (imm))); \
  1418. } \
  1419. ret; \
  1420. })
  1421. // Shifts the 4 signed or unsigned 32-bit integers in a right by count bits
  1422. // while shifting in zeros.
1423. // https://msdn.microsoft.com/en-us/library/w486zcfa(v=vs.100).aspx
1424. // FORCE_INLINE __m128i _mm_srli_epi32(__m128i a, __constrange(0,255) int imm)
  1425. #define _mm_srli_epi32(a, imm) \
  1426. __extension__({ \
  1427. __m128i ret; \
  1428. if ((imm) <= 0) { \
  1429. ret = a; \
  1430. } else if ((imm) > 31) { \
  1431. ret = _mm_setzero_si128(); \
  1432. } else { \
  1433. ret = vreinterpretq_m128i_u32(vshrq_n_u32( \
  1434. vreinterpretq_u32_m128i(a), (imm))); \
  1435. } \
  1436. ret; \
  1437. })
  1438. // Shift packed 64-bit integers in a right by imm8 while shifting in zeros, and
  1439. // store the results in dst.
  1440. #define _mm_srli_epi64(a, imm) \
  1441. __extension__({ \
  1442. __m128i ret; \
  1443. if ((imm) <= 0) { \
  1444. ret = a; \
  1445. } else if ((imm) > 63) { \
  1446. ret = _mm_setzero_si128(); \
  1447. } else { \
  1448. ret = vreinterpretq_m128i_u64(vshrq_n_u64( \
  1449. vreinterpretq_u64_m128i(a), (imm))); \
  1450. } \
  1451. ret; \
  1452. })
1453. // Shifts the 4 signed 32-bit integers in a right by count bits while shifting
  1454. // in the sign bit.
  1455. // https://msdn.microsoft.com/en-us/library/z1939387(v=vs.100).aspx
  1456. // FORCE_INLINE __m128i _mm_srai_epi32(__m128i a, __constrange(0,255) int imm)
  1457. #define _mm_srai_epi32(a, imm) \
  1458. __extension__({ \
  1459. __m128i ret; \
  1460. if ((imm) <= 0) { \
  1461. ret = a; \
  1462. } else if ((imm) > 31) { \
  1463. ret = vreinterpretq_m128i_s32( \
  1464. vshrq_n_s32(vreinterpretq_s32_m128i(a), 16)); \
  1465. ret = vreinterpretq_m128i_s32(vshrq_n_s32( \
  1466. vreinterpretq_s32_m128i(ret), 16)); \
  1467. } else { \
  1468. ret = vreinterpretq_m128i_s32(vshrq_n_s32( \
  1469. vreinterpretq_s32_m128i(a), (imm))); \
  1470. } \
  1471. ret; \
  1472. })
1473. // Shifts the 128-bit value in a right by imm bytes while shifting in
1474. // zeros. imm must be an immediate.
  1475. //
  1476. // r := srl(a, imm*8)
  1477. //
  1478. // https://msdn.microsoft.com/en-us/library/305w28yz(v=vs.100).aspx
  1479. // FORCE_INLINE _mm_srli_si128(__m128i a, __constrange(0,255) int imm)
  1480. #define _mm_srli_si128(a, imm) \
  1481. __extension__({ \
  1482. __m128i ret; \
  1483. if ((imm) <= 0) { \
  1484. ret = a; \
  1485. } else if ((imm) > 15) { \
  1486. ret = _mm_setzero_si128(); \
  1487. } else { \
  1488. ret = vreinterpretq_m128i_s8( \
  1489. vextq_s8(vreinterpretq_s8_m128i(a), \
  1490. vdupq_n_s8(0), (imm))); \
  1491. } \
  1492. ret; \
  1493. })
  1494. // Shifts the 128-bit value in a left by imm bytes while shifting in zeros. imm
  1495. // must be an immediate.
  1496. //
  1497. // r := a << (imm * 8)
  1498. //
  1499. // https://msdn.microsoft.com/en-us/library/34d3k2kt(v=vs.100).aspx
  1500. // FORCE_INLINE __m128i _mm_slli_si128(__m128i a, __constrange(0,255) int imm)
  1501. #define _mm_slli_si128(a, imm) \
  1502. __extension__({ \
  1503. __m128i ret; \
  1504. if ((imm) <= 0) { \
  1505. ret = a; \
  1506. } else if ((imm) > 15) { \
  1507. ret = _mm_setzero_si128(); \
  1508. } else { \
  1509. ret = vreinterpretq_m128i_s8(vextq_s8( \
  1510. vdupq_n_s8(0), vreinterpretq_s8_m128i(a), \
  1511. 16 - (imm))); \
  1512. } \
  1513. ret; \
  1514. })
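// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical): the byte shifts act on the full 128-bit value, so shifting
// right by 4 bytes moves 32-bit lane 1 into lane 0 and fills lane 3 with zeros.
#if 0 /* usage sketch */
static inline __m128i drop_low_dword(__m128i v)
{
    return _mm_srli_si128(v, 4);
}
#endif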
  1515. // Shifts the 8 signed or unsigned 16-bit integers in a left by count bits while
  1516. // shifting in zeros.
  1517. //
  1518. // r0 := a0 << count
  1519. // r1 := a1 << count
  1520. // ...
  1521. // r7 := a7 << count
  1522. //
  1523. // https://msdn.microsoft.com/en-us/library/c79w388h(v%3dvs.90).aspx
  1524. FORCE_INLINE __m128i _mm_sll_epi16(__m128i a, __m128i count)
  1525. {
  1526. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  1527. if (c > 15)
  1528. return _mm_setzero_si128();
  1529. int16x8_t vc = vdupq_n_s16((int16_t)c);
  1530. return vreinterpretq_m128i_s16(
  1531. vshlq_s16(vreinterpretq_s16_m128i(a), vc));
  1532. }
  1533. // Shifts the 4 signed or unsigned 32-bit integers in a left by count bits while
  1534. // shifting in zeros.
  1535. //
  1536. // r0 := a0 << count
  1537. // r1 := a1 << count
  1538. // r2 := a2 << count
  1539. // r3 := a3 << count
  1540. //
  1541. // https://msdn.microsoft.com/en-us/library/6fe5a6s9(v%3dvs.90).aspx
  1542. FORCE_INLINE __m128i _mm_sll_epi32(__m128i a, __m128i count)
  1543. {
  1544. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  1545. if (c > 31)
  1546. return _mm_setzero_si128();
  1547. int32x4_t vc = vdupq_n_s32((int32_t)c);
  1548. return vreinterpretq_m128i_s32(
  1549. vshlq_s32(vreinterpretq_s32_m128i(a), vc));
  1550. }
  1551. // Shifts the 2 signed or unsigned 64-bit integers in a left by count bits while
  1552. // shifting in zeros.
  1553. //
  1554. // r0 := a0 << count
  1555. // r1 := a1 << count
  1556. //
  1557. // https://msdn.microsoft.com/en-us/library/6ta9dffd(v%3dvs.90).aspx
  1558. FORCE_INLINE __m128i _mm_sll_epi64(__m128i a, __m128i count)
  1559. {
  1560. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  1561. if (c > 63)
  1562. return _mm_setzero_si128();
  1563. int64x2_t vc = vdupq_n_s64((int64_t)c);
  1564. return vreinterpretq_m128i_s64(
  1565. vshlq_s64(vreinterpretq_s64_m128i(a), vc));
  1566. }
  1567. // Shifts the 8 signed or unsigned 16-bit integers in a right by count bits
  1568. // while shifting in zeros.
  1569. //
  1570. // r0 := srl(a0, count)
  1571. // r1 := srl(a1, count)
  1572. // ...
  1573. // r7 := srl(a7, count)
  1574. //
  1575. // https://msdn.microsoft.com/en-us/library/wd5ax830(v%3dvs.90).aspx
  1576. FORCE_INLINE __m128i _mm_srl_epi16(__m128i a, __m128i count)
  1577. {
  1578. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  1579. if (c > 15)
  1580. return _mm_setzero_si128();
  1581. int16x8_t vc = vdupq_n_s16(-(int16_t)c);
  1582. return vreinterpretq_m128i_u16(
  1583. vshlq_u16(vreinterpretq_u16_m128i(a), vc));
  1584. }
  1585. // Shifts the 4 signed or unsigned 32-bit integers in a right by count bits
  1586. // while shifting in zeros.
  1587. //
  1588. // r0 := srl(a0, count)
  1589. // r1 := srl(a1, count)
  1590. // r2 := srl(a2, count)
  1591. // r3 := srl(a3, count)
  1592. //
  1593. // https://msdn.microsoft.com/en-us/library/a9cbttf4(v%3dvs.90).aspx
  1594. FORCE_INLINE __m128i _mm_srl_epi32(__m128i a, __m128i count)
  1595. {
  1596. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  1597. if (c > 31)
  1598. return _mm_setzero_si128();
  1599. int32x4_t vc = vdupq_n_s32(-(int32_t)c);
  1600. return vreinterpretq_m128i_u32(
  1601. vshlq_u32(vreinterpretq_u32_m128i(a), vc));
  1602. }
  1603. // Shifts the 2 signed or unsigned 64-bit integers in a right by count bits
  1604. // while shifting in zeros.
  1605. //
  1606. // r0 := srl(a0, count)
  1607. // r1 := srl(a1, count)
  1608. //
  1609. // https://msdn.microsoft.com/en-us/library/yf6cf9k8(v%3dvs.90).aspx
  1610. FORCE_INLINE __m128i _mm_srl_epi64(__m128i a, __m128i count)
  1611. {
  1612. uint64_t c = vreinterpretq_nth_u64_m128i(count, 0);
  1613. if (c > 63)
  1614. return _mm_setzero_si128();
  1615. int64x2_t vc = vdupq_n_s64(-(int64_t)c);
  1616. return vreinterpretq_m128i_u64(
  1617. vshlq_u64(vreinterpretq_u64_m128i(a), vc));
  1618. }
  1619. // NEON does not provide a version of this function.
  1620. // Creates a 16-bit mask from the most significant bits of the 16 signed or
  1621. // unsigned 8-bit integers in a and zero extends the upper bits.
  1622. // https://msdn.microsoft.com/en-us/library/vstudio/s090c8fk(v=vs.100).aspx
  1623. FORCE_INLINE int _mm_movemask_epi8(__m128i a)
  1624. {
  1625. #if defined(__aarch64__)
  1626. uint8x16_t input = vreinterpretq_u8_m128i(a);
  1627. const int8_t ALIGN_STRUCT(16) xr[16] = {-7, -6, -5, -4, -3, -2, -1, 0,
  1628. -7, -6, -5, -4, -3, -2, -1, 0};
  1629. const uint8x16_t mask_and = vdupq_n_u8(0x80);
  1630. const int8x16_t mask_shift = vld1q_s8(xr);
  1631. const uint8x16_t mask_result =
  1632. vshlq_u8(vandq_u8(input, mask_and), mask_shift);
  1633. uint8x8_t lo = vget_low_u8(mask_result);
  1634. uint8x8_t hi = vget_high_u8(mask_result);
  1635. return vaddv_u8(lo) + (vaddv_u8(hi) << 8);
  1636. #else
  1637. // Use increasingly wide shifts+adds to collect the sign bits
  1638. // together.
  1639. // Since the widening shifts would be rather confusing to follow in little
  1640. // endian, everything will be illustrated in big endian order instead. This
  1641. // has a different result - the bits would actually be reversed on a big
  1642. // endian machine.
  1643. // Starting input (only half the elements are shown):
  1644. // 89 ff 1d c0 00 10 99 33
  1645. uint8x16_t input = vreinterpretq_u8_m128i(a);
  1646. // Shift out everything but the sign bits with an unsigned shift right.
  1647. //
  1648. // Bytes of the vector::
  1649. // 89 ff 1d c0 00 10 99 33
  1650. // \ \ \ \ \ \ \ \ high_bits = (uint16x4_t)(input >> 7)
  1651. // | | | | | | | |
  1652. // 01 01 00 01 00 00 01 00
  1653. //
  1654. // Bits of first important lane(s):
  1655. // 10001001 (89)
  1656. // \______
  1657. // |
  1658. // 00000001 (01)
  1659. uint16x8_t high_bits = vreinterpretq_u16_u8(vshrq_n_u8(input, 7));
  1660. // Merge the even lanes together with a 16-bit unsigned shift right + add.
  1661. // 'xx' represents garbage data which will be ignored in the final result.
  1662. // In the important bytes, the add functions like a binary OR.
  1663. //
  1664. // 01 01 00 01 00 00 01 00
  1665. // \_ | \_ | \_ | \_ | paired16 = (uint32x4_t)(input + (input >> 7))
  1666. // \| \| \| \|
  1667. // xx 03 xx 01 xx 00 xx 02
  1668. //
  1669. // 00000001 00000001 (01 01)
  1670. // \_______ |
  1671. // \|
  1672. // xxxxxxxx xxxxxx11 (xx 03)
  1673. uint32x4_t paired16 =
  1674. vreinterpretq_u32_u16(vsraq_n_u16(high_bits, high_bits, 7));
  1675. // Repeat with a wider 32-bit shift + add.
  1676. // xx 03 xx 01 xx 00 xx 02
  1677. // \____ | \____ | paired32 = (uint64x1_t)(paired16 + (paired16 >>
  1678. // 14))
  1679. // \| \|
  1680. // xx xx xx 0d xx xx xx 02
  1681. //
  1682. // 00000011 00000001 (03 01)
  1683. // \\_____ ||
  1684. // '----.\||
  1685. // xxxxxxxx xxxx1101 (xx 0d)
  1686. uint64x2_t paired32 =
  1687. vreinterpretq_u64_u32(vsraq_n_u32(paired16, paired16, 14));
  1688. // Last, an even wider 64-bit shift + add to get our result in the low 8 bit
  1689. // lanes. xx xx xx 0d xx xx xx 02
  1690. // \_________ | paired64 = (uint8x8_t)(paired32 + (paired32 >>
  1691. // 28))
  1692. // \|
  1693. // xx xx xx xx xx xx xx d2
  1694. //
  1695. // 00001101 00000010 (0d 02)
  1696. // \ \___ | |
  1697. // '---. \| |
  1698. // xxxxxxxx 11010010 (xx d2)
  1699. uint8x16_t paired64 =
  1700. vreinterpretq_u8_u64(vsraq_n_u64(paired32, paired32, 28));
  1701. // Extract the low 8 bits from each 64-bit lane with 2 8-bit extracts.
  1702. // xx xx xx xx xx xx xx d2
  1703. // || return paired64[0]
  1704. // d2
  1705. // Note: Little endian would return the correct value 4b (01001011) instead.
  1706. return vgetq_lane_u8(paired64, 0) |
  1707. ((int)vgetq_lane_u8(paired64, 8) << 8);
  1708. #endif
  1709. }
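// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical and _mm_set1_epi8/_mm_cmpeq_epi8/__builtin_ctz are assumed
// available): a common pattern is to compare 16 bytes at once and use the
// movemask bits to locate the first match.
#if 0 /* usage sketch */
static inline int first_match_index(__m128i block, char c)
{
    __m128i eq = _mm_cmpeq_epi8(block, _mm_set1_epi8(c));
    int mask = _mm_movemask_epi8(eq); // one bit per byte lane
    if (mask == 0)
        return -1;                    // no byte matched
    return __builtin_ctz(mask);       // index of the lowest set bit
}
#endif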
  1710. // NEON does not provide this method
  1711. // Creates a 4-bit mask from the most significant bits of the four
  1712. // single-precision, floating-point values.
  1713. // https://msdn.microsoft.com/en-us/library/vstudio/4490ys29(v=vs.100).aspx
  1714. FORCE_INLINE int _mm_movemask_ps(__m128 a)
  1715. {
  1716. uint32x4_t input = vreinterpretq_u32_m128(a);
  1717. #if defined(__aarch64__)
  1718. static const int32x4_t shift = {-31, -30, -29, -28};
  1719. static const uint32x4_t highbit = {0x80000000, 0x80000000, 0x80000000,
  1720. 0x80000000};
  1721. return vaddvq_u32(vshlq_u32(vandq_u32(input, highbit), shift));
  1722. #else
  1723. // Uses the exact same method as _mm_movemask_epi8, see that for details.
  1724. // Shift out everything but the sign bits with a 32-bit unsigned shift
  1725. // right.
  1726. uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(input, 31));
  1727. // Merge the two pairs together with a 64-bit unsigned shift right + add.
  1728. uint8x16_t paired =
  1729. vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
  1730. // Extract the result.
  1731. return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
  1732. #endif
  1733. }
  1734. // Compute the bitwise AND of 128 bits (representing integer data) in a and
  1735. // mask, and return 1 if the result is zero, otherwise return 0.
  1736. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_test_all_zeros&expand=5871
  1737. FORCE_INLINE int _mm_test_all_zeros(__m128i a, __m128i mask)
  1738. {
  1739. int64x2_t a_and_mask = vandq_s64(vreinterpretq_s64_m128i(a),
  1740. vreinterpretq_s64_m128i(mask));
  1741. return (vgetq_lane_s64(a_and_mask, 0) | vgetq_lane_s64(a_and_mask, 1))
  1742. ? 0
  1743. : 1;
  1744. }
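// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical and _mm_xor_si128/_mm_set1_epi32 are assumed available): two
// vectors are equal exactly when their XOR, ANDed with an all-ones mask, is zero.
#if 0 /* usage sketch */
static inline int vectors_equal(__m128i a, __m128i b)
{
    return _mm_test_all_zeros(_mm_xor_si128(a, b), _mm_set1_epi32(-1));
}
#endif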
  1745. /* Math operations */
  1746. // Subtracts the four single-precision, floating-point values of a and b.
  1747. //
  1748. // r0 := a0 - b0
  1749. // r1 := a1 - b1
  1750. // r2 := a2 - b2
  1751. // r3 := a3 - b3
  1752. //
  1753. // https://msdn.microsoft.com/en-us/library/vstudio/1zad2k61(v=vs.100).aspx
  1754. FORCE_INLINE __m128 _mm_sub_ps(__m128 a, __m128 b)
  1755. {
  1756. return vreinterpretq_m128_f32(vsubq_f32(vreinterpretq_f32_m128(a),
  1757. vreinterpretq_f32_m128(b)));
  1758. }
  1759. // Subtract 2 packed 64-bit integers in b from 2 packed 64-bit integers in a,
  1760. // and store the results in dst.
  1761. // r0 := a0 - b0
  1762. // r1 := a1 - b1
  1763. FORCE_INLINE __m128i _mm_sub_epi64(__m128i a, __m128i b)
  1764. {
  1765. return vreinterpretq_m128i_s64(vsubq_s64(vreinterpretq_s64_m128i(a),
  1766. vreinterpretq_s64_m128i(b)));
  1767. }
  1768. // Subtracts the 4 signed or unsigned 32-bit integers of b from the 4 signed or
  1769. // unsigned 32-bit integers of a.
  1770. //
  1771. // r0 := a0 - b0
  1772. // r1 := a1 - b1
  1773. // r2 := a2 - b2
  1774. // r3 := a3 - b3
  1775. //
  1776. // https://msdn.microsoft.com/en-us/library/vstudio/fhh866h0(v=vs.100).aspx
  1777. FORCE_INLINE __m128i _mm_sub_epi32(__m128i a, __m128i b)
  1778. {
  1779. return vreinterpretq_m128i_s32(vsubq_s32(vreinterpretq_s32_m128i(a),
  1780. vreinterpretq_s32_m128i(b)));
  1781. }
  1782. FORCE_INLINE __m128i _mm_sub_epi16(__m128i a, __m128i b)
  1783. {
  1784. return vreinterpretq_m128i_s16(vsubq_s16(vreinterpretq_s16_m128i(a),
  1785. vreinterpretq_s16_m128i(b)));
  1786. }
  1787. FORCE_INLINE __m128i _mm_sub_epi8(__m128i a, __m128i b)
  1788. {
  1789. return vreinterpretq_m128i_s8(
  1790. vsubq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  1791. }
1792. // Subtracts the 8 unsigned 16-bit integers of b from the 8 unsigned 16-bit
1793. // integers of a and saturates.
  1794. // https://technet.microsoft.com/en-us/subscriptions/index/f44y0s19(v=vs.90).aspx
  1795. FORCE_INLINE __m128i _mm_subs_epu16(__m128i a, __m128i b)
  1796. {
  1797. return vreinterpretq_m128i_u16(vqsubq_u16(vreinterpretq_u16_m128i(a),
  1798. vreinterpretq_u16_m128i(b)));
  1799. }
  1800. // Subtracts the 16 unsigned 8-bit integers of b from the 16 unsigned 8-bit
  1801. // integers of a and saturates.
  1802. //
  1803. // r0 := UnsignedSaturate(a0 - b0)
  1804. // r1 := UnsignedSaturate(a1 - b1)
  1805. // ...
  1806. // r15 := UnsignedSaturate(a15 - b15)
  1807. //
  1808. // https://technet.microsoft.com/en-us/subscriptions/yadkxc18(v=vs.90)
  1809. FORCE_INLINE __m128i _mm_subs_epu8(__m128i a, __m128i b)
  1810. {
  1811. return vreinterpretq_m128i_u8(vqsubq_u8(vreinterpretq_u8_m128i(a),
  1812. vreinterpretq_u8_m128i(b)));
  1813. }
  1814. // Subtracts the 16 signed 8-bit integers of b from the 16 signed 8-bit integers
  1815. // of a and saturates.
  1816. //
  1817. // r0 := SignedSaturate(a0 - b0)
  1818. // r1 := SignedSaturate(a1 - b1)
  1819. // ...
  1820. // r15 := SignedSaturate(a15 - b15)
  1821. //
  1822. // https://technet.microsoft.com/en-us/subscriptions/by7kzks1(v=vs.90)
  1823. FORCE_INLINE __m128i _mm_subs_epi8(__m128i a, __m128i b)
  1824. {
  1825. return vreinterpretq_m128i_s8(vqsubq_s8(vreinterpretq_s8_m128i(a),
  1826. vreinterpretq_s8_m128i(b)));
  1827. }
  1828. // Subtracts the 8 signed 16-bit integers of b from the 8 signed 16-bit integers
  1829. // of a and saturates.
  1830. //
  1831. // r0 := SignedSaturate(a0 - b0)
  1832. // r1 := SignedSaturate(a1 - b1)
  1833. // ...
  1834. // r7 := SignedSaturate(a7 - b7)
  1835. //
  1836. // https://technet.microsoft.com/en-us/subscriptions/3247z5b8(v=vs.90)
  1837. FORCE_INLINE __m128i _mm_subs_epi16(__m128i a, __m128i b)
  1838. {
  1839. return vreinterpretq_m128i_s16(vqsubq_s16(vreinterpretq_s16_m128i(a),
  1840. vreinterpretq_s16_m128i(b)));
  1841. }
  1842. FORCE_INLINE __m128i _mm_adds_epu16(__m128i a, __m128i b)
  1843. {
  1844. return vreinterpretq_m128i_u16(vqaddq_u16(vreinterpretq_u16_m128i(a),
  1845. vreinterpretq_u16_m128i(b)));
  1846. }
  1847. // Negate packed 8-bit integers in a when the corresponding signed
  1848. // 8-bit integer in b is negative, and store the results in dst.
1849. // Elements in dst are zeroed out when the corresponding element
  1850. // in b is zero.
  1851. //
  1852. // for i in 0..15
  1853. // if b[i] < 0
  1854. // r[i] := -a[i]
  1855. // else if b[i] == 0
  1856. // r[i] := 0
  1857. // else
  1858. // r[i] := a[i]
  1859. // fi
  1860. // done
  1861. FORCE_INLINE __m128i _mm_sign_epi8(__m128i _a, __m128i _b)
  1862. {
  1863. int8x16_t a = vreinterpretq_s8_m128i(_a);
  1864. int8x16_t b = vreinterpretq_s8_m128i(_b);
  1865. int8x16_t zero = vdupq_n_s8(0);
  1866. // signed shift right: faster than vclt
  1867. // (b < 0) ? 0xFF : 0
  1868. uint8x16_t ltMask = vreinterpretq_u8_s8(vshrq_n_s8(b, 7));
  1869. // (b == 0) ? 0xFF : 0
  1870. int8x16_t zeroMask = vreinterpretq_s8_u8(vceqq_s8(b, zero));
  1871. // -a
  1872. int8x16_t neg = vnegq_s8(a);
  1873. // bitwise select either a or neg based on ltMask
  1874. int8x16_t masked = vbslq_s8(ltMask, a, neg);
  1875. // res = masked & (~zeroMask)
  1876. int8x16_t res = vbicq_s8(masked, zeroMask);
  1877. return vreinterpretq_m128i_s8(res);
  1878. }
  1879. // Negate packed 16-bit integers in a when the corresponding signed
  1880. // 16-bit integer in b is negative, and store the results in dst.
1881. // Elements in dst are zeroed out when the corresponding element
  1882. // in b is zero.
  1883. //
  1884. // for i in 0..7
  1885. // if b[i] < 0
  1886. // r[i] := -a[i]
  1887. // else if b[i] == 0
  1888. // r[i] := 0
  1889. // else
  1890. // r[i] := a[i]
  1891. // fi
  1892. // done
  1893. FORCE_INLINE __m128i _mm_sign_epi16(__m128i _a, __m128i _b)
  1894. {
  1895. int16x8_t a = vreinterpretq_s16_m128i(_a);
  1896. int16x8_t b = vreinterpretq_s16_m128i(_b);
  1897. int16x8_t zero = vdupq_n_s16(0);
  1898. // signed shift right: faster than vclt
  1899. // (b < 0) ? 0xFFFF : 0
  1900. uint16x8_t ltMask = vreinterpretq_u16_s16(vshrq_n_s16(b, 15));
  1901. // (b == 0) ? 0xFFFF : 0
  1902. int16x8_t zeroMask = vreinterpretq_s16_u16(vceqq_s16(b, zero));
  1903. // -a
  1904. int16x8_t neg = vnegq_s16(a);
  1905. // bitwise select either a or neg based on ltMask
  1906. int16x8_t masked = vbslq_s16(ltMask, a, neg);
  1907. // res = masked & (~zeroMask)
  1908. int16x8_t res = vbicq_s16(masked, zeroMask);
  1909. return vreinterpretq_m128i_s16(res);
  1910. }
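// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical): applying a value's own sign to itself yields its absolute
// value (with the usual INT16_MIN wraparound), matching _mm_abs_epi16 on x86.
#if 0 /* usage sketch */
static inline __m128i abs_epi16_via_sign(__m128i a)
{
    return _mm_sign_epi16(a, a);
}
#endif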
  1911. // Negate packed 32-bit integers in a when the corresponding signed
  1912. // 32-bit integer in b is negative, and store the results in dst.
1913. // Elements in dst are zeroed out when the corresponding element
  1914. // in b is zero.
  1915. //
  1916. // for i in 0..3
  1917. // if b[i] < 0
  1918. // r[i] := -a[i]
  1919. // else if b[i] == 0
  1920. // r[i] := 0
  1921. // else
  1922. // r[i] := a[i]
  1923. // fi
  1924. // done
  1925. FORCE_INLINE __m128i _mm_sign_epi32(__m128i _a, __m128i _b)
  1926. {
  1927. int32x4_t a = vreinterpretq_s32_m128i(_a);
  1928. int32x4_t b = vreinterpretq_s32_m128i(_b);
  1929. int32x4_t zero = vdupq_n_s32(0);
  1930. // signed shift right: faster than vclt
  1931. // (b < 0) ? 0xFFFFFFFF : 0
  1932. uint32x4_t ltMask = vreinterpretq_u32_s32(vshrq_n_s32(b, 31));
  1933. // (b == 0) ? 0xFFFFFFFF : 0
  1934. int32x4_t zeroMask = vreinterpretq_s32_u32(vceqq_s32(b, zero));
  1935. // neg = -a
  1936. int32x4_t neg = vnegq_s32(a);
  1937. // bitwise select either a or neg based on ltMask
  1938. int32x4_t masked = vbslq_s32(ltMask, a, neg);
  1939. // res = masked & (~zeroMask)
  1940. int32x4_t res = vbicq_s32(masked, zeroMask);
  1941. return vreinterpretq_m128i_s32(res);
  1942. }
  1943. // Computes the average of the 16 unsigned 8-bit integers in a and the 16
  1944. // unsigned 8-bit integers in b and rounds.
  1945. //
  1946. // r0 := (a0 + b0) / 2
  1947. // r1 := (a1 + b1) / 2
  1948. // ...
  1949. // r15 := (a15 + b15) / 2
  1950. //
  1951. // https://msdn.microsoft.com/en-us/library/vstudio/8zwh554a(v%3dvs.90).aspx
  1952. FORCE_INLINE __m128i _mm_avg_epu8(__m128i a, __m128i b)
  1953. {
  1954. return vreinterpretq_m128i_u8(vrhaddq_u8(vreinterpretq_u8_m128i(a),
  1955. vreinterpretq_u8_m128i(b)));
  1956. }
  1957. // Computes the average of the 8 unsigned 16-bit integers in a and the 8
  1958. // unsigned 16-bit integers in b and rounds.
  1959. //
  1960. // r0 := (a0 + b0) / 2
  1961. // r1 := (a1 + b1) / 2
  1962. // ...
  1963. // r7 := (a7 + b7) / 2
  1964. //
  1965. // https://msdn.microsoft.com/en-us/library/vstudio/y13ca3c8(v=vs.90).aspx
  1966. FORCE_INLINE __m128i _mm_avg_epu16(__m128i a, __m128i b)
  1967. {
  1968. return (__m128i)vrhaddq_u16(vreinterpretq_u16_m128i(a),
  1969. vreinterpretq_u16_m128i(b));
  1970. }
  1971. // Adds the four single-precision, floating-point values of a and b.
  1972. //
  1973. // r0 := a0 + b0
  1974. // r1 := a1 + b1
  1975. // r2 := a2 + b2
  1976. // r3 := a3 + b3
  1977. //
  1978. // https://msdn.microsoft.com/en-us/library/vstudio/c9848chc(v=vs.100).aspx
  1979. FORCE_INLINE __m128 _mm_add_ps(__m128 a, __m128 b)
  1980. {
  1981. return vreinterpretq_m128_f32(vaddq_f32(vreinterpretq_f32_m128(a),
  1982. vreinterpretq_f32_m128(b)));
  1983. }
1984. // Adds the scalar single-precision floating point values of a and b.
  1985. // https://msdn.microsoft.com/en-us/library/be94x2y6(v=vs.100).aspx
  1986. FORCE_INLINE __m128 _mm_add_ss(__m128 a, __m128 b)
  1987. {
  1988. float32_t b0 = vgetq_lane_f32(vreinterpretq_f32_m128(b), 0);
  1989. float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
  1990. // the upper values in the result must be the remnants of <a>.
1991. return vreinterpretq_m128_f32(vaddq_f32(vreinterpretq_f32_m128(a), value));
  1992. }
1993. // Adds the 2 signed or unsigned 64-bit integers in a to the 2 signed or
1994. // unsigned 64-bit integers in b.
  1995. // https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
  1996. FORCE_INLINE __m128i _mm_add_epi64(__m128i a, __m128i b)
  1997. {
  1998. return vreinterpretq_m128i_s64(vaddq_s64(vreinterpretq_s64_m128i(a),
  1999. vreinterpretq_s64_m128i(b)));
  2000. }
  2001. // Adds the 4 signed or unsigned 32-bit integers in a to the 4 signed or
  2002. // unsigned 32-bit integers in b.
  2003. //
  2004. // r0 := a0 + b0
  2005. // r1 := a1 + b1
  2006. // r2 := a2 + b2
  2007. // r3 := a3 + b3
  2008. //
  2009. // https://msdn.microsoft.com/en-us/library/vstudio/09xs4fkk(v=vs.100).aspx
  2010. FORCE_INLINE __m128i _mm_add_epi32(__m128i a, __m128i b)
  2011. {
  2012. return vreinterpretq_m128i_s32(vaddq_s32(vreinterpretq_s32_m128i(a),
  2013. vreinterpretq_s32_m128i(b)));
  2014. }
  2015. // Adds the 8 signed or unsigned 16-bit integers in a to the 8 signed or
  2016. // unsigned 16-bit integers in b.
  2017. // https://msdn.microsoft.com/en-us/library/fceha5k4(v=vs.100).aspx
  2018. FORCE_INLINE __m128i _mm_add_epi16(__m128i a, __m128i b)
  2019. {
  2020. return vreinterpretq_m128i_s16(vaddq_s16(vreinterpretq_s16_m128i(a),
  2021. vreinterpretq_s16_m128i(b)));
  2022. }
  2023. // Adds the 16 signed or unsigned 8-bit integers in a to the 16 signed or
  2024. // unsigned 8-bit integers in b.
  2025. // https://technet.microsoft.com/en-us/subscriptions/yc7tcyzs(v=vs.90)
  2026. FORCE_INLINE __m128i _mm_add_epi8(__m128i a, __m128i b)
  2027. {
  2028. return vreinterpretq_m128i_s8(
  2029. vaddq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  2030. }
  2031. // Adds the 8 signed 16-bit integers in a to the 8 signed 16-bit integers in b
  2032. // and saturates.
  2033. //
  2034. // r0 := SignedSaturate(a0 + b0)
  2035. // r1 := SignedSaturate(a1 + b1)
  2036. // ...
  2037. // r7 := SignedSaturate(a7 + b7)
  2038. //
  2039. // https://msdn.microsoft.com/en-us/library/1a306ef8(v=vs.100).aspx
  2040. FORCE_INLINE __m128i _mm_adds_epi16(__m128i a, __m128i b)
  2041. {
  2042. return vreinterpretq_m128i_s16(vqaddq_s16(vreinterpretq_s16_m128i(a),
  2043. vreinterpretq_s16_m128i(b)));
  2044. }
  2045. // Adds the 16 unsigned 8-bit integers in a to the 16 unsigned 8-bit integers in
2046. // b and saturates.
  2047. // https://msdn.microsoft.com/en-us/library/9hahyddy(v=vs.100).aspx
  2048. FORCE_INLINE __m128i _mm_adds_epu8(__m128i a, __m128i b)
  2049. {
  2050. return vreinterpretq_m128i_u8(vqaddq_u8(vreinterpretq_u8_m128i(a),
  2051. vreinterpretq_u8_m128i(b)));
  2052. }
  2053. // Multiplies the 8 signed or unsigned 16-bit integers from a by the 8 signed or
  2054. // unsigned 16-bit integers from b.
  2055. //
  2056. // r0 := (a0 * b0)[15:0]
  2057. // r1 := (a1 * b1)[15:0]
  2058. // ...
  2059. // r7 := (a7 * b7)[15:0]
  2060. //
  2061. // https://msdn.microsoft.com/en-us/library/vstudio/9ks1472s(v=vs.100).aspx
  2062. FORCE_INLINE __m128i _mm_mullo_epi16(__m128i a, __m128i b)
  2063. {
  2064. return vreinterpretq_m128i_s16(vmulq_s16(vreinterpretq_s16_m128i(a),
  2065. vreinterpretq_s16_m128i(b)));
  2066. }
  2067. // Multiplies the 4 signed or unsigned 32-bit integers from a by the 4 signed or
  2068. // unsigned 32-bit integers from b.
  2069. // https://msdn.microsoft.com/en-us/library/vstudio/bb531409(v=vs.100).aspx
  2070. FORCE_INLINE __m128i _mm_mullo_epi32(__m128i a, __m128i b)
  2071. {
  2072. return vreinterpretq_m128i_s32(vmulq_s32(vreinterpretq_s32_m128i(a),
  2073. vreinterpretq_s32_m128i(b)));
  2074. }
  2075. // Multiplies the four single-precision, floating-point values of a and b.
  2076. //
  2077. // r0 := a0 * b0
  2078. // r1 := a1 * b1
  2079. // r2 := a2 * b2
  2080. // r3 := a3 * b3
  2081. //
  2082. // https://msdn.microsoft.com/en-us/library/vstudio/22kbk6t9(v=vs.100).aspx
  2083. FORCE_INLINE __m128 _mm_mul_ps(__m128 a, __m128 b)
  2084. {
  2085. return vreinterpretq_m128_f32(vmulq_f32(vreinterpretq_f32_m128(a),
  2086. vreinterpretq_f32_m128(b)));
  2087. }
  2088. // Multiply the low unsigned 32-bit integers from each packed 64-bit element in
  2089. // a and b, and store the unsigned 64-bit results in dst.
  2090. //
  2091. // r0 := (a0 & 0xFFFFFFFF) * (b0 & 0xFFFFFFFF)
  2092. // r1 := (a2 & 0xFFFFFFFF) * (b2 & 0xFFFFFFFF)
  2093. FORCE_INLINE __m128i _mm_mul_epu32(__m128i a, __m128i b)
  2094. {
  2095. // vmull_u32 upcasts instead of masking, so we downcast.
  2096. uint32x2_t a_lo = vmovn_u64(vreinterpretq_u64_m128i(a));
  2097. uint32x2_t b_lo = vmovn_u64(vreinterpretq_u64_m128i(b));
  2098. return vreinterpretq_m128i_u64(vmull_u32(a_lo, b_lo));
  2099. }
  2100. // Multiply the low signed 32-bit integers from each packed 64-bit element in
  2101. // a and b, and store the signed 64-bit results in dst.
  2102. //
  2103. // r0 := (int64_t)(int32_t)a0 * (int64_t)(int32_t)b0
  2104. // r1 := (int64_t)(int32_t)a2 * (int64_t)(int32_t)b2
  2105. FORCE_INLINE __m128i _mm_mul_epi32(__m128i a, __m128i b)
  2106. {
  2107. // vmull_s32 upcasts instead of masking, so we downcast.
  2108. int32x2_t a_lo = vmovn_s64(vreinterpretq_s64_m128i(a));
  2109. int32x2_t b_lo = vmovn_s64(vreinterpretq_s64_m128i(b));
  2110. return vreinterpretq_m128i_s64(vmull_s32(a_lo, b_lo));
  2111. }
  2112. // Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit
  2113. // integers from b.
  2114. //
  2115. // r0 := (a0 * b0) + (a1 * b1)
  2116. // r1 := (a2 * b2) + (a3 * b3)
  2117. // r2 := (a4 * b4) + (a5 * b5)
  2118. // r3 := (a6 * b6) + (a7 * b7)
  2119. // https://msdn.microsoft.com/en-us/library/yht36sa6(v=vs.90).aspx
  2120. FORCE_INLINE __m128i _mm_madd_epi16(__m128i a, __m128i b)
  2121. {
  2122. int32x4_t low = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
  2123. vget_low_s16(vreinterpretq_s16_m128i(b)));
  2124. int32x4_t high = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
  2125. vget_high_s16(vreinterpretq_s16_m128i(b)));
  2126. int32x2_t low_sum = vpadd_s32(vget_low_s32(low), vget_high_s32(low));
  2127. int32x2_t high_sum = vpadd_s32(vget_low_s32(high), vget_high_s32(high));
  2128. return vreinterpretq_m128i_s32(vcombine_s32(low_sum, high_sum));
  2129. }
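// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical and _mm_storeu_si128 is assumed available): each call yields
// four partial sums of adjacent products, so summing those four 32-bit lanes
// gives the full dot product of eight 16-bit element pairs.
#if 0 /* usage sketch */
static inline int32_t dot_product_8x16(__m128i a, __m128i b)
{
    int32_t tmp[4];
    _mm_storeu_si128((__m128i *) tmp, _mm_madd_epi16(a, b));
    return tmp[0] + tmp[1] + tmp[2] + tmp[3];
}
#endif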
  2130. // Multiply packed signed 16-bit integers in a and b, producing intermediate
  2131. // signed 32-bit integers. Shift right by 15 bits while rounding up, and store
  2132. // the packed 16-bit integers in dst.
  2133. //
  2134. // r0 := Round(((int32_t)a0 * (int32_t)b0) >> 15)
  2135. // r1 := Round(((int32_t)a1 * (int32_t)b1) >> 15)
  2136. // r2 := Round(((int32_t)a2 * (int32_t)b2) >> 15)
  2137. // ...
  2138. // r7 := Round(((int32_t)a7 * (int32_t)b7) >> 15)
  2139. FORCE_INLINE __m128i _mm_mulhrs_epi16(__m128i a, __m128i b)
  2140. {
  2141. // Has issues due to saturation
  2142. // return vreinterpretq_m128i_s16(vqrdmulhq_s16(a, b));
  2143. // Multiply
  2144. int32x4_t mul_lo = vmull_s16(vget_low_s16(vreinterpretq_s16_m128i(a)),
  2145. vget_low_s16(vreinterpretq_s16_m128i(b)));
  2146. int32x4_t mul_hi = vmull_s16(vget_high_s16(vreinterpretq_s16_m128i(a)),
  2147. vget_high_s16(vreinterpretq_s16_m128i(b)));
  2148. // Rounding narrowing shift right
  2149. // narrow = (int16_t)((mul + 16384) >> 15);
  2150. int16x4_t narrow_lo = vrshrn_n_s32(mul_lo, 15);
  2151. int16x4_t narrow_hi = vrshrn_n_s32(mul_hi, 15);
  2152. // Join together
  2153. return vreinterpretq_m128i_s16(vcombine_s16(narrow_lo, narrow_hi));
  2154. }
  2155. // Vertically multiply each unsigned 8-bit integer from a with the corresponding
  2156. // signed 8-bit integer from b, producing intermediate signed 16-bit integers.
  2157. // Horizontally add adjacent pairs of intermediate signed 16-bit integers,
  2158. // and pack the saturated results in dst.
  2159. //
  2160. // FOR j := 0 to 7
  2161. // i := j*16
  2162. // dst[i+15:i] := Saturate_To_Int16( a[i+15:i+8]*b[i+15:i+8] +
  2163. // a[i+7:i]*b[i+7:i] )
  2164. // ENDFOR
  2165. FORCE_INLINE __m128i _mm_maddubs_epi16(__m128i _a, __m128i _b)
  2166. {
  2167. // This would be much simpler if x86 would choose to zero extend OR sign
  2168. // extend, not both. This could probably be optimized better.
  2169. uint16x8_t a = vreinterpretq_u16_m128i(_a);
  2170. int16x8_t b = vreinterpretq_s16_m128i(_b);
  2171. // Zero extend a
  2172. int16x8_t a_odd = vreinterpretq_s16_u16(vshrq_n_u16(a, 8));
  2173. int16x8_t a_even =
  2174. vreinterpretq_s16_u16(vbicq_u16(a, vdupq_n_u16(0xff00)));
  2175. // Sign extend by shifting left then shifting right.
  2176. int16x8_t b_even = vshrq_n_s16(vshlq_n_s16(b, 8), 8);
  2177. int16x8_t b_odd = vshrq_n_s16(b, 8);
  2178. // multiply
  2179. int16x8_t prod1 = vmulq_s16(a_even, b_even);
  2180. int16x8_t prod2 = vmulq_s16(a_odd, b_odd);
  2181. // saturated add
  2182. return vreinterpretq_m128i_s16(vqaddq_s16(prod1, prod2));
  2183. }
  2184. // Computes the absolute difference of the 16 unsigned 8-bit integers from a
  2185. // and the 16 unsigned 8-bit integers from b.
  2186. //
  2187. // Return Value
  2188. // Sums the upper 8 differences and lower 8 differences and packs the
  2189. // resulting 2 unsigned 16-bit integers into the upper and lower 64-bit
  2190. // elements.
  2191. //
  2192. // r0 := abs(a0 - b0) + abs(a1 - b1) +...+ abs(a7 - b7)
  2193. // r1 := 0x0
  2194. // r2 := 0x0
  2195. // r3 := 0x0
  2196. // r4 := abs(a8 - b8) + abs(a9 - b9) +...+ abs(a15 - b15)
  2197. // r5 := 0x0
  2198. // r6 := 0x0
  2199. // r7 := 0x0
  2200. FORCE_INLINE __m128i _mm_sad_epu8(__m128i a, __m128i b)
  2201. {
  2202. uint16x8_t t = vpaddlq_u8(vabdq_u8((uint8x16_t)a, (uint8x16_t)b));
  2203. uint16_t r0 = t[0] + t[1] + t[2] + t[3];
  2204. uint16_t r4 = t[4] + t[5] + t[6] + t[7];
  2205. uint16x8_t r = vsetq_lane_u16(r0, vdupq_n_u16(0), 0);
  2206. return (__m128i)vsetq_lane_u16(r4, r, 4);
  2207. }
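// Illustrative usage (not part of the original sse2neon.h; the helper name is
// hypothetical and _mm_cvtsi128_si32 is assumed available): the two partial
// sums land in 64-bit lanes 0 and 1, so the full 16-byte sum of absolute
// differences is the sum of the two extracted lanes.
#if 0 /* usage sketch */
static inline uint32_t sad_16bytes(__m128i a, __m128i b)
{
    __m128i sad = _mm_sad_epu8(a, b);
    return (uint32_t) _mm_cvtsi128_si32(sad) +
           (uint32_t) _mm_cvtsi128_si32(_mm_srli_si128(sad, 8));
}
#endif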
  2208. // Divides the four single-precision, floating-point values of a and b.
  2209. //
  2210. // r0 := a0 / b0
  2211. // r1 := a1 / b1
  2212. // r2 := a2 / b2
  2213. // r3 := a3 / b3
  2214. //
  2215. // https://msdn.microsoft.com/en-us/library/edaw8147(v=vs.100).aspx
  2216. FORCE_INLINE __m128 _mm_div_ps(__m128 a, __m128 b)
  2217. {
  2218. float32x4_t recip0 = vrecpeq_f32(vreinterpretq_f32_m128(b));
  2219. float32x4_t recip1 = vmulq_f32(
  2220. recip0, vrecpsq_f32(recip0, vreinterpretq_f32_m128(b)));
  2221. return vreinterpretq_m128_f32(
  2222. vmulq_f32(vreinterpretq_f32_m128(a), recip1));
  2223. }
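// Accuracy note (added, not part of the original sse2neon.h; the helper name is
// hypothetical): the reciprocal above is refined with a single Newton-Raphson
// step. Callers needing a result closer to exact division could add a second
// vrecpsq_f32 step in the same pattern, at the cost of two extra multiplies
// per vector.
#if 0 /* sketch of a second refinement step */
FORCE_INLINE __m128 div_ps_two_steps(__m128 a, __m128 b)
{
    float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(b));
    recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
    recip = vmulq_f32(recip, vrecpsq_f32(recip, vreinterpretq_f32_m128(b)));
    return vreinterpretq_m128_f32(
        vmulq_f32(vreinterpretq_f32_m128(a), recip));
}
#endif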
  2224. // Divides the scalar single-precision floating point value of a by b.
  2225. // https://msdn.microsoft.com/en-us/library/4y73xa49(v=vs.100).aspx
  2226. FORCE_INLINE __m128 _mm_div_ss(__m128 a, __m128 b)
  2227. {
  2228. float32_t value =
  2229. vgetq_lane_f32(vreinterpretq_f32_m128(_mm_div_ps(a, b)), 0);
  2230. return vreinterpretq_m128_f32(
  2231. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  2232. }
  2233. // Computes the approximations of reciprocals of the four single-precision,
  2234. // floating-point values of a.
  2235. // https://msdn.microsoft.com/en-us/library/vstudio/796k1tty(v=vs.100).aspx
  2236. FORCE_INLINE __m128 _mm_rcp_ps(__m128 in)
  2237. {
  2238. float32x4_t recip = vrecpeq_f32(vreinterpretq_f32_m128(in));
  2239. recip = vmulq_f32(recip,
  2240. vrecpsq_f32(recip, vreinterpretq_f32_m128(in)));
  2241. return vreinterpretq_m128_f32(recip);
  2242. }
  2243. // Computes the approximations of square roots of the four single-precision,
  2244. // floating-point values of a. First computes reciprocal square roots and then
  2245. // reciprocals of the four values.
  2246. //
  2247. // r0 := sqrt(a0)
  2248. // r1 := sqrt(a1)
  2249. // r2 := sqrt(a2)
  2250. // r3 := sqrt(a3)
  2251. //
  2252. // https://msdn.microsoft.com/en-us/library/vstudio/8z67bwwk(v=vs.100).aspx
  2253. FORCE_INLINE __m128 _mm_sqrt_ps(__m128 in)
  2254. {
  2255. #if defined(__aarch64__)
  2256. return vreinterpretq_m128_f32(vsqrtq_f32(vreinterpretq_f32_m128(in)));
  2257. #else
  2258. float32x4_t recipsq = vrsqrteq_f32(vreinterpretq_f32_m128(in));
  2259. float32x4_t sq = vrecpeq_f32(recipsq);
  2260. // ??? use step versions of both sqrt and recip for better accuracy?
  2261. return vreinterpretq_m128_f32(sq);
  2262. #endif
  2263. }
  2264. // Computes the approximation of the square root of the scalar single-precision
  2265. // floating point value of in.
  2266. // https://msdn.microsoft.com/en-us/library/ahfsc22d(v=vs.100).aspx
  2267. FORCE_INLINE __m128 _mm_sqrt_ss(__m128 in)
  2268. {
  2269. float32_t value =
  2270. vgetq_lane_f32(vreinterpretq_f32_m128(_mm_sqrt_ps(in)), 0);
  2271. return vreinterpretq_m128_f32(
  2272. vsetq_lane_f32(value, vreinterpretq_f32_m128(in), 0));
  2273. }
  2274. // Computes the approximations of the reciprocal square roots of the four
  2275. // single-precision floating point values of in.
  2276. // https://msdn.microsoft.com/en-us/library/22hfsh53(v=vs.100).aspx
  2277. FORCE_INLINE __m128 _mm_rsqrt_ps(__m128 in)
  2278. {
  2279. return vreinterpretq_m128_f32(vrsqrteq_f32(vreinterpretq_f32_m128(in)));
  2280. }
  2281. // Compute the approximate reciprocal square root of the lower single-precision
  2282. // (32-bit) floating-point element in a, store the result in the lower element
  2283. // of dst, and copy the upper 3 packed elements from a to the upper elements of
  2284. // dst.
  2285. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_rsqrt_ss
  2286. FORCE_INLINE __m128 _mm_rsqrt_ss(__m128 in)
  2287. {
return vreinterpretq_m128_f32(vsetq_lane_f32(
vgetq_lane_f32(vreinterpretq_f32_m128(_mm_rsqrt_ps(in)), 0),
vreinterpretq_f32_m128(in), 0));
  2289. }
  2290. // Computes the maximums of the four single-precision, floating-point values of
  2291. // a and b.
  2292. // https://msdn.microsoft.com/en-us/library/vstudio/ff5d607a(v=vs.100).aspx
  2293. FORCE_INLINE __m128 _mm_max_ps(__m128 a, __m128 b)
  2294. {
  2295. return vreinterpretq_m128_f32(vmaxq_f32(vreinterpretq_f32_m128(a),
  2296. vreinterpretq_f32_m128(b)));
  2297. }
  2298. // Computes the minima of the four single-precision, floating-point values of a
  2299. // and b.
  2300. // https://msdn.microsoft.com/en-us/library/vstudio/wh13kadz(v=vs.100).aspx
  2301. FORCE_INLINE __m128 _mm_min_ps(__m128 a, __m128 b)
  2302. {
  2303. return vreinterpretq_m128_f32(vminq_f32(vreinterpretq_f32_m128(a),
  2304. vreinterpretq_f32_m128(b)));
  2305. }
  2306. // Computes the maximum of the two lower scalar single-precision floating point
  2307. // values of a and b.
  2308. // https://msdn.microsoft.com/en-us/library/s6db5esz(v=vs.100).aspx
  2309. FORCE_INLINE __m128 _mm_max_ss(__m128 a, __m128 b)
  2310. {
  2311. float32_t value = vgetq_lane_f32(vmaxq_f32(vreinterpretq_f32_m128(a),
  2312. vreinterpretq_f32_m128(b)),
  2313. 0);
  2314. return vreinterpretq_m128_f32(
  2315. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  2316. }
  2317. // Computes the minimum of the two lower scalar single-precision floating point
  2318. // values of a and b.
  2319. // https://msdn.microsoft.com/en-us/library/0a9y7xaa(v=vs.100).aspx
  2320. FORCE_INLINE __m128 _mm_min_ss(__m128 a, __m128 b)
  2321. {
  2322. float32_t value = vgetq_lane_f32(vminq_f32(vreinterpretq_f32_m128(a),
  2323. vreinterpretq_f32_m128(b)),
  2324. 0);
  2325. return vreinterpretq_m128_f32(
  2326. vsetq_lane_f32(value, vreinterpretq_f32_m128(a), 0));
  2327. }
  2328. // Computes the pairwise maxima of the 16 unsigned 8-bit integers from a and the
  2329. // 16 unsigned 8-bit integers from b.
  2330. // https://msdn.microsoft.com/en-us/library/st6634za(v=vs.100).aspx
  2331. FORCE_INLINE __m128i _mm_max_epu8(__m128i a, __m128i b)
  2332. {
  2333. return vreinterpretq_m128i_u8(
  2334. vmaxq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  2335. }
  2336. // Computes the pairwise minima of the 16 unsigned 8-bit integers from a and the
  2337. // 16 unsigned 8-bit integers from b.
// https://msdn.microsoft.com/ko-kr/library/17k8cf58(v=vs.100).aspx
  2339. FORCE_INLINE __m128i _mm_min_epu8(__m128i a, __m128i b)
  2340. {
  2341. return vreinterpretq_m128i_u8(
  2342. vminq_u8(vreinterpretq_u8_m128i(a), vreinterpretq_u8_m128i(b)));
  2343. }
  2344. // Computes the pairwise minima of the 8 signed 16-bit integers from a and the 8
  2345. // signed 16-bit integers from b.
  2346. // https://msdn.microsoft.com/en-us/library/vstudio/6te997ew(v=vs.100).aspx
  2347. FORCE_INLINE __m128i _mm_min_epi16(__m128i a, __m128i b)
  2348. {
  2349. return vreinterpretq_m128i_s16(vminq_s16(vreinterpretq_s16_m128i(a),
  2350. vreinterpretq_s16_m128i(b)));
  2351. }
  2352. // Computes the pairwise maxima of the 8 signed 16-bit integers from a and the 8
  2353. // signed 16-bit integers from b.
  2354. // https://msdn.microsoft.com/en-us/LIBRary/3x060h7c(v=vs.100).aspx
  2355. FORCE_INLINE __m128i _mm_max_epi16(__m128i a, __m128i b)
  2356. {
  2357. return vreinterpretq_m128i_s16(vmaxq_s16(vreinterpretq_s16_m128i(a),
  2358. vreinterpretq_s16_m128i(b)));
  2359. }
  2360. // epi versions of min/max
// Computes the pairwise maxima of the four signed 32-bit integer values of a
  2362. // and b.
  2363. //
  2364. // A 128-bit parameter that can be defined with the following equations:
  2365. // r0 := (a0 > b0) ? a0 : b0
  2366. // r1 := (a1 > b1) ? a1 : b1
  2367. // r2 := (a2 > b2) ? a2 : b2
  2368. // r3 := (a3 > b3) ? a3 : b3
  2369. //
  2370. // https://msdn.microsoft.com/en-us/library/vstudio/bb514055(v=vs.100).aspx
  2371. FORCE_INLINE __m128i _mm_max_epi32(__m128i a, __m128i b)
  2372. {
  2373. return vreinterpretq_m128i_s32(vmaxq_s32(vreinterpretq_s32_m128i(a),
  2374. vreinterpretq_s32_m128i(b)));
  2375. }
// Computes the pairwise minima of the four signed 32-bit integer values of a
  2377. // and b.
  2378. //
  2379. // A 128-bit parameter that can be defined with the following equations:
  2380. // r0 := (a0 < b0) ? a0 : b0
  2381. // r1 := (a1 < b1) ? a1 : b1
  2382. // r2 := (a2 < b2) ? a2 : b2
  2383. // r3 := (a3 < b3) ? a3 : b3
  2384. //
  2385. // https://msdn.microsoft.com/en-us/library/vstudio/bb531476(v=vs.100).aspx
  2386. FORCE_INLINE __m128i _mm_min_epi32(__m128i a, __m128i b)
  2387. {
  2388. return vreinterpretq_m128i_s32(vminq_s32(vreinterpretq_s32_m128i(a),
  2389. vreinterpretq_s32_m128i(b)));
  2390. }
  2391. // Multiplies the 8 signed 16-bit integers from a by the 8 signed 16-bit
  2392. // integers from b.
  2393. //
  2394. // r0 := (a0 * b0)[31:16]
  2395. // r1 := (a1 * b1)[31:16]
  2396. // ...
  2397. // r7 := (a7 * b7)[31:16]
  2398. //
  2399. // https://msdn.microsoft.com/en-us/library/vstudio/59hddw1d(v=vs.100).aspx
  2400. FORCE_INLINE __m128i _mm_mulhi_epi16(__m128i a, __m128i b)
  2401. {
  2402. /* FIXME: issue with large values because of result saturation */
// int16x8_t ret = vqdmulhq_s16(vreinterpretq_s16_m128i(a),
//                              vreinterpretq_s16_m128i(b)); /* = 2*a*b */
// return vreinterpretq_m128i_s16(vshrq_n_s16(ret, 1));
  2406. int16x4_t a3210 = vget_low_s16(vreinterpretq_s16_m128i(a));
  2407. int16x4_t b3210 = vget_low_s16(vreinterpretq_s16_m128i(b));
  2408. int32x4_t ab3210 = vmull_s16(a3210, b3210); /* 3333222211110000 */
  2409. int16x4_t a7654 = vget_high_s16(vreinterpretq_s16_m128i(a));
  2410. int16x4_t b7654 = vget_high_s16(vreinterpretq_s16_m128i(b));
  2411. int32x4_t ab7654 = vmull_s16(a7654, b7654); /* 7777666655554444 */
  2412. uint16x8x2_t r = vuzpq_u16(vreinterpretq_u16_s32(ab3210),
  2413. vreinterpretq_u16_s32(ab7654));
  2414. return vreinterpretq_m128i_u16(r.val[1]);
  2415. }
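// Usage sketch (illustrative only): _mm_mulhi_epi16 is typically paired with
// _mm_mullo_epi16 (assumed to be provided elsewhere in this header) to
// reconstruct the full 32-bit products of the eight 16-bit lanes:
//
//   __m128i lo = _mm_mullo_epi16(a, b);
//   __m128i hi = _mm_mulhi_epi16(a, b);
//   __m128i prod0123 = _mm_unpacklo_epi16(lo, hi); // 32-bit products 0..3
//   __m128i prod4567 = _mm_unpackhi_epi16(lo, hi); // 32-bit products 4..7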
// Computes the pairwise addition of the single-precision, floating-point
// values in a and b.
  2418. // https://msdn.microsoft.com/en-us/library/yd9wecaa.aspx
  2419. FORCE_INLINE __m128 _mm_hadd_ps(__m128 a, __m128 b)
  2420. {
  2421. #if defined(__aarch64__)
  2422. return vreinterpretq_m128_f32(vpaddq_f32(vreinterpretq_f32_m128(a),
  2423. vreinterpretq_f32_m128(b)));
  2424. #else
  2425. float32x2_t a10 = vget_low_f32(vreinterpretq_f32_m128(a));
  2426. float32x2_t a32 = vget_high_f32(vreinterpretq_f32_m128(a));
  2427. float32x2_t b10 = vget_low_f32(vreinterpretq_f32_m128(b));
  2428. float32x2_t b32 = vget_high_f32(vreinterpretq_f32_m128(b));
  2429. return vreinterpretq_m128_f32(
  2430. vcombine_f32(vpadd_f32(a10, a32), vpadd_f32(b10, b32)));
  2431. #endif
  2432. }
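// Usage sketch (illustrative only): two pairwise adds reduce a vector to its
// horizontal sum (assumes _mm_cvtss_f32 from this header):
//
//   __m128 t = _mm_hadd_ps(v, v);   // {v0+v1, v2+v3, v0+v1, v2+v3}
//   t = _mm_hadd_ps(t, t);          // every lane now holds v0+v1+v2+v3
//   float sum = _mm_cvtss_f32(t);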
// Computes the pairwise addition of the 16-bit signed or unsigned integers in
// a and b.
  2435. FORCE_INLINE __m128i _mm_hadd_epi16(__m128i _a, __m128i _b)
  2436. {
  2437. int16x8_t a = vreinterpretq_s16_m128i(_a);
  2438. int16x8_t b = vreinterpretq_s16_m128i(_b);
  2439. #if defined(__aarch64__)
  2440. return vreinterpretq_m128i_s16(vpaddq_s16(a, b));
  2441. #else
  2442. return vreinterpretq_m128i_s16(
  2443. vcombine_s16(vpadd_s16(vget_low_s16(a), vget_high_s16(a)),
  2444. vpadd_s16(vget_low_s16(b), vget_high_s16(b))));
  2445. #endif
  2446. }
// Computes the pairwise difference of the 16-bit signed or unsigned integers
// in a and b.
  2449. FORCE_INLINE __m128i _mm_hsub_epi16(__m128i _a, __m128i _b)
  2450. {
  2451. int32x4_t a = vreinterpretq_s32_m128i(_a);
  2452. int32x4_t b = vreinterpretq_s32_m128i(_b);
  2453. // Interleave using vshrn/vmovn
  2454. // [a0|a2|a4|a6|b0|b2|b4|b6]
  2455. // [a1|a3|a5|a7|b1|b3|b5|b7]
  2456. int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
  2457. int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
  2458. // Subtract
  2459. return vreinterpretq_m128i_s16(vsubq_s16(ab0246, ab1357));
  2460. }
// Computes the saturated pairwise addition of the 16-bit signed integers in a
// and b.
  2463. FORCE_INLINE __m128i _mm_hadds_epi16(__m128i _a, __m128i _b)
  2464. {
  2465. int32x4_t a = vreinterpretq_s32_m128i(_a);
  2466. int32x4_t b = vreinterpretq_s32_m128i(_b);
  2467. // Interleave using vshrn/vmovn
  2468. // [a0|a2|a4|a6|b0|b2|b4|b6]
  2469. // [a1|a3|a5|a7|b1|b3|b5|b7]
  2470. int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
  2471. int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
  2472. // Saturated add
  2473. return vreinterpretq_m128i_s16(vqaddq_s16(ab0246, ab1357));
  2474. }
// Computes the saturated pairwise difference of the 16-bit signed integers in
// a and b.
  2477. FORCE_INLINE __m128i _mm_hsubs_epi16(__m128i _a, __m128i _b)
  2478. {
  2479. int32x4_t a = vreinterpretq_s32_m128i(_a);
  2480. int32x4_t b = vreinterpretq_s32_m128i(_b);
  2481. // Interleave using vshrn/vmovn
  2482. // [a0|a2|a4|a6|b0|b2|b4|b6]
  2483. // [a1|a3|a5|a7|b1|b3|b5|b7]
  2484. int16x8_t ab0246 = vcombine_s16(vmovn_s32(a), vmovn_s32(b));
  2485. int16x8_t ab1357 = vcombine_s16(vshrn_n_s32(a, 16), vshrn_n_s32(b, 16));
  2486. // Saturated subtract
  2487. return vreinterpretq_m128i_s16(vqsubq_s16(ab0246, ab1357));
  2488. }
// Computes the pairwise addition of the 32-bit signed or unsigned integers in
// a and b.
  2491. FORCE_INLINE __m128i _mm_hadd_epi32(__m128i _a, __m128i _b)
  2492. {
  2493. int32x4_t a = vreinterpretq_s32_m128i(_a);
  2494. int32x4_t b = vreinterpretq_s32_m128i(_b);
  2495. return vreinterpretq_m128i_s32(
  2496. vcombine_s32(vpadd_s32(vget_low_s32(a), vget_high_s32(a)),
  2497. vpadd_s32(vget_low_s32(b), vget_high_s32(b))));
  2498. }
// Computes the pairwise difference of the 32-bit signed or unsigned integers
// in a and b.
  2501. FORCE_INLINE __m128i _mm_hsub_epi32(__m128i _a, __m128i _b)
  2502. {
  2503. int64x2_t a = vreinterpretq_s64_m128i(_a);
  2504. int64x2_t b = vreinterpretq_s64_m128i(_b);
  2505. // Interleave using vshrn/vmovn
  2506. // [a0|a2|b0|b2]
// [a1|a3|b1|b3]
  2508. int32x4_t ab02 = vcombine_s32(vmovn_s64(a), vmovn_s64(b));
  2509. int32x4_t ab13 = vcombine_s32(vshrn_n_s64(a, 32), vshrn_n_s64(b, 32));
  2510. // Subtract
  2511. return vreinterpretq_m128i_s32(vsubq_s32(ab02, ab13));
  2512. }
  2513. /* Compare operations */
  2514. // Compares for less than
  2515. // https://msdn.microsoft.com/en-us/library/vstudio/f330yhc8(v=vs.100).aspx
  2516. FORCE_INLINE __m128 _mm_cmplt_ps(__m128 a, __m128 b)
  2517. {
  2518. return vreinterpretq_m128_u32(vcltq_f32(vreinterpretq_f32_m128(a),
  2519. vreinterpretq_f32_m128(b)));
  2520. }
  2521. // Compares for greater than.
  2522. //
  2523. // r0 := (a0 > b0) ? 0xffffffff : 0x0
  2524. // r1 := (a1 > b1) ? 0xffffffff : 0x0
  2525. // r2 := (a2 > b2) ? 0xffffffff : 0x0
  2526. // r3 := (a3 > b3) ? 0xffffffff : 0x0
  2527. //
  2528. // https://msdn.microsoft.com/en-us/library/vstudio/11dy102s(v=vs.100).aspx
  2529. FORCE_INLINE __m128 _mm_cmpgt_ps(__m128 a, __m128 b)
  2530. {
  2531. return vreinterpretq_m128_u32(vcgtq_f32(vreinterpretq_f32_m128(a),
  2532. vreinterpretq_f32_m128(b)));
  2533. }
  2534. // Compares for greater than or equal.
  2535. // https://msdn.microsoft.com/en-us/library/vstudio/fs813y2t(v=vs.100).aspx
  2536. FORCE_INLINE __m128 _mm_cmpge_ps(__m128 a, __m128 b)
  2537. {
  2538. return vreinterpretq_m128_u32(vcgeq_f32(vreinterpretq_f32_m128(a),
  2539. vreinterpretq_f32_m128(b)));
  2540. }
  2541. // Compares for less than or equal.
  2542. //
  2543. // r0 := (a0 <= b0) ? 0xffffffff : 0x0
  2544. // r1 := (a1 <= b1) ? 0xffffffff : 0x0
  2545. // r2 := (a2 <= b2) ? 0xffffffff : 0x0
  2546. // r3 := (a3 <= b3) ? 0xffffffff : 0x0
  2547. //
  2548. // https://msdn.microsoft.com/en-us/library/vstudio/1s75w83z(v=vs.100).aspx
  2549. FORCE_INLINE __m128 _mm_cmple_ps(__m128 a, __m128 b)
  2550. {
  2551. return vreinterpretq_m128_u32(vcleq_f32(vreinterpretq_f32_m128(a),
  2552. vreinterpretq_f32_m128(b)));
  2553. }
  2554. // Compares for equality.
  2555. // https://msdn.microsoft.com/en-us/library/vstudio/36aectz5(v=vs.100).aspx
  2556. FORCE_INLINE __m128 _mm_cmpeq_ps(__m128 a, __m128 b)
  2557. {
  2558. return vreinterpretq_m128_u32(vceqq_f32(vreinterpretq_f32_m128(a),
  2559. vreinterpretq_f32_m128(b)));
  2560. }
  2561. // Compares the 16 signed or unsigned 8-bit integers in a and the 16 signed or
  2562. // unsigned 8-bit integers in b for equality.
  2563. // https://msdn.microsoft.com/en-us/library/windows/desktop/bz5xk21a(v=vs.90).aspx
  2564. FORCE_INLINE __m128i _mm_cmpeq_epi8(__m128i a, __m128i b)
  2565. {
  2566. return vreinterpretq_m128i_u8(
  2567. vceqq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  2568. }
  2569. // Compares the 8 signed or unsigned 16-bit integers in a and the 8 signed or
  2570. // unsigned 16-bit integers in b for equality.
  2571. // https://msdn.microsoft.com/en-us/library/2ay060te(v=vs.100).aspx
  2572. FORCE_INLINE __m128i _mm_cmpeq_epi16(__m128i a, __m128i b)
  2573. {
  2574. return vreinterpretq_m128i_u16(vceqq_s16(vreinterpretq_s16_m128i(a),
  2575. vreinterpretq_s16_m128i(b)));
  2576. }
  2577. // Compare packed 32-bit integers in a and b for equality, and store the results
  2578. // in dst
  2579. FORCE_INLINE __m128i _mm_cmpeq_epi32(__m128i a, __m128i b)
  2580. {
  2581. return vreinterpretq_m128i_u32(vceqq_s32(vreinterpretq_s32_m128i(a),
  2582. vreinterpretq_s32_m128i(b)));
  2583. }
  2584. // Compare packed 64-bit integers in a and b for equality, and store the results
  2585. // in dst
  2586. FORCE_INLINE __m128i _mm_cmpeq_epi64(__m128i a, __m128i b)
  2587. {
  2588. #if defined(__aarch64__)
  2589. return vreinterpretq_m128i_u64(vceqq_u64(vreinterpretq_u64_m128i(a),
  2590. vreinterpretq_u64_m128i(b)));
  2591. #else
  2592. // ARMv7 lacks vceqq_u64
  2593. // (a == b) -> (a_lo == b_lo) && (a_hi == b_hi)
  2594. uint32x4_t cmp = vceqq_u32(vreinterpretq_u32_m128i(a),
  2595. vreinterpretq_u32_m128i(b));
  2596. uint32x4_t swapped = vrev64q_u32(cmp);
  2597. return vreinterpretq_m128i_u32(vandq_u32(cmp, swapped));
  2598. #endif
  2599. }
  2600. // Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers
// in b for less than.
  2602. // https://msdn.microsoft.com/en-us/library/windows/desktop/9s46csht(v=vs.90).aspx
  2603. FORCE_INLINE __m128i _mm_cmplt_epi8(__m128i a, __m128i b)
  2604. {
  2605. return vreinterpretq_m128i_u8(
  2606. vcltq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  2607. }
  2608. // Compares the 16 signed 8-bit integers in a and the 16 signed 8-bit integers
  2609. // in b for greater than.
  2610. //
  2611. // r0 := (a0 > b0) ? 0xff : 0x0
  2612. // r1 := (a1 > b1) ? 0xff : 0x0
  2613. // ...
  2614. // r15 := (a15 > b15) ? 0xff : 0x0
  2615. //
  2616. // https://msdn.microsoft.com/zh-tw/library/wf45zt2b(v=vs.100).aspx
  2617. FORCE_INLINE __m128i _mm_cmpgt_epi8(__m128i a, __m128i b)
  2618. {
  2619. return vreinterpretq_m128i_u8(
  2620. vcgtq_s8(vreinterpretq_s8_m128i(a), vreinterpretq_s8_m128i(b)));
  2621. }
  2622. // Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers
  2623. // in b for less than.
  2624. //
  2625. // r0 := (a0 < b0) ? 0xffff : 0x0
  2626. // r1 := (a1 < b1) ? 0xffff : 0x0
  2627. // ...
  2628. // r7 := (a7 < b7) ? 0xffff : 0x0
  2629. //
  2630. // https://technet.microsoft.com/en-us/library/t863edb2(v=vs.100).aspx
  2631. FORCE_INLINE __m128i _mm_cmplt_epi16(__m128i a, __m128i b)
  2632. {
  2633. return vreinterpretq_m128i_u16(vcltq_s16(vreinterpretq_s16_m128i(a),
  2634. vreinterpretq_s16_m128i(b)));
  2635. }
  2636. // Compares the 8 signed 16-bit integers in a and the 8 signed 16-bit integers
  2637. // in b for greater than.
  2638. //
  2639. // r0 := (a0 > b0) ? 0xffff : 0x0
  2640. // r1 := (a1 > b1) ? 0xffff : 0x0
  2641. // ...
  2642. // r7 := (a7 > b7) ? 0xffff : 0x0
  2643. //
  2644. // https://technet.microsoft.com/en-us/library/xd43yfsa(v=vs.100).aspx
  2645. FORCE_INLINE __m128i _mm_cmpgt_epi16(__m128i a, __m128i b)
  2646. {
  2647. return vreinterpretq_m128i_u16(vcgtq_s16(vreinterpretq_s16_m128i(a),
  2648. vreinterpretq_s16_m128i(b)));
  2649. }
  2650. // Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers
  2651. // in b for less than.
  2652. // https://msdn.microsoft.com/en-us/library/vstudio/4ak0bf5d(v=vs.100).aspx
  2653. FORCE_INLINE __m128i _mm_cmplt_epi32(__m128i a, __m128i b)
  2654. {
  2655. return vreinterpretq_m128i_u32(vcltq_s32(vreinterpretq_s32_m128i(a),
  2656. vreinterpretq_s32_m128i(b)));
  2657. }
  2658. // Compares the 4 signed 32-bit integers in a and the 4 signed 32-bit integers
  2659. // in b for greater than.
  2660. // https://msdn.microsoft.com/en-us/library/vstudio/1s9f2z0y(v=vs.100).aspx
  2661. FORCE_INLINE __m128i _mm_cmpgt_epi32(__m128i a, __m128i b)
  2662. {
  2663. return vreinterpretq_m128i_u32(vcgtq_s32(vreinterpretq_s32_m128i(a),
  2664. vreinterpretq_s32_m128i(b)));
  2665. }
  2666. // Compares the 2 signed 64-bit integers in a and the 2 signed 64-bit integers
  2667. // in b for greater than.
  2668. FORCE_INLINE __m128i _mm_cmpgt_epi64(__m128i a, __m128i b)
  2669. {
  2670. #if defined(__aarch64__)
  2671. return vreinterpretq_m128i_u64(vcgtq_s64(vreinterpretq_s64_m128i(a),
  2672. vreinterpretq_s64_m128i(b)));
  2673. #else
  2674. // ARMv7 lacks vcgtq_s64.
  2675. // This is based off of Clang's SSE2 polyfill:
  2676. // (a > b) -> ((a_hi > b_hi) || (a_lo > b_lo && a_hi == b_hi))
  2677. // Mask the sign bit out since we need a signed AND an unsigned comparison
  2678. // and it is ugly to try and split them.
  2679. int32x4_t mask = vreinterpretq_s32_s64(vdupq_n_s64(0x80000000ull));
  2680. int32x4_t a_mask = veorq_s32(vreinterpretq_s32_m128i(a), mask);
  2681. int32x4_t b_mask = veorq_s32(vreinterpretq_s32_m128i(b), mask);
  2682. // Check if a > b
  2683. int64x2_t greater = vreinterpretq_s64_u32(vcgtq_s32(a_mask, b_mask));
  2684. // Copy upper mask to lower mask
  2685. // a_hi > b_hi
  2686. int64x2_t gt_hi = vshrq_n_s64(greater, 63);
  2687. // Copy lower mask to upper mask
  2688. // a_lo > b_lo
  2689. int64x2_t gt_lo = vsliq_n_s64(greater, greater, 32);
  2690. // Compare for equality
  2691. int64x2_t equal = vreinterpretq_s64_u32(vceqq_s32(a_mask, b_mask));
  2692. // Copy upper mask to lower mask
  2693. // a_hi == b_hi
  2694. int64x2_t eq_hi = vshrq_n_s64(equal, 63);
  2695. // a_hi > b_hi || (a_lo > b_lo && a_hi == b_hi)
  2696. int64x2_t ret = vorrq_s64(gt_hi, vandq_s64(gt_lo, eq_hi));
  2697. return vreinterpretq_m128i_s64(ret);
  2698. #endif
  2699. }
  2700. // Compares the four 32-bit floats in a and b to check if any values are NaN.
  2701. // Ordered compare between each value returns true for "orderable" and false for
  2702. // "not orderable" (NaN).
  2703. // https://msdn.microsoft.com/en-us/library/vstudio/0h9w00fx(v=vs.100).aspx see
  2704. // also:
  2705. // http://stackoverflow.com/questions/8627331/what-does-ordered-unordered-comparison-mean
  2706. // http://stackoverflow.com/questions/29349621/neon-isnanval-intrinsics
  2707. FORCE_INLINE __m128 _mm_cmpord_ps(__m128 a, __m128 b)
  2708. {
  2709. // Note: NEON does not have ordered compare builtin
  2710. // Need to compare a eq a and b eq b to check for NaN
  2711. // Do AND of results to get final
  2712. uint32x4_t ceqaa =
  2713. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  2714. uint32x4_t ceqbb =
  2715. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  2716. return vreinterpretq_m128_u32(vandq_u32(ceqaa, ceqbb));
  2717. }
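// Usage sketch (illustrative only): the all-ones/all-zeros mask returned by
// _mm_cmpord_ps can be used to zero out NaN lanes while keeping ordered lanes
// intact (assumes _mm_and_ps from this header):
//
//   __m128 mask = _mm_cmpord_ps(x, x); // 0xffffffff where x is not NaN
//   x = _mm_and_ps(x, mask);           // NaN lanes become +0.0f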
  2718. // Compares the lower single-precision floating point scalar values of a and b
  2719. // using a less than operation. :
  2720. // https://msdn.microsoft.com/en-us/library/2kwe606b(v=vs.90).aspx Important
  2721. // note!! The documentation on MSDN is incorrect! If either of the values is a
  2722. // NAN the docs say you will get a one, but in fact, it will return a zero!!
  2723. FORCE_INLINE int _mm_comilt_ss(__m128 a, __m128 b)
  2724. {
  2725. uint32x4_t a_not_nan =
  2726. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  2727. uint32x4_t b_not_nan =
  2728. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  2729. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  2730. uint32x4_t a_lt_b =
  2731. vcltq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  2732. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_lt_b), 0) != 0) ? 1
  2733. : 0;
  2734. }
  2735. // Compares the lower single-precision floating point scalar values of a and b
  2736. // using a greater than operation. :
  2737. // https://msdn.microsoft.com/en-us/library/b0738e0t(v=vs.100).aspx
  2738. FORCE_INLINE int _mm_comigt_ss(__m128 a, __m128 b)
  2739. {
  2740. // return vgetq_lane_u32(vcgtq_f32(vreinterpretq_f32_m128(a),
  2741. // vreinterpretq_f32_m128(b)), 0);
  2742. uint32x4_t a_not_nan =
  2743. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  2744. uint32x4_t b_not_nan =
  2745. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  2746. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  2747. uint32x4_t a_gt_b =
  2748. vcgtq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  2749. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0) ? 1
  2750. : 0;
  2751. }
  2752. // Compares the lower single-precision floating point scalar values of a and b
  2753. // using a less than or equal operation. :
  2754. // https://msdn.microsoft.com/en-us/library/1w4t7c57(v=vs.90).aspx
  2755. FORCE_INLINE int _mm_comile_ss(__m128 a, __m128 b)
  2756. {
  2757. // return vgetq_lane_u32(vcleq_f32(vreinterpretq_f32_m128(a),
  2758. // vreinterpretq_f32_m128(b)), 0);
  2759. uint32x4_t a_not_nan =
  2760. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  2761. uint32x4_t b_not_nan =
  2762. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  2763. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  2764. uint32x4_t a_le_b =
  2765. vcleq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  2766. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_le_b), 0) != 0) ? 1
  2767. : 0;
  2768. }
  2769. // Compares the lower single-precision floating point scalar values of a and b
  2770. // using a greater than or equal operation. :
  2771. // https://msdn.microsoft.com/en-us/library/8t80des6(v=vs.100).aspx
  2772. FORCE_INLINE int _mm_comige_ss(__m128 a, __m128 b)
  2773. {
  2774. // return vgetq_lane_u32(vcgeq_f32(vreinterpretq_f32_m128(a),
  2775. // vreinterpretq_f32_m128(b)), 0);
  2776. uint32x4_t a_not_nan =
  2777. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  2778. uint32x4_t b_not_nan =
  2779. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  2780. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  2781. uint32x4_t a_ge_b =
  2782. vcgeq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  2783. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0) ? 1
  2784. : 0;
  2785. }
  2786. // Compares the lower single-precision floating point scalar values of a and b
  2787. // using an equality operation. :
  2788. // https://msdn.microsoft.com/en-us/library/93yx2h2b(v=vs.100).aspx
  2789. FORCE_INLINE int _mm_comieq_ss(__m128 a, __m128 b)
  2790. {
  2791. // return vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a),
  2792. // vreinterpretq_f32_m128(b)), 0);
  2793. uint32x4_t a_not_nan =
  2794. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  2795. uint32x4_t b_not_nan =
  2796. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  2797. uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
  2798. uint32x4_t a_eq_b =
  2799. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(b));
  2800. return (vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_eq_b), 0) != 0) ? 1
  2801. : 0;
  2802. }
  2803. // Compares the lower single-precision floating point scalar values of a and b
  2804. // using an inequality operation. :
  2805. // https://msdn.microsoft.com/en-us/library/bafh5e0a(v=vs.90).aspx
  2806. FORCE_INLINE int _mm_comineq_ss(__m128 a, __m128 b)
  2807. {
  2808. // return !vgetq_lane_u32(vceqq_f32(vreinterpretq_f32_m128(a),
  2809. // vreinterpretq_f32_m128(b)), 0);
  2810. uint32x4_t a_not_nan =
  2811. vceqq_f32(vreinterpretq_f32_m128(a), vreinterpretq_f32_m128(a));
  2812. uint32x4_t b_not_nan =
  2813. vceqq_f32(vreinterpretq_f32_m128(b), vreinterpretq_f32_m128(b));
  2814. uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
  2815. uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(vreinterpretq_f32_m128(a),
  2816. vreinterpretq_f32_m128(b)));
  2817. return (vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_neq_b), 0) != 0) ? 1 : 0;
  2818. }
  2819. // according to the documentation, these intrinsics behave the same as the
  2820. // non-'u' versions. We'll just alias them here.
  2821. #define _mm_ucomilt_ss _mm_comilt_ss
  2822. #define _mm_ucomile_ss _mm_comile_ss
  2823. #define _mm_ucomigt_ss _mm_comigt_ss
  2824. #define _mm_ucomige_ss _mm_comige_ss
  2825. #define _mm_ucomieq_ss _mm_comieq_ss
  2826. #define _mm_ucomineq_ss _mm_comineq_ss
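// On x86 the comi/ucomi families differ only in their floating-point
// exception behaviour (the 'u' forms do not signal invalid on quiet NaNs).
// These NEON-based replacements never raise x86-style exceptions and already
// return 0 whenever either input is NaN, so aliasing the two families is a
// reasonable approximation of both.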
  2827. /* Conversions */
  2828. // Converts the four single-precision, floating-point values of a to signed
  2829. // 32-bit integer values using truncate.
  2830. // https://msdn.microsoft.com/en-us/library/vstudio/1h005y6x(v=vs.100).aspx
  2831. FORCE_INLINE __m128i _mm_cvttps_epi32(__m128 a)
  2832. {
  2833. return vreinterpretq_m128i_s32(
  2834. vcvtq_s32_f32(vreinterpretq_f32_m128(a)));
  2835. }
  2836. // Converts the four signed 32-bit integer values of a to single-precision,
  2837. // floating-point values
  2838. // https://msdn.microsoft.com/en-us/library/vstudio/36bwxcx5(v=vs.100).aspx
  2839. FORCE_INLINE __m128 _mm_cvtepi32_ps(__m128i a)
  2840. {
  2841. return vreinterpretq_m128_f32(
  2842. vcvtq_f32_s32(vreinterpretq_s32_m128i(a)));
  2843. }
// Converts the eight unsigned 8-bit integers in the lower 64 bits to eight
// unsigned 16-bit integers.
  2846. FORCE_INLINE __m128i _mm_cvtepu8_epi16(__m128i a)
  2847. {
  2848. uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
  2849. uint16x8_t u16x8 =
  2850. vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  2851. return vreinterpretq_m128i_u16(u16x8);
  2852. }
  2853. // Converts the four unsigned 8-bit integers in the lower 32 bits to four
  2854. // unsigned 32-bit integers.
  2855. // https://msdn.microsoft.com/en-us/library/bb531467%28v=vs.100%29.aspx
  2856. FORCE_INLINE __m128i _mm_cvtepu8_epi32(__m128i a)
  2857. {
  2858. uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx DCBA */
  2859. uint16x8_t u16x8 =
  2860. vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  2861. uint32x4_t u32x4 =
  2862. vmovl_u16(vget_low_u16(u16x8)); /* 000D 000C 000B 000A */
  2863. return vreinterpretq_m128i_u32(u32x4);
  2864. }
  2865. // Converts the two unsigned 8-bit integers in the lower 16 bits to two
  2866. // unsigned 64-bit integers.
  2867. FORCE_INLINE __m128i _mm_cvtepu8_epi64(__m128i a)
  2868. {
  2869. uint8x16_t u8x16 = vreinterpretq_u8_m128i(a); /* xxxx xxxx xxxx xxBA */
  2870. uint16x8_t u16x8 =
  2871. vmovl_u8(vget_low_u8(u8x16)); /* 0x0x 0x0x 0x0x 0B0A */
  2872. uint32x4_t u32x4 =
  2873. vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
  2874. uint64x2_t u64x2 =
  2875. vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
  2876. return vreinterpretq_m128i_u64(u64x2);
  2877. }
// Converts the eight signed 8-bit integers in the lower 64 bits to eight
// signed 16-bit integers.
  2880. FORCE_INLINE __m128i _mm_cvtepi8_epi16(__m128i a)
  2881. {
  2882. int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
  2883. int16x8_t s16x8 =
  2884. vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  2885. return vreinterpretq_m128i_s16(s16x8);
  2886. }
// Converts the four signed 8-bit integers in the lower 32 bits to four
// signed 32-bit integers.
  2889. FORCE_INLINE __m128i _mm_cvtepi8_epi32(__m128i a)
  2890. {
  2891. int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx DCBA */
  2892. int16x8_t s16x8 =
  2893. vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0D0C 0B0A */
  2894. int32x4_t s32x4 =
  2895. vmovl_s16(vget_low_s16(s16x8)); /* 000D 000C 000B 000A */
  2896. return vreinterpretq_m128i_s32(s32x4);
  2897. }
// Converts the two signed 8-bit integers in the lower 16 bits to two
// signed 64-bit integers.
  2900. FORCE_INLINE __m128i _mm_cvtepi8_epi64(__m128i a)
  2901. {
  2902. int8x16_t s8x16 = vreinterpretq_s8_m128i(a); /* xxxx xxxx xxxx xxBA */
  2903. int16x8_t s16x8 =
  2904. vmovl_s8(vget_low_s8(s8x16)); /* 0x0x 0x0x 0x0x 0B0A */
  2905. int32x4_t s32x4 =
  2906. vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
  2907. int64x2_t s64x2 =
  2908. vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
  2909. return vreinterpretq_m128i_s64(s64x2);
  2910. }
  2911. // Converts the four signed 16-bit integers in the lower 64 bits to four signed
  2912. // 32-bit integers.
  2913. FORCE_INLINE __m128i _mm_cvtepi16_epi32(__m128i a)
  2914. {
  2915. return vreinterpretq_m128i_s32(
  2916. vmovl_s16(vget_low_s16(vreinterpretq_s16_m128i(a))));
  2917. }
// Converts the two signed 16-bit integers in the lower 32 bits to two signed
// 64-bit integers.
  2920. FORCE_INLINE __m128i _mm_cvtepi16_epi64(__m128i a)
  2921. {
  2922. int16x8_t s16x8 = vreinterpretq_s16_m128i(a); /* xxxx xxxx xxxx 0B0A */
  2923. int32x4_t s32x4 =
  2924. vmovl_s16(vget_low_s16(s16x8)); /* 000x 000x 000B 000A */
  2925. int64x2_t s64x2 =
  2926. vmovl_s32(vget_low_s32(s32x4)); /* 0000 000B 0000 000A */
  2927. return vreinterpretq_m128i_s64(s64x2);
  2928. }
  2929. // Converts the four unsigned 16-bit integers in the lower 64 bits to four
  2930. // unsigned 32-bit integers.
  2931. FORCE_INLINE __m128i _mm_cvtepu16_epi32(__m128i a)
  2932. {
  2933. return vreinterpretq_m128i_u32(
  2934. vmovl_u16(vget_low_u16(vreinterpretq_u16_m128i(a))));
  2935. }
  2936. // Converts the two unsigned 16-bit integers in the lower 32 bits to two
  2937. // unsigned 64-bit integers.
  2938. FORCE_INLINE __m128i _mm_cvtepu16_epi64(__m128i a)
  2939. {
  2940. uint16x8_t u16x8 = vreinterpretq_u16_m128i(a); /* xxxx xxxx xxxx 0B0A */
  2941. uint32x4_t u32x4 =
  2942. vmovl_u16(vget_low_u16(u16x8)); /* 000x 000x 000B 000A */
  2943. uint64x2_t u64x2 =
  2944. vmovl_u32(vget_low_u32(u32x4)); /* 0000 000B 0000 000A */
  2945. return vreinterpretq_m128i_u64(u64x2);
  2946. }
  2947. // Converts the two unsigned 32-bit integers in the lower 64 bits to two
  2948. // unsigned 64-bit integers.
  2949. FORCE_INLINE __m128i _mm_cvtepu32_epi64(__m128i a)
  2950. {
  2951. return vreinterpretq_m128i_u64(
  2952. vmovl_u32(vget_low_u32(vreinterpretq_u32_m128i(a))));
  2953. }
  2954. // Converts the two signed 32-bit integers in the lower 64 bits to two signed
  2955. // 64-bit integers.
  2956. FORCE_INLINE __m128i _mm_cvtepi32_epi64(__m128i a)
  2957. {
  2958. return vreinterpretq_m128i_s64(
  2959. vmovl_s32(vget_low_s32(vreinterpretq_s32_m128i(a))));
  2960. }
  2961. // Converts the four single-precision, floating-point values of a to signed
  2962. // 32-bit integer values.
  2963. //
  2964. // r0 := (int) a0
  2965. // r1 := (int) a1
  2966. // r2 := (int) a2
  2967. // r3 := (int) a3
  2968. //
  2969. // https://msdn.microsoft.com/en-us/library/vstudio/xdc42k5e(v=vs.100).aspx
  2970. // *NOTE*. The default rounding mode on SSE is 'round to even', which ARMv7-A
  2971. // does not support! It is supported on ARMv8-A however.
  2972. FORCE_INLINE __m128i _mm_cvtps_epi32(__m128 a)
  2973. {
  2974. #if defined(__aarch64__)
return vreinterpretq_m128i_s32(vcvtnq_s32_f32(vreinterpretq_f32_m128(a)));
  2976. #else
  2977. uint32x4_t signmask = vdupq_n_u32(0x80000000);
  2978. float32x4_t half = vbslq_f32(signmask, vreinterpretq_f32_m128(a),
  2979. vdupq_n_f32(0.5f)); /* +/- 0.5 */
  2980. int32x4_t r_normal =
  2981. vcvtq_s32_f32(vaddq_f32(vreinterpretq_f32_m128(a),
  2982. half)); /* round to integer: [a + 0.5]*/
  2983. int32x4_t r_trunc = vcvtq_s32_f32(
  2984. vreinterpretq_f32_m128(a)); /* truncate to integer: [a] */
  2985. int32x4_t plusone = vreinterpretq_s32_u32(vshrq_n_u32(
  2986. vreinterpretq_u32_s32(vnegq_s32(r_trunc)), 31)); /* 1 or 0 */
  2987. int32x4_t r_even = vbicq_s32(vaddq_s32(r_trunc, plusone),
  2988. vdupq_n_s32(1)); /* ([a] + {0,1}) & ~1 */
  2989. float32x4_t delta = vsubq_f32(
  2990. vreinterpretq_f32_m128(a),
  2991. vcvtq_f32_s32(r_trunc)); /* compute delta: delta = (a - [a]) */
  2992. uint32x4_t is_delta_half =
  2993. vceqq_f32(delta, half); /* delta == +/- 0.5 */
  2994. return vreinterpretq_m128i_s32(
  2995. vbslq_s32(is_delta_half, r_even, r_normal));
  2996. #endif
  2997. }
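// Worked example of the round-half-to-even fallback above: for a = 2.5 the
// truncated value is 2 and delta is exactly +0.5, so r_even = (2 + 1) & ~1 = 2
// is selected; for a = 3.5, r_even = (3 + 1) & ~1 = 4. Both match SSE's
// default rounding mode, whereas the plain "a + 0.5 then truncate" path alone
// would round 2.5 up to 3.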
  2998. // Moves the least significant 32 bits of a to a 32-bit integer.
  2999. // https://msdn.microsoft.com/en-us/library/5z7a9642%28v=vs.90%29.aspx
  3000. FORCE_INLINE int _mm_cvtsi128_si32(__m128i a)
  3001. {
  3002. return vgetq_lane_s32(vreinterpretq_s32_m128i(a), 0);
  3003. }
  3004. // Extracts the low order 64-bit integer from the parameter.
  3005. // https://msdn.microsoft.com/en-us/library/bb531384(v=vs.120).aspx
FORCE_INLINE int64_t _mm_cvtsi128_si64(__m128i a)
  3007. {
  3008. return vgetq_lane_s64(vreinterpretq_s64_m128i(a), 0);
  3009. }
  3010. // Moves 32-bit integer a to the least significant 32 bits of an __m128 object,
  3011. // zero extending the upper bits.
  3012. //
  3013. // r0 := a
  3014. // r1 := 0x0
  3015. // r2 := 0x0
  3016. // r3 := 0x0
  3017. //
  3018. // https://msdn.microsoft.com/en-us/library/ct3539ha%28v=vs.90%29.aspx
  3019. FORCE_INLINE __m128i _mm_cvtsi32_si128(int a)
  3020. {
  3021. return vreinterpretq_m128i_s32(vsetq_lane_s32(a, vdupq_n_s32(0), 0));
  3022. }
  3023. // Moves 64-bit integer a to the least significant 64 bits of an __m128 object,
  3024. // zero extending the upper bits.
  3025. //
  3026. // r0 := a
  3027. // r1 := 0x0
  3028. FORCE_INLINE __m128i _mm_cvtsi64_si128(int64_t a)
  3029. {
  3030. return vreinterpretq_m128i_s64(vsetq_lane_s64(a, vdupq_n_s64(0), 0));
  3031. }
  3032. // Applies a type cast to reinterpret four 32-bit floating point values passed
  3033. // in as a 128-bit parameter as packed 32-bit integers.
  3034. // https://msdn.microsoft.com/en-us/library/bb514099.aspx
  3035. FORCE_INLINE __m128i _mm_castps_si128(__m128 a)
  3036. {
  3037. return vreinterpretq_m128i_s32(vreinterpretq_s32_m128(a));
  3038. }
  3039. // Applies a type cast to reinterpret four 32-bit integers passed in as a
  3040. // 128-bit parameter as packed 32-bit floating point values.
  3041. // https://msdn.microsoft.com/en-us/library/bb514029.aspx
  3042. FORCE_INLINE __m128 _mm_castsi128_ps(__m128i a)
  3043. {
  3044. return vreinterpretq_m128_s32(vreinterpretq_s32_m128i(a));
  3045. }
  3046. // Loads 128-bit value. :
  3047. // https://msdn.microsoft.com/en-us/library/atzzad1h(v=vs.80).aspx
  3048. FORCE_INLINE __m128i _mm_load_si128(const __m128i *p)
  3049. {
  3050. return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *)p));
  3051. }
  3052. // Loads 128-bit value. :
  3053. // https://msdn.microsoft.com/zh-cn/library/f4k12ae8(v=vs.90).aspx
  3054. FORCE_INLINE __m128i _mm_loadu_si128(const __m128i *p)
  3055. {
  3056. return vreinterpretq_m128i_s32(vld1q_s32((const int32_t *)p));
  3057. }
  3058. // _mm_lddqu_si128 functions the same as _mm_loadu_si128.
  3059. #define _mm_lddqu_si128 _mm_loadu_si128
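// Note: vld1q_s32 has no 16-byte alignment requirement, so the aligned and
// unaligned SSE loads intentionally map to the same NEON load here. On real
// x86 hardware _mm_load_si128 faults on a misaligned pointer, so portable
// callers should still honour the alignment contract.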
  3060. /* Miscellaneous Operations */
  3061. // Shifts the 8 signed 16-bit integers in a right by count bits while shifting
  3062. // in the sign bit.
  3063. //
  3064. // r0 := a0 >> count
  3065. // r1 := a1 >> count
  3066. // ...
  3067. // r7 := a7 >> count
  3068. //
  3069. // https://msdn.microsoft.com/en-us/library/3c9997dk(v%3dvs.90).aspx
  3070. FORCE_INLINE __m128i _mm_sra_epi16(__m128i a, __m128i count)
  3071. {
int64_t c = vgetq_lane_s64(vreinterpretq_s64_m128i(count), 0);
if (c > 15)
return _mm_cmplt_epi16(a, _mm_setzero_si128());
return vreinterpretq_m128i_s16(
vshlq_s16(vreinterpretq_s16_m128i(a), vdupq_n_s16((int16_t) -c)));
  3077. }
  3078. // Shifts the 4 signed 32-bit integers in a right by count bits while shifting
  3079. // in the sign bit.
  3080. //
  3081. // r0 := a0 >> count
  3082. // r1 := a1 >> count
  3083. // r2 := a2 >> count
  3084. // r3 := a3 >> count
  3085. //
  3086. // https://msdn.microsoft.com/en-us/library/ce40009e(v%3dvs.100).aspx
  3087. FORCE_INLINE __m128i _mm_sra_epi32(__m128i a, __m128i count)
  3088. {
int64_t c = vgetq_lane_s64(vreinterpretq_s64_m128i(count), 0);
if (c > 31)
return _mm_cmplt_epi32(a, _mm_setzero_si128());
return vreinterpretq_m128i_s32(
vshlq_s32(vreinterpretq_s32_m128i(a), vdupq_n_s32((int32_t) -c)));
  3094. }
  3095. // Packs the 16 signed 16-bit integers from a and b into 8-bit integers and
  3096. // saturates.
  3097. // https://msdn.microsoft.com/en-us/library/k4y4f7w5%28v=vs.90%29.aspx
  3098. FORCE_INLINE __m128i _mm_packs_epi16(__m128i a, __m128i b)
  3099. {
  3100. return vreinterpretq_m128i_s8(
  3101. vcombine_s8(vqmovn_s16(vreinterpretq_s16_m128i(a)),
  3102. vqmovn_s16(vreinterpretq_s16_m128i(b))));
  3103. }
// Packs the 16 signed 16-bit integers from a and b into 8-bit unsigned
  3105. // integers and saturates.
  3106. //
  3107. // r0 := UnsignedSaturate(a0)
  3108. // r1 := UnsignedSaturate(a1)
  3109. // ...
  3110. // r7 := UnsignedSaturate(a7)
  3111. // r8 := UnsignedSaturate(b0)
  3112. // r9 := UnsignedSaturate(b1)
  3113. // ...
  3114. // r15 := UnsignedSaturate(b7)
  3115. //
  3116. // https://msdn.microsoft.com/en-us/library/07ad1wx4(v=vs.100).aspx
  3117. FORCE_INLINE __m128i _mm_packus_epi16(const __m128i a, const __m128i b)
  3118. {
  3119. return vreinterpretq_m128i_u8(
  3120. vcombine_u8(vqmovun_s16(vreinterpretq_s16_m128i(a)),
  3121. vqmovun_s16(vreinterpretq_s16_m128i(b))));
  3122. }
  3123. // Packs the 8 signed 32-bit integers from a and b into signed 16-bit integers
  3124. // and saturates.
  3125. //
  3126. // r0 := SignedSaturate(a0)
  3127. // r1 := SignedSaturate(a1)
  3128. // r2 := SignedSaturate(a2)
  3129. // r3 := SignedSaturate(a3)
  3130. // r4 := SignedSaturate(b0)
  3131. // r5 := SignedSaturate(b1)
  3132. // r6 := SignedSaturate(b2)
  3133. // r7 := SignedSaturate(b3)
  3134. //
  3135. // https://msdn.microsoft.com/en-us/library/393t56f9%28v=vs.90%29.aspx
  3136. FORCE_INLINE __m128i _mm_packs_epi32(__m128i a, __m128i b)
  3137. {
  3138. return vreinterpretq_m128i_s16(
  3139. vcombine_s16(vqmovn_s32(vreinterpretq_s32_m128i(a)),
  3140. vqmovn_s32(vreinterpretq_s32_m128i(b))));
  3141. }
  3142. // Packs the 8 unsigned 32-bit integers from a and b into unsigned 16-bit
  3143. // integers and saturates.
  3144. //
  3145. // r0 := UnsignedSaturate(a0)
  3146. // r1 := UnsignedSaturate(a1)
  3147. // r2 := UnsignedSaturate(a2)
  3148. // r3 := UnsignedSaturate(a3)
  3149. // r4 := UnsignedSaturate(b0)
  3150. // r5 := UnsignedSaturate(b1)
  3151. // r6 := UnsignedSaturate(b2)
  3152. // r7 := UnsignedSaturate(b3)
  3153. FORCE_INLINE __m128i _mm_packus_epi32(__m128i a, __m128i b)
  3154. {
  3155. return vreinterpretq_m128i_u16(
  3156. vcombine_u16(vqmovn_u32(vreinterpretq_u32_m128i(a)),
  3157. vqmovn_u32(vreinterpretq_u32_m128i(b))));
  3158. }
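// Usage sketch (illustrative only): chaining the pack operations narrows
// eight 32-bit values (split across two vectors) down to saturated unsigned
// bytes, a common image-processing step:
//
//   __m128i w = _mm_packs_epi32(lo32, hi32);  // 8 x int16, signed saturation
//   __m128i b8 = _mm_packus_epi16(w, w);      // low 8 bytes hold the result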
  3159. // Interleaves the lower 8 signed or unsigned 8-bit integers in a with the lower
  3160. // 8 signed or unsigned 8-bit integers in b.
  3161. //
  3162. // r0 := a0
  3163. // r1 := b0
  3164. // r2 := a1
  3165. // r3 := b1
  3166. // ...
  3167. // r14 := a7
  3168. // r15 := b7
  3169. //
  3170. // https://msdn.microsoft.com/en-us/library/xf7k860c%28v=vs.90%29.aspx
  3171. FORCE_INLINE __m128i _mm_unpacklo_epi8(__m128i a, __m128i b)
  3172. {
  3173. #if defined(__aarch64__)
  3174. return vreinterpretq_m128i_s8(vzip1q_s8(vreinterpretq_s8_m128i(a),
  3175. vreinterpretq_s8_m128i(b)));
  3176. #else
  3177. int8x8_t a1 =
  3178. vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(a)));
  3179. int8x8_t b1 =
  3180. vreinterpret_s8_s16(vget_low_s16(vreinterpretq_s16_m128i(b)));
  3181. int8x8x2_t result = vzip_s8(a1, b1);
  3182. return vreinterpretq_m128i_s8(
  3183. vcombine_s8(result.val[0], result.val[1]));
  3184. #endif
  3185. }
  3186. // Interleaves the lower 4 signed or unsigned 16-bit integers in a with the
  3187. // lower 4 signed or unsigned 16-bit integers in b.
  3188. //
  3189. // r0 := a0
  3190. // r1 := b0
  3191. // r2 := a1
  3192. // r3 := b1
  3193. // r4 := a2
  3194. // r5 := b2
  3195. // r6 := a3
  3196. // r7 := b3
  3197. //
  3198. // https://msdn.microsoft.com/en-us/library/btxb17bw%28v=vs.90%29.aspx
  3199. FORCE_INLINE __m128i _mm_unpacklo_epi16(__m128i a, __m128i b)
  3200. {
  3201. #if defined(__aarch64__)
  3202. return vreinterpretq_m128i_s16(vzip1q_s16(vreinterpretq_s16_m128i(a),
  3203. vreinterpretq_s16_m128i(b)));
  3204. #else
  3205. int16x4_t a1 = vget_low_s16(vreinterpretq_s16_m128i(a));
  3206. int16x4_t b1 = vget_low_s16(vreinterpretq_s16_m128i(b));
  3207. int16x4x2_t result = vzip_s16(a1, b1);
  3208. return vreinterpretq_m128i_s16(
  3209. vcombine_s16(result.val[0], result.val[1]));
  3210. #endif
  3211. }
// Interleaves the lower 2 signed or unsigned 32-bit integers in a with the
// lower 2 signed or unsigned 32-bit integers in b.
  3214. //
  3215. // r0 := a0
  3216. // r1 := b0
  3217. // r2 := a1
  3218. // r3 := b1
  3219. //
  3220. // https://msdn.microsoft.com/en-us/library/x8atst9d(v=vs.100).aspx
  3221. FORCE_INLINE __m128i _mm_unpacklo_epi32(__m128i a, __m128i b)
  3222. {
  3223. #if defined(__aarch64__)
  3224. return vreinterpretq_m128i_s32(vzip1q_s32(vreinterpretq_s32_m128i(a),
  3225. vreinterpretq_s32_m128i(b)));
  3226. #else
  3227. int32x2_t a1 = vget_low_s32(vreinterpretq_s32_m128i(a));
  3228. int32x2_t b1 = vget_low_s32(vreinterpretq_s32_m128i(b));
  3229. int32x2x2_t result = vzip_s32(a1, b1);
  3230. return vreinterpretq_m128i_s32(
  3231. vcombine_s32(result.val[0], result.val[1]));
  3232. #endif
  3233. }
  3234. FORCE_INLINE __m128i _mm_unpacklo_epi64(__m128i a, __m128i b)
  3235. {
  3236. int64x1_t a_l = vget_low_s64(vreinterpretq_s64_m128i(a));
  3237. int64x1_t b_l = vget_low_s64(vreinterpretq_s64_m128i(b));
  3238. return vreinterpretq_m128i_s64(vcombine_s64(a_l, b_l));
  3239. }
  3240. // Selects and interleaves the lower two single-precision, floating-point values
  3241. // from a and b.
  3242. //
  3243. // r0 := a0
  3244. // r1 := b0
  3245. // r2 := a1
  3246. // r3 := b1
  3247. //
  3248. // https://msdn.microsoft.com/en-us/library/25st103b%28v=vs.90%29.aspx
  3249. FORCE_INLINE __m128 _mm_unpacklo_ps(__m128 a, __m128 b)
  3250. {
  3251. #if defined(__aarch64__)
  3252. return vreinterpretq_m128_f32(vzip1q_f32(vreinterpretq_f32_m128(a),
  3253. vreinterpretq_f32_m128(b)));
  3254. #else
  3255. float32x2_t a1 = vget_low_f32(vreinterpretq_f32_m128(a));
  3256. float32x2_t b1 = vget_low_f32(vreinterpretq_f32_m128(b));
  3257. float32x2x2_t result = vzip_f32(a1, b1);
  3258. return vreinterpretq_m128_f32(
  3259. vcombine_f32(result.val[0], result.val[1]));
  3260. #endif
  3261. }
  3262. // Selects and interleaves the upper two single-precision, floating-point values
  3263. // from a and b.
  3264. //
  3265. // r0 := a2
  3266. // r1 := b2
  3267. // r2 := a3
  3268. // r3 := b3
  3269. //
  3270. // https://msdn.microsoft.com/en-us/library/skccxx7d%28v=vs.90%29.aspx
  3271. FORCE_INLINE __m128 _mm_unpackhi_ps(__m128 a, __m128 b)
  3272. {
  3273. #if defined(__aarch64__)
  3274. return vreinterpretq_m128_f32(vzip2q_f32(vreinterpretq_f32_m128(a),
  3275. vreinterpretq_f32_m128(b)));
  3276. #else
  3277. float32x2_t a1 = vget_high_f32(vreinterpretq_f32_m128(a));
  3278. float32x2_t b1 = vget_high_f32(vreinterpretq_f32_m128(b));
  3279. float32x2x2_t result = vzip_f32(a1, b1);
  3280. return vreinterpretq_m128_f32(
  3281. vcombine_f32(result.val[0], result.val[1]));
  3282. #endif
  3283. }
  3284. // Interleaves the upper 8 signed or unsigned 8-bit integers in a with the upper
  3285. // 8 signed or unsigned 8-bit integers in b.
  3286. //
  3287. // r0 := a8
  3288. // r1 := b8
  3289. // r2 := a9
  3290. // r3 := b9
  3291. // ...
  3292. // r14 := a15
  3293. // r15 := b15
  3294. //
  3295. // https://msdn.microsoft.com/en-us/library/t5h7783k(v=vs.100).aspx
  3296. FORCE_INLINE __m128i _mm_unpackhi_epi8(__m128i a, __m128i b)
  3297. {
  3298. #if defined(__aarch64__)
  3299. return vreinterpretq_m128i_s8(vzip2q_s8(vreinterpretq_s8_m128i(a),
  3300. vreinterpretq_s8_m128i(b)));
  3301. #else
  3302. int8x8_t a1 =
  3303. vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(a)));
  3304. int8x8_t b1 =
  3305. vreinterpret_s8_s16(vget_high_s16(vreinterpretq_s16_m128i(b)));
  3306. int8x8x2_t result = vzip_s8(a1, b1);
  3307. return vreinterpretq_m128i_s8(
  3308. vcombine_s8(result.val[0], result.val[1]));
  3309. #endif
  3310. }
  3311. // Interleaves the upper 4 signed or unsigned 16-bit integers in a with the
  3312. // upper 4 signed or unsigned 16-bit integers in b.
  3313. //
  3314. // r0 := a4
  3315. // r1 := b4
  3316. // r2 := a5
  3317. // r3 := b5
  3318. // r4 := a6
  3319. // r5 := b6
  3320. // r6 := a7
  3321. // r7 := b7
  3322. //
  3323. // https://msdn.microsoft.com/en-us/library/03196cz7(v=vs.100).aspx
  3324. FORCE_INLINE __m128i _mm_unpackhi_epi16(__m128i a, __m128i b)
  3325. {
  3326. #if defined(__aarch64__)
  3327. return vreinterpretq_m128i_s16(vzip2q_s16(vreinterpretq_s16_m128i(a),
  3328. vreinterpretq_s16_m128i(b)));
  3329. #else
  3330. int16x4_t a1 = vget_high_s16(vreinterpretq_s16_m128i(a));
  3331. int16x4_t b1 = vget_high_s16(vreinterpretq_s16_m128i(b));
  3332. int16x4x2_t result = vzip_s16(a1, b1);
  3333. return vreinterpretq_m128i_s16(
  3334. vcombine_s16(result.val[0], result.val[1]));
  3335. #endif
  3336. }
  3337. // Interleaves the upper 2 signed or unsigned 32-bit integers in a with the
  3338. // upper 2 signed or unsigned 32-bit integers in b.
  3339. // https://msdn.microsoft.com/en-us/library/65sa7cbs(v=vs.100).aspx
  3340. FORCE_INLINE __m128i _mm_unpackhi_epi32(__m128i a, __m128i b)
  3341. {
  3342. #if defined(__aarch64__)
  3343. return vreinterpretq_m128i_s32(vzip2q_s32(vreinterpretq_s32_m128i(a),
  3344. vreinterpretq_s32_m128i(b)));
  3345. #else
  3346. int32x2_t a1 = vget_high_s32(vreinterpretq_s32_m128i(a));
  3347. int32x2_t b1 = vget_high_s32(vreinterpretq_s32_m128i(b));
  3348. int32x2x2_t result = vzip_s32(a1, b1);
  3349. return vreinterpretq_m128i_s32(
  3350. vcombine_s32(result.val[0], result.val[1]));
  3351. #endif
  3352. }
  3353. // Interleaves the upper signed or unsigned 64-bit integer in a with the
  3354. // upper signed or unsigned 64-bit integer in b.
  3355. //
  3356. // r0 := a1
  3357. // r1 := b1
  3358. FORCE_INLINE __m128i _mm_unpackhi_epi64(__m128i a, __m128i b)
  3359. {
  3360. int64x1_t a_h = vget_high_s64(vreinterpretq_s64_m128i(a));
  3361. int64x1_t b_h = vget_high_s64(vreinterpretq_s64_m128i(b));
  3362. return vreinterpretq_m128i_s64(vcombine_s64(a_h, b_h));
  3363. }
  3364. // Horizontally compute the minimum amongst the packed unsigned 16-bit integers
  3365. // in a, store the minimum and index in dst, and zero the remaining bits in dst.
  3366. //
  3367. // index[2:0] := 0
  3368. // min[15:0] := a[15:0]
  3369. // FOR j := 0 to 7
  3370. // i := j*16
  3371. // IF a[i+15:i] < min[15:0]
  3372. // index[2:0] := j
  3373. // min[15:0] := a[i+15:i]
  3374. // FI
  3375. // ENDFOR
  3376. // dst[15:0] := min[15:0]
  3377. // dst[18:16] := index[2:0]
  3378. // dst[127:19] := 0
  3379. //
  3380. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_minpos_epu16&expand=3789
  3381. FORCE_INLINE __m128i _mm_minpos_epu16(__m128i a)
  3382. {
  3383. __m128i dst;
  3384. uint16_t min, idx = 0;
  3385. // Find the minimum value
  3386. #if defined(__aarch64__)
  3387. min = vminvq_u16(vreinterpretq_u16_m128i(a));
  3388. #else
  3389. __m64i tmp;
  3390. tmp = vreinterpret_m64i_u16(
  3391. vmin_u16(vget_low_u16(vreinterpretq_u16_m128i(a)),
  3392. vget_high_u16(vreinterpretq_u16_m128i(a))));
  3393. tmp = vreinterpret_m64i_u16(vpmin_u16(vreinterpret_u16_m64i(tmp),
  3394. vreinterpret_u16_m64i(tmp)));
  3395. tmp = vreinterpret_m64i_u16(vpmin_u16(vreinterpret_u16_m64i(tmp),
  3396. vreinterpret_u16_m64i(tmp)));
  3397. min = vget_lane_u16(vreinterpret_u16_m64i(tmp), 0);
  3398. #endif
  3399. // Get the index of the minimum value
  3400. int i;
  3401. for (i = 0; i < 8; i++) {
  3402. if (min == vgetq_lane_u16(vreinterpretq_u16_m128i(a), 0)) {
  3403. idx = (uint16_t)i;
  3404. break;
  3405. }
  3406. a = _mm_srli_si128(a, 2);
  3407. }
  3408. // Generate result
  3409. dst = _mm_setzero_si128();
  3410. dst = vreinterpretq_m128i_u16(
  3411. vsetq_lane_u16(min, vreinterpretq_u16_m128i(dst), 0));
  3412. dst = vreinterpretq_m128i_u16(
  3413. vsetq_lane_u16(idx, vreinterpretq_u16_m128i(dst), 1));
  3414. return dst;
  3415. }
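// Usage sketch (illustrative only): extracting both the minimum and its
// position from the packed result (assumes _mm_extract_epi16 from this
// header):
//
//   __m128i r = _mm_minpos_epu16(weights);
//   uint16_t min_val = (uint16_t) _mm_extract_epi16(r, 0);
//   uint16_t min_idx = (uint16_t) _mm_extract_epi16(r, 1);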
  3416. // shift to right
  3417. // https://msdn.microsoft.com/en-us/library/bb514041(v=vs.120).aspx
  3418. // http://blog.csdn.net/hemmingway/article/details/44828303
  3419. // Clang requires a macro here, as it is extremely picky about c being a
  3420. // literal.
  3421. #define _mm_alignr_epi8(a, b, c) \
  3422. ((__m128i)vextq_s8((int8x16_t)(b), (int8x16_t)(a), (c)))
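// Usage sketch (illustrative only): a byte-granular sliding window over two
// consecutive 16-byte loads, as used by unaligned streaming kernels:
//
//   __m128i lo = _mm_loadu_si128((const __m128i *) p);
//   __m128i hi = _mm_loadu_si128((const __m128i *) (p + 16));
//   __m128i window = _mm_alignr_epi8(hi, lo, 5); // bytes p[5..20]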
  3423. // Extracts the selected signed or unsigned 8-bit integer from a and zero
  3424. // extends.
  3425. // FORCE_INLINE int _mm_extract_epi8(__m128i a, __constrange(0,16) int imm)
  3426. #define _mm_extract_epi8(a, imm) vgetq_lane_u8(vreinterpretq_u8_m128i(a), (imm))
  3427. // Inserts the least significant 8 bits of b into the selected 8-bit integer
  3428. // of a.
  3429. // FORCE_INLINE __m128i _mm_insert_epi8(__m128i a, int b,
  3430. // __constrange(0,16) int imm)
  3431. #define _mm_insert_epi8(a, b, imm) \
  3432. __extension__({ \
  3433. vreinterpretq_m128i_s8( \
  3434. vsetq_lane_s8((b), vreinterpretq_s8_m128i(a), (imm))); \
  3435. })
  3436. // Extracts the selected signed or unsigned 16-bit integer from a and zero
  3437. // extends.
  3438. // https://msdn.microsoft.com/en-us/library/6dceta0c(v=vs.100).aspx
  3439. // FORCE_INLINE int _mm_extract_epi16(__m128i a, __constrange(0,8) int imm)
  3440. #define _mm_extract_epi16(a, imm) \
  3441. vgetq_lane_u16(vreinterpretq_u16_m128i(a), (imm))
  3442. // Inserts the least significant 16 bits of b into the selected 16-bit integer
  3443. // of a.
  3444. // https://msdn.microsoft.com/en-us/library/kaze8hz1%28v=vs.100%29.aspx
  3445. // FORCE_INLINE __m128i _mm_insert_epi16(__m128i a, int b,
  3446. // __constrange(0,8) int imm)
  3447. #define _mm_insert_epi16(a, b, imm) \
  3448. __extension__({ \
  3449. vreinterpretq_m128i_s16(vsetq_lane_s16( \
  3450. (b), vreinterpretq_s16_m128i(a), (imm))); \
  3451. })
  3452. // Extracts the selected signed or unsigned 32-bit integer from a and zero
  3453. // extends.
  3454. // FORCE_INLINE int _mm_extract_epi32(__m128i a, __constrange(0,4) int imm)
  3455. #define _mm_extract_epi32(a, imm) \
  3456. vgetq_lane_s32(vreinterpretq_s32_m128i(a), (imm))
  3457. // Extracts the selected single-precision (32-bit) floating-point from a.
  3458. // FORCE_INLINE int _mm_extract_ps(__m128 a, __constrange(0,4) int imm)
  3459. #define _mm_extract_ps(a, imm) vgetq_lane_s32(vreinterpretq_s32_m128(a), (imm))
  3460. // Inserts the least significant 32 bits of b into the selected 32-bit integer
  3461. // of a.
  3462. // FORCE_INLINE __m128i _mm_insert_epi32(__m128i a, int b,
  3463. // __constrange(0,4) int imm)
  3464. #define _mm_insert_epi32(a, b, imm) \
  3465. __extension__({ \
  3466. vreinterpretq_m128i_s32(vsetq_lane_s32( \
  3467. (b), vreinterpretq_s32_m128i(a), (imm))); \
  3468. })
  3469. // Extracts the selected signed or unsigned 64-bit integer from a and zero
  3470. // extends.
  3471. // FORCE_INLINE __int64 _mm_extract_epi64(__m128i a, __constrange(0,2) int imm)
  3472. #define _mm_extract_epi64(a, imm) \
  3473. vgetq_lane_s64(vreinterpretq_s64_m128i(a), (imm))
  3474. // Inserts the least significant 64 bits of b into the selected 64-bit integer
  3475. // of a.
  3476. // FORCE_INLINE __m128i _mm_insert_epi64(__m128i a, __int64 b,
  3477. // __constrange(0,2) int imm)
  3478. #define _mm_insert_epi64(a, b, imm) \
  3479. __extension__({ \
  3480. vreinterpretq_m128i_s64(vsetq_lane_s64( \
  3481. (b), vreinterpretq_s64_m128i(a), (imm))); \
  3482. })
  3483. // Count the number of bits set to 1 in unsigned 32-bit integer a, and
  3484. // return that count in dst.
  3485. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u32
  3486. FORCE_INLINE int _mm_popcnt_u32(unsigned int a)
  3487. {
  3488. #if defined(__aarch64__)
  3489. #if __has_builtin(__builtin_popcount)
  3490. return __builtin_popcount(a);
  3491. #else
  3492. return (int)vaddlv_u8(vcnt_u8(vcreate_u8((uint64_t)a)));
  3493. #endif
  3494. #else
// vld1_u8/vst1_u32 on a 32-bit variable would read and write 8 bytes and
// overrun the object, so widen the input to 64 bits first; the upper 32 zero
// bits contribute nothing to the count, which ends up in the low lane.
uint8x8_t count8x8_val = vcnt_u8(vcreate_u8((uint64_t) a));
uint16x4_t count16x4_val = vpaddl_u8(count8x8_val);
uint32x2_t count32x2_val = vpaddl_u16(count16x4_val);
return (int) vget_lane_u32(count32x2_val, 0);
  3505. #endif
  3506. }
  3507. // Count the number of bits set to 1 in unsigned 64-bit integer a, and
  3508. // return that count in dst.
  3509. // https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_popcnt_u64
  3510. FORCE_INLINE int64_t _mm_popcnt_u64(uint64_t a)
  3511. {
  3512. #if defined(__aarch64__)
  3513. #if __has_builtin(__builtin_popcountll)
  3514. return __builtin_popcountll(a);
  3515. #else
  3516. return (int64_t)vaddlv_u8(vcnt_u8(vcreate_u8(a)));
  3517. #endif
  3518. #else
  3519. uint64_t count = 0;
  3520. uint8x8_t input_val, count8x8_val;
  3521. uint16x4_t count16x4_val;
  3522. uint32x2_t count32x2_val;
  3523. uint64x1_t count64x1_val;
  3524. input_val = vld1_u8((uint8_t *)&a);
  3525. count8x8_val = vcnt_u8(input_val);
  3526. count16x4_val = vpaddl_u8(count8x8_val);
  3527. count32x2_val = vpaddl_u16(count16x4_val);
  3528. count64x1_val = vpaddl_u32(count32x2_val);
  3529. vst1_u64(&count, count64x1_val);
  3530. return count;
  3531. #endif
  3532. }

// Macro: Transpose the 4x4 matrix formed by the 4 rows of single-precision
// (32-bit) floating-point elements in row0, row1, row2, and row3, and store the
// transposed matrix in these vectors (row0 now contains column 0, etc.).
// https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=MM_TRANSPOSE4_PS&expand=5949
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
    do {                                          \
        __m128 tmp0, tmp1, tmp2, tmp3;            \
        tmp0 = _mm_unpacklo_ps(row0, row1);       \
        tmp2 = _mm_unpacklo_ps(row2, row3);       \
        tmp1 = _mm_unpackhi_ps(row0, row1);       \
        tmp3 = _mm_unpackhi_ps(row2, row3);       \
        row0 = _mm_movelh_ps(tmp0, tmp2);         \
        row1 = _mm_movehl_ps(tmp2, tmp0);         \
        row2 = _mm_movelh_ps(tmp1, tmp3);         \
        row3 = _mm_movehl_ps(tmp3, tmp1);         \
    } while (0)
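
// Illustrative usage (a sketch, not compiled as part of this header; it
// assumes the _mm_setr_ps helper defined elsewhere in this file). The macro
// transposes the four rows in place:
//
//   __m128 r0 = _mm_setr_ps(1, 2, 3, 4);
//   __m128 r1 = _mm_setr_ps(5, 6, 7, 8);
//   __m128 r2 = _mm_setr_ps(9, 10, 11, 12);
//   __m128 r3 = _mm_setr_ps(13, 14, 15, 16);
//   _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
//   // r0 now holds column 0: (1, 5, 9, 13); r1 holds (2, 6, 10, 14); etc.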

/* Crypto Extensions */

#if defined(__ARM_FEATURE_CRYPTO)
// Wraps vmull_p64
FORCE_INLINE uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
{
    poly64_t a = vget_lane_p64(vreinterpret_p64_u64(_a), 0);
    poly64_t b = vget_lane_p64(vreinterpret_p64_u64(_b), 0);
    return vreinterpretq_u64_p128(vmull_p64(a, b));
}
#else  // ARMv7 polyfill
// ARMv7/some A64 lacks vmull_p64, but it has vmull_p8.
//
// vmull_p8 calculates 8 8-bit->16-bit polynomial multiplies, but we need a
// 64-bit->128-bit polynomial multiply.
//
// It needs some work and is somewhat slow, but it is still faster than all
// known scalar methods.
//
// Algorithm adapted to C from
// https://www.workofard.com/2017/07/ghash-for-low-end-cores/, which is adapted
// from "Fast Software Polynomial Multiplication on ARM Processors Using the
// NEON Engine" by Danilo Camara, Conrado Gouvea, Julio Lopez and Ricardo Dahab
// (https://hal.inria.fr/hal-01506572)
static uint64x2_t _sse2neon_vmull_p64(uint64x1_t _a, uint64x1_t _b)
{
    poly8x8_t a = vreinterpret_p8_u64(_a);
    poly8x8_t b = vreinterpret_p8_u64(_b);

    // Masks
    uint8x16_t k48_32 = vcombine_u8(vcreate_u8(0x0000ffffffffffff),
                                    vcreate_u8(0x00000000ffffffff));
    uint8x16_t k16_00 = vcombine_u8(vcreate_u8(0x000000000000ffff),
                                    vcreate_u8(0x0000000000000000));

    // Do the multiplies, rotating with vext to get all combinations
    uint8x16_t d = vreinterpretq_u8_p16(vmull_p8(a, b));  // D = A0 * B0
    uint8x16_t e = vreinterpretq_u8_p16(
        vmull_p8(a, vext_p8(b, b, 1)));  // E = A0 * B1
    uint8x16_t f = vreinterpretq_u8_p16(
        vmull_p8(vext_p8(a, a, 1), b));  // F = A1 * B0
    uint8x16_t g = vreinterpretq_u8_p16(
        vmull_p8(a, vext_p8(b, b, 2)));  // G = A0 * B2
    uint8x16_t h = vreinterpretq_u8_p16(
        vmull_p8(vext_p8(a, a, 2), b));  // H = A2 * B0
    uint8x16_t i = vreinterpretq_u8_p16(
        vmull_p8(a, vext_p8(b, b, 3)));  // I = A0 * B3
    uint8x16_t j = vreinterpretq_u8_p16(
        vmull_p8(vext_p8(a, a, 3), b));  // J = A3 * B0
    uint8x16_t k = vreinterpretq_u8_p16(
        vmull_p8(a, vext_p8(b, b, 4)));  // K = A0 * B4

    // Add cross products
    uint8x16_t l = veorq_u8(e, f);  // L = E + F
    uint8x16_t m = veorq_u8(g, h);  // M = G + H
    uint8x16_t n = veorq_u8(i, j);  // N = I + J

    // Interleave. Using vzip1 and vzip2 prevents Clang from emitting TBL
    // instructions.
#if defined(__aarch64__)
    uint8x16_t lm_p0 = vreinterpretq_u8_u64(
        vzip1q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
    uint8x16_t lm_p1 = vreinterpretq_u8_u64(
        vzip2q_u64(vreinterpretq_u64_u8(l), vreinterpretq_u64_u8(m)));
    uint8x16_t nk_p0 = vreinterpretq_u8_u64(
        vzip1q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
    uint8x16_t nk_p1 = vreinterpretq_u8_u64(
        vzip2q_u64(vreinterpretq_u64_u8(n), vreinterpretq_u64_u8(k)));
#else
    uint8x16_t lm_p0 = vcombine_u8(vget_low_u8(l), vget_low_u8(m));
    uint8x16_t lm_p1 = vcombine_u8(vget_high_u8(l), vget_high_u8(m));
    uint8x16_t nk_p0 = vcombine_u8(vget_low_u8(n), vget_low_u8(k));
    uint8x16_t nk_p1 = vcombine_u8(vget_high_u8(n), vget_high_u8(k));
#endif
    // t0 = (L) (P0 + P1) << 8
    // t1 = (M) (P2 + P3) << 16
    uint8x16_t t0t1_tmp = veorq_u8(lm_p0, lm_p1);
    uint8x16_t t0t1_h = vandq_u8(lm_p1, k48_32);
    uint8x16_t t0t1_l = veorq_u8(t0t1_tmp, t0t1_h);

    // t2 = (N) (P4 + P5) << 24
    // t3 = (K) (P6 + P7) << 32
    uint8x16_t t2t3_tmp = veorq_u8(nk_p0, nk_p1);
    uint8x16_t t2t3_h = vandq_u8(nk_p1, k16_00);
    uint8x16_t t2t3_l = veorq_u8(t2t3_tmp, t2t3_h);

    // De-interleave
#if defined(__aarch64__)
    uint8x16_t t0 = vreinterpretq_u8_u64(vuzp1q_u64(
        vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
    uint8x16_t t1 = vreinterpretq_u8_u64(vuzp2q_u64(
        vreinterpretq_u64_u8(t0t1_l), vreinterpretq_u64_u8(t0t1_h)));
    uint8x16_t t2 = vreinterpretq_u8_u64(vuzp1q_u64(
        vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
    uint8x16_t t3 = vreinterpretq_u8_u64(vuzp2q_u64(
        vreinterpretq_u64_u8(t2t3_l), vreinterpretq_u64_u8(t2t3_h)));
#else
    uint8x16_t t1 = vcombine_u8(vget_high_u8(t0t1_l), vget_high_u8(t0t1_h));
    uint8x16_t t0 = vcombine_u8(vget_low_u8(t0t1_l), vget_low_u8(t0t1_h));
    uint8x16_t t3 = vcombine_u8(vget_high_u8(t2t3_l), vget_high_u8(t2t3_h));
    uint8x16_t t2 = vcombine_u8(vget_low_u8(t2t3_l), vget_low_u8(t2t3_h));
#endif
    // Shift the cross products
    uint8x16_t t0_shift = vextq_u8(t0, t0, 15);  // t0 << 8
    uint8x16_t t1_shift = vextq_u8(t1, t1, 14);  // t1 << 16
    uint8x16_t t2_shift = vextq_u8(t2, t2, 13);  // t2 << 24
    uint8x16_t t3_shift = vextq_u8(t3, t3, 12);  // t3 << 32

    // Accumulate the products
    uint8x16_t cross1 = veorq_u8(t0_shift, t1_shift);
    uint8x16_t cross2 = veorq_u8(t2_shift, t3_shift);
    uint8x16_t mix = veorq_u8(d, cross1);
    uint8x16_t r = veorq_u8(mix, cross2);
    return vreinterpretq_u64_u8(r);
}
#endif  // ARMv7 polyfill

FORCE_INLINE __m128i _mm_clmulepi64_si128(__m128i _a, __m128i _b, const int imm)
{
    uint64x2_t a = vreinterpretq_u64_m128i(_a);
    uint64x2_t b = vreinterpretq_u64_m128i(_b);
    switch (imm & 0x11) {
    case 0x00:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_low_u64(a), vget_low_u64(b)));
    case 0x01:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_high_u64(a), vget_low_u64(b)));
    case 0x10:
        return vreinterpretq_m128i_u64(
            _sse2neon_vmull_p64(vget_low_u64(a), vget_high_u64(b)));
    case 0x11:
        return vreinterpretq_m128i_u64(_sse2neon_vmull_p64(
            vget_high_u64(a), vget_high_u64(b)));
    default:
        abort();
    }
}
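
// Illustrative usage (a sketch, not compiled as part of this header; it
// assumes the _mm_set_epi64x helper defined elsewhere in this file). Bit 0
// of imm selects the 64-bit half of a, bit 4 selects the half of b:
//
//   __m128i x = _mm_set_epi64x(0, 0x3);  // low half = 0b11
//   __m128i y = _mm_set_epi64x(0, 0x5);  // low half = 0b101
//   __m128i p = _mm_clmulepi64_si128(x, y, 0x00);
//   // Carry-less (XOR-based) product of 0x3 and 0x5 is 0xF.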

#if !defined(__ARM_FEATURE_CRYPTO) && defined(__aarch64__)
// In the absence of crypto extensions, implement aesenc using regular NEON
// intrinsics instead. See:
// https://www.workofard.com/2017/01/accelerated-aes-for-the-arm64-linux-kernel/
// https://www.workofard.com/2017/07/ghash-for-low-end-cores/ and
// https://github.com/ColinIanKing/linux-next-mirror/blob/b5f466091e130caaf0735976648f72bd5e09aa84/crypto/aegis128-neon-inner.c#L52
// for more information.
// Reproduced with permission of the author.
FORCE_INLINE __m128i _mm_aesenc_si128(__m128i EncBlock, __m128i RoundKey)
{
    static const uint8_t crypto_aes_sbox[256] = {
        0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01,
        0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d,
        0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4,
        0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
        0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7,
        0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2,
        0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e,
        0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
        0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb,
        0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb,
        0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c,
        0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
        0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c,
        0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d,
        0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a,
        0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
        0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3,
        0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d,
        0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a,
        0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
        0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e,
        0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9,
        0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9,
        0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
        0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99,
        0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
    static const uint8_t shift_rows[] = {0x0, 0x5, 0xa, 0xf, 0x4, 0x9,
                                         0xe, 0x3, 0x8, 0xd, 0x2, 0x7,
                                         0xc, 0x1, 0x6, 0xb};
    static const uint8_t ror32by8[] = {0x1, 0x2, 0x3, 0x0, 0x5, 0x6,
                                       0x7, 0x4, 0x9, 0xa, 0xb, 0x8,
                                       0xd, 0xe, 0xf, 0xc};

    uint8x16_t v;
    uint8x16_t w = vreinterpretq_u8_m128i(EncBlock);

    // shift rows
    w = vqtbl1q_u8(w, vld1q_u8(shift_rows));

    // sub bytes
    v = vqtbl4q_u8(vld1q_u8_x4(crypto_aes_sbox), w);
    v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x40), w - 0x40);
    v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0x80), w - 0x80);
    v = vqtbx4q_u8(v, vld1q_u8_x4(crypto_aes_sbox + 0xc0), w - 0xc0);

    // mix columns
    // (v << 1) with conditional reduction by 0x1b multiplies each byte by 2
    // in GF(2^8); the rotations below fold in the remaining column terms.
    w = (v << 1) ^ (uint8x16_t)(((int8x16_t)v >> 7) & 0x1b);
    w ^= (uint8x16_t)vrev32q_u16((uint16x8_t)v);
    w ^= vqtbl1q_u8(v ^ w, vld1q_u8(ror32by8));

    // add round key
    return vreinterpretq_m128i_u8(w) ^ RoundKey;
}
#elif defined(__ARM_FEATURE_CRYPTO)
// Implements an equivalent of 'aesenc' by combining AESE (with an empty key)
// and AESMC, then applying the real round key as an xor operation. This
// unfortunately means an additional xor op; the compiler should be able to
// optimise this away for repeated calls, however.
// See
// https://blog.michaelbrase.com/2018/05/08/emulating-x86-aes-intrinsics-on-armv8-a
// for more details.
FORCE_INLINE __m128i _mm_aesenc_si128(__m128i a, __m128i b)
{
    return vreinterpretq_m128i_u8(
        vaesmcq_u8(
            vaeseq_u8(vreinterpretq_u8_m128i(a), vdupq_n_u8(0))) ^
        vreinterpretq_u8_m128i(b));
}
#endif
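
// Illustrative usage of _mm_aesenc_si128 (a sketch, not compiled as part of
// this header; the state and round-key values are placeholders, and a real
// caller would loop over the expanded key schedule):
//
//   __m128i state = _mm_set1_epi8(0x42);      // placeholder AES state
//   __m128i round_key = _mm_set1_epi8(0x1b);  // placeholder round key
//   // One ShiftRows/SubBytes/MixColumns/AddRoundKey round:
//   state = _mm_aesenc_si128(state, round_key);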

/* Streaming Extensions */

// Guarantees that every preceding store is globally visible before any
// subsequent store.
// https://msdn.microsoft.com/en-us/library/5h2w73d1%28v=vs.90%29.aspx
FORCE_INLINE void _mm_sfence(void)
{
    __sync_synchronize();
}

// Stores the data in a to the address p without polluting the caches. If the
// cache line containing address p is already in the cache, the cache will be
// updated. Address p must be 16-byte aligned.
// https://msdn.microsoft.com/en-us/library/ba08y07y%28v=vs.90%29.aspx
FORCE_INLINE void _mm_stream_si128(__m128i *p, __m128i a)
{
#if __has_builtin(__builtin_nontemporal_store)
    __builtin_nontemporal_store(a, p);
#else
    vst1q_s64((int64_t *)p, vreinterpretq_s64_m128i(a));
#endif
}
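
// Illustrative usage (a sketch, not compiled as part of this header;
// ALIGN_STRUCT is the alignment helper used throughout this file, and the
// destination must be 16-byte aligned):
//
//   ALIGN_STRUCT(16) int64_t buf[2];
//   _mm_stream_si128((__m128i *) buf, _mm_set_epi64x(2, 1));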

// Cache line containing p is flushed and invalidated from all caches in the
// coherency domain.
// https://msdn.microsoft.com/en-us/library/ba08y07y(v=vs.100).aspx
FORCE_INLINE void _mm_clflush(void const *p)
{
    (void)p;
    // No corresponding NEON instruction; this is a no-op.
}

// Allocate aligned blocks of memory.
// https://software.intel.com/en-us/
//     cpp-compiler-developer-guide-and-reference-allocating-and-freeing-aligned-memory-blocks
FORCE_INLINE void *_mm_malloc(size_t size, size_t align)
{
    void *ptr;
    if (align == 1)
        return malloc(size);
    if (align == 2 || (sizeof(void *) == 8 && align == 4))
        align = sizeof(void *);
    if (!posix_memalign(&ptr, align, size))
        return ptr;
    return NULL;
}

FORCE_INLINE void _mm_free(void *addr)
{
    free(addr);
}
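
// Illustrative usage (a sketch, not compiled as part of this header):
// allocate a 16-byte-aligned block, use it as a __m128 backing store, then
// release it with _mm_free.
//
//   float *buf = (float *) _mm_malloc(4 * sizeof(float), 16);
//   if (buf) {
//       _mm_store_ps(buf, _mm_set1_ps(1.0f));
//       _mm_free(buf);
//   }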

// Starting with the initial value in crc, accumulates a CRC32 value for
// unsigned 8-bit integer v.
// https://msdn.microsoft.com/en-us/library/bb514036(v=vs.100)
FORCE_INLINE uint32_t _mm_crc32_u8(uint32_t crc, uint8_t v)
{
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
    __asm__ __volatile__("crc32cb %w[c], %w[c], %w[v]\n\t"
                         : [c] "+r"(crc)
                         : [v] "r"(v));
#else
    crc ^= v;
    for (int bit = 0; bit < 8; bit++) {
        if (crc & 1)
            crc = (crc >> 1) ^ UINT32_C(0x82f63b78);
        else
            crc = (crc >> 1);
    }
#endif
    return crc;
}

// Starting with the initial value in crc, accumulates a CRC32 value for
// unsigned 16-bit integer v.
// https://msdn.microsoft.com/en-us/library/bb531411(v=vs.100)
FORCE_INLINE uint32_t _mm_crc32_u16(uint32_t crc, uint16_t v)
{
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
    __asm__ __volatile__("crc32ch %w[c], %w[c], %w[v]\n\t"
                         : [c] "+r"(crc)
                         : [v] "r"(v));
#else
    crc = _mm_crc32_u8(crc, v & 0xff);
    crc = _mm_crc32_u8(crc, (v >> 8) & 0xff);
#endif
    return crc;
}

// Starting with the initial value in crc, accumulates a CRC32 value for
// unsigned 32-bit integer v.
// https://msdn.microsoft.com/en-us/library/bb531394(v=vs.100)
FORCE_INLINE uint32_t _mm_crc32_u32(uint32_t crc, uint32_t v)
{
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
    __asm__ __volatile__("crc32cw %w[c], %w[c], %w[v]\n\t"
                         : [c] "+r"(crc)
                         : [v] "r"(v));
#else
    crc = _mm_crc32_u16(crc, v & 0xffff);
    crc = _mm_crc32_u16(crc, (v >> 16) & 0xffff);
#endif
    return crc;
}

// Starting with the initial value in crc, accumulates a CRC32 value for
// unsigned 64-bit integer v.
// https://msdn.microsoft.com/en-us/library/bb514033(v=vs.100)
FORCE_INLINE uint64_t _mm_crc32_u64(uint64_t crc, uint64_t v)
{
#if defined(__aarch64__) && defined(__ARM_FEATURE_CRC32)
    __asm__ __volatile__("crc32cx %w[c], %w[c], %x[v]\n\t"
                         : [c] "+r"(crc)
                         : [v] "r"(v));
#else
    crc = _mm_crc32_u32((uint32_t)(crc), v & 0xffffffff);
    crc = _mm_crc32_u32((uint32_t)(crc), (v >> 32) & 0xffffffff);
#endif
    return crc;
}
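
// Illustrative usage (a sketch, not compiled as part of this header):
// accumulate a CRC-32C over a byte buffer. Whether the caller seeds with 0
// or ~0 (and post-inverts) depends on the protocol; this hypothetical
// crc32c_bytes helper only shows the accumulation pattern.
//
//   static uint32_t crc32c_bytes(const uint8_t *p, size_t n, uint32_t crc)
//   {
//       for (size_t i = 0; i < n; i++)
//           crc = _mm_crc32_u8(crc, p[i]);
//       return crc;
//   }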

#if defined(__GNUC__) || defined(__clang__)
#pragma pop_macro("ALIGN_STRUCT")
#pragma pop_macro("FORCE_INLINE")
#endif

#endif