- /*
- * Based on public domain code available at: http://cr.yp.to/snuffle.html
- *
- * Modifications and C-native SSE macro based SSE implementation by
- * Adam Ierymenko <[email protected]>.
- *
- * Since the original was public domain, this is too.
- */
- #include "Constants.hpp"
- #include "Salsa20.hpp"
- #define ROTATE(v,c) (((v) << (c)) | ((v) >> (32 - (c))))
- #define XOR(v,w) ((v) ^ (w))
- #define PLUS(v,w) ((uint32_t)((v) + (w)))
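- // ROTATE is a plain 32-bit left rotation, e.g. ROTATE(0x80000001,1) == 0x00000003;
- // PLUS casts back to uint32_t so the quarter-round arithmetic below is well-defined mod 2^32.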
- // Set up load/store macros with the appropriate endianness (we don't use these in SSE mode)
- #ifndef ZT_SALSA20_SSE
- #if __BYTE_ORDER == __LITTLE_ENDIAN
- #ifdef ZT_NO_TYPE_PUNNING
- // Slower version that does not use type punning
- #define U8TO32_LITTLE(p) ( ((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24) )
- static inline void U32TO8_LITTLE(uint8_t *const c,const uint32_t v) { c[0] = (uint8_t)v; c[1] = (uint8_t)(v >> 8); c[2] = (uint8_t)(v >> 16); c[3] = (uint8_t)(v >> 24); }
- #else
- // Fast version that just does 32-bit load/store
- #define U8TO32_LITTLE(p) (*((const uint32_t *)((const void *)(p))))
- #define U32TO8_LITTLE(c,v) *((uint32_t *)((void *)(c))) = (v)
- #endif // ZT_NO_TYPE_PUNNING
- #else // __BYTE_ORDER == __BIG_ENDIAN (we don't support anything else... does MIDDLE_ENDIAN even still exist?)
- #ifdef __GNUC__
- // Use GNUC builtin bswap macros on big-endian machines if available
- #define U8TO32_LITTLE(p) __builtin_bswap32(*((const uint32_t *)((const void *)(p))))
- #define U32TO8_LITTLE(c,v) *((uint32_t *)((void *)(c))) = __builtin_bswap32((v))
- #else // no __GNUC__
- // Otherwise do it the slow, manual way on BE machines
- #define U8TO32_LITTLE(p) ( ((uint32_t)(p)[0]) | ((uint32_t)(p)[1] << 8) | ((uint32_t)(p)[2] << 16) | ((uint32_t)(p)[3] << 24) )
- static inline void U32TO8_LITTLE(uint8_t *const c,const uint32_t v) { c[0] = (uint8_t)v; c[1] = (uint8_t)(v >> 8); c[2] = (uint8_t)(v >> 16); c[3] = (uint8_t)(v >> 24); }
- #endif // __GNUC__ or not
- #endif // __BYTE_ORDER little or big?
- #endif // !ZT_SALSA20_SSE
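- // Salsa20 is specified over little-endian 32-bit words, so the bytes
- // {0x65,0x78,0x70,0x61} ("expa") load as 0x61707865 -- the first word of the
- // "expand 32-byte k" constant used in init() below.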
- // Statically compute and define SSE constants
- #ifdef ZT_SALSA20_SSE
- class _s20sseconsts
- {
- public:
- _s20sseconsts()
- {
- maskLo32 = _mm_shuffle_epi32(_mm_cvtsi32_si128(-1), _MM_SHUFFLE(1, 0, 1, 0));
- maskHi32 = _mm_slli_epi64(maskLo32, 32);
- }
- __m128i maskLo32,maskHi32;
- };
- static const _s20sseconsts _S20SSECONSTANTS;
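- // maskLo32 selects the low 32 bits of each 64-bit lane and maskHi32 the high
- // 32 bits; the crypt functions use them to recombine the permuted SSE state
- // words into canonical output order.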
- #endif
- namespace ZeroTier {
- void Salsa20::init(const void *key,const void *iv)
- {
- #ifdef ZT_SALSA20_SSE
- const uint32_t *k = (const uint32_t *)key;
- _state.i[0] = 0x61707865;
- _state.i[3] = 0x6b206574;
- _state.i[13] = k[0];
- _state.i[10] = k[1];
- _state.i[7] = k[2];
- _state.i[4] = k[3];
- k += 4;
- _state.i[1] = 0x3320646e;
- _state.i[2] = 0x79622d32;
- _state.i[15] = k[0];
- _state.i[12] = k[1];
- _state.i[9] = k[2];
- _state.i[6] = k[3];
- _state.i[14] = ((const uint32_t *)iv)[0];
- _state.i[11] = ((const uint32_t *)iv)[1];
- _state.i[5] = 0;
- _state.i[8] = 0;
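- // Note the scrambled indices: the SSE code keeps the 16 state words in a
- // permuted order suited to 128-bit row operations, so in this layout the
- // 64-bit block counter lives at i[8] (low word) and i[5] (high word).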
- #else
- const char *const constants = "expand 32-byte k";
- const uint8_t *k = (const uint8_t *)key;
- _state.i[1] = U8TO32_LITTLE(k + 0);
- _state.i[2] = U8TO32_LITTLE(k + 4);
- _state.i[3] = U8TO32_LITTLE(k + 8);
- _state.i[4] = U8TO32_LITTLE(k + 12);
- k += 16;
- _state.i[5] = U8TO32_LITTLE(constants + 4);
- _state.i[6] = U8TO32_LITTLE(((const uint8_t *)iv) + 0);
- _state.i[7] = U8TO32_LITTLE(((const uint8_t *)iv) + 4);
- _state.i[8] = 0;
- _state.i[9] = 0;
- _state.i[10] = U8TO32_LITTLE(constants + 8);
- _state.i[11] = U8TO32_LITTLE(k + 0);
- _state.i[12] = U8TO32_LITTLE(k + 4);
- _state.i[13] = U8TO32_LITTLE(k + 8);
- _state.i[14] = U8TO32_LITTLE(k + 12);
- _state.i[15] = U8TO32_LITTLE(constants + 12);
- _state.i[0] = U8TO32_LITTLE(constants + 0);
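- // Canonical Salsa20 layout: the "expand 32-byte k" constants sit at
- // i[0], i[5], i[10] and i[15]; key words at i[1..4] and i[11..14];
- // the 64-bit IV at i[6]/i[7]; and the block counter at i[8]/i[9].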
- #endif
- }
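- // Illustrative usage sketch (key32/iv8 are placeholder buffers, not part of
- // this file). Encryption is keystream XOR, so re-initializing and calling
- // the same function again decrypts:
- //
- //   ZeroTier::Salsa20 s20;
- //   s20.init(key32,iv8);            // 256-bit key, 64-bit IV/nonce
- //   s20.crypt12(plain,cipher,len);  // cipher = keystream XOR plain
- //   s20.init(key32,iv8);            // reset the block counter
- //   s20.crypt12(cipher,plain,len);  // plain = keystream XOR cipher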
- void Salsa20::crypt12(const void *in,void *out,unsigned int bytes)
- {
- uint8_t tmp[64];
- const uint8_t *m = (const uint8_t *)in;
- uint8_t *c = (uint8_t *)out;
- uint8_t *ctarget = c;
- unsigned int i;
- #ifndef ZT_SALSA20_SSE
- uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
- uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
- #endif
- if (!bytes)
- return;
- #ifndef ZT_SALSA20_SSE
- j0 = _state.i[0];
- j1 = _state.i[1];
- j2 = _state.i[2];
- j3 = _state.i[3];
- j4 = _state.i[4];
- j5 = _state.i[5];
- j6 = _state.i[6];
- j7 = _state.i[7];
- j8 = _state.i[8];
- j9 = _state.i[9];
- j10 = _state.i[10];
- j11 = _state.i[11];
- j12 = _state.i[12];
- j13 = _state.i[13];
- j14 = _state.i[14];
- j15 = _state.i[15];
- #endif
- for (;;) {
- if (bytes < 64) {
- for (i = 0;i < bytes;++i)
- tmp[i] = m[i];
- m = tmp;
- ctarget = c;
- c = tmp;
- }
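- // (When fewer than 64 bytes remain, the tail is staged in tmp above, a full
- // 64-byte block is produced there, and only 'bytes' of it are copied back to
- // the real output at the bottom of the loop.)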
- #ifdef ZT_SALSA20_SSE
- __m128i X0 = _mm_loadu_si128((const __m128i *)&(_state.v[0]));
- __m128i X1 = _mm_loadu_si128((const __m128i *)&(_state.v[1]));
- __m128i X2 = _mm_loadu_si128((const __m128i *)&(_state.v[2]));
- __m128i X3 = _mm_loadu_si128((const __m128i *)&(_state.v[3]));
- __m128i T;
- __m128i X0s = X0;
- __m128i X1s = X1;
- __m128i X2s = X2;
- __m128i X3s = X3;
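- // SSE2 has no 32-bit rotate, so each ROTATE(x,c) below is built from
- // _mm_slli_epi32/_mm_srli_epi32 plus XORs; the _mm_shuffle_epi32 calls with
- // 0x93, 0x4E and 0x39 rotate the four 32-bit lanes by 1, 2 and 3 positions
- // so the next half-round works on realigned diagonals.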
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- X0 = _mm_add_epi32(X0s,X0);
- X1 = _mm_add_epi32(X1s,X1);
- X2 = _mm_add_epi32(X2s,X2);
- X3 = _mm_add_epi32(X3s,X3);
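- // Feed-forward: add the saved input words (X0s..X3s) back into the round
- // output, the step that makes the Salsa20 core non-invertible.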
- __m128i k02 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32)), _MM_SHUFFLE(0, 1, 2, 3));
- __m128i k13 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32)), _MM_SHUFFLE(0, 1, 2, 3));
- __m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
- __m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
- _mm_storeu_ps(reinterpret_cast<float *>(c),_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m))))));
- _mm_storeu_ps(reinterpret_cast<float *>(c) + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 4)))));
- _mm_storeu_ps(reinterpret_cast<float *>(c) + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 8)))));
- _mm_storeu_ps(reinterpret_cast<float *>(c) + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 12)))));
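- // k02/k13/k20/k31 shuffle and mask the permuted state back into canonical
- // word order; the unpack/XOR/store sequence then applies the 64-byte
- // keystream block directly over the message.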
- if (!(++_state.i[8])) {
- ++_state.i[5]; // state reordered for SSE
- /* stopping at 2^70 bytes per nonce is user's responsibility */
- }
- #else
- x0 = j0;
- x1 = j1;
- x2 = j2;
- x3 = j3;
- x4 = j4;
- x5 = j5;
- x6 = j6;
- x7 = j7;
- x8 = j8;
- x9 = j9;
- x10 = j10;
- x11 = j11;
- x12 = j12;
- x13 = j13;
- x14 = j14;
- x15 = j15;
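- // Each "2X round" below is one column round followed by one row round;
- // crypt12 runs six of them for Salsa20/12's twelve rounds.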
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- x0 = PLUS(x0,j0);
- x1 = PLUS(x1,j1);
- x2 = PLUS(x2,j2);
- x3 = PLUS(x3,j3);
- x4 = PLUS(x4,j4);
- x5 = PLUS(x5,j5);
- x6 = PLUS(x6,j6);
- x7 = PLUS(x7,j7);
- x8 = PLUS(x8,j8);
- x9 = PLUS(x9,j9);
- x10 = PLUS(x10,j10);
- x11 = PLUS(x11,j11);
- x12 = PLUS(x12,j12);
- x13 = PLUS(x13,j13);
- x14 = PLUS(x14,j14);
- x15 = PLUS(x15,j15);
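- // Feed-forward: the sixteen PLUS ops above add the original state back in;
- // the words are then serialized little-endian and XORed with the message.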
- U32TO8_LITTLE(c + 0,XOR(x0,U8TO32_LITTLE(m + 0)));
- U32TO8_LITTLE(c + 4,XOR(x1,U8TO32_LITTLE(m + 4)));
- U32TO8_LITTLE(c + 8,XOR(x2,U8TO32_LITTLE(m + 8)));
- U32TO8_LITTLE(c + 12,XOR(x3,U8TO32_LITTLE(m + 12)));
- U32TO8_LITTLE(c + 16,XOR(x4,U8TO32_LITTLE(m + 16)));
- U32TO8_LITTLE(c + 20,XOR(x5,U8TO32_LITTLE(m + 20)));
- U32TO8_LITTLE(c + 24,XOR(x6,U8TO32_LITTLE(m + 24)));
- U32TO8_LITTLE(c + 28,XOR(x7,U8TO32_LITTLE(m + 28)));
- U32TO8_LITTLE(c + 32,XOR(x8,U8TO32_LITTLE(m + 32)));
- U32TO8_LITTLE(c + 36,XOR(x9,U8TO32_LITTLE(m + 36)));
- U32TO8_LITTLE(c + 40,XOR(x10,U8TO32_LITTLE(m + 40)));
- U32TO8_LITTLE(c + 44,XOR(x11,U8TO32_LITTLE(m + 44)));
- U32TO8_LITTLE(c + 48,XOR(x12,U8TO32_LITTLE(m + 48)));
- U32TO8_LITTLE(c + 52,XOR(x13,U8TO32_LITTLE(m + 52)));
- U32TO8_LITTLE(c + 56,XOR(x14,U8TO32_LITTLE(m + 56)));
- U32TO8_LITTLE(c + 60,XOR(x15,U8TO32_LITTLE(m + 60)));
- if (!(++j8)) {
- ++j9;
- /* stopping at 2^70 bytes per nonce is user's responsibility */
- }
- #endif
- if (bytes <= 64) {
- if (bytes < 64) {
- for (i = 0;i < bytes;++i)
- ctarget[i] = c[i];
- }
- #ifndef ZT_SALSA20_SSE
- _state.i[8] = j8;
- _state.i[9] = j9;
- #endif
- return;
- }
- bytes -= 64;
- c += 64;
- m += 64;
- }
- }
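- // crypt20 is structurally identical to crypt12 but runs ten double rounds
- // (twenty rounds total), i.e. the full-strength Salsa20/20 core.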
- void Salsa20::crypt20(const void *in,void *out,unsigned int bytes)
- {
- uint8_t tmp[64];
- const uint8_t *m = (const uint8_t *)in;
- uint8_t *c = (uint8_t *)out;
- uint8_t *ctarget = c;
- unsigned int i;
- #ifndef ZT_SALSA20_SSE
- uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
- uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
- #endif
- if (!bytes)
- return;
- #ifndef ZT_SALSA20_SSE
- j0 = _state.i[0];
- j1 = _state.i[1];
- j2 = _state.i[2];
- j3 = _state.i[3];
- j4 = _state.i[4];
- j5 = _state.i[5];
- j6 = _state.i[6];
- j7 = _state.i[7];
- j8 = _state.i[8];
- j9 = _state.i[9];
- j10 = _state.i[10];
- j11 = _state.i[11];
- j12 = _state.i[12];
- j13 = _state.i[13];
- j14 = _state.i[14];
- j15 = _state.i[15];
- #endif
- for (;;) {
- if (bytes < 64) {
- for (i = 0;i < bytes;++i)
- tmp[i] = m[i];
- m = tmp;
- ctarget = c;
- c = tmp;
- }
- #ifdef ZT_SALSA20_SSE
- __m128i X0 = _mm_loadu_si128((const __m128i *)&(_state.v[0]));
- __m128i X1 = _mm_loadu_si128((const __m128i *)&(_state.v[1]));
- __m128i X2 = _mm_loadu_si128((const __m128i *)&(_state.v[2]));
- __m128i X3 = _mm_loadu_si128((const __m128i *)&(_state.v[3]));
- __m128i T;
- __m128i X0s = X0;
- __m128i X1s = X1;
- __m128i X2s = X2;
- __m128i X3s = X3;
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- // 2X round -------------------------------------------------------------
- T = _mm_add_epi32(X0, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X1, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X3, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x93);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x39);
- T = _mm_add_epi32(X0, X1);
- X3 = _mm_xor_si128(_mm_xor_si128(X3, _mm_slli_epi32(T, 7)), _mm_srli_epi32(T, 25));
- T = _mm_add_epi32(X3, X0);
- X2 = _mm_xor_si128(_mm_xor_si128(X2, _mm_slli_epi32(T, 9)), _mm_srli_epi32(T, 23));
- T = _mm_add_epi32(X2, X3);
- X1 = _mm_xor_si128(_mm_xor_si128(X1, _mm_slli_epi32(T, 13)), _mm_srli_epi32(T, 19));
- T = _mm_add_epi32(X1, X2);
- X0 = _mm_xor_si128(_mm_xor_si128(X0, _mm_slli_epi32(T, 18)), _mm_srli_epi32(T, 14));
- X1 = _mm_shuffle_epi32(X1, 0x39);
- X2 = _mm_shuffle_epi32(X2, 0x4E);
- X3 = _mm_shuffle_epi32(X3, 0x93);
- X0 = _mm_add_epi32(X0s,X0);
- X1 = _mm_add_epi32(X1s,X1);
- X2 = _mm_add_epi32(X2s,X2);
- X3 = _mm_add_epi32(X3s,X3);
- __m128i k02 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X0, 32), _mm_srli_epi64(X3, 32)), _MM_SHUFFLE(0, 1, 2, 3));
- __m128i k13 = _mm_shuffle_epi32(_mm_or_si128(_mm_slli_epi64(X1, 32), _mm_srli_epi64(X0, 32)), _MM_SHUFFLE(0, 1, 2, 3));
- __m128i k20 = _mm_or_si128(_mm_and_si128(X2, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X1, _S20SSECONSTANTS.maskHi32));
- __m128i k31 = _mm_or_si128(_mm_and_si128(X3, _S20SSECONSTANTS.maskLo32), _mm_and_si128(X2, _S20SSECONSTANTS.maskHi32));
- _mm_storeu_ps(reinterpret_cast<float *>(c),_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k02,k20),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m))))));
- _mm_storeu_ps(reinterpret_cast<float *>(c) + 4,_mm_castsi128_ps(_mm_xor_si128(_mm_unpackhi_epi64(k13,k31),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 4)))));
- _mm_storeu_ps(reinterpret_cast<float *>(c) + 8,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k20,k02),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 8)))));
- _mm_storeu_ps(reinterpret_cast<float *>(c) + 12,_mm_castsi128_ps(_mm_xor_si128(_mm_unpacklo_epi64(k31,k13),_mm_castps_si128(_mm_loadu_ps(reinterpret_cast<const float *>(m) + 12)))));
- if (!(++_state.i[8])) {
- ++_state.i[5]; // state reordered for SSE
- /* stopping at 2^70 bytes per nonce is user's responsibility */
- }
- #else
- x0 = j0;
- x1 = j1;
- x2 = j2;
- x3 = j3;
- x4 = j4;
- x5 = j5;
- x6 = j6;
- x7 = j7;
- x8 = j8;
- x9 = j9;
- x10 = j10;
- x11 = j11;
- x12 = j12;
- x13 = j13;
- x14 = j14;
- x15 = j15;
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- // 2X round -------------------------------------------------------------
- x4 = XOR( x4,ROTATE(PLUS( x0,x12), 7));
- x8 = XOR( x8,ROTATE(PLUS( x4, x0), 9));
- x12 = XOR(x12,ROTATE(PLUS( x8, x4),13));
- x0 = XOR( x0,ROTATE(PLUS(x12, x8),18));
- x9 = XOR( x9,ROTATE(PLUS( x5, x1), 7));
- x13 = XOR(x13,ROTATE(PLUS( x9, x5), 9));
- x1 = XOR( x1,ROTATE(PLUS(x13, x9),13));
- x5 = XOR( x5,ROTATE(PLUS( x1,x13),18));
- x14 = XOR(x14,ROTATE(PLUS(x10, x6), 7));
- x2 = XOR( x2,ROTATE(PLUS(x14,x10), 9));
- x6 = XOR( x6,ROTATE(PLUS( x2,x14),13));
- x10 = XOR(x10,ROTATE(PLUS( x6, x2),18));
- x3 = XOR( x3,ROTATE(PLUS(x15,x11), 7));
- x7 = XOR( x7,ROTATE(PLUS( x3,x15), 9));
- x11 = XOR(x11,ROTATE(PLUS( x7, x3),13));
- x15 = XOR(x15,ROTATE(PLUS(x11, x7),18));
- x1 = XOR( x1,ROTATE(PLUS( x0, x3), 7));
- x2 = XOR( x2,ROTATE(PLUS( x1, x0), 9));
- x3 = XOR( x3,ROTATE(PLUS( x2, x1),13));
- x0 = XOR( x0,ROTATE(PLUS( x3, x2),18));
- x6 = XOR( x6,ROTATE(PLUS( x5, x4), 7));
- x7 = XOR( x7,ROTATE(PLUS( x6, x5), 9));
- x4 = XOR( x4,ROTATE(PLUS( x7, x6),13));
- x5 = XOR( x5,ROTATE(PLUS( x4, x7),18));
- x11 = XOR(x11,ROTATE(PLUS(x10, x9), 7));
- x8 = XOR( x8,ROTATE(PLUS(x11,x10), 9));
- x9 = XOR( x9,ROTATE(PLUS( x8,x11),13));
- x10 = XOR(x10,ROTATE(PLUS( x9, x8),18));
- x12 = XOR(x12,ROTATE(PLUS(x15,x14), 7));
- x13 = XOR(x13,ROTATE(PLUS(x12,x15), 9));
- x14 = XOR(x14,ROTATE(PLUS(x13,x12),13));
- x15 = XOR(x15,ROTATE(PLUS(x14,x13),18));
- x0 = PLUS(x0,j0);
- x1 = PLUS(x1,j1);
- x2 = PLUS(x2,j2);
- x3 = PLUS(x3,j3);
- x4 = PLUS(x4,j4);
- x5 = PLUS(x5,j5);
- x6 = PLUS(x6,j6);
- x7 = PLUS(x7,j7);
- x8 = PLUS(x8,j8);
- x9 = PLUS(x9,j9);
- x10 = PLUS(x10,j10);
- x11 = PLUS(x11,j11);
- x12 = PLUS(x12,j12);
- x13 = PLUS(x13,j13);
- x14 = PLUS(x14,j14);
- x15 = PLUS(x15,j15);
- U32TO8_LITTLE(c + 0,XOR(x0,U8TO32_LITTLE(m + 0)));
- U32TO8_LITTLE(c + 4,XOR(x1,U8TO32_LITTLE(m + 4)));
- U32TO8_LITTLE(c + 8,XOR(x2,U8TO32_LITTLE(m + 8)));
- U32TO8_LITTLE(c + 12,XOR(x3,U8TO32_LITTLE(m + 12)));
- U32TO8_LITTLE(c + 16,XOR(x4,U8TO32_LITTLE(m + 16)));
- U32TO8_LITTLE(c + 20,XOR(x5,U8TO32_LITTLE(m + 20)));
- U32TO8_LITTLE(c + 24,XOR(x6,U8TO32_LITTLE(m + 24)));
- U32TO8_LITTLE(c + 28,XOR(x7,U8TO32_LITTLE(m + 28)));
- U32TO8_LITTLE(c + 32,XOR(x8,U8TO32_LITTLE(m + 32)));
- U32TO8_LITTLE(c + 36,XOR(x9,U8TO32_LITTLE(m + 36)));
- U32TO8_LITTLE(c + 40,XOR(x10,U8TO32_LITTLE(m + 40)));
- U32TO8_LITTLE(c + 44,XOR(x11,U8TO32_LITTLE(m + 44)));
- U32TO8_LITTLE(c + 48,XOR(x12,U8TO32_LITTLE(m + 48)));
- U32TO8_LITTLE(c + 52,XOR(x13,U8TO32_LITTLE(m + 52)));
- U32TO8_LITTLE(c + 56,XOR(x14,U8TO32_LITTLE(m + 56)));
- U32TO8_LITTLE(c + 60,XOR(x15,U8TO32_LITTLE(m + 60)));
- if (!(++j8)) {
- ++j9;
- /* stopping at 2^70 bytes per nonce is user's responsibility */
- }
- #endif
- if (bytes <= 64) {
- if (bytes < 64) {
- for (i = 0;i < bytes;++i)
- ctarget[i] = c[i];
- }
- #ifndef ZT_SALSA20_SSE
- _state.i[8] = j8;
- _state.i[9] = j9;
- #endif
- return;
- }
- bytes -= 64;
- c += 64;
- m += 64;
- }
- }
- } // namespace ZeroTier