// AES.hpp
  1. /*
  2. * Copyright (c)2019 ZeroTier, Inc.
  3. *
  4. * Use of this software is governed by the Business Source License included
  5. * in the LICENSE.TXT file in the project's root directory.
  6. *
  7. * Change Date: 2023-01-01
  8. *
  9. * On the date above, in accordance with the Business Source License, use
  10. * of this software will be governed by version 2.0 of the Apache License.
  11. */
  12. /****/
  13. #ifndef ZT_AES_HPP
  14. #define ZT_AES_HPP
  15. #include "Constants.hpp"
  16. #include "Utils.hpp"
  17. #include "SHA512.hpp"
  18. #if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
  19. #include <wmmintrin.h>
  20. #include <emmintrin.h>
  21. #include <smmintrin.h>
  22. #define ZT_AES_AESNI 1
  23. #endif
  24. #define ZT_AES_KEY_SIZE 32
  25. #define ZT_AES_BLOCK_SIZE 16
  26. namespace ZeroTier {
  27. /**
  28. * AES-256 and AES-GCM AEAD
  29. */
  30. class AES
  31. {
  32. public:
  33. /**
  34. * This will be true if your platform's type of AES acceleration is supported on this machine
  35. */
  36. static const bool HW_ACCEL;
// Construct an uninitialized cipher; init() must be called before use.
ZT_ALWAYS_INLINE AES() {}
// Construct and immediately initialize with a 256-bit key.
ZT_ALWAYS_INLINE AES(const uint8_t key[32]) { this->init(key); }
// Destructor wipes all expanded key material from memory.
ZT_ALWAYS_INLINE ~AES() { Utils::burn(&_k,sizeof(_k)); }
  40. /**
  41. * Set (or re-set) this AES256 cipher's key
  42. */
  43. ZT_ALWAYS_INLINE void init(const uint8_t key[32])
  44. {
  45. #ifdef ZT_AES_AESNI
  46. if (likely(HW_ACCEL)) {
  47. _init_aesni(key);
  48. return;
  49. }
  50. #endif
  51. _initSW(key);
  52. }
  53. /**
  54. * Encrypt a single AES block (ECB mode)
  55. *
  56. * @param in Input block
  57. * @param out Output block (can be same as input)
  58. */
  59. ZT_ALWAYS_INLINE void encrypt(const uint8_t in[16],uint8_t out[16]) const
  60. {
  61. #ifdef ZT_AES_AESNI
  62. if (likely(HW_ACCEL)) {
  63. _encrypt_aesni(in,out);
  64. return;
  65. }
  66. #endif
  67. _encryptSW(in,out);
  68. }
  69. /**
  70. * Compute GMAC-AES256 (GCM without ciphertext)
  71. *
  72. * @param iv 96-bit IV
  73. * @param in Input data
  74. * @param len Length of input
  75. * @param out 128-bit authorization tag from GMAC
  76. */
  77. ZT_ALWAYS_INLINE void gmac(const uint8_t iv[12],const void *in,const unsigned int len,uint8_t out[16]) const
  78. {
  79. #ifdef ZT_AES_AESNI
  80. if (likely(HW_ACCEL)) {
  81. _gmac_aesni(iv,(const uint8_t *)in,len,out);
  82. return;
  83. }
  84. #endif
  85. _gmacSW(iv,(const uint8_t *)in,len,out);
  86. }
/**
 * Encrypt or decrypt (they're the same) using AES256-CTR
 *
 * The counter here is a 128-bit big-endian that starts at the IV. The code only
 * increments the least significant 64 bits, making it only safe to use for a
 * maximum of 2^64-1 bytes (much larger than we ever do).
 *
 * @param iv 128-bit CTR IV
 * @param in Input plaintext or ciphertext
 * @param len Length of input
 * @param out Output plaintext or ciphertext
 */
ZT_ALWAYS_INLINE void ctr(const uint8_t iv[16],const void *in,unsigned int len,void *out) const
{
#ifdef ZT_AES_AESNI
	if (likely(HW_ACCEL)) {
		_crypt_ctr_aesni(iv,(const uint8_t *)in,len,(uint8_t *)out);
		return;
	}
#endif
	// Software fallback: encrypt the counter block, XOR the keystream into the data.
	uint64_t ctr[2],cenc[2];
	memcpy(ctr,iv,16);
	// Low (second) 64 bits of the counter kept in host byte order for cheap increment.
	uint64_t bctr = Utils::ntoh(ctr[1]);
	const uint8_t *i = (const uint8_t *)in;
	uint8_t *o = (uint8_t *)out;
	while (len >= 16) {
		_encryptSW((const uint8_t *)ctr,(uint8_t *)cenc);
		ctr[1] = Utils::hton(++bctr); // only the low 64 bits are ever incremented (see above)
#ifdef ZT_NO_TYPE_PUNNING
		// Byte-wise XOR on platforms where aliased word access is not allowed.
		for(unsigned int k=0;k<16;++k)
			*(o++) = *(i++) ^ ((uint8_t *)cenc)[k];
#else
		// Two 64-bit XORs when type punning is permitted (faster than byte-wise).
		*((uint64_t *)o) = *((const uint64_t *)i) ^ cenc[0];
		o += 8;
		i += 8;
		*((uint64_t *)o) = *((const uint64_t *)i) ^ cenc[1];
		o += 8;
		i += 8;
#endif
		len -= 16;
	}
	// Trailing partial block: generate one more keystream block, use only len bytes.
	if (len) {
		_encryptSW((const uint8_t *)ctr,(uint8_t *)cenc);
		for(unsigned int k=0;k<len;++k)
			*(o++) = *(i++) ^ ((uint8_t *)cenc)[k];
	}
}
/**
 * Perform AES-GMAC-SIV encryption
 *
 * This is an AES mode built from GMAC and AES-CTR that is similar to the
 * various SIV (synthetic IV) modes for AES and is resistant to nonce
 * re-use. It's specifically tweaked for ZeroTier's packet structure with
 * a 64-bit IV (extended to 96 bits by including packet size and other info)
 * and a 64-bit auth tag.
 *
 * The use of separate keys for MAC and encrypt is precautionary. It
 * ensures that the CTR IV (and CTR output) are always secrets regardless
 * of what an attacker might do with accumulated IVs and auth tags.
 *
 * @param k1 GMAC key
 * @param k2 GMAC auth tag masking (ECB encryption) key
 * @param k3 CTR IV masking (ECB encryption) key
 * @param k4 AES-CTR key
 * @param iv 64-bit packet IV
 * @param direction Direction byte
 * @param in Message plaintext
 * @param len Length of plaintext
 * @param out Output buffer to receive ciphertext
 * @param tag Output buffer to receive 64-bit authentication tag
 */
static ZT_ALWAYS_INLINE void gmacSivEncrypt(const AES &k1,const AES &k2,const AES &k3,const AES &k4,const uint8_t iv[8],const uint8_t direction,const void *in,const unsigned int len,void *out,uint8_t tag[8])
{
	// 16-byte alignment lets the GNUC type-punning paths below use aligned word accesses.
#ifdef __GNUC__
	uint8_t __attribute__ ((aligned (16))) miv[12];
	uint8_t __attribute__ ((aligned (16))) ctrIv[16];
#else
	uint8_t miv[12];
	uint8_t ctrIv[16];
#endif
	// Extend packet IV to 96-bit message IV using direction byte and message length
#ifndef __GNUC__
	for(unsigned int i=0;i<8;++i) miv[i] = iv[i];
#else
	*((uint64_t *)miv) = *((const uint64_t *)iv);
#endif
	miv[8] = direction;
	miv[9] = (uint8_t)(len >> 16);
	miv[10] = (uint8_t)(len >> 8);
	miv[11] = (uint8_t)len;
	// Compute AES[k2](GMAC[k1](miv,plaintext))
	k1.gmac(miv,in,len,ctrIv);
	k2.encrypt(ctrIv,ctrIv); // ECB mode encrypt step is because GMAC is not a PRF
	// Auth tag for packet is first 64 bits of AES(GMAC) (rest is discarded)
#ifndef __GNUC__
	for(unsigned int i=0;i<8;++i) tag[i] = ctrIv[i];
#else
	*((uint64_t *)tag) = *((uint64_t *)ctrIv);
#endif
	// Create synthetic CTR IV from keyed hash of tag and message IV: bytes 0-7 are
	// the tag (already in ctrIv), bytes 8-15 mix in the message IV.
#ifndef __GNUC__
	for(unsigned int i=0;i<4;++i) ctrIv[i+8] = miv[i];
	for(unsigned int i=4;i<8;++i) ctrIv[i+8] = miv[i] ^ miv[i+4];
#else
	((uint32_t *)ctrIv)[2] = ((const uint32_t *)miv)[0];
	((uint32_t *)ctrIv)[3] = ((const uint32_t *)miv)[1] ^ ((const uint32_t *)miv)[2];
#endif
	k3.encrypt(ctrIv,ctrIv); // ECB-mask the synthetic IV so the actual CTR input stays secret
	// Encrypt with AES[k4]-CTR
	k4.ctr(ctrIv,in,len,out);
}
  198. /**
  199. * Decrypt a message encrypted with AES-GMAC-SIV and check its authenticity
  200. *
  201. * @param k1 GMAC key
  202. * @param k2 GMAC auth tag masking (ECB encryption) key
  203. * @param k3 CTR IV masking (ECB encryption) key
  204. * @param k4 AES-CTR key
  205. * @param iv 64-bit message IV
  206. * @param direction Direction byte
  207. * @param in Message ciphertext
  208. * @param len Length of ciphertext
  209. * @param out Output buffer to receive plaintext
  210. * @param tag Authentication tag supplied with message
  211. * @return True if authentication tags match and message appears authentic
  212. */
  213. static ZT_ALWAYS_INLINE bool gmacSivDecrypt(const AES &k1,const AES &k2,const AES &k3,const AES &k4,const uint8_t iv[8],const uint8_t direction,const void *in,const unsigned int len,void *out,const uint8_t tag[8])
  214. {
  215. #ifdef __GNUC__
  216. uint8_t __attribute__ ((aligned (16))) miv[12];
  217. uint8_t __attribute__ ((aligned (16))) ctrIv[16];
  218. uint8_t __attribute__ ((aligned (16))) gmacOut[16];
  219. #else
  220. uint8_t miv[12];
  221. uint8_t ctrIv[16];
  222. uint8_t gmacOut[16];
  223. #endif
  224. // Extend packet IV to 96-bit message IV using direction byte and message length
  225. #ifndef __GNUC__
  226. for(unsigned int i=0;i<8;++i) miv[i] = iv[i];
  227. #else
  228. *((uint64_t *)miv) = *((const uint64_t *)iv);
  229. #endif
  230. miv[8] = direction;
  231. miv[9] = (uint8_t)(len >> 16);
  232. miv[10] = (uint8_t)(len >> 8);
  233. miv[11] = (uint8_t)len;
  234. // Recover synthetic and secret CTR IV from auth tag and packet IV
  235. #ifndef __GNUC__
  236. for(unsigned int i=0;i<8;++i) ctrIv[i] = tag[i];
  237. for(unsigned int i=0;i<4;++i) ctrIv[i+8] = miv[i];
  238. for(unsigned int i=4;i<8;++i) ctrIv[i+8] = miv[i] ^ miv[i+4];
  239. #else
  240. *((uint64_t *)ctrIv) = *((const uint64_t *)tag);
  241. ((uint32_t *)ctrIv)[2] = ((const uint32_t *)miv)[0];
  242. ((uint32_t *)ctrIv)[3] = ((const uint32_t *)miv)[1] ^ ((const uint32_t *)miv)[2];
  243. #endif
  244. k3.encrypt(ctrIv,ctrIv);
  245. // Decrypt with AES[k4]-CTR
  246. k4.ctr(ctrIv,in,len,out);
  247. // Compute AES[k2](GMAC[k1](iv,plaintext))
  248. k1.gmac(miv,out,len,gmacOut);
  249. k2.encrypt(gmacOut,gmacOut);
  250. // Check that packet's auth tag matches first 64 bits of AES(GMAC)
  251. #ifndef __GNUC__
  252. return Utils::secureEq(gmacOut,tag,8);
  253. #else
  254. return (*((const uint64_t *)gmacOut) == *((const uint64_t *)tag));
  255. #endif
  256. }
  257. /**
  258. * Use KBKDF with HMAC-SHA-384 to derive four sub-keys for AES-GMAC-SIV from a single master key
  259. *
  260. * See section 5.1 at https://nvlpubs.nist.gov/nistpubs/Legacy/SP/nistspecialpublication800-108.pdf
  261. *
  262. * @param masterKey Master 256-bit key
  263. * @param k1 GMAC key
  264. * @param k2 GMAC auth tag masking (ECB encryption) key
  265. * @param k3 CTR IV masking (ECB encryption) key
  266. * @param k4 AES-CTR key
  267. */
  268. static ZT_ALWAYS_INLINE void initGmacCtrKeys(const uint8_t masterKey[32],AES &k1,AES &k2,AES &k3,AES &k4)
  269. {
  270. uint8_t k[32];
  271. KBKDFHMACSHA384(masterKey,ZT_PROTO_KBKDF_LABEL_KEY_USE_AES_GMAC_SIV_K1,0,0,k);
  272. k1.init(k);
  273. KBKDFHMACSHA384(masterKey,ZT_PROTO_KBKDF_LABEL_KEY_USE_AES_GMAC_SIV_K2,0,0,k);
  274. k2.init(k);
  275. KBKDFHMACSHA384(masterKey,ZT_PROTO_KBKDF_LABEL_KEY_USE_AES_GMAC_SIV_K3,0,0,k);
  276. k3.init(k);
  277. KBKDFHMACSHA384(masterKey,ZT_PROTO_KBKDF_LABEL_KEY_USE_AES_GMAC_SIV_K4,0,0,k);
  278. k4.init(k);
  279. }
  280. private:
// Lookup tables for the software implementation (defined out of line);
// Te0..Te3 and rcon are presumably the standard AES encryption T-tables
// and round constants -- see the accompanying .cpp for their contents.
static const uint32_t Te0[256];
static const uint32_t Te1[256];
static const uint32_t Te2[256];
static const uint32_t Te3[256];
static const uint32_t rcon[10];
// Portable software implementations (defined out of line), used when
// hardware acceleration is unavailable.
void _initSW(const uint8_t key[32]);
void _encryptSW(const uint8_t in[16],uint8_t out[16]) const;
void _gmacSW(const uint8_t iv[12],const uint8_t *in,unsigned int len,uint8_t out[16]) const;
  289. /**************************************************************************/
// Expanded key state, one variant per implementation; only the variant for
// the active implementation is initialized. Wiped by the destructor.
union {
#ifdef ZT_AES_ARMNEON
	struct {
		uint32x4_t k[15]; // 15 AES-256 round keys
	} neon;
#endif
#ifdef ZT_AES_AESNI
	struct {
		__m128i k[15];         // 15 AES-256 round keys
		__m128i h,hh,hhh,hhhh; // GHASH key H and its powers H^2, H^3, H^4 (set up in _init_aesni for 4-way GHASH)
	} ni;
#endif
	struct {
		uint64_t h[2];   // used by the software GMAC path -- presumably the GHASH key; see _gmacSW in the .cpp
		uint32_t ek[60]; // expanded key words for the software cipher
	} sw;
} _k;
  307. /**************************************************************************/
  308. #ifdef ZT_AES_ARMNEON /******************************************************/
  309. static inline void _aes_256_expAssist_armneon(uint32x4_t prev1,uint32x4_t prev2,uint32_t rcon,uint32x4_t *e1,uint32x4_t *e2)
  310. {
  311. uint32_t round1[4], round2[4], prv1[4], prv2[4];
  312. vst1q_u32(prv1, prev1);
  313. vst1q_u32(prv2, prev2);
  314. round1[0] = sub_word(rot_word(prv2[3])) ^ rcon ^ prv1[0];
  315. round1[1] = sub_word(rot_word(round1[0])) ^ rcon ^ prv1[1];
  316. round1[2] = sub_word(rot_word(round1[1])) ^ rcon ^ prv1[2];
  317. round1[3] = sub_word(rot_word(round1[2])) ^ rcon ^ prv1[3];
  318. round2[0] = sub_word(rot_word(round1[3])) ^ rcon ^ prv2[0];
  319. round2[1] = sub_word(rot_word(round2[0])) ^ rcon ^ prv2[1];
  320. round2[2] = sub_word(rot_word(round2[1])) ^ rcon ^ prv2[2];
  321. round2[3] = sub_word(rot_word(round2[2])) ^ rcon ^ prv2[3];
  322. *e1 = vld1q_u3(round1);
  323. *e2 = vld1q_u3(round2);
  324. //uint32x4_t expansion[2] = {vld1q_u3(round1), vld1q_u3(round2)};
  325. //return expansion;
  326. }
// Expand a 256-bit key into the 15 NEON round keys.
//
// NOTE(review): this code appears unfinished and has several problems to
// confirm/fix before ZT_AES_ARMNEON is ever enabled:
//  - encKey is a single uint8x16_t (16 bytes) but a 256-bit key needs 32
//    bytes, and vld1q_u32(encKey) applies a load intrinsic to a non-pointer.
//  - (*schedule)[n] indexes a LANE of the first vector, not element n of the
//    k[15] array; schedule[n] was presumably intended.
//  - every expansion step passes rcon 0x01; FIPS-197 requires the progression
//    0x01,0x02,0x04,0x08,0x10,0x20,0x40 (the commented-out version below
//    shows the intended sequence).
inline void _init_armneon(uint8x16_t encKey)
{
	uint32x4_t *schedule = _k.neon.k;
	uint32x4_t e1,e2;
	(*schedule)[0] = vld1q_u32(encKey);
	(*schedule)[1] = vld1q_u32(encKey + 16);
	_aes_256_expAssist_armneon((*schedule)[0],(*schedule)[1],0x01,&e1,&e2);
	(*schedule)[2] = e1; (*schedule)[3] = e2;
	_aes_256_expAssist_armneon((*schedule)[2],(*schedule)[3],0x01,&e1,&e2);
	(*schedule)[4] = e1; (*schedule)[5] = e2;
	_aes_256_expAssist_armneon((*schedule)[4],(*schedule)[5],0x01,&e1,&e2);
	(*schedule)[6] = e1; (*schedule)[7] = e2;
	_aes_256_expAssist_armneon((*schedule)[6],(*schedule)[7],0x01,&e1,&e2);
	(*schedule)[8] = e1; (*schedule)[9] = e2;
	_aes_256_expAssist_armneon((*schedule)[8],(*schedule)[9],0x01,&e1,&e2);
	(*schedule)[10] = e1; (*schedule)[11] = e2;
	_aes_256_expAssist_armneon((*schedule)[10],(*schedule)[11],0x01,&e1,&e2);
	(*schedule)[12] = e1; (*schedule)[13] = e2;
	_aes_256_expAssist_armneon((*schedule)[12],(*schedule)[13],0x01,&e1,&e2);
	(*schedule)[14] = e1;
	/*
	doubleRound = _aes_256_expAssist_armneon((*schedule)[0], (*schedule)[1], 0x01);
	(*schedule)[2] = doubleRound[0];
	(*schedule)[3] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[2], (*schedule)[3], 0x02);
	(*schedule)[4] = doubleRound[0];
	(*schedule)[5] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[4], (*schedule)[5], 0x04);
	(*schedule)[6] = doubleRound[0];
	(*schedule)[7] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[6], (*schedule)[7], 0x08);
	(*schedule)[8] = doubleRound[0];
	(*schedule)[9] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[8], (*schedule)[9], 0x10);
	(*schedule)[10] = doubleRound[0];
	(*schedule)[11] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[10], (*schedule)[11], 0x20);
	(*schedule)[12] = doubleRound[0];
	(*schedule)[13] = doubleRound[1];
	doubleRound = _aes_256_expAssist_armneon((*schedule)[12], (*schedule)[13], 0x40);
	(*schedule)[14] = doubleRound[0];
	*/
}
  370. inline void _encrypt_armneon(uint8x16_t *data) const
  371. {
  372. *data = veorq_u8(*data, _k.neon.k[0]);
  373. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[1]));
  374. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[2]));
  375. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[3]));
  376. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[4]));
  377. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[5]));
  378. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[6]));
  379. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[7]));
  380. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[8]));
  381. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[9]));
  382. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[10]));
  383. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[11]));
  384. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[12]));
  385. *data = vaesmcq_u8(vaeseq_u8(*data, (uint8x16_t)_k.neon.k[13]));
  386. *data = vaeseq_u8(*data, _k.neon.k[14]);
  387. }
  388. #endif /*********************************************************************/
  389. #ifdef ZT_AES_AESNI /********************************************************/
// AES-256 key expansion helper: derive the next even-numbered round key from
// the previous round key 'a' and the keygenassist output 'b'.
static ZT_ALWAYS_INLINE __m128i _init256_1_aesni(__m128i a,__m128i b)
{
	__m128i x,y;
	b = _mm_shuffle_epi32(b,0xff); // broadcast the relevant keygenassist word to all lanes
	// Cascade-XOR the previous key's words (w[i] ^= w[i-1] across the vector).
	y = _mm_slli_si128(a,0x04);
	x = _mm_xor_si128(a,y);
	y = _mm_slli_si128(y,0x04);
	x = _mm_xor_si128(x,y);
	y = _mm_slli_si128(y,0x04);
	x = _mm_xor_si128(x,y);
	x = _mm_xor_si128(x,b);
	return x;
}
// AES-256 key expansion helper: derive the next odd-numbered round key (the
// SubWord-only step of the 256-bit schedule; rcon is 0 here).
static ZT_ALWAYS_INLINE __m128i _init256_2_aesni(__m128i a,__m128i b)
{
	__m128i x,y,z;
	y = _mm_aeskeygenassist_si128(a,0x00);
	z = _mm_shuffle_epi32(y,0xaa); // broadcast the SubWord result
	// Cascade-XOR the previous key's words as in _init256_1_aesni.
	y = _mm_slli_si128(b,0x04);
	x = _mm_xor_si128(b,y);
	y = _mm_slli_si128(y,0x04);
	x = _mm_xor_si128(x,y);
	y = _mm_slli_si128(y,0x04);
	x = _mm_xor_si128(x,y);
	x = _mm_xor_si128(x,z);
	return x;
}
// Expand the 256-bit key into the 15 AES-NI round keys, then precompute the
// GHASH key H = AES(key, 0^128) and its powers H^2, H^3, H^4 for 4-way GHASH.
ZT_ALWAYS_INLINE void _init_aesni(const uint8_t key[32])
{
	__m128i t1,t2;
	// AES-256 key schedule: alternate keygenassist-driven and SubWord-only steps.
	_k.ni.k[0] = t1 = _mm_loadu_si128((const __m128i *)key);
	_k.ni.k[1] = t2 = _mm_loadu_si128((const __m128i *)(key+16));
	_k.ni.k[2] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x01));
	_k.ni.k[3] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[4] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x02));
	_k.ni.k[5] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[6] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x04));
	_k.ni.k[7] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[8] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x08));
	_k.ni.k[9] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[10] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x10));
	_k.ni.k[11] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[12] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x20));
	_k.ni.k[13] = t2 = _init256_2_aesni(t1,t2);
	_k.ni.k[14] = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x40));
	// Encrypt the all-zero block with the full schedule to obtain H.
	__m128i h = _mm_xor_si128(_mm_setzero_si128(),_k.ni.k[0]);
	h = _mm_aesenc_si128(h,_k.ni.k[1]);
	h = _mm_aesenc_si128(h,_k.ni.k[2]);
	h = _mm_aesenc_si128(h,_k.ni.k[3]);
	h = _mm_aesenc_si128(h,_k.ni.k[4]);
	h = _mm_aesenc_si128(h,_k.ni.k[5]);
	h = _mm_aesenc_si128(h,_k.ni.k[6]);
	h = _mm_aesenc_si128(h,_k.ni.k[7]);
	h = _mm_aesenc_si128(h,_k.ni.k[8]);
	h = _mm_aesenc_si128(h,_k.ni.k[9]);
	h = _mm_aesenc_si128(h,_k.ni.k[10]);
	h = _mm_aesenc_si128(h,_k.ni.k[11]);
	h = _mm_aesenc_si128(h,_k.ni.k[12]);
	h = _mm_aesenc_si128(h,_k.ni.k[13]);
	h = _mm_aesenclast_si128(h,_k.ni.k[14]);
	// Byte-swap shuffle used throughout GHASH (GCM is big-endian bit-reflected).
	const __m128i shuf = _mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15);
	__m128i hswap = _mm_shuffle_epi8(h,shuf);
	// Successive GF(2^128) multiplications by H yield H^2, H^3, H^4.
	__m128i hh = _mult_block_aesni(shuf,hswap,h);
	__m128i hhh = _mult_block_aesni(shuf,hswap,hh);
	__m128i hhhh = _mult_block_aesni(shuf,hswap,hhh);
	_k.ni.h = hswap;
	_k.ni.hh = _mm_shuffle_epi8(hh,shuf);
	_k.ni.hhh = _mm_shuffle_epi8(hhh,shuf);
	_k.ni.hhhh = _mm_shuffle_epi8(hhhh,shuf);
}
  460. ZT_ALWAYS_INLINE void _encrypt_aesni(const void *in,void *out) const
  461. {
  462. __m128i tmp;
  463. tmp = _mm_loadu_si128((const __m128i *)in);
  464. tmp = _mm_xor_si128(tmp,_k.ni.k[0]);
  465. tmp = _mm_aesenc_si128(tmp,_k.ni.k[1]);
  466. tmp = _mm_aesenc_si128(tmp,_k.ni.k[2]);
  467. tmp = _mm_aesenc_si128(tmp,_k.ni.k[3]);
  468. tmp = _mm_aesenc_si128(tmp,_k.ni.k[4]);
  469. tmp = _mm_aesenc_si128(tmp,_k.ni.k[5]);
  470. tmp = _mm_aesenc_si128(tmp,_k.ni.k[6]);
  471. tmp = _mm_aesenc_si128(tmp,_k.ni.k[7]);
  472. tmp = _mm_aesenc_si128(tmp,_k.ni.k[8]);
  473. tmp = _mm_aesenc_si128(tmp,_k.ni.k[9]);
  474. tmp = _mm_aesenc_si128(tmp,_k.ni.k[10]);
  475. tmp = _mm_aesenc_si128(tmp,_k.ni.k[11]);
  476. tmp = _mm_aesenc_si128(tmp,_k.ni.k[12]);
  477. tmp = _mm_aesenc_si128(tmp,_k.ni.k[13]);
  478. _mm_storeu_si128((__m128i *)out,_mm_aesenclast_si128(tmp,_k.ni.k[14]));
  479. }
// AES-256 CTR encrypt/decrypt using AES-NI, pipelined 8 blocks at a time to
// keep the AES units busy. Only the low 64 bits of the counter increment
// (matching the contract documented on ctr()).
ZT_ALWAYS_INLINE void _crypt_ctr_aesni(const uint8_t iv[16],const uint8_t *in,unsigned int len,uint8_t *out) const
{
	// Upper 64 bits of the IV are static; the low 64 bits are the big-endian counter.
	const __m64 iv0 = (__m64)(*((const uint64_t *)iv));
	uint64_t ctr = Utils::ntoh(*((const uint64_t *)(iv+8)));
	// Hoist round keys into locals so the compiler can keep them in registers.
	const __m128i k0 = _k.ni.k[0];
	const __m128i k1 = _k.ni.k[1];
	const __m128i k2 = _k.ni.k[2];
	const __m128i k3 = _k.ni.k[3];
	const __m128i k4 = _k.ni.k[4];
	const __m128i k5 = _k.ni.k[5];
	const __m128i k6 = _k.ni.k[6];
	const __m128i k7 = _k.ni.k[7];
	const __m128i k8 = _k.ni.k[8];
	const __m128i k9 = _k.ni.k[9];
	const __m128i k10 = _k.ni.k[10];
	const __m128i k11 = _k.ni.k[11];
	const __m128i k12 = _k.ni.k[12];
	const __m128i k13 = _k.ni.k[13];
	const __m128i k14 = _k.ni.k[14];
// One AES round applied to all eight in-flight counter blocks.
#define ZT_AES_CTR_AESNI_ROUND(k) \
	c0 = _mm_aesenc_si128(c0,k); \
	c1 = _mm_aesenc_si128(c1,k); \
	c2 = _mm_aesenc_si128(c2,k); \
	c3 = _mm_aesenc_si128(c3,k); \
	c4 = _mm_aesenc_si128(c4,k); \
	c5 = _mm_aesenc_si128(c5,k); \
	c6 = _mm_aesenc_si128(c6,k); \
	c7 = _mm_aesenc_si128(c7,k)
	// Bulk path: 128 bytes (8 blocks) per iteration.
	while (len >= 128) {
		// Build 8 consecutive counter blocks and do the initial AddRoundKey.
		__m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr),iv0),k0);
		__m128i c1 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+1ULL)),iv0),k0);
		__m128i c2 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+2ULL)),iv0),k0);
		__m128i c3 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+3ULL)),iv0),k0);
		__m128i c4 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+4ULL)),iv0),k0);
		__m128i c5 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+5ULL)),iv0),k0);
		__m128i c6 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+6ULL)),iv0),k0);
		__m128i c7 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton((uint64_t)(ctr+7ULL)),iv0),k0);
		ctr += 8;
		ZT_AES_CTR_AESNI_ROUND(k1);
		ZT_AES_CTR_AESNI_ROUND(k2);
		ZT_AES_CTR_AESNI_ROUND(k3);
		ZT_AES_CTR_AESNI_ROUND(k4);
		ZT_AES_CTR_AESNI_ROUND(k5);
		ZT_AES_CTR_AESNI_ROUND(k6);
		ZT_AES_CTR_AESNI_ROUND(k7);
		ZT_AES_CTR_AESNI_ROUND(k8);
		ZT_AES_CTR_AESNI_ROUND(k9);
		ZT_AES_CTR_AESNI_ROUND(k10);
		ZT_AES_CTR_AESNI_ROUND(k11);
		ZT_AES_CTR_AESNI_ROUND(k12);
		ZT_AES_CTR_AESNI_ROUND(k13);
		// Final round fused with the keystream XOR into the data.
		_mm_storeu_si128((__m128i *)out,_mm_xor_si128(_mm_loadu_si128((const __m128i *)in),_mm_aesenclast_si128(c0,k14)));
		_mm_storeu_si128((__m128i *)(out + 16),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 16)),_mm_aesenclast_si128(c1,k14)));
		_mm_storeu_si128((__m128i *)(out + 32),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 32)),_mm_aesenclast_si128(c2,k14)));
		_mm_storeu_si128((__m128i *)(out + 48),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 48)),_mm_aesenclast_si128(c3,k14)));
		_mm_storeu_si128((__m128i *)(out + 64),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 64)),_mm_aesenclast_si128(c4,k14)));
		_mm_storeu_si128((__m128i *)(out + 80),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 80)),_mm_aesenclast_si128(c5,k14)));
		_mm_storeu_si128((__m128i *)(out + 96),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 96)),_mm_aesenclast_si128(c6,k14)));
		_mm_storeu_si128((__m128i *)(out + 112),_mm_xor_si128(_mm_loadu_si128((const __m128i *)(in + 112)),_mm_aesenclast_si128(c7,k14)));
		in += 128;
		out += 128;
		len -= 128;
	}
#undef ZT_AES_CTR_AESNI_ROUND
	// Remaining whole blocks, one at a time.
	while (len >= 16) {
		__m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr++),(__m64)iv0),k0);
		c0 = _mm_aesenc_si128(c0,k1);
		c0 = _mm_aesenc_si128(c0,k2);
		c0 = _mm_aesenc_si128(c0,k3);
		c0 = _mm_aesenc_si128(c0,k4);
		c0 = _mm_aesenc_si128(c0,k5);
		c0 = _mm_aesenc_si128(c0,k6);
		c0 = _mm_aesenc_si128(c0,k7);
		c0 = _mm_aesenc_si128(c0,k8);
		c0 = _mm_aesenc_si128(c0,k9);
		c0 = _mm_aesenc_si128(c0,k10);
		c0 = _mm_aesenc_si128(c0,k11);
		c0 = _mm_aesenc_si128(c0,k12);
		c0 = _mm_aesenc_si128(c0,k13);
		_mm_storeu_si128((__m128i *)out,_mm_xor_si128(_mm_loadu_si128((const __m128i *)in),_mm_aesenclast_si128(c0,k14)));
		in += 16;
		out += 16;
		len -= 16;
	}
	// Trailing partial block: generate one keystream block, XOR only len bytes.
	if (len) {
		__m128i c0 = _mm_xor_si128(_mm_set_epi64((__m64)Utils::hton(ctr++),(__m64)iv0),k0);
		c0 = _mm_aesenc_si128(c0,k1);
		c0 = _mm_aesenc_si128(c0,k2);
		c0 = _mm_aesenc_si128(c0,k3);
		c0 = _mm_aesenc_si128(c0,k4);
		c0 = _mm_aesenc_si128(c0,k5);
		c0 = _mm_aesenc_si128(c0,k6);
		c0 = _mm_aesenc_si128(c0,k7);
		c0 = _mm_aesenc_si128(c0,k8);
		c0 = _mm_aesenc_si128(c0,k9);
		c0 = _mm_aesenc_si128(c0,k10);
		c0 = _mm_aesenc_si128(c0,k11);
		c0 = _mm_aesenc_si128(c0,k12);
		c0 = _mm_aesenc_si128(c0,k13);
		c0 = _mm_aesenclast_si128(c0,k14);
		for(unsigned int i=0;i<len;++i)
			out[i] = in[i] ^ ((const uint8_t *)&c0)[i];
	}
}
// GHASH field multiply: y * h in GF(2^128) using carry-less multiplication.
// h must already be byte-swapped into GHASH order; y is swapped on entry and
// the result swapped back on return (via shuf).
static ZT_ALWAYS_INLINE __m128i _mult_block_aesni(__m128i shuf,__m128i h,__m128i y)
{
	y = _mm_shuffle_epi8(y,shuf);
	// 128x128 -> 256-bit carry-less multiply (four 64x64 partial products).
	__m128i t1 = _mm_clmulepi64_si128(h,y,0x00);
	__m128i t2 = _mm_clmulepi64_si128(h,y,0x01);
	__m128i t3 = _mm_clmulepi64_si128(h,y,0x10);
	__m128i t4 = _mm_clmulepi64_si128(h,y,0x11);
	// Fold the two middle products into the low (t1) and high (t4) halves.
	t2 = _mm_xor_si128(t2,t3);
	t3 = _mm_slli_si128(t2,8);
	t2 = _mm_srli_si128(t2,8);
	t1 = _mm_xor_si128(t1,t3);
	t4 = _mm_xor_si128(t4,t2);
	// Shift the 256-bit product left one bit (GCM's bit-reflected convention).
	__m128i t5 = _mm_srli_epi32(t1,31);
	t1 = _mm_slli_epi32(t1,1);
	__m128i t6 = _mm_srli_epi32(t4,31);
	t4 = _mm_slli_epi32(t4,1);
	t3 = _mm_srli_si128(t5,12);
	t6 = _mm_slli_si128(t6,4);
	t5 = _mm_slli_si128(t5,4);
	t1 = _mm_or_si128(t1,t5);
	t4 = _mm_or_si128(t4,t6);
	t4 = _mm_or_si128(t4,t3);
	// Reduce modulo the GHASH polynomial x^128 + x^7 + x^2 + x + 1.
	t5 = _mm_slli_epi32(t1,31);
	t6 = _mm_slli_epi32(t1,30);
	t3 = _mm_slli_epi32(t1,25);
	t5 = _mm_xor_si128(t5,t6);
	t5 = _mm_xor_si128(t5,t3);
	t6 = _mm_srli_si128(t5,4);
	t4 = _mm_xor_si128(t4,t6);
	t5 = _mm_slli_si128(t5,12);
	t1 = _mm_xor_si128(t1,t5);
	t4 = _mm_xor_si128(t4,t1);
	t5 = _mm_srli_epi32(t1,1);
	t2 = _mm_srli_epi32(t1,2);
	t3 = _mm_srli_epi32(t1,7);
	t4 = _mm_xor_si128(t4,t2);
	t4 = _mm_xor_si128(t4,t3);
	t4 = _mm_xor_si128(t4,t5);
	return _mm_shuffle_epi8(t4,shuf);
}
// One GHASH step: XOR block x into accumulator y, then multiply by H.
static ZT_ALWAYS_INLINE __m128i _ghash_aesni(__m128i shuf,__m128i h,__m128i y,__m128i x) { return _mult_block_aesni(shuf,h,_mm_xor_si128(y,x)); }
// GMAC (GCM with no ciphertext) using CLMUL/AES-NI: GHASH the input 4 blocks
// at a time using the precomputed powers of H, absorb the length block, then
// XOR with the encrypted IV counter block to form the tag.
ZT_ALWAYS_INLINE void _gmac_aesni(const uint8_t iv[12],const uint8_t *in,const unsigned int len,uint8_t out[16]) const
{
	const __m128i *ab = (const __m128i *)in;
	unsigned int blocks = len / 16;
	unsigned int pblocks = blocks - (blocks % 4); // blocks handled by the 4-way loop
	unsigned int rem = len % 16;
	// H^4..H^1: the 4-way loop multiplies the oldest in-flight block by the
	// highest power so all four land at the correct exponent.
	const __m128i h1 = _k.ni.hhhh;
	const __m128i h2 = _k.ni.hhh;
	const __m128i h3 = _k.ni.hh;
	const __m128i h4 = _k.ni.h;
	const __m128i shuf = _mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); // byte swap
	__m128i y = _mm_setzero_si128(); // GHASH accumulator
	unsigned int i = 0;
	// 4-way parallel GHASH: four carry-less multiplies combined and reduced once.
	for (;i<pblocks;i+=4) {
		__m128i d1 = _mm_shuffle_epi8(_mm_xor_si128(y,_mm_loadu_si128(ab + i + 0)),shuf);
		__m128i d2 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 1),shuf);
		__m128i d3 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 2),shuf);
		__m128i d4 = _mm_shuffle_epi8(_mm_loadu_si128(ab + i + 3),shuf);
		// Low halves of the four partial products.
		__m128i t0 = _mm_clmulepi64_si128(h1,d1,0x00);
		__m128i t1 = _mm_clmulepi64_si128(h2,d2,0x00);
		__m128i t2 = _mm_clmulepi64_si128(h3,d3,0x00);
		__m128i t3 = _mm_clmulepi64_si128(h4,d4,0x00);
		__m128i t8 = _mm_xor_si128(t0,t1);
		t8 = _mm_xor_si128(t8,t2);
		t8 = _mm_xor_si128(t8,t3);
		// High halves of the four partial products.
		__m128i t4 = _mm_clmulepi64_si128(h1,d1,0x11);
		__m128i t5 = _mm_clmulepi64_si128(h2,d2,0x11);
		__m128i t6 = _mm_clmulepi64_si128(h3,d3,0x11);
		__m128i t7 = _mm_clmulepi64_si128(h4,d4,0x11);
		__m128i t9 = _mm_xor_si128(t4,t5);
		t9 = _mm_xor_si128(t9,t6);
		t9 = _mm_xor_si128(t9,t7);
		// Middle terms via (hi^lo) clmuls (Karatsuba-style cross products).
		t0 = _mm_shuffle_epi32(h1,78);
		t4 = _mm_shuffle_epi32(d1,78);
		t0 = _mm_xor_si128(t0,h1);
		t4 = _mm_xor_si128(t4,d1);
		t1 = _mm_shuffle_epi32(h2,78);
		t5 = _mm_shuffle_epi32(d2,78);
		t1 = _mm_xor_si128(t1,h2);
		t5 = _mm_xor_si128(t5,d2);
		t2 = _mm_shuffle_epi32(h3,78);
		t6 = _mm_shuffle_epi32(d3,78);
		t2 = _mm_xor_si128(t2,h3);
		t6 = _mm_xor_si128(t6,d3);
		t3 = _mm_shuffle_epi32(h4,78);
		t7 = _mm_shuffle_epi32(d4,78);
		t3 = _mm_xor_si128(t3,h4);
		t7 = _mm_xor_si128(t7,d4);
		t0 = _mm_clmulepi64_si128(t0,t4,0x00);
		t1 = _mm_clmulepi64_si128(t1,t5,0x00);
		t2 = _mm_clmulepi64_si128(t2,t6,0x00);
		t3 = _mm_clmulepi64_si128(t3,t7,0x00);
		t0 = _mm_xor_si128(t0,t8);
		t0 = _mm_xor_si128(t0,t9);
		t0 = _mm_xor_si128(t1,t0);
		t0 = _mm_xor_si128(t2,t0);
		t0 = _mm_xor_si128(t3,t0);
		// Assemble the 256-bit sum-of-products.
		t4 = _mm_slli_si128(t0,8);
		t0 = _mm_srli_si128(t0,8);
		t3 = _mm_xor_si128(t4,t8);
		t6 = _mm_xor_si128(t0,t9);
		// Shift left one bit (bit-reflected convention), as in _mult_block_aesni.
		t7 = _mm_srli_epi32(t3,31);
		t8 = _mm_srli_epi32(t6,31);
		t3 = _mm_slli_epi32(t3,1);
		t6 = _mm_slli_epi32(t6,1);
		t9 = _mm_srli_si128(t7,12);
		t8 = _mm_slli_si128(t8,4);
		t7 = _mm_slli_si128(t7,4);
		t3 = _mm_or_si128(t3,t7);
		t6 = _mm_or_si128(t6,t8);
		t6 = _mm_or_si128(t6,t9);
		// Reduce modulo the GHASH polynomial.
		t7 = _mm_slli_epi32(t3,31);
		t8 = _mm_slli_epi32(t3,30);
		t9 = _mm_slli_epi32(t3,25);
		t7 = _mm_xor_si128(t7,t8);
		t7 = _mm_xor_si128(t7,t9);
		t8 = _mm_srli_si128(t7,4);
		t7 = _mm_slli_si128(t7,12);
		t3 = _mm_xor_si128(t3,t7);
		t2 = _mm_srli_epi32(t3,1);
		t4 = _mm_srli_epi32(t3,2);
		t5 = _mm_srli_epi32(t3,7);
		t2 = _mm_xor_si128(t2,t4);
		t2 = _mm_xor_si128(t2,t5);
		t2 = _mm_xor_si128(t2,t8);
		t3 = _mm_xor_si128(t3,t2);
		t6 = _mm_xor_si128(t6,t3);
		y = _mm_shuffle_epi8(t6,shuf);
	}
	// Remaining whole blocks, one at a time with H^1.
	for (;i<blocks;++i)
		y = _ghash_aesni(shuf,h4,y,_mm_loadu_si128(ab + i));
	// Trailing partial block is zero-padded per GCM.
	if (rem) {
		__m128i last = _mm_setzero_si128();
		memcpy(&last,ab + blocks,rem);
		y = _ghash_aesni(shuf,h4,y,last);
	}
	// Absorb the GCM length block (bit length of the input).
	y = _ghash_aesni(shuf,h4,y,_mm_set_epi64((__m64)0LL,(__m64)Utils::hton((uint64_t)len * (uint64_t)8)));
	// Encrypt J0 = IV || counter 1 and XOR with the GHASH result to form the tag.
	__m128i t = _mm_xor_si128(_mm_set_epi32(0x01000000,(int)*((const uint32_t *)(iv+8)),(int)*((const uint32_t *)(iv+4)),(int)*((const uint32_t *)(iv))),_k.ni.k[0]);
	t = _mm_aesenc_si128(t,_k.ni.k[1]);
	t = _mm_aesenc_si128(t,_k.ni.k[2]);
	t = _mm_aesenc_si128(t,_k.ni.k[3]);
	t = _mm_aesenc_si128(t,_k.ni.k[4]);
	t = _mm_aesenc_si128(t,_k.ni.k[5]);
	t = _mm_aesenc_si128(t,_k.ni.k[6]);
	t = _mm_aesenc_si128(t,_k.ni.k[7]);
	t = _mm_aesenc_si128(t,_k.ni.k[8]);
	t = _mm_aesenc_si128(t,_k.ni.k[9]);
	t = _mm_aesenc_si128(t,_k.ni.k[10]);
	t = _mm_aesenc_si128(t,_k.ni.k[11]);
	t = _mm_aesenc_si128(t,_k.ni.k[12]);
	t = _mm_aesenc_si128(t,_k.ni.k[13]);
	t = _mm_aesenclast_si128(t,_k.ni.k[14]);
	_mm_storeu_si128((__m128i *)out,_mm_xor_si128(y,t));
}
  739. #endif /* ZT_AES_AESNI ******************************************************/
  740. };
  741. } // namespace ZeroTier
  742. #endif