AES.hpp

/*
 * ZeroTier One - Network Virtualization Everywhere
 * Copyright (C) 2011-2019 ZeroTier, Inc. https://www.zerotier.com/
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * --
 *
 * You can be released from the requirements of the license by purchasing
 * a commercial license. Buying such a license is mandatory as soon as you
 * develop commercial closed-source software that incorporates or links
 * directly against ZeroTier software without disclosing the source code
 * of your own application.
 */
#ifndef ZT_AES_HPP
#define ZT_AES_HPP

#include <cstdlib>   // abort()
#include <cstring>   // memcpy(), memset()

#include "Constants.hpp"
#include "Utils.hpp"

#if (defined(__amd64) || defined(__amd64__) || defined(__x86_64) || defined(__x86_64__) || defined(__AMD64) || defined(__AMD64__) || defined(_M_X64))
#include <wmmintrin.h>
#include <emmintrin.h>
#include <smmintrin.h>
#define ZT_AES_AESNI 1
#endif

namespace ZeroTier {
/**
 * AES-256 and GCM AEAD
 *
 * Only the 256-bit key size is supported; 128-bit and 192-bit keys are not.
 * Likewise only the block encrypt operation is implemented, since GCM needs
 * only the forward cipher for both encryption and decryption. The
 * hardware-accelerated code paths are inlined for maximum performance.
 */
class AES
{
public:
	/**
	 * True if the hardware AES acceleration compiled into this binary is available on this machine
	 */
	static const bool HW_ACCEL;
	inline AES() {}
	inline AES(const uint8_t key[32]) { this->init(key); }

	inline ~AES()
	{
		Utils::burn(&_k,sizeof(_k));
	}

	inline void init(const uint8_t key[32])
	{
#ifdef ZT_AES_AESNI
		if (HW_ACCEL) {
			_init_aesni(key);
			return;
		}
#endif
		_initSW(key);
	}
	inline void encrypt(const uint8_t in[16],uint8_t out[16]) const
	{
#ifdef ZT_AES_AESNI
		if (HW_ACCEL) {
			_encrypt_aesni(in,out);
			return;
		}
#endif
		_encryptSW(in,out);
	}
	inline void gcmEncrypt(const uint8_t iv[12],const void *in,unsigned int inlen,const void *assoc,unsigned int assoclen,void *out,uint8_t *tag,unsigned int taglen)
	{
#ifdef ZT_AES_AESNI
		if (HW_ACCEL) {
			_encrypt_gcm256_aesni(inlen,(const uint8_t *)in,(uint8_t *)out,iv,assoclen,(const uint8_t *)assoc,tag,taglen);
			return;
		}
#endif
		abort(); // TODO: software fallback not implemented yet
	}

	inline bool gcmDecrypt(const uint8_t iv[12],const void *in,unsigned int inlen,const void *assoc,unsigned int assoclen,void *out,const uint8_t *tag,unsigned int taglen)
	{
#ifdef ZT_AES_AESNI
		if (HW_ACCEL) {
			uint8_t tagbuf[16];
			_decrypt_gcm256_aesni(inlen,(const uint8_t *)in,(uint8_t *)out,iv,assoclen,(const uint8_t *)assoc,tagbuf,taglen);
			return Utils::secureEq(tagbuf,tag,taglen);
		}
#endif
		abort(); // TODO: software fallback not implemented yet
		return false;
	}
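
	/*
	 * Usage sketch (illustrative only; key256, iv, aad, and the plain/cipher
	 * buffers are hypothetical caller-supplied arrays, with plainLen and
	 * aadLen their sizes):
	 *
	 *   AES aes(key256);                // key256: uint8_t[32]
	 *   uint8_t tag[16];
	 *   aes.gcmEncrypt(iv,plain,plainLen,aad,aadLen,cipher,tag,sizeof(tag));
	 *   if (!aes.gcmDecrypt(iv,cipher,plainLen,aad,aadLen,plainOut,tag,sizeof(tag))) {
	 *     // authentication failed, discard output
	 *   }
	 *
	 * As with any GCM use, the 96-bit IV must be unique for every encryption
	 * performed under a given key.
	 */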
private:
	void _initSW(const uint8_t key[32]);
	void _encryptSW(const uint8_t in[16],uint8_t out[16]) const;

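	// Key schedule storage: with AES-NI this holds the 15 expanded round keys
	// plus the GHASH key H and its powers H^2..H^4 (h, hh, hhh, hhhh); the
	// software fallback uses a flat 60-word schedule.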
	union {
#ifdef ZT_AES_AESNI
		struct {
			__m128i k[15];
			__m128i h,hh,hhh,hhhh;
		} ni;
#endif
		struct {
			uint32_t k[60];
		} sw;
	} _k;

#ifdef ZT_AES_AESNI /********************************************************/
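	// AES-256 key expansion helpers built on AESKEYGENASSIST: _init256_1_aesni
	// derives the even-numbered round keys (using the round constant),
	// _init256_2_aesni the odd-numbered ones.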
	static inline __m128i _init256_1_aesni(__m128i a,__m128i b)
	{
		__m128i x,y;
		b = _mm_shuffle_epi32(b,0xff);
		y = _mm_slli_si128(a,0x04);
		x = _mm_xor_si128(a,y);
		y = _mm_slli_si128(y,0x04);
		x = _mm_xor_si128(x,y);
		y = _mm_slli_si128(y,0x04);
		x = _mm_xor_si128(x,y);
		x = _mm_xor_si128(x,b);
		return x;
	}
	static inline __m128i _init256_2_aesni(__m128i a,__m128i b)
	{
		__m128i x,y,z;
		y = _mm_aeskeygenassist_si128(a,0x00);
		z = _mm_shuffle_epi32(y,0xaa);
		y = _mm_slli_si128(b,0x04);
		x = _mm_xor_si128(b,y);
		y = _mm_slli_si128(y,0x04);
		x = _mm_xor_si128(x,y);
		y = _mm_slli_si128(y,0x04);
		x = _mm_xor_si128(x,y);
		x = _mm_xor_si128(x,z);
		return x;
	}
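
	// Expands the 256-bit key into 15 round keys, then derives the GHASH key
	// H = AES_k(0^128) and precomputes H^2, H^3, and H^4 for the four-way
	// parallel GHASH used below.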
	inline void _init_aesni(const uint8_t key[32])
	{
		/* Init AES itself */
		__m128i t1,t2;
		_k.ni.k[0] = t1 = _mm_loadu_si128((const __m128i *)key);
		_k.ni.k[1] = t2 = _mm_loadu_si128((const __m128i *)(key+16));
		_k.ni.k[2] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x01));
		_k.ni.k[3] = t2 = _init256_2_aesni(t1,t2);
		_k.ni.k[4] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x02));
		_k.ni.k[5] = t2 = _init256_2_aesni(t1,t2);
		_k.ni.k[6] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x04));
		_k.ni.k[7] = t2 = _init256_2_aesni(t1,t2);
		_k.ni.k[8] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x08));
		_k.ni.k[9] = t2 = _init256_2_aesni(t1,t2);
		_k.ni.k[10] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x10));
		_k.ni.k[11] = t2 = _init256_2_aesni(t1,t2);
		_k.ni.k[12] = t1 = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x20));
		_k.ni.k[13] = t2 = _init256_2_aesni(t1,t2);
		_k.ni.k[14] = _init256_1_aesni(t1,_mm_aeskeygenassist_si128(t2,0x40));

		/* Init GCM / GHASH */
		__m128i h = _mm_xor_si128(_mm_setzero_si128(),_k.ni.k[0]);
		h = _mm_aesenc_si128(h,_k.ni.k[1]);
		h = _mm_aesenc_si128(h,_k.ni.k[2]);
		h = _mm_aesenc_si128(h,_k.ni.k[3]);
		h = _mm_aesenc_si128(h,_k.ni.k[4]);
		h = _mm_aesenc_si128(h,_k.ni.k[5]);
		h = _mm_aesenc_si128(h,_k.ni.k[6]);
		h = _mm_aesenc_si128(h,_k.ni.k[7]);
		h = _mm_aesenc_si128(h,_k.ni.k[8]);
		h = _mm_aesenc_si128(h,_k.ni.k[9]);
		h = _mm_aesenc_si128(h,_k.ni.k[10]);
		h = _mm_aesenc_si128(h,_k.ni.k[11]);
		h = _mm_aesenc_si128(h,_k.ni.k[12]);
		h = _mm_aesenc_si128(h,_k.ni.k[13]);
		h = _mm_aesenclast_si128(h,_k.ni.k[14]);
		__m128i hswap = _swap128_aesni(h);
		__m128i hh = _mult_block_aesni(hswap,h);
		__m128i hhh = _mult_block_aesni(hswap,hh);
		__m128i hhhh = _mult_block_aesni(hswap,hhh);
		_k.ni.h = hswap;
		_k.ni.hh = _swap128_aesni(hh);
		_k.ni.hhh = _swap128_aesni(hhh);
		_k.ni.hhhh = _swap128_aesni(hhhh);
		/*
		this->h = h;
		h = swap128(h);
		this->hh = mult_block(h, this->h);
		this->hhh = mult_block(h, this->hh);
		this->hhhh = mult_block(h, this->hhh);
		this->h = swap128(this->h);
		this->hh = swap128(this->hh);
		this->hhh = swap128(this->hhh);
		this->hhhh = swap128(this->hhhh);
		*/
	}
	inline void _encrypt_aesni(const void *in,void *out) const
	{
		__m128i tmp;
		tmp = _mm_loadu_si128((const __m128i *)in);
		tmp = _mm_xor_si128(tmp,_k.ni.k[0]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[1]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[2]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[3]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[4]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[5]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[6]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[7]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[8]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[9]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[10]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[11]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[12]);
		tmp = _mm_aesenc_si128(tmp,_k.ni.k[13]);
		_mm_storeu_si128((__m128i *)out,_mm_aesenclast_si128(tmp,_k.ni.k[14]));
	}

	static inline __m128i _swap128_aesni(__m128i x) { return _mm_shuffle_epi8(x,_mm_set_epi8(0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15)); }
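	// GF(2^128) multiply for GHASH: four PCLMULQDQ products form the 256-bit
	// carry-less product, which is shifted left one bit (GHASH's reflected bit
	// order) and reduced modulo x^128 + x^7 + x^2 + x + 1.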
	static inline __m128i _mult_block_aesni(__m128i h,__m128i y)
	{
		__m128i t1,t2,t3,t4,t5,t6;
		y = _swap128_aesni(y);
		t1 = _mm_clmulepi64_si128(h,y,0x00);
		t2 = _mm_clmulepi64_si128(h,y,0x01);
		t3 = _mm_clmulepi64_si128(h,y,0x10);
		t4 = _mm_clmulepi64_si128(h,y,0x11);
		t2 = _mm_xor_si128(t2,t3);
		t3 = _mm_slli_si128(t2,8);
		t2 = _mm_srli_si128(t2,8);
		t1 = _mm_xor_si128(t1,t3);
		t4 = _mm_xor_si128(t4,t2);
		t5 = _mm_srli_epi32(t1,31);
		t1 = _mm_slli_epi32(t1,1);
		t6 = _mm_srli_epi32(t4,31);
		t4 = _mm_slli_epi32(t4,1);
		t3 = _mm_srli_si128(t5,12);
		t6 = _mm_slli_si128(t6,4);
		t5 = _mm_slli_si128(t5,4);
		t1 = _mm_or_si128(t1,t5);
		t4 = _mm_or_si128(t4,t6);
		t4 = _mm_or_si128(t4,t3);
		t5 = _mm_slli_epi32(t1,31);
		t6 = _mm_slli_epi32(t1,30);
		t3 = _mm_slli_epi32(t1,25);
		t5 = _mm_xor_si128(t5,t6);
		t5 = _mm_xor_si128(t5,t3);
		t6 = _mm_srli_si128(t5,4);
		t4 = _mm_xor_si128(t4,t6);
		t5 = _mm_slli_si128(t5,12);
		t1 = _mm_xor_si128(t1,t5);
		t4 = _mm_xor_si128(t4,t1);
		t5 = _mm_srli_epi32(t1,1);
		t2 = _mm_srli_epi32(t1,2);
		t3 = _mm_srli_epi32(t1,7);
		t4 = _mm_xor_si128(t4,t2);
		t4 = _mm_xor_si128(t4,t3);
		t4 = _mm_xor_si128(t4,t5);
		return _swap128_aesni(t4);
	}
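
	// Four-block GHASH step: multiplies d1..d4 by h1..h4 (H^4..H) using
	// Karatsuba-style middle terms, XORs the partial products, and performs a
	// single reduction at the end.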
	static inline __m128i _mult4xor_aesni(__m128i h1,__m128i h2,__m128i h3,__m128i h4,__m128i d1,__m128i d2,__m128i d3,__m128i d4)
	{
		__m128i t0,t1,t2,t3,t4,t5,t6,t7,t8,t9;
		d1 = _swap128_aesni(d1);
		d2 = _swap128_aesni(d2);
		d3 = _swap128_aesni(d3);
		d4 = _swap128_aesni(d4);
		t0 = _mm_clmulepi64_si128(h1,d1,0x00);
		t1 = _mm_clmulepi64_si128(h2,d2,0x00);
		t2 = _mm_clmulepi64_si128(h3,d3,0x00);
		t3 = _mm_clmulepi64_si128(h4,d4,0x00);
		t8 = _mm_xor_si128(t0,t1);
		t8 = _mm_xor_si128(t8,t2);
		t8 = _mm_xor_si128(t8,t3);
		t4 = _mm_clmulepi64_si128(h1,d1,0x11);
		t5 = _mm_clmulepi64_si128(h2,d2,0x11);
		t6 = _mm_clmulepi64_si128(h3,d3,0x11);
		t7 = _mm_clmulepi64_si128(h4,d4,0x11);
		t9 = _mm_xor_si128(t4,t5);
		t9 = _mm_xor_si128(t9,t6);
		t9 = _mm_xor_si128(t9,t7);
		t0 = _mm_shuffle_epi32(h1,78);
		t4 = _mm_shuffle_epi32(d1,78);
		t0 = _mm_xor_si128(t0,h1);
		t4 = _mm_xor_si128(t4,d1);
		t1 = _mm_shuffle_epi32(h2,78);
		t5 = _mm_shuffle_epi32(d2,78);
		t1 = _mm_xor_si128(t1,h2);
		t5 = _mm_xor_si128(t5,d2);
		t2 = _mm_shuffle_epi32(h3,78);
		t6 = _mm_shuffle_epi32(d3,78);
		t2 = _mm_xor_si128(t2,h3);
		t6 = _mm_xor_si128(t6,d3);
		t3 = _mm_shuffle_epi32(h4,78);
		t7 = _mm_shuffle_epi32(d4,78);
		t3 = _mm_xor_si128(t3,h4);
		t7 = _mm_xor_si128(t7,d4);
		t0 = _mm_clmulepi64_si128(t0,t4,0x00);
		t1 = _mm_clmulepi64_si128(t1,t5,0x00);
		t2 = _mm_clmulepi64_si128(t2,t6,0x00);
		t3 = _mm_clmulepi64_si128(t3,t7,0x00);
		t0 = _mm_xor_si128(t0,t8);
		t0 = _mm_xor_si128(t0,t9);
		t0 = _mm_xor_si128(t1,t0);
		t0 = _mm_xor_si128(t2,t0);
		t0 = _mm_xor_si128(t3,t0);
		t4 = _mm_slli_si128(t0,8);
		t0 = _mm_srli_si128(t0,8);
		t3 = _mm_xor_si128(t4,t8);
		t6 = _mm_xor_si128(t0,t9);
		t7 = _mm_srli_epi32(t3,31);
		t8 = _mm_srli_epi32(t6,31);
		t3 = _mm_slli_epi32(t3,1);
		t6 = _mm_slli_epi32(t6,1);
		t9 = _mm_srli_si128(t7,12);
		t8 = _mm_slli_si128(t8,4);
		t7 = _mm_slli_si128(t7,4);
		t3 = _mm_or_si128(t3,t7);
		t6 = _mm_or_si128(t6,t8);
		t6 = _mm_or_si128(t6,t9);
		t7 = _mm_slli_epi32(t3,31);
		t8 = _mm_slli_epi32(t3,30);
		t9 = _mm_slli_epi32(t3,25);
		t7 = _mm_xor_si128(t7,t8);
		t7 = _mm_xor_si128(t7,t9);
		t8 = _mm_srli_si128(t7,4);
		t7 = _mm_slli_si128(t7,12);
		t3 = _mm_xor_si128(t3,t7);
		t2 = _mm_srli_epi32(t3,1);
		t4 = _mm_srli_epi32(t3,2);
		t5 = _mm_srli_epi32(t3,7);
		t2 = _mm_xor_si128(t2,t4);
		t2 = _mm_xor_si128(t2,t5);
		t2 = _mm_xor_si128(t2,t8);
		t3 = _mm_xor_si128(t3,t2);
		t6 = _mm_xor_si128(t6,t3);
		return _swap128_aesni(t6);
	}
	static inline __m128i _ghash_aesni(__m128i h,__m128i y,__m128i x) { return _mult_block_aesni(h,_mm_xor_si128(y,x)); }
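	// GCM counter blocks are big-endian, so increment by byte-swapping, adding
	// one to the low 64 bits, and swapping back.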
	static inline __m128i _increment_be_aesni(__m128i x)
	{
		x = _swap128_aesni(x);
		x = _mm_add_epi64(x,_mm_set_epi32(0,0,0,1));
		x = _swap128_aesni(x);
		return x;
	}
	static inline void _htoun64_aesni(void *network,const uint64_t host) { *((uint64_t *)network) = Utils::hton(host); }
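	// Builds the pre-counter block J0 for a 96-bit IV as in NIST SP 800-38D:
	// IV || 0x00000001.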
	inline __m128i _create_j_aesni(const uint8_t *iv) const
	{
		uint8_t j[16];
		*((uint64_t *)j) = *((const uint64_t *)iv);
		*((uint32_t *)(j+8)) = *((const uint32_t *)(iv+8));
		j[12] = 0;
		j[13] = 0;
		j[14] = 0;
		j[15] = 1;
		return _mm_loadu_si128((__m128i *)j);
	}
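
	// GHASH over the additional authenticated data, four blocks at a time on
	// the fast path, with a zero-padded final partial block.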
	inline __m128i _icv_header_aesni(const void *assoc,unsigned int alen) const
	{
		unsigned int blocks,pblocks,rem,i;
		__m128i h1,h2,h3,h4,d1,d2,d3,d4;
		__m128i y,last;
		const __m128i *ab;
		h1 = _k.ni.hhhh;
		h2 = _k.ni.hhh;
		h3 = _k.ni.hh;
		h4 = _k.ni.h;
		y = _mm_setzero_si128();
		ab = (const __m128i *)assoc;
		blocks = alen / 16;
		pblocks = blocks - (blocks % 4);
		rem = alen % 16;
		for (i=0;i<pblocks;i+=4) {
			d1 = _mm_loadu_si128(ab + i + 0);
			d2 = _mm_loadu_si128(ab + i + 1);
			d3 = _mm_loadu_si128(ab + i + 2);
			d4 = _mm_loadu_si128(ab + i + 3);
			y = _mm_xor_si128(y,d1);
			y = _mult4xor_aesni(h1,h2,h3,h4,y,d2,d3,d4);
		}
		for (i=pblocks;i<blocks;i++)
			y = _ghash_aesni(_k.ni.h,y,_mm_loadu_si128(ab + i));
		if (rem) {
			last = _mm_setzero_si128();
			memcpy(&last,ab + blocks,rem);
			y = _ghash_aesni(_k.ni.h,y,last);
		}
		return y;
	}
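
	// Final GHASH length block: the bit lengths of the AAD and of the
	// ciphertext as two big-endian 64-bit words.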
	inline __m128i _icv_tailer_aesni(__m128i y,size_t alen,size_t dlen) const
	{
		__m128i b;
		_htoun64_aesni(&b,alen * 8);
		_htoun64_aesni((uint8_t *)&b + sizeof(uint64_t),dlen * 8);
		return _ghash_aesni(_k.ni.h,y,b);
	}
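
	// Produces the tag (ICV): encrypts J0 and XORs it with the final GHASH
	// value, truncating the result to icvsize bytes.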
	inline void _icv_crypt_aesni(__m128i y,__m128i j,uint8_t *icv,unsigned int icvsize) const
	{
		__m128i t,b;
		t = _mm_xor_si128(j,_k.ni.k[0]);
		t = _mm_aesenc_si128(t,_k.ni.k[1]);
		t = _mm_aesenc_si128(t,_k.ni.k[2]);
		t = _mm_aesenc_si128(t,_k.ni.k[3]);
		t = _mm_aesenc_si128(t,_k.ni.k[4]);
		t = _mm_aesenc_si128(t,_k.ni.k[5]);
		t = _mm_aesenc_si128(t,_k.ni.k[6]);
		t = _mm_aesenc_si128(t,_k.ni.k[7]);
		t = _mm_aesenc_si128(t,_k.ni.k[8]);
		t = _mm_aesenc_si128(t,_k.ni.k[9]);
		t = _mm_aesenc_si128(t,_k.ni.k[10]);
		t = _mm_aesenc_si128(t,_k.ni.k[11]);
		t = _mm_aesenc_si128(t,_k.ni.k[12]);
		t = _mm_aesenc_si128(t,_k.ni.k[13]);
		t = _mm_aesenclast_si128(t,_k.ni.k[14]);
		t = _mm_xor_si128(y,t);
		_mm_storeu_si128(&b,t);
		memcpy(icv,&b,icvsize);
	}
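
	// Encrypts a final partial block: generates one keystream block from cb,
	// XORs in the remainder, and folds the zero-padded ciphertext into GHASH.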
	inline __m128i _encrypt_gcm_rem_aesni(unsigned int rem,const void *in,void *out,__m128i cb,__m128i y) const
	{
		__m128i t,b;
		memset(&b,0,sizeof(b));
		memcpy(&b,in,rem);
		t = _mm_xor_si128(cb,_k.ni.k[0]);
		t = _mm_aesenc_si128(t,_k.ni.k[1]);
		t = _mm_aesenc_si128(t,_k.ni.k[2]);
		t = _mm_aesenc_si128(t,_k.ni.k[3]);
		t = _mm_aesenc_si128(t,_k.ni.k[4]);
		t = _mm_aesenc_si128(t,_k.ni.k[5]);
		t = _mm_aesenc_si128(t,_k.ni.k[6]);
		t = _mm_aesenc_si128(t,_k.ni.k[7]);
		t = _mm_aesenc_si128(t,_k.ni.k[8]);
		t = _mm_aesenc_si128(t,_k.ni.k[9]);
		t = _mm_aesenc_si128(t,_k.ni.k[10]);
		t = _mm_aesenc_si128(t,_k.ni.k[11]);
		t = _mm_aesenc_si128(t,_k.ni.k[12]);
		t = _mm_aesenc_si128(t,_k.ni.k[13]);
		t = _mm_aesenclast_si128(t,_k.ni.k[14]);
		b = _mm_xor_si128(t,b);
		memcpy(out,&b,rem);
		memset((uint8_t *)&b + rem,0,16 - rem); // uint8_t instead of non-portable u_char
		return _ghash_aesni(_k.ni.h,y,b);
	}
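
	// AES-256-GCM encryption: CTR-mode keystream generation interleaved with
	// GHASH over the ciphertext, four blocks per iteration on the fast path.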
	inline void _encrypt_gcm256_aesni(unsigned int len,const uint8_t *in,uint8_t *out,const uint8_t *iv,unsigned int alen,const uint8_t *assoc,uint8_t *icv,unsigned int icvsize) const
	{
		__m128i d1,d2,d3,d4,t1,t2,t3,t4,k;
		__m128i y,j,cb,*bi,*bo;
		j = _create_j_aesni(iv);
		cb = _increment_be_aesni(j);
		y = _icv_header_aesni(assoc,alen);
		unsigned int blocks = len / 16;
		unsigned int pblocks = blocks - (blocks % 4);
		unsigned int rem = len % 16;
		bi = (__m128i *)in;
		bo = (__m128i *)out;
		unsigned int i;
		for (i=0;i<pblocks;i+=4) {
			d1 = _mm_loadu_si128(bi + i + 0);
			d2 = _mm_loadu_si128(bi + i + 1);
			d3 = _mm_loadu_si128(bi + i + 2);
			d4 = _mm_loadu_si128(bi + i + 3);
			t1 = _mm_xor_si128(cb,k = _k.ni.k[0]);
			cb = _increment_be_aesni(cb);
			t2 = _mm_xor_si128(cb,k);
			cb = _increment_be_aesni(cb);
			t3 = _mm_xor_si128(cb,k);
			cb = _increment_be_aesni(cb);
			t4 = _mm_xor_si128(cb,k);
			cb = _increment_be_aesni(cb);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[1]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[2]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[3]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[4]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[5]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[6]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[7]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[8]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[9]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[10]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[11]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[12]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[13]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenclast_si128(t1,k = _k.ni.k[14]);
			t2 = _mm_aesenclast_si128(t2,k);
			t3 = _mm_aesenclast_si128(t3,k);
			t4 = _mm_aesenclast_si128(t4,k);
			t1 = _mm_xor_si128(t1,d1);
			t2 = _mm_xor_si128(t2,d2);
			t3 = _mm_xor_si128(t3,d3);
			t4 = _mm_xor_si128(t4,d4);
			y = _mm_xor_si128(y,t1);
			y = _mult4xor_aesni(_k.ni.hhhh,_k.ni.hhh,_k.ni.hh,_k.ni.h,y,t2,t3,t4);
			_mm_storeu_si128(bo + i + 0,t1);
			_mm_storeu_si128(bo + i + 1,t2);
			_mm_storeu_si128(bo + i + 2,t3);
			_mm_storeu_si128(bo + i + 3,t4);
		}
		for (i=pblocks;i<blocks;++i) {
			d1 = _mm_loadu_si128(bi + i);
			t1 = _mm_xor_si128(cb,_k.ni.k[0]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[1]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[2]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[3]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[4]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[5]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[6]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[7]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[8]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[9]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[10]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[11]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[12]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[13]);
			t1 = _mm_aesenclast_si128(t1,_k.ni.k[14]);
			t1 = _mm_xor_si128(t1,d1);
			_mm_storeu_si128(bo + i,t1);
			y = _ghash_aesni(_k.ni.h,y,t1);
			cb = _increment_be_aesni(cb);
		}
		if (rem)
			y = _encrypt_gcm_rem_aesni(rem,bi + blocks,bo + blocks,cb,y);
		y = _icv_tailer_aesni(y,alen,len);
		_icv_crypt_aesni(y,j,icv,icvsize);
	}
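
	// Decrypts a final partial block: the zero-padded ciphertext is folded
	// into GHASH before being XORed with the keystream.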
	inline __m128i _decrypt_gcm_rem_aesni(unsigned int rem,const void *in,void *out,__m128i cb,__m128i y)
	{
		__m128i t,b;
		memset(&b,0,sizeof(b));
		memcpy(&b,in,rem);
		y = _ghash_aesni(_k.ni.h,y,b);
		t = _mm_xor_si128(cb,_k.ni.k[0]);
		t = _mm_aesenc_si128(t,_k.ni.k[1]);
		t = _mm_aesenc_si128(t,_k.ni.k[2]);
		t = _mm_aesenc_si128(t,_k.ni.k[3]);
		t = _mm_aesenc_si128(t,_k.ni.k[4]);
		t = _mm_aesenc_si128(t,_k.ni.k[5]);
		t = _mm_aesenc_si128(t,_k.ni.k[6]);
		t = _mm_aesenc_si128(t,_k.ni.k[7]);
		t = _mm_aesenc_si128(t,_k.ni.k[8]);
		t = _mm_aesenc_si128(t,_k.ni.k[9]);
		t = _mm_aesenc_si128(t,_k.ni.k[10]);
		t = _mm_aesenc_si128(t,_k.ni.k[11]);
		t = _mm_aesenc_si128(t,_k.ni.k[12]);
		t = _mm_aesenc_si128(t,_k.ni.k[13]);
		t = _mm_aesenclast_si128(t,_k.ni.k[14]);
		b = _mm_xor_si128(t,b);
		memcpy(out,&b,rem);
		return y;
	}
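
	// AES-256-GCM decryption: mirrors the encrypt path except that GHASH is
	// computed over the ciphertext before it is decrypted; the caller
	// (gcmDecrypt) compares the resulting tag.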
	inline void _decrypt_gcm256_aesni(unsigned int len,const uint8_t *in,uint8_t *out,const uint8_t *iv,unsigned int alen,const uint8_t *assoc,uint8_t *icv,unsigned int icvsize)
	{
		__m128i d1,d2,d3,d4,t1,t2,t3,t4,k;
		__m128i y,j,cb,*bi,*bo;
		unsigned int blocks,pblocks,rem;
		j = _create_j_aesni(iv);
		cb = _increment_be_aesni(j);
		y = _icv_header_aesni(assoc,alen);
		blocks = len / 16;
		pblocks = blocks - (blocks % 4);
		rem = len % 16;
		bi = (__m128i *)in;
		bo = (__m128i *)out;
		unsigned int i;
		for (i=0;i<pblocks;i+=4) {
			d1 = _mm_loadu_si128(bi + i + 0);
			d2 = _mm_loadu_si128(bi + i + 1);
			d3 = _mm_loadu_si128(bi + i + 2);
			d4 = _mm_loadu_si128(bi + i + 3);
			y = _mm_xor_si128(y,d1);
			y = _mult4xor_aesni(_k.ni.hhhh,_k.ni.hhh,_k.ni.hh,_k.ni.h,y,d2,d3,d4);
			t1 = _mm_xor_si128(cb,k = _k.ni.k[0]);
			cb = _increment_be_aesni(cb);
			t2 = _mm_xor_si128(cb,k);
			cb = _increment_be_aesni(cb);
			t3 = _mm_xor_si128(cb,k);
			cb = _increment_be_aesni(cb);
			t4 = _mm_xor_si128(cb,k);
			cb = _increment_be_aesni(cb);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[1]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[2]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[3]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[4]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[5]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[6]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[7]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[8]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[9]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[10]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[11]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[12]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenc_si128(t1,k = _k.ni.k[13]);
			t2 = _mm_aesenc_si128(t2,k);
			t3 = _mm_aesenc_si128(t3,k);
			t4 = _mm_aesenc_si128(t4,k);
			t1 = _mm_aesenclast_si128(t1,k = _k.ni.k[14]);
			t2 = _mm_aesenclast_si128(t2,k);
			t3 = _mm_aesenclast_si128(t3,k);
			t4 = _mm_aesenclast_si128(t4,k);
			t1 = _mm_xor_si128(t1,d1);
			t2 = _mm_xor_si128(t2,d2);
			t3 = _mm_xor_si128(t3,d3);
			t4 = _mm_xor_si128(t4,d4);
			_mm_storeu_si128(bo + i + 0,t1);
			_mm_storeu_si128(bo + i + 1,t2);
			_mm_storeu_si128(bo + i + 2,t3);
			_mm_storeu_si128(bo + i + 3,t4);
		}
		for (i=pblocks;i<blocks;i++) {
			d1 = _mm_loadu_si128(bi + i);
			y = _ghash_aesni(_k.ni.h,y,d1);
			t1 = _mm_xor_si128(cb,_k.ni.k[0]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[1]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[2]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[3]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[4]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[5]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[6]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[7]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[8]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[9]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[10]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[11]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[12]);
			t1 = _mm_aesenc_si128(t1,_k.ni.k[13]);
			t1 = _mm_aesenclast_si128(t1,_k.ni.k[14]);
			t1 = _mm_xor_si128(t1,d1);
			_mm_storeu_si128(bo + i,t1);
			cb = _increment_be_aesni(cb);
		}
		if (rem)
			y = _decrypt_gcm_rem_aesni(rem,bi + blocks,bo + blocks,cb,y);
		y = _icv_tailer_aesni(y,alen,len);
		_icv_crypt_aesni(y,j,icv,icvsize);
	}
#endif /* ZT_AES_AESNI ******************************************************/
};

} // namespace ZeroTier

#endif