/*
 * SHA-256 algorithm as described at
 *
 *   http://csrc.nist.gov/cryptval/shs.html
 */

#include "ssh.h"
#include <assert.h>

/*
 * Start by deciding whether we can support hardware SHA at all.
 */
#define HW_SHA256_NONE 0
#define HW_SHA256_NI 1
#define HW_SHA256_NEON 2

#ifdef _FORCE_SHA_NI
#   define HW_SHA256 HW_SHA256_NI
#elif defined(__clang__)
#   if __has_attribute(target) && __has_include(<wmmintrin.h>) && \
        (defined(__x86_64__) || defined(__i386))
#       define HW_SHA256 HW_SHA256_NI
#   endif
#elif defined(__GNUC__)
#   if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \
        (defined(__x86_64__) || defined(__i386))
#       define HW_SHA256 HW_SHA256_NI
#   endif
#elif defined (_MSC_VER)
#   if (defined(_M_X64) || defined(_M_IX86)) && _MSC_FULL_VER >= 150030729
#       define HW_SHA256 HW_SHA256_NI
#   endif
#endif

#ifdef _FORCE_SHA_NEON
#   define HW_SHA256 HW_SHA256_NEON
#elif defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    /* Arm can potentially support both endiannesses, but this code
     * hasn't been tested on anything but little. If anyone wants to
     * run big-endian, they'll need to fix it first. */
#elif defined __ARM_FEATURE_CRYPTO
    /* If the Arm crypto extension is available already, we can
     * support NEON SHA without having to enable anything by hand */
#   define HW_SHA256 HW_SHA256_NEON
#elif defined(__clang__)
#   if __has_attribute(target) && __has_include(<arm_neon.h>) && \
        defined(__aarch64__)
        /* clang can enable the crypto extension in AArch64 using
         * __attribute__((target)) */
#       define HW_SHA256 HW_SHA256_NEON
#       define USE_CLANG_ATTR_TARGET_AARCH64
#   endif
#elif defined _MSC_VER
    /* Visual Studio supports the crypto extension when targeting
     * AArch64, but as of VS2017, the AArch32 header doesn't quite
     * manage it (declaring the shae/shad intrinsics without a round
     * key operand). */
#   if defined _M_ARM64
#       define HW_SHA256 HW_SHA256_NEON
#       define USE_ARM64_NEON_H /* unusual header name in this case */
#   endif
#endif

#if defined _FORCE_SOFTWARE_SHA || !defined HW_SHA256
#   undef HW_SHA256
#   define HW_SHA256 HW_SHA256_NONE
#endif
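
/*
 * For reference, the override macros tested above give build-time
 * control over the choice: defining _FORCE_SHA_NI or _FORCE_SHA_NEON
 * (e.g. -D_FORCE_SHA_NI on the compiler command line) unconditionally
 * selects that implementation, and _FORCE_SOFTWARE_SHA forces the
 * pure software path, e.g. for checking one implementation against
 * another.
 */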
/*
 * The actual query function that asks if hardware acceleration is
 * available.
 */
static bool sha256_hw_available(void);

/*
 * The top-level selection function, caching the results of
 * sha256_hw_available() so it only has to run once.
 */
static bool sha256_hw_available_cached(void)
{
    static bool initialised = false;
    static bool hw_available;
    if (!initialised) {
        hw_available = sha256_hw_available();
        initialised = true;
    }
    return hw_available;
}

static ssh_hash *sha256_select(const ssh_hashalg *alg)
{
    const ssh_hashalg *real_alg =
        sha256_hw_available_cached() ? &ssh_sha256_hw : &ssh_sha256_sw;

    return ssh_hash_new(real_alg);
}

const ssh_hashalg ssh_sha256 = {
    sha256_select, NULL, NULL, NULL,
    32, 64, HASHALG_NAMES_ANNOTATED("SHA-256", "dummy selector vtable"),
};
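
/*
 * A caller never has to care which implementation won: constructing a
 * hash through this selector vtable transparently builds the right
 * one. Roughly (a sketch, assuming the usual BinarySink helpers and
 * the final-and-free convention of this vtable):
 *
 *     ssh_hash *h = ssh_hash_new(&ssh_sha256);
 *     put_data(h, data, len);
 *     unsigned char digest[32];
 *     ssh_hash_final(h, digest);
 */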
/* ----------------------------------------------------------------------
 * Definitions likely to be helpful to multiple implementations.
 */

static const uint32_t sha256_initial_state[] = {
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};
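/* (These are the standard FIPS 180-4 values: the first 32 bits of the
 * fractional parts of the square roots of the first eight primes.) */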

static const uint32_t sha256_round_constants[] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
    0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
    0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
    0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
    0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
    0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
};
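/* (Likewise the standard round constants: the first 32 bits of the
 * fractional parts of the cube roots of the first 64 primes.) */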

#define SHA256_ROUNDS 64

typedef struct sha256_block sha256_block;
struct sha256_block {
    uint8_t block[64];             /* the input block being accumulated */
    size_t used;                   /* how much of block[] is currently valid */
    uint64_t len;                  /* total bytes hashed so far */
};

static inline void sha256_block_setup(sha256_block *blk)
{
    blk->used = 0;
    blk->len = 0;
}

static inline bool sha256_block_write(
    sha256_block *blk, const void **vdata, size_t *len)
{
    size_t blkleft = sizeof(blk->block) - blk->used;
    size_t chunk = *len < blkleft ? *len : blkleft;

    const uint8_t *p = *vdata;
    memcpy(blk->block + blk->used, p, chunk);
    *vdata = p + chunk;
    *len -= chunk;
    blk->used += chunk;
    blk->len += chunk;

    if (blk->used == sizeof(blk->block)) {
        blk->used = 0;
        return true;
    }

    return false;
}
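
/*
 * A worked example of the consume-and-report contract above: writing
 * 100 bytes to an empty block copies the first 64, resets 'used' to 0
 * and returns true, so the caller processes one complete block; the
 * caller's loop then comes back in with the remaining 36 bytes, which
 * are merely buffered, and false is returned until more data arrives.
 */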

static inline void sha256_block_pad(sha256_block *blk, BinarySink *bs)
{
    uint64_t final_len = blk->len << 3;
    size_t pad = 1 + (63 & (55 - blk->used));

    put_byte(bs, 0x80);
    for (size_t i = 1; i < pad; i++)
        put_byte(bs, 0);
    put_uint64(bs, final_len);

    assert(blk->used == 0 && "Should have exactly hit a block boundary");
}
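
/*
 * Padding arithmetic, worked through: the 0x80 byte plus pad-1 zero
 * bytes plus the 8-byte big-endian bit count must land exactly on a
 * 64-byte boundary. E.g. with used == 0, pad = 1 + (63 & 55) = 56,
 * and 56 + 8 = 64; with used == 56, pad = 1 + (63 & -1) = 64, which
 * pushes the length field into a second block: 56 + 64 + 8 = 128.
 */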

/* ----------------------------------------------------------------------
 * Software implementation of SHA-256.
 */

static inline uint32_t ror(uint32_t x, unsigned y)
{
    return (x << (31 & -y)) | (x >> (31 & y));
}
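
/*
 * The masking in ror() is the usual trick for a well-defined rotate:
 * both shift counts are reduced mod 32, so neither shift can hit the
 * undefined-behaviour case of shifting a 32-bit value by 32. For y in
 * 1..31, (31 & -y) == 32 - y, so e.g. ror(x,7) == (x << 25) | (x >> 7).
 */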

/* The 'choice' function: each output bit takes the corresponding bit
 * of if1 or if0, depending on the corresponding bit of ctrl */
static inline uint32_t Ch(uint32_t ctrl, uint32_t if1, uint32_t if0)
{
    return if0 ^ (ctrl & (if1 ^ if0));
}

/* The 'majority' function: each output bit is the majority vote of
 * the corresponding bits of x, y and z */
static inline uint32_t Maj(uint32_t x, uint32_t y, uint32_t z)
{
    return (x & y) | (z & (x | y));
}

/* The upper-case Sigma functions from the spec, used in the rounds */
static inline uint32_t Sigma_0(uint32_t x)
{
    return ror(x,2) ^ ror(x,13) ^ ror(x,22);
}

static inline uint32_t Sigma_1(uint32_t x)
{
    return ror(x,6) ^ ror(x,11) ^ ror(x,25);
}

/* The lower-case sigma functions, used in the message schedule */
static inline uint32_t sigma_0(uint32_t x)
{
    return ror(x,7) ^ ror(x,18) ^ (x >> 3);
}

static inline uint32_t sigma_1(uint32_t x)
{
    return ror(x,17) ^ ror(x,19) ^ (x >> 10);
}

static inline void sha256_sw_round(
    unsigned round_index, const uint32_t *schedule,
    uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
    uint32_t *e, uint32_t *f, uint32_t *g, uint32_t *h)
{
    uint32_t t1 = *h + Sigma_1(*e) + Ch(*e,*f,*g) +
        sha256_round_constants[round_index] + schedule[round_index];

    uint32_t t2 = Sigma_0(*a) + Maj(*a,*b,*c);

    *d += t1;
    *h = t1 + t2;
}
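
/*
 * Note that the round function never physically moves the eight state
 * words: instead, the caller below rotates their _roles_ by permuting
 * which variable is passed as a, b, ..., h on each call. After eight
 * calls the permutation has cycled all the way round, which is why
 * the main loop advances in steps of 8.
 */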

static void sha256_sw_block(uint32_t *core, const uint8_t *block)
{
    uint32_t w[SHA256_ROUNDS];
    uint32_t a,b,c,d,e,f,g,h;

    /* Load the 16 big-endian message words, then expand them into the
     * full 64-entry message schedule */
    for (size_t t = 0; t < 16; t++)
        w[t] = GET_32BIT_MSB_FIRST(block + 4*t);

    for (size_t t = 16; t < SHA256_ROUNDS; t++)
        w[t] = sigma_1(w[t-2]) + w[t-7] + sigma_0(w[t-15]) + w[t-16];

    a = core[0]; b = core[1]; c = core[2]; d = core[3];
    e = core[4]; f = core[5]; g = core[6]; h = core[7];

    for (size_t t = 0; t < SHA256_ROUNDS; t += 8) {
        sha256_sw_round(t+0, w, &a,&b,&c,&d,&e,&f,&g,&h);
        sha256_sw_round(t+1, w, &h,&a,&b,&c,&d,&e,&f,&g);
        sha256_sw_round(t+2, w, &g,&h,&a,&b,&c,&d,&e,&f);
        sha256_sw_round(t+3, w, &f,&g,&h,&a,&b,&c,&d,&e);
        sha256_sw_round(t+4, w, &e,&f,&g,&h,&a,&b,&c,&d);
        sha256_sw_round(t+5, w, &d,&e,&f,&g,&h,&a,&b,&c);
        sha256_sw_round(t+6, w, &c,&d,&e,&f,&g,&h,&a,&b);
        sha256_sw_round(t+7, w, &b,&c,&d,&e,&f,&g,&h,&a);
    }

    core[0] += a; core[1] += b; core[2] += c; core[3] += d;
    core[4] += e; core[5] += f; core[6] += g; core[7] += h;

    smemclr(w, sizeof(w));
}

typedef struct sha256_sw {
    uint32_t core[8];
    sha256_block blk;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha256_sw;

static void sha256_sw_write(BinarySink *bs, const void *vp, size_t len);

static ssh_hash *sha256_sw_new(const ssh_hashalg *alg)
{
    sha256_sw *s = snew(sha256_sw);

    memcpy(s->core, sha256_initial_state, sizeof(s->core));

    sha256_block_setup(&s->blk);

    s->hash.vt = alg;
    BinarySink_INIT(s, sha256_sw_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static ssh_hash *sha256_sw_copy(ssh_hash *hash)
{
    sha256_sw *s = container_of(hash, sha256_sw, hash);
    sha256_sw *copy = snew(sha256_sw);

    memcpy(copy, s, sizeof(*copy));
    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);

    return &copy->hash;
}

static void sha256_sw_free(ssh_hash *hash)
{
    sha256_sw *s = container_of(hash, sha256_sw, hash);

    smemclr(s, sizeof(*s));
    sfree(s);
}

static void sha256_sw_write(BinarySink *bs, const void *vp, size_t len)
{
    sha256_sw *s = BinarySink_DOWNCAST(bs, sha256_sw);

    while (len > 0)
        if (sha256_block_write(&s->blk, &vp, &len))
            sha256_sw_block(s->core, s->blk.block);
}

static void sha256_sw_final(ssh_hash *hash, uint8_t *digest)
{
    sha256_sw *s = container_of(hash, sha256_sw, hash);

    sha256_block_pad(&s->blk, BinarySink_UPCAST(s));
    for (size_t i = 0; i < 8; i++)
        PUT_32BIT_MSB_FIRST(digest + 4*i, s->core[i]);
    sha256_sw_free(hash);
}

const ssh_hashalg ssh_sha256_sw = {
    sha256_sw_new, sha256_sw_copy, sha256_sw_final, sha256_sw_free,
    32, 64, HASHALG_NAMES_ANNOTATED("SHA-256", "unaccelerated"),
};

/* ----------------------------------------------------------------------
 * Hardware-accelerated implementation of SHA-256 using x86 SHA-NI.
 */

#if HW_SHA256 == HW_SHA256_NI

/*
 * Set target architecture for Clang and GCC
 */
#if defined(__clang__) || defined(__GNUC__)
#    define FUNC_ISA __attribute__ ((target("sse4.1,sha")))
#if !defined(__clang__)
#    pragma GCC target("sha")
#    pragma GCC target("sse4.1")
#endif
#else
#    define FUNC_ISA
#endif

#include <wmmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>
#if defined(__clang__) || defined(__GNUC__)
#include <shaintrin.h>
#endif

#if defined(__clang__) || defined(__GNUC__)

#include <cpuid.h>
#define GET_CPU_ID_0(out) \
    __cpuid(0, (out)[0], (out)[1], (out)[2], (out)[3])
#define GET_CPU_ID_7(out) \
    __cpuid_count(7, 0, (out)[0], (out)[1], (out)[2], (out)[3])

#else

#define GET_CPU_ID_0(out) __cpuid(out, 0)
#define GET_CPU_ID_7(out) __cpuidex(out, 7, 0)

#endif

static bool sha256_hw_available(void)
{
    unsigned int CPUInfo[4];
    GET_CPU_ID_0(CPUInfo);
    if (CPUInfo[0] < 7)
        return false;

    GET_CPU_ID_7(CPUInfo);
    /* SHA extensions flag: CPUID leaf 7, EBX bit 29 */
    return CPUInfo[1] & (1 << 29);
}

/* SHA256 implementation using new instructions
   The code is based on Jeffrey Walton's SHA256 implementation:
   https://github.com/noloader/SHA-Intrinsics
*/
FUNC_ISA
static inline void sha256_ni_block(__m128i *core, const uint8_t *p)
{
    __m128i STATE0, STATE1;
    __m128i MSG, TMP;
    __m128i MSG0, MSG1, MSG2, MSG3;
    const __m128i *block = (const __m128i *)p;
    const __m128i MASK = _mm_set_epi64x(
        0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);
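
    /* MASK drives the _mm_shuffle_epi8 calls below: within each
     * 32-bit lane it reverses the byte order, converting the
     * big-endian message words into the little-endian form the SHA
     * intrinsics expect. */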

    /* Load initial values */
    STATE0 = core[0];
    STATE1 = core[1];

    /* Rounds 0-3 */
    MSG = _mm_loadu_si128(block);
    MSG0 = _mm_shuffle_epi8(MSG, MASK);
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
        0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 4-7 */
    MSG1 = _mm_loadu_si128(block + 1);
    MSG1 = _mm_shuffle_epi8(MSG1, MASK);
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
        0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 8-11 */
    MSG2 = _mm_loadu_si128(block + 2);
    MSG2 = _mm_shuffle_epi8(MSG2, MASK);
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
        0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 12-15 */
    MSG3 = _mm_loadu_si128(block + 3);
    MSG3 = _mm_shuffle_epi8(MSG3, MASK);
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
        0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 16-19 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
        0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 20-23 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
        0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 24-27 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
        0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 28-31 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
        0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 32-35 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
        0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 36-39 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
        0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 40-43 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
        0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 44-47 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
        0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 48-51 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
        0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 52-55 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
        0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 56-59 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
        0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 60-63 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
        0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Combine state */
    core[0] = _mm_add_epi32(STATE0, core[0]);
    core[1] = _mm_add_epi32(STATE1, core[1]);
}

typedef struct sha256_ni {
    /*
     * These two vectors store the 8 words of the SHA-256 state, but
     * not in the same order they appear in the spec: the first word
     * holds A,B,E,F and the second word C,D,G,H.
     */
    __m128i core[2];
    sha256_block blk;
    void *pointer_to_free;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha256_ni;

static void sha256_ni_write(BinarySink *bs, const void *vp, size_t len);

static sha256_ni *sha256_ni_alloc(void)
{
    /*
     * The __m128i variables in the context structure need to be
     * 16-byte aligned, but not all malloc implementations that this
     * code has to work with will guarantee to return a 16-byte
     * aligned pointer. So we over-allocate, manually realign the
     * pointer ourselves, and store the original one inside the
     * context so we know how to free it later.
     */
    void *allocation = smalloc(sizeof(sha256_ni) + 15);
    uintptr_t alloc_address = (uintptr_t)allocation;
    uintptr_t aligned_address = (alloc_address + 15) & ~15;
    sha256_ni *s = (sha256_ni *)aligned_address;
    s->pointer_to_free = allocation;
    return s;
}
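
/*
 * Alignment arithmetic, by example: if smalloc() returned 0x1001,
 * then (0x1001 + 15) & ~15 == 0x1010, the next 16-byte boundary; a
 * pointer that was already aligned is left unchanged. The 15 spare
 * bytes in the allocation guarantee the rounded-up address still
 * lies inside it.
 */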

FUNC_ISA static ssh_hash *sha256_ni_new(const ssh_hashalg *alg)
{
    if (!sha256_hw_available_cached())
        return NULL;

    sha256_ni *s = sha256_ni_alloc();

    /* Initialise the core vectors in their storage order */
    s->core[0] = _mm_set_epi64x(
        0x6a09e667bb67ae85ULL, 0x510e527f9b05688cULL);
    s->core[1] = _mm_set_epi64x(
        0x3c6ef372a54ff53aULL, 0x1f83d9ab5be0cd19ULL);

    sha256_block_setup(&s->blk);

    s->hash.vt = alg;
    BinarySink_INIT(s, sha256_ni_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static ssh_hash *sha256_ni_copy(ssh_hash *hash)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);
    sha256_ni *copy = sha256_ni_alloc();

    void *ptf_save = copy->pointer_to_free;
    *copy = *s; /* structure copy */
    copy->pointer_to_free = ptf_save;

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);

    return &copy->hash;
}

static void sha256_ni_free(ssh_hash *hash)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);

    void *ptf = s->pointer_to_free;
    smemclr(s, sizeof(*s));
    sfree(ptf);
}

static void sha256_ni_write(BinarySink *bs, const void *vp, size_t len)
{
    sha256_ni *s = BinarySink_DOWNCAST(bs, sha256_ni);

    while (len > 0)
        if (sha256_block_write(&s->blk, &vp, &len))
            sha256_ni_block(s->core, s->blk.block);
}

FUNC_ISA static void sha256_ni_final(ssh_hash *hash, uint8_t *digest)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);

    sha256_block_pad(&s->blk, BinarySink_UPCAST(s));

    /* Rearrange the words into the output order */
    __m128i feba = _mm_shuffle_epi32(s->core[0], 0x1B);
    __m128i dchg = _mm_shuffle_epi32(s->core[1], 0xB1);
    __m128i dcba = _mm_blend_epi16(feba, dchg, 0xF0);
    __m128i hgfe = _mm_alignr_epi8(dchg, feba, 8);

    /* Byte-swap them into the output endianness */
    const __m128i mask = _mm_setr_epi8(3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12);
    dcba = _mm_shuffle_epi8(dcba, mask);
    hgfe = _mm_shuffle_epi8(hgfe, mask);

    /* And store them */
    __m128i *output = (__m128i *)digest;
    _mm_storeu_si128(output, dcba);
    _mm_storeu_si128(output+1, hgfe);

    sha256_ni_free(hash);
}

const ssh_hashalg ssh_sha256_hw = {
    sha256_ni_new, sha256_ni_copy, sha256_ni_final, sha256_ni_free,
    32, 64, HASHALG_NAMES_ANNOTATED("SHA-256", "SHA-NI accelerated"),
};

/* ----------------------------------------------------------------------
 * Hardware-accelerated implementation of SHA-256 using Arm NEON.
 */

#elif HW_SHA256 == HW_SHA256_NEON

/*
 * Manually set the target architecture, if we decided above that we
 * need to.
 */
#ifdef USE_CLANG_ATTR_TARGET_AARCH64
/*
 * A spot of cheating: redefine some ACLE feature macros before
 * including arm_neon.h. Otherwise we won't get the SHA intrinsics
 * defined by that header, because it will be looking at the settings
 * for the whole translation unit rather than the ones we're going to
 * put on some particular functions using __attribute__((target)).
 */
#define __ARM_NEON 1
#define __ARM_FEATURE_CRYPTO 1
#define FUNC_ISA __attribute__ ((target("neon,crypto")))
#endif /* USE_CLANG_ATTR_TARGET_AARCH64 */

#ifndef FUNC_ISA
#define FUNC_ISA
#endif

#ifdef USE_ARM64_NEON_H
#include <arm64_neon.h>
#else
#include <arm_neon.h>
#endif

static bool sha256_hw_available(void)
{
    /*
     * For Arm, we delegate to a per-platform detection function (see
     * explanation in sshaes.c).
     */
    return platform_sha256_hw_available();
}

typedef struct sha256_neon_core sha256_neon_core;
struct sha256_neon_core {
    uint32x4_t abcd, efgh;
};

FUNC_ISA
static inline uint32x4_t sha256_neon_load_input(const uint8_t *p)
{
    /* Load 16 bytes and byte-swap each 32-bit word into host order */
    return vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(p)));
}

FUNC_ISA
static inline uint32x4_t sha256_neon_schedule_update(
    uint32x4_t m4, uint32x4_t m3, uint32x4_t m2, uint32x4_t m1)
{
    return vsha256su1q_u32(vsha256su0q_u32(m4, m3), m2, m1);
}

FUNC_ISA
static inline sha256_neon_core sha256_neon_round4(
    sha256_neon_core old, uint32x4_t sched, unsigned round)
{
    sha256_neon_core new;

    uint32x4_t round_input = vaddq_u32(
        sched, vld1q_u32(sha256_round_constants + round));
    new.abcd = vsha256hq_u32 (old.abcd, old.efgh, round_input);
    new.efgh = vsha256h2q_u32(old.efgh, old.abcd, round_input);

    return new;
}
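
/*
 * For reference: vsha256hq_u32 and vsha256h2q_u32 together perform
 * four SHA-256 rounds in one go, one instruction computing the new
 * ABCD half of the state and the other the new EFGH half, taking the
 * schedule-plus-round-constant words as input. That is why the caller
 * below steps 'round' through 0, 4, 8, ..., 60.
 */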

FUNC_ISA
static inline void sha256_neon_block(sha256_neon_core *core, const uint8_t *p)
{
    uint32x4_t s0, s1, s2, s3;
    sha256_neon_core cr = *core;

    s0 = sha256_neon_load_input(p);
    cr = sha256_neon_round4(cr, s0, 0);
    s1 = sha256_neon_load_input(p+16);
    cr = sha256_neon_round4(cr, s1, 4);
    s2 = sha256_neon_load_input(p+32);
    cr = sha256_neon_round4(cr, s2, 8);
    s3 = sha256_neon_load_input(p+48);
    cr = sha256_neon_round4(cr, s3, 12);
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 16);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 20);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 24);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 28);
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 32);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 36);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 40);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 44);
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 48);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 52);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 56);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 60);

    core->abcd = vaddq_u32(core->abcd, cr.abcd);
    core->efgh = vaddq_u32(core->efgh, cr.efgh);
}

typedef struct sha256_neon {
    sha256_neon_core core;
    sha256_block blk;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha256_neon;

static void sha256_neon_write(BinarySink *bs, const void *vp, size_t len);

static ssh_hash *sha256_neon_new(const ssh_hashalg *alg)
{
    if (!sha256_hw_available_cached())
        return NULL;

    sha256_neon *s = snew(sha256_neon);

    s->core.abcd = vld1q_u32(sha256_initial_state);
    s->core.efgh = vld1q_u32(sha256_initial_state + 4);

    sha256_block_setup(&s->blk);

    s->hash.vt = alg;
    BinarySink_INIT(s, sha256_neon_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static ssh_hash *sha256_neon_copy(ssh_hash *hash)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);
    sha256_neon *copy = snew(sha256_neon);

    *copy = *s; /* structure copy */

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);

    return &copy->hash;
}

static void sha256_neon_free(ssh_hash *hash)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);
    smemclr(s, sizeof(*s));
    sfree(s);
}

static void sha256_neon_write(BinarySink *bs, const void *vp, size_t len)
{
    sha256_neon *s = BinarySink_DOWNCAST(bs, sha256_neon);

    while (len > 0)
        if (sha256_block_write(&s->blk, &vp, &len))
            sha256_neon_block(&s->core, s->blk.block);
}

static void sha256_neon_final(ssh_hash *hash, uint8_t *digest)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);

    sha256_block_pad(&s->blk, BinarySink_UPCAST(s));
    vst1q_u8(digest,      vrev32q_u8(vreinterpretq_u8_u32(s->core.abcd)));
    vst1q_u8(digest + 16, vrev32q_u8(vreinterpretq_u8_u32(s->core.efgh)));
    sha256_neon_free(hash);
}

const ssh_hashalg ssh_sha256_hw = {
    sha256_neon_new, sha256_neon_copy, sha256_neon_final, sha256_neon_free,
    32, 64, HASHALG_NAMES_ANNOTATED("SHA-256", "NEON accelerated"),
};

/* ----------------------------------------------------------------------
 * Stub functions if we have no hardware-accelerated SHA-256. In this
 * case, sha256_hw_new returns NULL (though it should also never be
 * selected by sha256_select, so the only thing that should even be
 * _able_ to call it is testcrypt). As a result, the remaining vtable
 * functions should never be called at all.
 */

#elif HW_SHA256 == HW_SHA256_NONE

static bool sha256_hw_available(void)
{
    return false;
}

static ssh_hash *sha256_stub_new(const ssh_hashalg *alg)
{
    return NULL;
}

#define STUB_BODY { unreachable("Should never be called"); }

static ssh_hash *sha256_stub_copy(ssh_hash *hash) STUB_BODY
static void sha256_stub_free(ssh_hash *hash) STUB_BODY
static void sha256_stub_final(ssh_hash *hash, uint8_t *digest) STUB_BODY

const ssh_hashalg ssh_sha256_hw = {
    sha256_stub_new, sha256_stub_copy, sha256_stub_final, sha256_stub_free,
    32, 64, HASHALG_NAMES_ANNOTATED(
        "SHA-256", "!NONEXISTENT ACCELERATED VERSION!"),
};

#endif /* HW_SHA256 */