
/*
 * SHA-1 algorithm as described at
 *
 *   http://csrc.nist.gov/cryptval/shs.html
 */

#include "ssh.h"
#include <assert.h>

/*
 * Start by deciding whether we can support hardware SHA at all.
 */
#define HW_SHA1_NONE 0
#define HW_SHA1_NI 1
#define HW_SHA1_NEON 2

#ifdef _FORCE_SHA_NI
#   define HW_SHA1 HW_SHA1_NI
#elif defined(__clang__)
#   if __has_attribute(target) && __has_include(<wmmintrin.h>) &&       \
    (defined(__x86_64__) || defined(__i386))
#       define HW_SHA1 HW_SHA1_NI
#   endif
#elif defined(__GNUC__)
#   if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \
        (defined(__x86_64__) || defined(__i386))
#       define HW_SHA1 HW_SHA1_NI
#   endif
#elif defined (_MSC_VER)
#   if (defined(_M_X64) || defined(_M_IX86)) && _MSC_FULL_VER >= 150030729
#       define HW_SHA1 HW_SHA1_NI
#   endif
#endif

#ifdef _FORCE_SHA_NEON
#   define HW_SHA1 HW_SHA1_NEON
#elif defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    /* Arm can potentially support both endiannesses, but this code
     * hasn't been tested on anything but little. If anyone wants to
     * run big-endian, they'll need to fix it first. */
#elif defined __ARM_FEATURE_CRYPTO
    /* If the Arm crypto extension is available already, we can
     * support NEON SHA without having to enable anything by hand */
#   define HW_SHA1 HW_SHA1_NEON
#elif defined(__clang__)
#   if __has_attribute(target) && __has_include(<arm_neon.h>) &&       \
    (defined(__aarch64__))
        /* clang can enable the crypto extension in AArch64 using
         * __attribute__((target)) */
#       define HW_SHA1 HW_SHA1_NEON
#       define USE_CLANG_ATTR_TARGET_AARCH64
#   endif
#elif defined _MSC_VER
    /* Visual Studio supports the crypto extension when targeting
     * AArch64, but as of VS2017, the AArch32 header doesn't quite
     * manage it (declaring the shae/shad intrinsics without a round
     * key operand). */
#   if defined _M_ARM64
#       define HW_SHA1 HW_SHA1_NEON
#       define USE_ARM64_NEON_H /* unusual header name in this case */
#   endif
#endif

#if defined _FORCE_SOFTWARE_SHA || !defined HW_SHA1
#   undef HW_SHA1
#   define HW_SHA1 HW_SHA1_NONE
#endif
/*
 * The actual query function that asks if hardware acceleration is
 * available.
 */
static bool sha1_hw_available(void);

/*
 * The top-level selection function, caching the results of
 * sha1_hw_available() so it only has to run once.
 */
static bool sha1_hw_available_cached(void)
{
    static bool initialised = false;
    static bool hw_available;
    if (!initialised) {
        hw_available = sha1_hw_available();
        initialised = true;
    }
    return hw_available;
}

static ssh_hash *sha1_select(const ssh_hashalg *alg)
{
    const ssh_hashalg *real_alg =
        sha1_hw_available_cached() ? &ssh_sha1_hw : &ssh_sha1_sw;

    return ssh_hash_new(real_alg);
}

const ssh_hashalg ssh_sha1 = {
    sha1_select, NULL, NULL, NULL,
    20, 64, HASHALG_NAMES_ANNOTATED("SHA-1", "dummy selector vtable"),
};

/* ----------------------------------------------------------------------
 * Definitions likely to be helpful to multiple implementations.
 */

static const uint32_t sha1_initial_state[] = {
    0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0,
};

#define SHA1_ROUNDS_PER_STAGE 20
#define SHA1_STAGE0_CONSTANT 0x5a827999
#define SHA1_STAGE1_CONSTANT 0x6ed9eba1
#define SHA1_STAGE2_CONSTANT 0x8f1bbcdc
#define SHA1_STAGE3_CONSTANT 0xca62c1d6
#define SHA1_ROUNDS (4 * SHA1_ROUNDS_PER_STAGE)

typedef struct sha1_block sha1_block;
struct sha1_block {
    uint8_t block[64];
    size_t used;
    uint64_t len;
};

static inline void sha1_block_setup(sha1_block *blk)
{
    blk->used = 0;
    blk->len = 0;
}

static inline bool sha1_block_write(
    sha1_block *blk, const void **vdata, size_t *len)
{
    size_t blkleft = sizeof(blk->block) - blk->used;
    size_t chunk = *len < blkleft ? *len : blkleft;

    const uint8_t *p = *vdata;
    memcpy(blk->block + blk->used, p, chunk);
    *vdata = p + chunk;
    *len -= chunk;
    blk->used += chunk;
    blk->len += chunk;

    if (blk->used == sizeof(blk->block)) {
        blk->used = 0;
        return true;
    }

    return false;
}

static inline void sha1_block_pad(sha1_block *blk, BinarySink *bs)
{
    uint64_t final_len = blk->len << 3;
    size_t pad = 1 + (63 & (55 - blk->used));

    put_byte(bs, 0x80);
    for (size_t i = 1; i < pad; i++)
        put_byte(bs, 0);
    put_uint64(bs, final_len);

    assert(blk->used == 0 && "Should have exactly hit a block boundary");
}
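
/*
 * A worked example of the padding arithmetic above: with blk->used
 * == 55, pad == 1, so the 0x80 byte plus the 8-byte length fill the
 * block exactly (55 + 1 + 8 == 64); with blk->used == 56 the length
 * no longer fits, pad == 64, and the padding spills into a second
 * block (56 + 64 + 8 == 128). Either way, the put_* writes are fed
 * back through the hash's own BinarySink, which is what lets the
 * assert check that we ended exactly on a block boundary.
 */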
/* ----------------------------------------------------------------------
 * Software implementation of SHA-1.
 */

static inline uint32_t rol(uint32_t x, unsigned y)
{
    return (x << (31 & y)) | (x >> (31 & -y));
}
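
/*
 * The "31 &" masking above keeps both shift counts in the range
 * 0..31, so even y == 0 is well-defined (x shifted by 0 in both
 * directions gives back x), avoiding the undefined behaviour of a
 * 32-bit shift; compilers still recognise the idiom and emit a
 * single rotate instruction.
 */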

static inline uint32_t Ch(uint32_t ctrl, uint32_t if1, uint32_t if0)
{
    return if0 ^ (ctrl & (if1 ^ if0));
}

static inline uint32_t Maj(uint32_t x, uint32_t y, uint32_t z)
{
    return (x & y) | (z & (x | y));
}

static inline uint32_t Par(uint32_t x, uint32_t y, uint32_t z)
{
    return (x ^ y ^ z);
}
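
/*
 * Ch, Maj and Par are the three SHA-1 round functions from FIPS
 * 180-4 (there named Ch, Maj and Parity). Ch is written in
 * bitwise-multiplexer form: if0 ^ (ctrl & (if1 ^ if0)) computes the
 * same function as (ctrl & if1) | (~ctrl & if0), in one fewer
 * operation.
 */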

static inline void sha1_sw_round(
    unsigned round_index, const uint32_t *schedule,
    uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d, uint32_t *e,
    uint32_t f, uint32_t constant)
{
    *e = rol(*a, 5) + f + *e + schedule[round_index] + constant;
    *b = rol(*b, 30);
}

static void sha1_sw_block(uint32_t *core, const uint8_t *block)
{
    uint32_t w[SHA1_ROUNDS];
    uint32_t a,b,c,d,e;

    for (size_t t = 0; t < 16; t++)
        w[t] = GET_32BIT_MSB_FIRST(block + 4*t);

    for (size_t t = 16; t < SHA1_ROUNDS; t++)
        w[t] = rol(w[t - 3] ^ w[t - 8] ^ w[t - 14] ^ w[t - 16], 1);

    a = core[0]; b = core[1]; c = core[2]; d = core[3];
    e = core[4];

    size_t t = 0;
    for (size_t u = 0; u < SHA1_ROUNDS_PER_STAGE/5; u++) {
        sha1_sw_round(t++,w, &a,&b,&c,&d,&e, Ch(b,c,d), SHA1_STAGE0_CONSTANT);
        sha1_sw_round(t++,w, &e,&a,&b,&c,&d, Ch(a,b,c), SHA1_STAGE0_CONSTANT);
        sha1_sw_round(t++,w, &d,&e,&a,&b,&c, Ch(e,a,b), SHA1_STAGE0_CONSTANT);
        sha1_sw_round(t++,w, &c,&d,&e,&a,&b, Ch(d,e,a), SHA1_STAGE0_CONSTANT);
        sha1_sw_round(t++,w, &b,&c,&d,&e,&a, Ch(c,d,e), SHA1_STAGE0_CONSTANT);
    }
    for (size_t u = 0; u < SHA1_ROUNDS_PER_STAGE/5; u++) {
        sha1_sw_round(t++,w, &a,&b,&c,&d,&e, Par(b,c,d), SHA1_STAGE1_CONSTANT);
        sha1_sw_round(t++,w, &e,&a,&b,&c,&d, Par(a,b,c), SHA1_STAGE1_CONSTANT);
        sha1_sw_round(t++,w, &d,&e,&a,&b,&c, Par(e,a,b), SHA1_STAGE1_CONSTANT);
        sha1_sw_round(t++,w, &c,&d,&e,&a,&b, Par(d,e,a), SHA1_STAGE1_CONSTANT);
        sha1_sw_round(t++,w, &b,&c,&d,&e,&a, Par(c,d,e), SHA1_STAGE1_CONSTANT);
    }
    for (size_t u = 0; u < SHA1_ROUNDS_PER_STAGE/5; u++) {
        sha1_sw_round(t++,w, &a,&b,&c,&d,&e, Maj(b,c,d), SHA1_STAGE2_CONSTANT);
        sha1_sw_round(t++,w, &e,&a,&b,&c,&d, Maj(a,b,c), SHA1_STAGE2_CONSTANT);
        sha1_sw_round(t++,w, &d,&e,&a,&b,&c, Maj(e,a,b), SHA1_STAGE2_CONSTANT);
        sha1_sw_round(t++,w, &c,&d,&e,&a,&b, Maj(d,e,a), SHA1_STAGE2_CONSTANT);
        sha1_sw_round(t++,w, &b,&c,&d,&e,&a, Maj(c,d,e), SHA1_STAGE2_CONSTANT);
    }
    for (size_t u = 0; u < SHA1_ROUNDS_PER_STAGE/5; u++) {
        sha1_sw_round(t++,w, &a,&b,&c,&d,&e, Par(b,c,d), SHA1_STAGE3_CONSTANT);
        sha1_sw_round(t++,w, &e,&a,&b,&c,&d, Par(a,b,c), SHA1_STAGE3_CONSTANT);
        sha1_sw_round(t++,w, &d,&e,&a,&b,&c, Par(e,a,b), SHA1_STAGE3_CONSTANT);
        sha1_sw_round(t++,w, &c,&d,&e,&a,&b, Par(d,e,a), SHA1_STAGE3_CONSTANT);
        sha1_sw_round(t++,w, &b,&c,&d,&e,&a, Par(c,d,e), SHA1_STAGE3_CONSTANT);
    }

    core[0] += a; core[1] += b; core[2] += c; core[3] += d; core[4] += e;

    smemclr(w, sizeof(w));
}

typedef struct sha1_sw {
    uint32_t core[5];
    sha1_block blk;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha1_sw;

static void sha1_sw_write(BinarySink *bs, const void *vp, size_t len);

static ssh_hash *sha1_sw_new(const ssh_hashalg *alg)
{
    sha1_sw *s = snew(sha1_sw);

    memcpy(s->core, sha1_initial_state, sizeof(s->core));

    sha1_block_setup(&s->blk);

    s->hash.vt = alg;
    BinarySink_INIT(s, sha1_sw_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static ssh_hash *sha1_sw_copy(ssh_hash *hash)
{
    sha1_sw *s = container_of(hash, sha1_sw, hash);
    sha1_sw *copy = snew(sha1_sw);

    memcpy(copy, s, sizeof(*copy));
    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);

    return &copy->hash;
}

static void sha1_sw_free(ssh_hash *hash)
{
    sha1_sw *s = container_of(hash, sha1_sw, hash);

    smemclr(s, sizeof(*s));
    sfree(s);
}

static void sha1_sw_write(BinarySink *bs, const void *vp, size_t len)
{
    sha1_sw *s = BinarySink_DOWNCAST(bs, sha1_sw);

    while (len > 0)
        if (sha1_block_write(&s->blk, &vp, &len))
            sha1_sw_block(s->core, s->blk.block);
}

static void sha1_sw_final(ssh_hash *hash, uint8_t *digest)
{
    sha1_sw *s = container_of(hash, sha1_sw, hash);

    sha1_block_pad(&s->blk, BinarySink_UPCAST(s));
    for (size_t i = 0; i < 5; i++)
        PUT_32BIT_MSB_FIRST(digest + 4*i, s->core[i]);
    sha1_sw_free(hash);
}

const ssh_hashalg ssh_sha1_sw = {
    sha1_sw_new, sha1_sw_copy, sha1_sw_final, sha1_sw_free,
    20, 64, HASHALG_NAMES_ANNOTATED("SHA-1", "unaccelerated"),
};
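
/*
 * A minimal sketch of how a caller drives this vtable, assuming the
 * ssh_hash and BinarySink wrapper macros declared in ssh.h
 * (ssh_hash_new, put_data, ssh_hash_final):
 *
 *     uint8_t digest[20];
 *     ssh_hash *h = ssh_hash_new(&ssh_sha1); // selects hw or sw
 *     put_data(h, data, len);                // routed to the write fn
 *     ssh_hash_final(h, digest);             // pads, outputs, frees h
 */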

/* ----------------------------------------------------------------------
 * Hardware-accelerated implementation of SHA-1 using x86 SHA-NI.
 */

#if HW_SHA1 == HW_SHA1_NI

/*
 * Set target architecture for Clang and GCC
 */
#if defined(__clang__) || defined(__GNUC__)
#    define FUNC_ISA __attribute__ ((target("sse4.1,sha")))
#if !defined(__clang__)
#    pragma GCC target("sha")
#    pragma GCC target("sse4.1")
#endif
#else
#    define FUNC_ISA
#endif

#include <wmmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>
#if defined(__clang__) || defined(__GNUC__)
#include <shaintrin.h>
#endif

#if defined(__clang__) || defined(__GNUC__)
#include <cpuid.h>
#define GET_CPU_ID_0(out) \
    __cpuid(0, (out)[0], (out)[1], (out)[2], (out)[3])
#define GET_CPU_ID_7(out) \
    __cpuid_count(7, 0, (out)[0], (out)[1], (out)[2], (out)[3])
#else
#define GET_CPU_ID_0(out) __cpuid(out, 0)
#define GET_CPU_ID_7(out) __cpuidex(out, 7, 0)
#endif

static bool sha1_hw_available(void)
{
    unsigned int CPUInfo[4];
    GET_CPU_ID_0(CPUInfo);
    if (CPUInfo[0] < 7)
        return false;

    GET_CPU_ID_7(CPUInfo);
    return CPUInfo[1] & (1 << 29); /* Check SHA */
}
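
/*
 * (That feature test is CPUID leaf 7, subleaf 0: the SHA extensions
 * bit is bit 29 of EBX, which both GET_CPU_ID_7 variants return as
 * element 1 of the output array. Leaf 0 is queried first only to
 * confirm that leaf 7 exists at all.)
 */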

/* SHA1 implementation using new instructions
   The code is based on Jeffrey Walton's SHA1 implementation:
   https://github.com/noloader/SHA-Intrinsics
*/
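
/*
 * Shape of the function below: each _mm_sha1rnds4_epu32 call performs
 * four SHA-1 rounds, with its immediate operand (0-3) selecting the
 * round function and constant for the current stage of 20 rounds;
 * _mm_sha1nexte_epu32 folds the rotated 'e' value into the next four
 * schedule words; and the _mm_sha1msg1_epu32 / _mm_sha1msg2_epu32 /
 * XOR combinations compute the schedule expansion
 * w[t] = rol(w[t-3] ^ w[t-8] ^ w[t-14] ^ w[t-16], 1) four words at a
 * time.
 */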
FUNC_ISA
static inline void sha1_ni_block(__m128i *core, const uint8_t *p)
{
    __m128i ABCD, E0, E1, MSG0, MSG1, MSG2, MSG3;
    const __m128i MASK = _mm_set_epi64x(
        0x0001020304050607ULL, 0x08090a0b0c0d0e0fULL);

    const __m128i *block = (const __m128i *)p;

    /* Load initial values */
    ABCD = core[0];
    E0 = core[1];

    /* Rounds 0-3 */
    MSG0 = _mm_loadu_si128(block);
    MSG0 = _mm_shuffle_epi8(MSG0, MASK);
    E0 = _mm_add_epi32(E0, MSG0);
    E1 = ABCD;
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);

    /* Rounds 4-7 */
    MSG1 = _mm_loadu_si128(block + 1);
    MSG1 = _mm_shuffle_epi8(MSG1, MASK);
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
    MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);

    /* Rounds 8-11 */
    MSG2 = _mm_loadu_si128(block + 2);
    MSG2 = _mm_shuffle_epi8(MSG2, MASK);
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
    MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
    MSG0 = _mm_xor_si128(MSG0, MSG2);

    /* Rounds 12-15 */
    MSG3 = _mm_loadu_si128(block + 3);
    MSG3 = _mm_shuffle_epi8(MSG3, MASK);
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
    MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
    MSG1 = _mm_xor_si128(MSG1, MSG3);

    /* Rounds 16-19 */
    E0 = _mm_sha1nexte_epu32(E0, MSG0);
    E1 = ABCD;
    MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
    MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
    MSG2 = _mm_xor_si128(MSG2, MSG0);

    /* Rounds 20-23 */
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
    MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
    MSG3 = _mm_xor_si128(MSG3, MSG1);

    /* Rounds 24-27 */
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
    MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
    MSG0 = _mm_xor_si128(MSG0, MSG2);

    /* Rounds 28-31 */
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
    MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
    MSG1 = _mm_xor_si128(MSG1, MSG3);

    /* Rounds 32-35 */
    E0 = _mm_sha1nexte_epu32(E0, MSG0);
    E1 = ABCD;
    MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
    MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
    MSG2 = _mm_xor_si128(MSG2, MSG0);

    /* Rounds 36-39 */
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
    MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
    MSG3 = _mm_xor_si128(MSG3, MSG1);

    /* Rounds 40-43 */
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
    MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
    MSG0 = _mm_xor_si128(MSG0, MSG2);

    /* Rounds 44-47 */
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
    MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
    MSG1 = _mm_xor_si128(MSG1, MSG3);

    /* Rounds 48-51 */
    E0 = _mm_sha1nexte_epu32(E0, MSG0);
    E1 = ABCD;
    MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
    MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
    MSG2 = _mm_xor_si128(MSG2, MSG0);

    /* Rounds 52-55 */
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
    MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
    MSG3 = _mm_xor_si128(MSG3, MSG1);

    /* Rounds 56-59 */
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
    MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
    MSG0 = _mm_xor_si128(MSG0, MSG2);

    /* Rounds 60-63 */
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
    MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
    MSG1 = _mm_xor_si128(MSG1, MSG3);

    /* Rounds 64-67 */
    E0 = _mm_sha1nexte_epu32(E0, MSG0);
    E1 = ABCD;
    MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
    MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
    MSG2 = _mm_xor_si128(MSG2, MSG0);

    /* Rounds 68-71 */
    E1 = _mm_sha1nexte_epu32(E1, MSG1);
    E0 = ABCD;
    MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
    MSG3 = _mm_xor_si128(MSG3, MSG1);

    /* Rounds 72-75 */
    E0 = _mm_sha1nexte_epu32(E0, MSG2);
    E1 = ABCD;
    MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
    ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);

    /* Rounds 76-79 */
    E1 = _mm_sha1nexte_epu32(E1, MSG3);
    E0 = ABCD;
    ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);

    /* Combine state */
    core[0] = _mm_add_epi32(ABCD, core[0]);
    core[1] = _mm_sha1nexte_epu32(E0, core[1]);
}

typedef struct sha1_ni {
    /*
     * core[0] stores the first four words of the SHA-1 state. core[1]
     * stores just the fifth word, in the vector lane at the highest
     * address.
     */
    __m128i core[2];
    sha1_block blk;
    void *pointer_to_free;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha1_ni;

static void sha1_ni_write(BinarySink *bs, const void *vp, size_t len);

static sha1_ni *sha1_ni_alloc(void)
{
    /*
     * The __m128i variables in the context structure need to be
     * 16-byte aligned, but not all malloc implementations that this
     * code has to work with will guarantee to return a 16-byte
     * aligned pointer. So we over-allocate, manually realign the
     * pointer ourselves, and store the original one inside the
     * context so we know how to free it later.
     */
    void *allocation = smalloc(sizeof(sha1_ni) + 15);
    uintptr_t alloc_address = (uintptr_t)allocation;
    uintptr_t aligned_address = (alloc_address + 15) & ~15;
    sha1_ni *s = (sha1_ni *)aligned_address;
    s->pointer_to_free = allocation;
    return s;
}
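
/*
 * ((addr + 15) & ~15) rounds the address up to the next multiple of
 * 16, so the realigned pointer is displaced by at most the 15 spare
 * bytes added to the allocation above.
 */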

FUNC_ISA static ssh_hash *sha1_ni_new(const ssh_hashalg *alg)
{
    if (!sha1_hw_available_cached())
        return NULL;

    sha1_ni *s = sha1_ni_alloc();

    /* Initialise the core vectors in their storage order */
    s->core[0] = _mm_set_epi64x(
        0x67452301efcdab89ULL, 0x98badcfe10325476ULL);
    s->core[1] = _mm_set_epi32(0xc3d2e1f0, 0, 0, 0);

    sha1_block_setup(&s->blk);

    s->hash.vt = alg;
    BinarySink_INIT(s, sha1_ni_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static ssh_hash *sha1_ni_copy(ssh_hash *hash)
{
    sha1_ni *s = container_of(hash, sha1_ni, hash);
    sha1_ni *copy = sha1_ni_alloc();

    void *ptf_save = copy->pointer_to_free;
    *copy = *s; /* structure copy */
    copy->pointer_to_free = ptf_save;

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);

    return &copy->hash;
}

static void sha1_ni_free(ssh_hash *hash)
{
    sha1_ni *s = container_of(hash, sha1_ni, hash);

    void *ptf = s->pointer_to_free;
    smemclr(s, sizeof(*s));
    sfree(ptf);
}

static void sha1_ni_write(BinarySink *bs, const void *vp, size_t len)
{
    sha1_ni *s = BinarySink_DOWNCAST(bs, sha1_ni);

    while (len > 0)
        if (sha1_block_write(&s->blk, &vp, &len))
            sha1_ni_block(s->core, s->blk.block);
}

FUNC_ISA static void sha1_ni_final(ssh_hash *hash, uint8_t *digest)
{
    sha1_ni *s = container_of(hash, sha1_ni, hash);

    sha1_block_pad(&s->blk, BinarySink_UPCAST(s));

    /* Rearrange the first vector into its output order */
    __m128i abcd = _mm_shuffle_epi32(s->core[0], 0x1B);

    /* Byte-swap it into the output endianness */
    const __m128i mask = _mm_setr_epi8(3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12);
    abcd = _mm_shuffle_epi8(abcd, mask);

    /* And store it */
    _mm_storeu_si128((__m128i *)digest, abcd);

    /* Finally, store the leftover word */
    uint32_t e = _mm_extract_epi32(s->core[1], 3);
    PUT_32BIT_MSB_FIRST(digest + 16, e);

    sha1_ni_free(hash);
}

const ssh_hashalg ssh_sha1_hw = {
    sha1_ni_new, sha1_ni_copy, sha1_ni_final, sha1_ni_free,
    20, 64, HASHALG_NAMES_ANNOTATED("SHA-1", "SHA-NI accelerated"),
};

/* ----------------------------------------------------------------------
 * Hardware-accelerated implementation of SHA-1 using Arm NEON.
 */

#elif HW_SHA1 == HW_SHA1_NEON

/*
 * Manually set the target architecture, if we decided above that we
 * need to.
 */
#ifdef USE_CLANG_ATTR_TARGET_AARCH64
/*
 * A spot of cheating: redefine some ACLE feature macros before
 * including arm_neon.h. Otherwise we won't get the SHA intrinsics
 * defined by that header, because it will be looking at the settings
 * for the whole translation unit rather than the ones we're going to
 * put on some particular functions using __attribute__((target)).
 */
#define __ARM_NEON 1
#define __ARM_FEATURE_CRYPTO 1
#define FUNC_ISA __attribute__ ((target("neon,crypto")))
#endif /* USE_CLANG_ATTR_TARGET_AARCH64 */

#ifndef FUNC_ISA
#define FUNC_ISA
#endif

#ifdef USE_ARM64_NEON_H
#include <arm64_neon.h>
#else
#include <arm_neon.h>
#endif

static bool sha1_hw_available(void)
{
    /*
     * For Arm, we delegate to a per-platform detection function (see
     * explanation in sshaes.c).
     */
    return platform_sha1_hw_available();
}

typedef struct sha1_neon_core sha1_neon_core;
struct sha1_neon_core {
    uint32x4_t abcd;
    uint32_t e;
};

FUNC_ISA
static inline uint32x4_t sha1_neon_load_input(const uint8_t *p)
{
    return vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(p)));
}

FUNC_ISA
static inline uint32x4_t sha1_neon_schedule_update(
    uint32x4_t m4, uint32x4_t m3, uint32x4_t m2, uint32x4_t m1)
{
    return vsha1su1q_u32(vsha1su0q_u32(m4, m3, m2), m1);
}

/*
 * SHA-1 has three different kinds of round, differing in whether they
 * use the Ch, Maj or Par functions defined above. Each one uses a
 * separate NEON instruction, so we define three inline functions for
 * the different round types using this macro.
 *
 * The two batches of Par-type rounds also use a different constant,
 * but that's passed in as an operand, so we don't need a fourth
 * inline function just for that.
 */
#define SHA1_NEON_ROUND_FN(type)                                        \
    FUNC_ISA static inline sha1_neon_core sha1_neon_round4_##type(      \
        sha1_neon_core old, uint32x4_t sched, uint32x4_t constant)      \
    {                                                                   \
        sha1_neon_core new;                                             \
        uint32x4_t round_input = vaddq_u32(sched, constant);            \
        new.abcd = vsha1##type##q_u32(old.abcd, old.e, round_input);    \
        new.e = vsha1h_u32(vget_lane_u32(vget_low_u32(old.abcd), 0));   \
        return new;                                                     \
    }
SHA1_NEON_ROUND_FN(c)
SHA1_NEON_ROUND_FN(p)
SHA1_NEON_ROUND_FN(m)
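
/*
 * The c/p/m suffixes select the vsha1cq_u32, vsha1pq_u32 and
 * vsha1mq_u32 intrinsics, i.e. the Ch-, Par- and Maj-type round
 * instructions; each call performs four rounds at once. vsha1h_u32
 * rotates the old 'a' (lane 0 of abcd) left by 30 bits, producing
 * the value that serves as 'e' for the next four-round batch.
 */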

FUNC_ISA
static inline void sha1_neon_block(sha1_neon_core *core, const uint8_t *p)
{
    uint32x4_t constant, s0, s1, s2, s3;
    sha1_neon_core cr = *core;

    constant = vdupq_n_u32(SHA1_STAGE0_CONSTANT);
    s0 = sha1_neon_load_input(p);
    cr = sha1_neon_round4_c(cr, s0, constant);
    s1 = sha1_neon_load_input(p + 16);
    cr = sha1_neon_round4_c(cr, s1, constant);
    s2 = sha1_neon_load_input(p + 32);
    cr = sha1_neon_round4_c(cr, s2, constant);
    s3 = sha1_neon_load_input(p + 48);
    cr = sha1_neon_round4_c(cr, s3, constant);
    s0 = sha1_neon_schedule_update(s0, s1, s2, s3);
    cr = sha1_neon_round4_c(cr, s0, constant);

    constant = vdupq_n_u32(SHA1_STAGE1_CONSTANT);
    s1 = sha1_neon_schedule_update(s1, s2, s3, s0);
    cr = sha1_neon_round4_p(cr, s1, constant);
    s2 = sha1_neon_schedule_update(s2, s3, s0, s1);
    cr = sha1_neon_round4_p(cr, s2, constant);
    s3 = sha1_neon_schedule_update(s3, s0, s1, s2);
    cr = sha1_neon_round4_p(cr, s3, constant);
    s0 = sha1_neon_schedule_update(s0, s1, s2, s3);
    cr = sha1_neon_round4_p(cr, s0, constant);
    s1 = sha1_neon_schedule_update(s1, s2, s3, s0);
    cr = sha1_neon_round4_p(cr, s1, constant);

    constant = vdupq_n_u32(SHA1_STAGE2_CONSTANT);
    s2 = sha1_neon_schedule_update(s2, s3, s0, s1);
    cr = sha1_neon_round4_m(cr, s2, constant);
    s3 = sha1_neon_schedule_update(s3, s0, s1, s2);
    cr = sha1_neon_round4_m(cr, s3, constant);
    s0 = sha1_neon_schedule_update(s0, s1, s2, s3);
    cr = sha1_neon_round4_m(cr, s0, constant);
    s1 = sha1_neon_schedule_update(s1, s2, s3, s0);
    cr = sha1_neon_round4_m(cr, s1, constant);
    s2 = sha1_neon_schedule_update(s2, s3, s0, s1);
    cr = sha1_neon_round4_m(cr, s2, constant);

    constant = vdupq_n_u32(SHA1_STAGE3_CONSTANT);
    s3 = sha1_neon_schedule_update(s3, s0, s1, s2);
    cr = sha1_neon_round4_p(cr, s3, constant);
    s0 = sha1_neon_schedule_update(s0, s1, s2, s3);
    cr = sha1_neon_round4_p(cr, s0, constant);
    s1 = sha1_neon_schedule_update(s1, s2, s3, s0);
    cr = sha1_neon_round4_p(cr, s1, constant);
    s2 = sha1_neon_schedule_update(s2, s3, s0, s1);
    cr = sha1_neon_round4_p(cr, s2, constant);
    s3 = sha1_neon_schedule_update(s3, s0, s1, s2);
    cr = sha1_neon_round4_p(cr, s3, constant);

    core->abcd = vaddq_u32(core->abcd, cr.abcd);
    core->e += cr.e;
}

typedef struct sha1_neon {
    sha1_neon_core core;
    sha1_block blk;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha1_neon;

static void sha1_neon_write(BinarySink *bs, const void *vp, size_t len);

static ssh_hash *sha1_neon_new(const ssh_hashalg *alg)
{
    if (!sha1_hw_available_cached())
        return NULL;

    sha1_neon *s = snew(sha1_neon);

    s->core.abcd = vld1q_u32(sha1_initial_state);
    s->core.e = sha1_initial_state[4];

    sha1_block_setup(&s->blk);

    s->hash.vt = alg;
    BinarySink_INIT(s, sha1_neon_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static ssh_hash *sha1_neon_copy(ssh_hash *hash)
{
    sha1_neon *s = container_of(hash, sha1_neon, hash);
    sha1_neon *copy = snew(sha1_neon);

    *copy = *s; /* structure copy */

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);

    return &copy->hash;
}

static void sha1_neon_free(ssh_hash *hash)
{
    sha1_neon *s = container_of(hash, sha1_neon, hash);
    smemclr(s, sizeof(*s));
    sfree(s);
}

static void sha1_neon_write(BinarySink *bs, const void *vp, size_t len)
{
    sha1_neon *s = BinarySink_DOWNCAST(bs, sha1_neon);

    while (len > 0)
        if (sha1_block_write(&s->blk, &vp, &len))
            sha1_neon_block(&s->core, s->blk.block);
}

static void sha1_neon_final(ssh_hash *hash, uint8_t *digest)
{
    sha1_neon *s = container_of(hash, sha1_neon, hash);

    sha1_block_pad(&s->blk, BinarySink_UPCAST(s));
    vst1q_u8(digest, vrev32q_u8(vreinterpretq_u8_u32(s->core.abcd)));
    PUT_32BIT_MSB_FIRST(digest + 16, s->core.e);
    sha1_neon_free(hash);
}

const ssh_hashalg ssh_sha1_hw = {
    sha1_neon_new, sha1_neon_copy, sha1_neon_final, sha1_neon_free,
    20, 64, HASHALG_NAMES_ANNOTATED("SHA-1", "NEON accelerated"),
};

/* ----------------------------------------------------------------------
 * Stub functions if we have no hardware-accelerated SHA-1. In this
 * case, sha1_hw_new returns NULL (though it should also never be
 * selected by sha1_select, so the only thing that should even be
 * _able_ to call it is testcrypt). As a result, the remaining vtable
 * functions should never be called at all.
 */

#elif HW_SHA1 == HW_SHA1_NONE

static bool sha1_hw_available(void)
{
    return false;
}

static ssh_hash *sha1_stub_new(const ssh_hashalg *alg)
{
    return NULL;
}

#define STUB_BODY { unreachable("Should never be called"); }

static ssh_hash *sha1_stub_copy(ssh_hash *hash) STUB_BODY
static void sha1_stub_free(ssh_hash *hash) STUB_BODY
static void sha1_stub_final(ssh_hash *hash, uint8_t *digest) STUB_BODY

const ssh_hashalg ssh_sha1_hw = {
    sha1_stub_new, sha1_stub_copy, sha1_stub_final, sha1_stub_free,
    20, 64, HASHALG_NAMES_ANNOTATED(
        "SHA-1", "!NONEXISTENT ACCELERATED VERSION!"),
};

#endif /* HW_SHA1 */