sshsh256.c

/*
 * SHA-256 algorithm as described at
 *
 *   http://csrc.nist.gov/cryptval/shs.html
 */

#include "ssh.h"
#include <assert.h>

/*
 * Start by deciding whether we can support hardware SHA at all.
 */
#define HW_SHA256_NONE 0
#define HW_SHA256_NI 1
#define HW_SHA256_NEON 2

#ifdef _FORCE_SHA_NI
#   define HW_SHA256 HW_SHA256_NI
#elif defined(__clang__)
#   if __has_attribute(target) && __has_include(<wmmintrin.h>) &&       \
    (defined(__x86_64__) || defined(__i386))
#       define HW_SHA256 HW_SHA256_NI
#   endif
#elif defined(__GNUC__)
#   if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)) && \
        (defined(__x86_64__) || defined(__i386))
#       define HW_SHA256 HW_SHA256_NI
#   endif
#elif defined (_MSC_VER)
#   if (defined(_M_X64) || defined(_M_IX86)) && _MSC_FULL_VER >= 150030729
#       define HW_SHA256 HW_SHA256_NI
#   endif
#endif
// Should be (almost) working (when set to HW_SHA256_NI), but we do not
// have hardware to test this on. _mm_setr_epi8 needs replacing, and
// there is also an objconv warning about alignment.
// Restore the "unaccelerated" annotation (on ssh_sha256_sw below) when
// re-enabling this.
#undef HW_SHA256
#ifdef _FORCE_SHA_NEON
#   define HW_SHA256 HW_SHA256_NEON
#elif defined __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    /* Arm can potentially support both endiannesses, but this code
     * hasn't been tested on anything but little. If anyone wants to
     * run big-endian, they'll need to fix it first. */
#elif defined __ARM_FEATURE_CRYPTO
    /* If the Arm crypto extension is available already, we can
     * support NEON SHA without having to enable anything by hand */
#   define HW_SHA256 HW_SHA256_NEON
#elif defined(__clang__)
#   if __has_attribute(target) && __has_include(<arm_neon.h>) && \
    (defined(__aarch64__))
        /* clang can enable the crypto extension in AArch64 using
         * __attribute__((target)) */
#       define HW_SHA256 HW_SHA256_NEON
#       define USE_CLANG_ATTR_TARGET_AARCH64
#   endif
#elif defined _MSC_VER
    /* Visual Studio supports the crypto extension when targeting
     * AArch64, but as of VS2017, the AArch32 header doesn't quite
     * manage it (declaring the shae/shad intrinsics without a round
     * key operand). */
#   if defined _M_ARM64
#       define HW_SHA256 HW_SHA256_NEON
#       define USE_ARM64_NEON_H /* unusual header name in this case */
#   endif
#endif
#if defined _FORCE_SOFTWARE_SHA || !defined HW_SHA256
#   undef HW_SHA256
#   define HW_SHA256 HW_SHA256_NONE
#endif
#ifndef WINSCP_VS

/*
 * The actual query function that asks if hardware acceleration is
 * available.
 */
bool sha256_hw_available(void);

/*
 * The top-level selection function, caching the results of
 * sha256_hw_available() so it only has to run once.
 */
/*WINSCP static*/ bool sha256_hw_available_cached(void)
{
    static bool initialised = false;
    static bool hw_available;
    if (!initialised) {
        hw_available = sha256_hw_available();
        initialised = true;
    }
    return hw_available;
}

static ssh_hash *sha256_select(const ssh_hashalg *alg)
{
    const ssh_hashalg *real_alg =
        sha256_hw_available_cached() ? &ssh_sha256_hw : &ssh_sha256_sw;

    return ssh_hash_new(real_alg);
}

const ssh_hashalg ssh_sha256 = {
    // WINSCP
    /*.new =*/ sha256_select,
    /*.reset =*/ NULL,
    /*.copyfrom =*/ NULL,
    /*.digest =*/ NULL,
    /*.free =*/ NULL,
    /*.hlen =*/ 32,
    /*.blocklen =*/ 64,
    HASHALG_NAMES_ANNOTATED("SHA-256", "dummy selector vtable"),
    NULL,
};

#else

bool sha256_hw_available_cached(void);

#endif
/* ----------------------------------------------------------------------
 * Definitions likely to be helpful to multiple implementations.
 */

static const uint32_t sha256_initial_state[] = {
    0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
    0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
};

static const uint32_t sha256_round_constants[] = {
    0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
    0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
    0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
    0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
    0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
    0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
    0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
    0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
    0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
    0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
    0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
    0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
    0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
    0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
    0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
    0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2,
};

#define SHA256_ROUNDS 64

typedef struct sha256_block sha256_block;
struct sha256_block {
    uint8_t block[64];
    size_t used;
    uint64_t len;
};
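/*
 * In the structure above, 'used' tracks how much of the current
 * 64-byte block has been filled, while 'len' accumulates the total
 * byte count of the whole message, which the final padding step
 * encodes (as a bit count) at the end of the data.
 */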
static inline void sha256_block_setup(sha256_block *blk)
{
    blk->used = 0;
    blk->len = 0;
}
#ifdef WINSCP_VS

/*WINSCP static inline*/ bool sha256_block_write(
    sha256_block *blk, const void **vdata, size_t *len)
{
    size_t blkleft = sizeof(blk->block) - blk->used;
    size_t chunk = *len < blkleft ? *len : blkleft;

    const uint8_t *p = *vdata;
    memcpy(blk->block + blk->used, p, chunk);
    *vdata = p + chunk;
    *len -= chunk;
    blk->used += chunk;
    blk->len += chunk;

    if (blk->used == sizeof(blk->block)) {
        blk->used = 0;
        return true;
    }

    return false;
}

/*WINSCP static inline*/ void sha256_block_pad(sha256_block *blk, BinarySink *bs)
{
    uint64_t final_len = blk->len << 3;
    size_t pad = 1 + (63 & (55 - blk->used));

    put_byte(bs, 0x80);
    for (size_t i = 1; i < pad; i++)
        put_byte(bs, 0);
    put_uint64(bs, final_len);

    assert(blk->used == 0 && "Should have exactly hit a block boundary");
}
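/*
 * A worked example of the padding arithmetic above (illustrative
 * figures, not from the original code): with blk->used == 50,
 * pad = 1 + (63 & (55 - 50)) = 6, so we emit 0x80 plus five zero
 * bytes, then the 8-byte length, landing exactly on the 64-byte
 * boundary. With blk->used == 60, (55 - 60) wraps modulo 64 to 59,
 * giving pad = 60: the 0x80 and zeros spill into a second block,
 * which the length field then completes.
 */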
/* ----------------------------------------------------------------------
 * Software implementation of SHA-256.
 */

static inline uint32_t ror(uint32_t x, unsigned y)
{
    return (x << (31 & /*WINSCP*/(uint32_t)(-(int32_t)y))) | (x >> (31 & y));
}
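/*
 * Note: the "31 &" masks keep both shift counts in the range 0..31,
 * so the rotation above is well defined even when y == 0 (a naive
 * (x << (32 - y)) would shift a 32-bit value by 32, which is
 * undefined behaviour in C).
 */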
static inline uint32_t Ch(uint32_t ctrl, uint32_t if1, uint32_t if0)
{
    return if0 ^ (ctrl & (if1 ^ if0));
}

static inline uint32_t Maj(uint32_t x, uint32_t y, uint32_t z)
{
    return (x & y) | (z & (x | y));
}
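/*
 * Both helpers above are branch-free bitwise forms of the spec's
 * definitions: Ch() is a per-bit multiplexer (each output bit takes
 * if1 where the ctrl bit is 1, else if0), algebraically equal to
 * (ctrl & if1) | (~ctrl & if0) but one operation shorter; Maj()
 * returns, in each bit position, the majority value of its three
 * inputs.
 */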
static inline uint32_t Sigma_0(uint32_t x)
{
    return ror(x,2) ^ ror(x,13) ^ ror(x,22);
}

static inline uint32_t Sigma_1(uint32_t x)
{
    return ror(x,6) ^ ror(x,11) ^ ror(x,25);
}

static inline uint32_t sigma_0(uint32_t x)
{
    return ror(x,7) ^ ror(x,18) ^ (x >> 3);
}

static inline uint32_t sigma_1(uint32_t x)
{
    return ror(x,17) ^ ror(x,19) ^ (x >> 10);
}
static inline void sha256_sw_round(
    unsigned round_index, const uint32_t *schedule,
    uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d,
    uint32_t *e, uint32_t *f, uint32_t *g, uint32_t *h)
{
    uint32_t t1 = *h + Sigma_1(*e) + Ch(*e,*f,*g) +
        sha256_round_constants[round_index] + schedule[round_index];

    uint32_t t2 = Sigma_0(*a) + Maj(*a,*b,*c);

    *d += t1;
    *h = t1 + t2;
}

/*WINSCP static*/ void sha256_sw_block(uint32_t *core, const uint8_t *block)
{
    uint32_t w[SHA256_ROUNDS];
    uint32_t a,b,c,d,e,f,g,h;

    for (size_t t = 0; t < 16; t++)
        w[t] = GET_32BIT_MSB_FIRST(block + 4*t);

    for (size_t t = 16; t < SHA256_ROUNDS; t++)
        w[t] = sigma_1(w[t-2]) + w[t-7] + sigma_0(w[t-15]) + w[t-16];

    a = core[0]; b = core[1]; c = core[2]; d = core[3];
    e = core[4]; f = core[5]; g = core[6]; h = core[7];

    for (size_t t = 0; t < SHA256_ROUNDS; t += 8) {
        sha256_sw_round(t+0, w, &a,&b,&c,&d,&e,&f,&g,&h);
        sha256_sw_round(t+1, w, &h,&a,&b,&c,&d,&e,&f,&g);
        sha256_sw_round(t+2, w, &g,&h,&a,&b,&c,&d,&e,&f);
        sha256_sw_round(t+3, w, &f,&g,&h,&a,&b,&c,&d,&e);
        sha256_sw_round(t+4, w, &e,&f,&g,&h,&a,&b,&c,&d);
        sha256_sw_round(t+5, w, &d,&e,&f,&g,&h,&a,&b,&c);
        sha256_sw_round(t+6, w, &c,&d,&e,&f,&g,&h,&a,&b);
        sha256_sw_round(t+7, w, &b,&c,&d,&e,&f,&g,&h,&a);
    }

    core[0] += a; core[1] += b; core[2] += c; core[3] += d;
    core[4] += e; core[5] += f; core[6] += g; core[7] += h;

    smemclr(w, sizeof(w));
}
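/*
 * Note on the unrolled loop above: each SHA-256 round conceptually
 * rotates the eight state variables by one position, so instead of
 * shuffling the values between variables, the eight calls per
 * iteration pass the same variables in rotated argument order; after
 * eight rounds the assignment of variables to roles is back where it
 * started.
 */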
#endif // WINSCP_VS

#ifndef WINSCP_VS

bool sha256_block_write(
    sha256_block *blk, const void **vdata, size_t *len);
void sha256_sw_block(uint32_t *core, const uint8_t *block);
void sha256_block_pad(sha256_block *blk, BinarySink *bs);

typedef struct sha256_sw {
    uint32_t core[8];
    sha256_block blk;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha256_sw;

static void sha256_sw_write(BinarySink *bs, const void *vp, size_t len);

static ssh_hash *sha256_sw_new(const ssh_hashalg *alg)
{
    sha256_sw *s = snew(sha256_sw);

    s->hash.vt = alg;
    BinarySink_INIT(s, sha256_sw_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static void sha256_sw_reset(ssh_hash *hash)
{
    sha256_sw *s = container_of(hash, sha256_sw, hash);

    memcpy(s->core, sha256_initial_state, sizeof(s->core));
    sha256_block_setup(&s->blk);
}

static void sha256_sw_copyfrom(ssh_hash *hcopy, ssh_hash *horig)
{
    sha256_sw *copy = container_of(hcopy, sha256_sw, hash);
    sha256_sw *orig = container_of(horig, sha256_sw, hash);

    memcpy(copy, orig, sizeof(*copy));
    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);
}

static void sha256_sw_free(ssh_hash *hash)
{
    sha256_sw *s = container_of(hash, sha256_sw, hash);

    smemclr(s, sizeof(*s));
    sfree(s);
}

static void sha256_sw_write(BinarySink *bs, const void *vp, size_t len)
{
    sha256_sw *s = BinarySink_DOWNCAST(bs, sha256_sw);

    while (len > 0)
        if (sha256_block_write(&s->blk, &vp, &len))
            sha256_sw_block(s->core, s->blk.block);
}

static void sha256_sw_digest(ssh_hash *hash, uint8_t *digest)
{
    sha256_sw *s = container_of(hash, sha256_sw, hash);

    sha256_block_pad(&s->blk, BinarySink_UPCAST(s));
    { // WINSCP
    size_t i; // WINSCP
    for (i = 0; i < 8; i++)
        PUT_32BIT_MSB_FIRST(digest + 4*i, s->core[i]);
    } // WINSCP
}

const ssh_hashalg ssh_sha256_sw = {
    // WINSCP
    /*.new =*/ sha256_sw_new,
    /*.reset =*/ sha256_sw_reset,
    /*.copyfrom =*/ sha256_sw_copyfrom,
    /*.digest =*/ sha256_sw_digest,
    /*.free =*/ sha256_sw_free,
    /*.hlen =*/ 32,
    /*.blocklen =*/ 64,
    HASHALG_NAMES_BARE("SHA-256"), // WINSCP (removed "unaccelerated" annotation)
    NULL,
};

#endif // !WINSCP_VS
/* ----------------------------------------------------------------------
 * Hardware-accelerated implementation of SHA-256 using x86 SHA-NI.
 */

#if HW_SHA256 == HW_SHA256_NI

#ifdef WINSCP_VS

/*
 * Set target architecture for Clang and GCC
 */
#if defined(__clang__) || defined(__GNUC__)
#    define FUNC_ISA __attribute__ ((target("sse4.1,sha")))
#if !defined(__clang__)
#    pragma GCC target("sha")
#    pragma GCC target("sse4.1")
#endif
#else
#    define FUNC_ISA
#endif

#include <wmmintrin.h>
#include <smmintrin.h>
#include <immintrin.h>
#if defined(__clang__) || defined(__GNUC__)
#include <shaintrin.h>
#endif

#if defined(__clang__) || defined(__GNUC__)

#include <cpuid.h>
#define GET_CPU_ID_0(out) \
    __cpuid(0, (out)[0], (out)[1], (out)[2], (out)[3])
#define GET_CPU_ID_7(out) \
    __cpuid_count(7, 0, (out)[0], (out)[1], (out)[2], (out)[3])

#else

#define GET_CPU_ID_0(out) __cpuid(out, 0)
#define GET_CPU_ID_7(out) __cpuidex(out, 7, 0)

#endif
/*WINSCP static*/ bool sha256_hw_available(void)
{
    unsigned int CPUInfo[4];
    GET_CPU_ID_0(CPUInfo);
    if (CPUInfo[0] < 7)
        return false;

    GET_CPU_ID_7(CPUInfo);
    return CPUInfo[1] & (1 << 29); /* Check SHA */
}
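/*
 * Background for the check above: CPUID leaf 0 reports the highest
 * supported leaf, and on leaf 7 (subleaf 0) bit 29 of EBX -- which
 * lands in CPUInfo[1] with both macro variants -- is the SHA
 * extensions feature flag.
 */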
/* SHA256 implementation using new instructions
   The code is based on Jeffrey Walton's SHA256 implementation:
   https://github.com/noloader/SHA-Intrinsics
*/
FUNC_ISA
static inline void sha256_ni_block(__m128i *core, const uint8_t *p)
{
    __m128i STATE0, STATE1;
    __m128i MSG, TMP;
    __m128i MSG0, MSG1, MSG2, MSG3;
    const __m128i *block = (const __m128i *)p;

    const __m128i MASK = _mm_set_epi64x(
        0x0c0d0e0f08090a0bULL, 0x0405060700010203ULL);

    /* Load initial values */
    STATE0 = core[0];
    STATE1 = core[1];

    /* Rounds 0-3 */
    MSG = _mm_loadu_si128(block);
    MSG0 = _mm_shuffle_epi8(MSG, MASK);
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
                            0xE9B5DBA5B5C0FBCFULL, 0x71374491428A2F98ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 4-7 */
    MSG1 = _mm_loadu_si128(block + 1);
    MSG1 = _mm_shuffle_epi8(MSG1, MASK);
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
                            0xAB1C5ED5923F82A4ULL, 0x59F111F13956C25BULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 8-11 */
    MSG2 = _mm_loadu_si128(block + 2);
    MSG2 = _mm_shuffle_epi8(MSG2, MASK);
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
                            0x550C7DC3243185BEULL, 0x12835B01D807AA98ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 12-15 */
    MSG3 = _mm_loadu_si128(block + 3);
    MSG3 = _mm_shuffle_epi8(MSG3, MASK);
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
                            0xC19BF1749BDC06A7ULL, 0x80DEB1FE72BE5D74ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 16-19 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
                            0x240CA1CC0FC19DC6ULL, 0xEFBE4786E49B69C1ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 20-23 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
                            0x76F988DA5CB0A9DCULL, 0x4A7484AA2DE92C6FULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 24-27 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
                            0xBF597FC7B00327C8ULL, 0xA831C66D983E5152ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 28-31 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
                            0x1429296706CA6351ULL, 0xD5A79147C6E00BF3ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 32-35 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
                            0x53380D134D2C6DFCULL, 0x2E1B213827B70A85ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 36-39 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
                            0x92722C8581C2C92EULL, 0x766A0ABB650A7354ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG0 = _mm_sha256msg1_epu32(MSG0, MSG1);

    /* Rounds 40-43 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
                            0xC76C51A3C24B8B70ULL, 0xA81A664BA2BFE8A1ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG1 = _mm_sha256msg1_epu32(MSG1, MSG2);

    /* Rounds 44-47 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
                            0x106AA070F40E3585ULL, 0xD6990624D192E819ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG3, MSG2, 4);
    MSG0 = _mm_add_epi32(MSG0, TMP);
    MSG0 = _mm_sha256msg2_epu32(MSG0, MSG3);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG2 = _mm_sha256msg1_epu32(MSG2, MSG3);

    /* Rounds 48-51 */
    MSG = _mm_add_epi32(MSG0, _mm_set_epi64x(
                            0x34B0BCB52748774CULL, 0x1E376C0819A4C116ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG0, MSG3, 4);
    MSG1 = _mm_add_epi32(MSG1, TMP);
    MSG1 = _mm_sha256msg2_epu32(MSG1, MSG0);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
    MSG3 = _mm_sha256msg1_epu32(MSG3, MSG0);

    /* Rounds 52-55 */
    MSG = _mm_add_epi32(MSG1, _mm_set_epi64x(
                            0x682E6FF35B9CCA4FULL, 0x4ED8AA4A391C0CB3ULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG1, MSG0, 4);
    MSG2 = _mm_add_epi32(MSG2, TMP);
    MSG2 = _mm_sha256msg2_epu32(MSG2, MSG1);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 56-59 */
    MSG = _mm_add_epi32(MSG2, _mm_set_epi64x(
                            0x8CC7020884C87814ULL, 0x78A5636F748F82EEULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    TMP = _mm_alignr_epi8(MSG2, MSG1, 4);
    MSG3 = _mm_add_epi32(MSG3, TMP);
    MSG3 = _mm_sha256msg2_epu32(MSG3, MSG2);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Rounds 60-63 */
    MSG = _mm_add_epi32(MSG3, _mm_set_epi64x(
                            0xC67178F2BEF9A3F7ULL, 0xA4506CEB90BEFFFAULL));
    STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
    MSG = _mm_shuffle_epi32(MSG, 0x0E);
    STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

    /* Combine state */
    core[0] = _mm_add_epi32(STATE0, core[0]);
    core[1] = _mm_add_epi32(STATE1, core[1]);
}
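/*
 * Note on the structure of the function above: each
 * _mm_sha256rnds2_epu32 call performs two of the 64 rounds, so every
 * "Rounds N-M" group issues it twice, with that group's four round
 * constants packed into a single vector by _mm_set_epi64x. The
 * _mm_sha256msg1_epu32/_mm_sha256msg2_epu32 pairs incrementally
 * extend the message schedule, playing the role of the scalar code's
 * sigma_0/sigma_1 recurrence.
 */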
typedef struct sha256_ni {
    /*
     * These two vectors store the 8 words of the SHA-256 state, but
     * not in the same order they appear in the spec: the first word
     * holds A,B,E,F and the second word C,D,G,H.
     */
    __m128i core[2];
    sha256_block blk;
    void *pointer_to_free;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha256_ni;
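/*
 * The permuted ABEF/CDGH packing above is not arbitrary: it is the
 * state layout the SHA-NI round intrinsics consume directly, so
 * keeping it in this order avoids re-shuffling the state on every
 * block and defers the rearrangement to the one-off digest step.
 */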
static void sha256_ni_write(BinarySink *bs, const void *vp, size_t len);

static sha256_ni *sha256_ni_alloc(void)
{
    /*
     * The __m128i variables in the context structure need to be
     * 16-byte aligned, but not all malloc implementations that this
     * code has to work with will guarantee to return a 16-byte
     * aligned pointer. So we over-allocate, manually realign the
     * pointer ourselves, and store the original one inside the
     * context so we know how to free it later.
     */
    void *allocation = smalloc(sizeof(sha256_ni) + 15);
    uintptr_t alloc_address = (uintptr_t)allocation;
    uintptr_t aligned_address = (alloc_address + 15) & ~15;
    sha256_ni *s = (sha256_ni *)aligned_address;
    s->pointer_to_free = allocation;
    return s;
}
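/*
 * Illustrative example of the realignment (hypothetical address): if
 * smalloc returned 0x1003, then (0x1003 + 15) & ~15 == 0x1010, so at
 * most 15 of the extra bytes are wasted and the result is 16-byte
 * aligned as the SSE types require.
 */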
/*WINSCP static*/ ssh_hash *sha256_ni_new(const ssh_hashalg *alg)
{
    if (!sha256_hw_available_cached())
        return NULL;

    sha256_ni *s = sha256_ni_alloc();

    s->hash.vt = alg;
    BinarySink_INIT(s, sha256_ni_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

FUNC_ISA /*WINSCP static*/ void sha256_ni_reset(ssh_hash *hash)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);

    /* Initialise the core vectors in their storage order */
    s->core[0] = _mm_set_epi64x(
        0x6a09e667bb67ae85ULL, 0x510e527f9b05688cULL);
    s->core[1] = _mm_set_epi64x(
        0x3c6ef372a54ff53aULL, 0x1f83d9ab5be0cd19ULL);

    sha256_block_setup(&s->blk);
}

/*WINSCP static*/ void sha256_ni_copyfrom(ssh_hash *hcopy, ssh_hash *horig)
{
    sha256_ni *copy = container_of(hcopy, sha256_ni, hash);
    sha256_ni *orig = container_of(horig, sha256_ni, hash);

    void *ptf_save = copy->pointer_to_free;
    *copy = *orig; /* structure copy */
    copy->pointer_to_free = ptf_save;

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);
}

/*WINSCP static*/ void sha256_ni_free(ssh_hash *hash)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);

    void *ptf = s->pointer_to_free;
    smemclr(s, sizeof(*s));
    sfree(ptf);
}

static void sha256_ni_write(BinarySink *bs, const void *vp, size_t len)
{
    sha256_ni *s = BinarySink_DOWNCAST(bs, sha256_ni);

    while (len > 0)
        if (sha256_block_write(&s->blk, &vp, &len))
            sha256_ni_block(s->core, s->blk.block);
}

FUNC_ISA /*WINSCP static*/ void sha256_ni_digest(ssh_hash *hash, uint8_t *digest)
{
    sha256_ni *s = container_of(hash, sha256_ni, hash);

    sha256_block_pad(&s->blk, BinarySink_UPCAST(s));

    /* Rearrange the words into the output order */
    __m128i feba = _mm_shuffle_epi32(s->core[0], 0x1B);
    __m128i dchg = _mm_shuffle_epi32(s->core[1], 0xB1);
    __m128i dcba = _mm_blend_epi16(feba, dchg, 0xF0);
    __m128i hgfe = _mm_alignr_epi8(dchg, feba, 8);

    /* Byte-swap them into the output endianness */
    const __m128i mask = _mm_setr_epi8(3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12);
    dcba = _mm_shuffle_epi8(dcba, mask);
    hgfe = _mm_shuffle_epi8(hgfe, mask);

    /* And store them */
    __m128i *output = (__m128i *)digest;
    _mm_storeu_si128(output, dcba);
    _mm_storeu_si128(output+1, hgfe);
}
#endif // WINSCP_VS

#ifndef WINSCP_VS

ssh_hash *sha256_ni_new(const ssh_hashalg *alg);
void sha256_ni_reset(ssh_hash *hash);
void sha256_ni_copyfrom(ssh_hash *hcopy, ssh_hash *horig);
void sha256_ni_digest(ssh_hash *hash, uint8_t *digest);
void sha256_ni_free(ssh_hash *hash);

const ssh_hashalg ssh_sha256_hw = {
    // WINSCP
    /*.new =*/ sha256_ni_new,
    /*.reset =*/ sha256_ni_reset,
    /*.copyfrom =*/ sha256_ni_copyfrom,
    /*.digest =*/ sha256_ni_digest,
    /*.free =*/ sha256_ni_free,
    /*.hlen =*/ 32,
    /*.blocklen =*/ 64,
    HASHALG_NAMES_ANNOTATED("SHA-256", "SHA-NI accelerated"),
    NULL,
};

#endif // !WINSCP_VS

/* ----------------------------------------------------------------------
 * Hardware-accelerated implementation of SHA-256 using Arm NEON.
 */

#elif HW_SHA256 == HW_SHA256_NEON
/*
 * Manually set the target architecture, if we decided above that we
 * need to.
 */
#ifdef USE_CLANG_ATTR_TARGET_AARCH64
/*
 * A spot of cheating: redefine some ACLE feature macros before
 * including arm_neon.h. Otherwise we won't get the SHA intrinsics
 * defined by that header, because it will be looking at the settings
 * for the whole translation unit rather than the ones we're going to
 * put on some particular functions using __attribute__((target)).
 */
#define __ARM_NEON 1
#define __ARM_FEATURE_CRYPTO 1
#define FUNC_ISA __attribute__ ((target("neon,crypto")))
#endif /* USE_CLANG_ATTR_TARGET_AARCH64 */

#ifndef FUNC_ISA
#define FUNC_ISA
#endif

#ifdef USE_ARM64_NEON_H
#include <arm64_neon.h>
#else
#include <arm_neon.h>
#endif

/*WINSCP static*/ bool sha256_hw_available(void)
{
    /*
     * For Arm, we delegate to a per-platform detection function (see
     * explanation in sshaes.c).
     */
    return platform_sha256_hw_available();
}

typedef struct sha256_neon_core sha256_neon_core;
struct sha256_neon_core {
    uint32x4_t abcd, efgh;
};
FUNC_ISA
static inline uint32x4_t sha256_neon_load_input(const uint8_t *p)
{
    return vreinterpretq_u32_u8(vrev32q_u8(vld1q_u8(p)));
}

FUNC_ISA
static inline uint32x4_t sha256_neon_schedule_update(
    uint32x4_t m4, uint32x4_t m3, uint32x4_t m2, uint32x4_t m1)
{
    return vsha256su1q_u32(vsha256su0q_u32(m4, m3), m2, m1);
}

FUNC_ISA
static inline sha256_neon_core sha256_neon_round4(
    sha256_neon_core old, uint32x4_t sched, unsigned round)
{
    sha256_neon_core new;

    uint32x4_t round_input = vaddq_u32(
        sched, vld1q_u32(sha256_round_constants + round));
    new.abcd = vsha256hq_u32 (old.abcd, old.efgh, round_input);
    new.efgh = vsha256h2q_u32(old.efgh, old.abcd, round_input);
    return new;
}
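/*
 * In the helper above, each call advances the hash by four rounds:
 * vsha256hq_u32 produces the new ABCD half of the state and
 * vsha256h2q_u32 the new EFGH half, both consuming the same four
 * schedule words with their round constants already added in.
 */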
FUNC_ISA
static inline void sha256_neon_block(sha256_neon_core *core, const uint8_t *p)
{
    uint32x4_t s0, s1, s2, s3;
    sha256_neon_core cr = *core;

    s0 = sha256_neon_load_input(p);
    cr = sha256_neon_round4(cr, s0, 0);
    s1 = sha256_neon_load_input(p+16);
    cr = sha256_neon_round4(cr, s1, 4);
    s2 = sha256_neon_load_input(p+32);
    cr = sha256_neon_round4(cr, s2, 8);
    s3 = sha256_neon_load_input(p+48);
    cr = sha256_neon_round4(cr, s3, 12);
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 16);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 20);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 24);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 28);
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 32);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 36);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 40);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 44);
    s0 = sha256_neon_schedule_update(s0, s1, s2, s3);
    cr = sha256_neon_round4(cr, s0, 48);
    s1 = sha256_neon_schedule_update(s1, s2, s3, s0);
    cr = sha256_neon_round4(cr, s1, 52);
    s2 = sha256_neon_schedule_update(s2, s3, s0, s1);
    cr = sha256_neon_round4(cr, s2, 56);
    s3 = sha256_neon_schedule_update(s3, s0, s1, s2);
    cr = sha256_neon_round4(cr, s3, 60);

    core->abcd = vaddq_u32(core->abcd, cr.abcd);
    core->efgh = vaddq_u32(core->efgh, cr.efgh);
}
typedef struct sha256_neon {
    sha256_neon_core core;
    sha256_block blk;
    BinarySink_IMPLEMENTATION;
    ssh_hash hash;
} sha256_neon;

static void sha256_neon_write(BinarySink *bs, const void *vp, size_t len);

static ssh_hash *sha256_neon_new(const ssh_hashalg *alg)
{
    if (!sha256_hw_available_cached())
        return NULL;

    sha256_neon *s = snew(sha256_neon);

    s->hash.vt = alg;
    BinarySink_INIT(s, sha256_neon_write);
    BinarySink_DELEGATE_INIT(&s->hash, s);
    return &s->hash;
}

static void sha256_neon_reset(ssh_hash *hash)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);

    s->core.abcd = vld1q_u32(sha256_initial_state);
    s->core.efgh = vld1q_u32(sha256_initial_state + 4);

    sha256_block_setup(&s->blk);
}

static void sha256_neon_copyfrom(ssh_hash *hcopy, ssh_hash *horig)
{
    sha256_neon *copy = container_of(hcopy, sha256_neon, hash);
    sha256_neon *orig = container_of(horig, sha256_neon, hash);

    *copy = *orig; /* structure copy */

    BinarySink_COPIED(copy);
    BinarySink_DELEGATE_INIT(&copy->hash, copy);
}

static void sha256_neon_free(ssh_hash *hash)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);

    smemclr(s, sizeof(*s));
    sfree(s);
}

static void sha256_neon_write(BinarySink *bs, const void *vp, size_t len)
{
    sha256_neon *s = BinarySink_DOWNCAST(bs, sha256_neon);

    while (len > 0)
        if (sha256_block_write(&s->blk, &vp, &len))
            sha256_neon_block(&s->core, s->blk.block);
}

static void sha256_neon_digest(ssh_hash *hash, uint8_t *digest)
{
    sha256_neon *s = container_of(hash, sha256_neon, hash);

    sha256_block_pad(&s->blk, BinarySink_UPCAST(s));
    vst1q_u8(digest,      vrev32q_u8(vreinterpretq_u8_u32(s->core.abcd)));
    vst1q_u8(digest + 16, vrev32q_u8(vreinterpretq_u8_u32(s->core.efgh)));
}
const ssh_hashalg ssh_sha256_hw = {
    .new = sha256_neon_new,
    .reset = sha256_neon_reset,
    .copyfrom = sha256_neon_copyfrom,
    .digest = sha256_neon_digest,
    .free = sha256_neon_free,
    .hlen = 32,
    .blocklen = 64,
    HASHALG_NAMES_ANNOTATED("SHA-256", "NEON accelerated"),
};
/* ----------------------------------------------------------------------
 * Stub functions if we have no hardware-accelerated SHA-256. In this
 * case, sha256_hw_new returns NULL (though it should also never be
 * selected by sha256_select, so the only thing that should even be
 * _able_ to call it is testcrypt). As a result, the remaining vtable
 * functions should never be called at all.
 */

#elif HW_SHA256 == HW_SHA256_NONE

#ifndef WINSCP_VS

/*WINSCP static*/ bool sha256_hw_available(void)
{
    return false;
}

static ssh_hash *sha256_stub_new(const ssh_hashalg *alg)
{
    return NULL;
}

#define STUB_BODY { unreachable("Should never be called"); }

static void sha256_stub_reset(ssh_hash *hash) STUB_BODY
static void sha256_stub_copyfrom(ssh_hash *hash, ssh_hash *orig) STUB_BODY
static void sha256_stub_free(ssh_hash *hash) STUB_BODY
static void sha256_stub_digest(ssh_hash *hash, uint8_t *digest) STUB_BODY

const ssh_hashalg ssh_sha256_hw = {
    // WINSCP
    /*.new =*/ sha256_stub_new,
    /*.reset =*/ sha256_stub_reset,
    /*.copyfrom =*/ sha256_stub_copyfrom,
    /*.digest =*/ sha256_stub_digest,
    /*.free =*/ sha256_stub_free,
    /*.hlen =*/ 32,
    /*.blocklen =*/ 64,
    HASHALG_NAMES_ANNOTATED("SHA-256", "!NONEXISTENT ACCELERATED VERSION!"),
    NULL,
};

#endif // !WINSCP_VS

#endif /* HW_SHA256 */