  1. /*
  2. * FILE: sha2.c
  3. * AUTHOR: Aaron D. Gifford
  4. * http://www.aarongifford.com/computers/sha.html
  5. *
  6. * Copyright (c) 2000-2003, Aaron D. Gifford
  7. * All rights reserved.
  8. *
  9. * Redistribution and use in source and binary forms, with or without
  10. * modification, are permitted provided that the following conditions
  11. * are met:
  12. * 1. Redistributions of source code must retain the above copyright
  13. * notice, this list of conditions and the following disclaimer.
  14. * 2. Redistributions in binary form must reproduce the above copyright
  15. * notice, this list of conditions and the following disclaimer in the
  16. * documentation and/or other materials provided with the distribution.
  17. * 3. Neither the name of the copyright holder nor the names of contributors
  18. * may be used to endorse or promote products derived from this software
  19. * without specific prior written permission.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
  22. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  23. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  24. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
  25. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  26. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  27. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  28. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  29. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  30. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  31. * SUCH DAMAGE.
  32. *
  33. * $Id: sha2.c,v 1.4 2004/01/07 22:58:18 adg Exp $
  34. */
  35. #include <string.h> /* memcpy()/memset() or bcopy()/bzero() */
  36. #include <assert.h> /* assert() */
  37. #include "cm_sha2.h" /* "sha2.h" -> "cm_sha2.h" renamed for CMake */
  38. /*
  39. * ASSERT NOTE:
  40. * Some sanity checking code is included using assert(). On my FreeBSD
  41. * system, this additional code can be removed by compiling with NDEBUG
  42. * defined. Check your own system's manpage on assert() to see how to
  43. * compile WITHOUT the sanity checking code on your system.
  44. *
  45. * UNROLLED TRANSFORM LOOP NOTE:
  46. * You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform
  47. * loop version for the hash transform rounds (defined using macros
  48. * later in this file). Either define on the command line, for example:
  49. *
  50. * cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c
  51. *
  52. * or define below:
  53. *
  54. * #define SHA2_UNROLL_TRANSFORM
  55. *
  56. */
  57. /*** SHA-224/256/384/512 Machine Architecture Definitions *************/
  58. /*
  59. * BYTE_ORDER NOTE:
  60. *
  61. * Please make sure that your system defines BYTE_ORDER. If your
  62. * architecture is little-endian, make sure it also defines
  63. * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
  64. * equivalent.
  65. *
  66. * If your system does not define the above, then you can do so by
  67. * hand like this:
  68. *
  69. * #define LITTLE_ENDIAN 1234
  70. * #define BIG_ENDIAN 4321
  71. *
  72. * And for little-endian machines, add:
  73. *
  74. * #define BYTE_ORDER LITTLE_ENDIAN
  75. *
  76. * Or for big-endian machines:
  77. *
  78. * #define BYTE_ORDER BIG_ENDIAN
  79. *
  80. * The FreeBSD machine this was written on defines BYTE_ORDER
  81. * appropriately by including <sys/types.h> (which in turn includes
  82. * <machine/endian.h> where the appropriate definitions are actually
  83. * made).
  84. */
  85. #if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN)
  86. /* CMake modification: use byte order from cmIML. */
  87. # include "cmIML/ABI.h"
  88. # undef BYTE_ORDER
  89. # undef BIG_ENDIAN
  90. # undef LITTLE_ENDIAN
  91. # define BYTE_ORDER cmIML_ABI_ENDIAN_ID
  92. # define BIG_ENDIAN cmIML_ABI_ENDIAN_ID_BIG
  93. # define LITTLE_ENDIAN cmIML_ABI_ENDIAN_ID_LITTLE
  94. #endif
  95. /* CMake modification: use types computed in header. */
  96. typedef cm_sha2_uint8_t sha_byte; /* Exactly 1 byte */
  97. typedef cm_sha2_uint32_t sha_word32; /* Exactly 4 bytes */
  98. typedef cm_sha2_uint64_t sha_word64; /* Exactly 8 bytes */
  99. #define SHA_UINT32_C(x) cmIML_INT_UINT32_C(x)
  100. #define SHA_UINT64_C(x) cmIML_INT_UINT64_C(x)
  101. #if defined(__clang__)
  102. # pragma clang diagnostic ignored "-Wcast-align"
  103. #endif
  104. /*** ENDIAN REVERSAL MACROS *******************************************/
  105. #if BYTE_ORDER == LITTLE_ENDIAN
  106. #define REVERSE32(w,x) { \
  107. sha_word32 tmp = (w); \
  108. tmp = (tmp >> 16) | (tmp << 16); \
  109. (x) = ((tmp & SHA_UINT32_C(0xff00ff00)) >> 8) | \
  110. ((tmp & SHA_UINT32_C(0x00ff00ff)) << 8); \
  111. }
  112. #define REVERSE64(w,x) { \
  113. sha_word64 tmp = (w); \
  114. tmp = (tmp >> 32) | (tmp << 32); \
  115. tmp = ((tmp & SHA_UINT64_C(0xff00ff00ff00ff00)) >> 8) | \
  116. ((tmp & SHA_UINT64_C(0x00ff00ff00ff00ff)) << 8); \
  117. (x) = ((tmp & SHA_UINT64_C(0xffff0000ffff0000)) >> 16) | \
  118. ((tmp & SHA_UINT64_C(0x0000ffff0000ffff)) << 16); \
  119. }
  120. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
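/*
 * For example, REVERSE32 applied to the word 0x11223344 yields
 * 0x44332211; REVERSE64 performs the same byte reversal across all
 * eight bytes of a 64-bit word.  On little-endian hosts this converts
 * between the host's in-memory word layout and the big-endian word
 * order over which the SHA algorithms are defined.
 */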
  121. /*
  122. * Macro for incrementally adding the unsigned 64-bit integer n to the
  123. * unsigned 128-bit integer (represented using a two-element array of
  124. * 64-bit words):
  125. */
  126. #define ADDINC128(w,n) { \
  127. (w)[0] += (sha_word64)(n); \
  128. if ((w)[0] < (n)) { \
  129. (w)[1]++; \
  130. } \
  131. }
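/*
 * The carry test above relies on unsigned wrap-around: if (w)[0] is
 * 0xffffffffffffffff and n is 1, the 64-bit addition wraps to 0,
 * (w)[0] < (n) becomes true, and the carry propagates into (w)[1].
 * This keeps the 128-bit bit count used by SHA-384/512 correct even
 * for messages whose length does not fit in 64 bits.
 */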
  132. /*
  133. * Macros for copying blocks of memory and for zeroing out ranges
  134. * of memory. Using these macros makes it easy to switch between
  135. * using memset()/memcpy() and using bzero()/bcopy().
  136. *
  137. * Please define either SHA2_USE_MEMSET_MEMCPY or
  138. * SHA2_USE_BZERO_BCOPY, depending on which function set you
  139. * choose to use:
  140. */
  141. #if !defined(SHA2_USE_MEMSET_MEMCPY) && !defined(SHA2_USE_BZERO_BCOPY)
  142. /* Default to memset()/memcpy() if no option is specified */
  143. #define SHA2_USE_MEMSET_MEMCPY 1
  144. #endif
  145. #if defined(SHA2_USE_MEMSET_MEMCPY) && defined(SHA2_USE_BZERO_BCOPY)
  146. /* Abort with an error if BOTH options are defined */
  147. #error Define either SHA2_USE_MEMSET_MEMCPY or SHA2_USE_BZERO_BCOPY, not both!
  148. #endif
  149. #ifdef SHA2_USE_MEMSET_MEMCPY
  150. #define MEMSET_BZERO(p,l) memset((p), 0, (l))
  151. #define MEMCPY_BCOPY(d,s,l) memcpy((d), (s), (l))
  152. #endif
  153. #ifdef SHA2_USE_BZERO_BCOPY
  154. #define MEMSET_BZERO(p,l) bzero((p), (l))
  155. #define MEMCPY_BCOPY(d,s,l) bcopy((s), (d), (l))
  156. #endif
  157. /*** THE SIX LOGICAL FUNCTIONS ****************************************/
  158. /*
  159. * Bit shifting and rotation (used by the six SHA-XYZ logical functions):
  160. *
  161. * NOTE: In the original SHA-256/384/512 document, the shift-right
  162. * function was named R and the rotate-right function was called S.
  163. * (See: http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf on the
  164. * web.)
  165. *
  166. * The newer NIST FIPS 180-2 document uses a much clearer naming
  167. * scheme, SHR for shift-right, ROTR for rotate-right, and ROTL for
  168. * rotate-left. (See:
  169. * http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
  170. * on the web.)
  171. *
  172. * WARNING: These macros must be used cautiously, since they sometimes
  173. * reference supplied parameters more than once, and thus could have
  174. * unexpected side-effects if used without taking this into account.
  175. */
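/*
 * For example, because ROTR32(b,x) expands its x argument twice, a
 * call such as ROTR32(1, *p++) would evaluate *p++ twice; pass only
 * side-effect-free expressions to these macros.
 */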
  176. /* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
  177. #define SHR(b,x) ((x) >> (b))
  178. /* 32-bit Rotate-right (used in SHA-256): */
  179. #define ROTR32(b,x) (((x) >> (b)) | ((x) << (32 - (b))))
  180. /* 64-bit Rotate-right (used in SHA-384 and SHA-512): */
  181. #define ROTR64(b,x) (((x) >> (b)) | ((x) << (64 - (b))))
  182. /* 32-bit Rotate-left (used in SHA-1): */
  183. #define ROTL32(b,x) (((x) << (b)) | ((x) >> (32 - (b))))
  184. /* Two logical functions used in SHA-1, SHA-224, SHA-256, SHA-384, and SHA-512: */
  185. #define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
  186. #define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
  187. /* Function used in SHA-1: */
  188. #define Parity(x,y,z) ((x) ^ (y) ^ (z))
  189. /* Four logical functions used in SHA-256: */
  190. #define Sigma0_256(x) (ROTR32(2, (x)) ^ ROTR32(13, (x)) ^ ROTR32(22, (x)))
  191. #define Sigma1_256(x) (ROTR32(6, (x)) ^ ROTR32(11, (x)) ^ ROTR32(25, (x)))
  192. #define sigma0_256(x) (ROTR32(7, (x)) ^ ROTR32(18, (x)) ^ SHR( 3 , (x)))
  193. #define sigma1_256(x) (ROTR32(17, (x)) ^ ROTR32(19, (x)) ^ SHR( 10, (x)))
  194. /* Four of six logical functions used in SHA-384 and SHA-512: */
  195. #define Sigma0_512(x) (ROTR64(28, (x)) ^ ROTR64(34, (x)) ^ ROTR64(39, (x)))
  196. #define Sigma1_512(x) (ROTR64(14, (x)) ^ ROTR64(18, (x)) ^ ROTR64(41, (x)))
  197. #define sigma0_512(x) (ROTR64( 1, (x)) ^ ROTR64( 8, (x)) ^ SHR( 7, (x)))
  198. #define sigma1_512(x) (ROTR64(19, (x)) ^ ROTR64(61, (x)) ^ SHR( 6, (x)))
  199. /*** INTERNAL FUNCTION PROTOTYPES *************************************/
  200. /* SHA-224 and SHA-256: */
  201. void SHA256_Internal_Init(SHA_CTX*, const sha_word32*);
  202. void SHA256_Internal_Last(SHA_CTX*);
  203. void SHA256_Internal_Transform(SHA_CTX*, const sha_word32*);
  204. /* SHA-384 and SHA-512: */
  205. void SHA512_Internal_Init(SHA_CTX*, const sha_word64*);
  206. void SHA512_Internal_Last(SHA_CTX*);
  207. void SHA512_Internal_Transform(SHA_CTX*, const sha_word64*);
  208. /*** SHA2 INITIAL HASH VALUES AND CONSTANTS ***************************/
  209. /* Hash constant words K for SHA-1: */
  210. #define K1_0_TO_19 SHA_UINT32_C(0x5a827999)
  211. #define K1_20_TO_39 SHA_UINT32_C(0x6ed9eba1)
  212. #define K1_40_TO_59 SHA_UINT32_C(0x8f1bbcdc)
  213. #define K1_60_TO_79 SHA_UINT32_C(0xca62c1d6)
  214. /* Initial hash value H for SHA-1: */
  215. static const sha_word32 sha1_initial_hash_value[5] = {
  216. SHA_UINT32_C(0x67452301),
  217. SHA_UINT32_C(0xefcdab89),
  218. SHA_UINT32_C(0x98badcfe),
  219. SHA_UINT32_C(0x10325476),
  220. SHA_UINT32_C(0xc3d2e1f0)
  221. };
  222. /* Hash constant words K for SHA-224 and SHA-256: */
  223. static const sha_word32 K256[64] = {
  224. SHA_UINT32_C(0x428a2f98), SHA_UINT32_C(0x71374491),
  225. SHA_UINT32_C(0xb5c0fbcf), SHA_UINT32_C(0xe9b5dba5),
  226. SHA_UINT32_C(0x3956c25b), SHA_UINT32_C(0x59f111f1),
  227. SHA_UINT32_C(0x923f82a4), SHA_UINT32_C(0xab1c5ed5),
  228. SHA_UINT32_C(0xd807aa98), SHA_UINT32_C(0x12835b01),
  229. SHA_UINT32_C(0x243185be), SHA_UINT32_C(0x550c7dc3),
  230. SHA_UINT32_C(0x72be5d74), SHA_UINT32_C(0x80deb1fe),
  231. SHA_UINT32_C(0x9bdc06a7), SHA_UINT32_C(0xc19bf174),
  232. SHA_UINT32_C(0xe49b69c1), SHA_UINT32_C(0xefbe4786),
  233. SHA_UINT32_C(0x0fc19dc6), SHA_UINT32_C(0x240ca1cc),
  234. SHA_UINT32_C(0x2de92c6f), SHA_UINT32_C(0x4a7484aa),
  235. SHA_UINT32_C(0x5cb0a9dc), SHA_UINT32_C(0x76f988da),
  236. SHA_UINT32_C(0x983e5152), SHA_UINT32_C(0xa831c66d),
  237. SHA_UINT32_C(0xb00327c8), SHA_UINT32_C(0xbf597fc7),
  238. SHA_UINT32_C(0xc6e00bf3), SHA_UINT32_C(0xd5a79147),
  239. SHA_UINT32_C(0x06ca6351), SHA_UINT32_C(0x14292967),
  240. SHA_UINT32_C(0x27b70a85), SHA_UINT32_C(0x2e1b2138),
  241. SHA_UINT32_C(0x4d2c6dfc), SHA_UINT32_C(0x53380d13),
  242. SHA_UINT32_C(0x650a7354), SHA_UINT32_C(0x766a0abb),
  243. SHA_UINT32_C(0x81c2c92e), SHA_UINT32_C(0x92722c85),
  244. SHA_UINT32_C(0xa2bfe8a1), SHA_UINT32_C(0xa81a664b),
  245. SHA_UINT32_C(0xc24b8b70), SHA_UINT32_C(0xc76c51a3),
  246. SHA_UINT32_C(0xd192e819), SHA_UINT32_C(0xd6990624),
  247. SHA_UINT32_C(0xf40e3585), SHA_UINT32_C(0x106aa070),
  248. SHA_UINT32_C(0x19a4c116), SHA_UINT32_C(0x1e376c08),
  249. SHA_UINT32_C(0x2748774c), SHA_UINT32_C(0x34b0bcb5),
  250. SHA_UINT32_C(0x391c0cb3), SHA_UINT32_C(0x4ed8aa4a),
  251. SHA_UINT32_C(0x5b9cca4f), SHA_UINT32_C(0x682e6ff3),
  252. SHA_UINT32_C(0x748f82ee), SHA_UINT32_C(0x78a5636f),
  253. SHA_UINT32_C(0x84c87814), SHA_UINT32_C(0x8cc70208),
  254. SHA_UINT32_C(0x90befffa), SHA_UINT32_C(0xa4506ceb),
  255. SHA_UINT32_C(0xbef9a3f7), SHA_UINT32_C(0xc67178f2)
  256. };
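/*
 * Per FIPS 180-2, these are the first 32 bits of the fractional parts
 * of the cube roots of the first 64 prime numbers; the K512 table
 * below extends the same construction to 64 bits and the first 80
 * primes.
 */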
  257. /* Initial hash value H for SHA-224: */
  258. static const sha_word32 sha224_initial_hash_value[8] = {
  259. SHA_UINT32_C(0xc1059ed8),
  260. SHA_UINT32_C(0x367cd507),
  261. SHA_UINT32_C(0x3070dd17),
  262. SHA_UINT32_C(0xf70e5939),
  263. SHA_UINT32_C(0xffc00b31),
  264. SHA_UINT32_C(0x68581511),
  265. SHA_UINT32_C(0x64f98fa7),
  266. SHA_UINT32_C(0xbefa4fa4)
  267. };
  268. /* Initial hash value H for SHA-256: */
  269. static const sha_word32 sha256_initial_hash_value[8] = {
  270. SHA_UINT32_C(0x6a09e667),
  271. SHA_UINT32_C(0xbb67ae85),
  272. SHA_UINT32_C(0x3c6ef372),
  273. SHA_UINT32_C(0xa54ff53a),
  274. SHA_UINT32_C(0x510e527f),
  275. SHA_UINT32_C(0x9b05688c),
  276. SHA_UINT32_C(0x1f83d9ab),
  277. SHA_UINT32_C(0x5be0cd19)
  278. };
  279. /* Hash constant words K for SHA-384 and SHA-512: */
  280. static const sha_word64 K512[80] = {
  281. SHA_UINT64_C(0x428a2f98d728ae22), SHA_UINT64_C(0x7137449123ef65cd),
  282. SHA_UINT64_C(0xb5c0fbcfec4d3b2f), SHA_UINT64_C(0xe9b5dba58189dbbc),
  283. SHA_UINT64_C(0x3956c25bf348b538), SHA_UINT64_C(0x59f111f1b605d019),
  284. SHA_UINT64_C(0x923f82a4af194f9b), SHA_UINT64_C(0xab1c5ed5da6d8118),
  285. SHA_UINT64_C(0xd807aa98a3030242), SHA_UINT64_C(0x12835b0145706fbe),
  286. SHA_UINT64_C(0x243185be4ee4b28c), SHA_UINT64_C(0x550c7dc3d5ffb4e2),
  287. SHA_UINT64_C(0x72be5d74f27b896f), SHA_UINT64_C(0x80deb1fe3b1696b1),
  288. SHA_UINT64_C(0x9bdc06a725c71235), SHA_UINT64_C(0xc19bf174cf692694),
  289. SHA_UINT64_C(0xe49b69c19ef14ad2), SHA_UINT64_C(0xefbe4786384f25e3),
  290. SHA_UINT64_C(0x0fc19dc68b8cd5b5), SHA_UINT64_C(0x240ca1cc77ac9c65),
  291. SHA_UINT64_C(0x2de92c6f592b0275), SHA_UINT64_C(0x4a7484aa6ea6e483),
  292. SHA_UINT64_C(0x5cb0a9dcbd41fbd4), SHA_UINT64_C(0x76f988da831153b5),
  293. SHA_UINT64_C(0x983e5152ee66dfab), SHA_UINT64_C(0xa831c66d2db43210),
  294. SHA_UINT64_C(0xb00327c898fb213f), SHA_UINT64_C(0xbf597fc7beef0ee4),
  295. SHA_UINT64_C(0xc6e00bf33da88fc2), SHA_UINT64_C(0xd5a79147930aa725),
  296. SHA_UINT64_C(0x06ca6351e003826f), SHA_UINT64_C(0x142929670a0e6e70),
  297. SHA_UINT64_C(0x27b70a8546d22ffc), SHA_UINT64_C(0x2e1b21385c26c926),
  298. SHA_UINT64_C(0x4d2c6dfc5ac42aed), SHA_UINT64_C(0x53380d139d95b3df),
  299. SHA_UINT64_C(0x650a73548baf63de), SHA_UINT64_C(0x766a0abb3c77b2a8),
  300. SHA_UINT64_C(0x81c2c92e47edaee6), SHA_UINT64_C(0x92722c851482353b),
  301. SHA_UINT64_C(0xa2bfe8a14cf10364), SHA_UINT64_C(0xa81a664bbc423001),
  302. SHA_UINT64_C(0xc24b8b70d0f89791), SHA_UINT64_C(0xc76c51a30654be30),
  303. SHA_UINT64_C(0xd192e819d6ef5218), SHA_UINT64_C(0xd69906245565a910),
  304. SHA_UINT64_C(0xf40e35855771202a), SHA_UINT64_C(0x106aa07032bbd1b8),
  305. SHA_UINT64_C(0x19a4c116b8d2d0c8), SHA_UINT64_C(0x1e376c085141ab53),
  306. SHA_UINT64_C(0x2748774cdf8eeb99), SHA_UINT64_C(0x34b0bcb5e19b48a8),
  307. SHA_UINT64_C(0x391c0cb3c5c95a63), SHA_UINT64_C(0x4ed8aa4ae3418acb),
  308. SHA_UINT64_C(0x5b9cca4f7763e373), SHA_UINT64_C(0x682e6ff3d6b2b8a3),
  309. SHA_UINT64_C(0x748f82ee5defb2fc), SHA_UINT64_C(0x78a5636f43172f60),
  310. SHA_UINT64_C(0x84c87814a1f0ab72), SHA_UINT64_C(0x8cc702081a6439ec),
  311. SHA_UINT64_C(0x90befffa23631e28), SHA_UINT64_C(0xa4506cebde82bde9),
  312. SHA_UINT64_C(0xbef9a3f7b2c67915), SHA_UINT64_C(0xc67178f2e372532b),
  313. SHA_UINT64_C(0xca273eceea26619c), SHA_UINT64_C(0xd186b8c721c0c207),
  314. SHA_UINT64_C(0xeada7dd6cde0eb1e), SHA_UINT64_C(0xf57d4f7fee6ed178),
  315. SHA_UINT64_C(0x06f067aa72176fba), SHA_UINT64_C(0x0a637dc5a2c898a6),
  316. SHA_UINT64_C(0x113f9804bef90dae), SHA_UINT64_C(0x1b710b35131c471b),
  317. SHA_UINT64_C(0x28db77f523047d84), SHA_UINT64_C(0x32caab7b40c72493),
  318. SHA_UINT64_C(0x3c9ebe0a15c9bebc), SHA_UINT64_C(0x431d67c49c100d4c),
  319. SHA_UINT64_C(0x4cc5d4becb3e42b6), SHA_UINT64_C(0x597f299cfc657e2a),
  320. SHA_UINT64_C(0x5fcb6fab3ad6faec), SHA_UINT64_C(0x6c44198c4a475817)
  321. };
  322. /* Initial hash value H for SHA-384 */
  323. static const sha_word64 sha384_initial_hash_value[8] = {
  324. SHA_UINT64_C(0xcbbb9d5dc1059ed8),
  325. SHA_UINT64_C(0x629a292a367cd507),
  326. SHA_UINT64_C(0x9159015a3070dd17),
  327. SHA_UINT64_C(0x152fecd8f70e5939),
  328. SHA_UINT64_C(0x67332667ffc00b31),
  329. SHA_UINT64_C(0x8eb44a8768581511),
  330. SHA_UINT64_C(0xdb0c2e0d64f98fa7),
  331. SHA_UINT64_C(0x47b5481dbefa4fa4)
  332. };
  333. /* Initial hash value H for SHA-512 */
  334. static const sha_word64 sha512_initial_hash_value[8] = {
  335. SHA_UINT64_C(0x6a09e667f3bcc908),
  336. SHA_UINT64_C(0xbb67ae8584caa73b),
  337. SHA_UINT64_C(0x3c6ef372fe94f82b),
  338. SHA_UINT64_C(0xa54ff53a5f1d36f1),
  339. SHA_UINT64_C(0x510e527fade682d1),
  340. SHA_UINT64_C(0x9b05688c2b3e6c1f),
  341. SHA_UINT64_C(0x1f83d9abfb41bd6b),
  342. SHA_UINT64_C(0x5be0cd19137e2179)
  343. };
  344. /*
  345. * Constant used by the SHA1/224/256/384/512_End() functions for converting
  346. * the digest to a readable hexadecimal character string:
  347. */
  348. static const char *sha_hex_digits = "0123456789abcdef";
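/*
 * Each digest byte is emitted high nibble first using this table, so
 * the byte 0x3a becomes "3a"; the resulting string is lowercase hex,
 * twice the digest length, plus a terminating NUL.
 */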
  349. /*** SHA-1: ***********************************************************/
  350. void SHA1_Init(SHA_CTX* context) {
  351. /* Sanity check: */
  352. assert(context != (SHA_CTX*)0);
  353. MEMCPY_BCOPY(context->s1.state, sha1_initial_hash_value, sizeof(sha_word32) * 5);
  354. MEMSET_BZERO(context->s1.buffer, 64);
  355. context->s1.bitcount = 0;
  356. }
  357. #ifdef SHA2_UNROLL_TRANSFORM
  358. /* Unrolled SHA-1 round macros: */
  359. #if BYTE_ORDER == LITTLE_ENDIAN
  360. #define ROUND1_0_TO_15(a,b,c,d,e) \
  361. REVERSE32(*data++, W1[j]); \
  362. (e) = ROTL32(5, (a)) + Ch((b), (c), (d)) + (e) + \
  363. K1_0_TO_19 + W1[j]; \
  364. (b) = ROTL32(30, (b)); \
  365. j++;
  366. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  367. #define ROUND1_0_TO_15(a,b,c,d,e) \
  368. (e) = ROTL32(5, (a)) + Ch((b), (c), (d)) + (e) + \
  369. K1_0_TO_19 + ( W1[j] = *data++ ); \
  370. (b) = ROTL32(30, (b)); \
  371. j++;
  372. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  373. #define ROUND1_16_TO_19(a,b,c,d,e) \
  374. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
  375. (e) = ROTL32(5, a) + Ch(b,c,d) + e + K1_0_TO_19 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
  376. (b) = ROTL32(30, b); \
  377. j++;
  378. #define ROUND1_20_TO_39(a,b,c,d,e) \
  379. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
  380. (e) = ROTL32(5, a) + Parity(b,c,d) + e + K1_20_TO_39 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
  381. (b) = ROTL32(30, b); \
  382. j++;
  383. #define ROUND1_40_TO_59(a,b,c,d,e) \
  384. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
  385. (e) = ROTL32(5, a) + Maj(b,c,d) + e + K1_40_TO_59 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
  386. (b) = ROTL32(30, b); \
  387. j++;
  388. #define ROUND1_60_TO_79(a,b,c,d,e) \
  389. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
  390. (e) = ROTL32(5, a) + Parity(b,c,d) + e + K1_60_TO_79 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
  391. (b) = ROTL32(30, b); \
  392. j++;
  393. void SHA1_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
  394. sha_word32 a, b, c, d, e;
  395. sha_word32 T1, *W1;
  396. int j;
  397. W1 = (sha_word32*)context->s1.buffer;
  398. /* Initialize registers with the prev. intermediate value */
  399. a = context->s1.state[0];
  400. b = context->s1.state[1];
  401. c = context->s1.state[2];
  402. d = context->s1.state[3];
  403. e = context->s1.state[4];
  404. j = 0;
  405. /* Rounds 0 to 15 unrolled: */
  406. ROUND1_0_TO_15(a,b,c,d,e);
  407. ROUND1_0_TO_15(e,a,b,c,d);
  408. ROUND1_0_TO_15(d,e,a,b,c);
  409. ROUND1_0_TO_15(c,d,e,a,b);
  410. ROUND1_0_TO_15(b,c,d,e,a);
  411. ROUND1_0_TO_15(a,b,c,d,e);
  412. ROUND1_0_TO_15(e,a,b,c,d);
  413. ROUND1_0_TO_15(d,e,a,b,c);
  414. ROUND1_0_TO_15(c,d,e,a,b);
  415. ROUND1_0_TO_15(b,c,d,e,a);
  416. ROUND1_0_TO_15(a,b,c,d,e);
  417. ROUND1_0_TO_15(e,a,b,c,d);
  418. ROUND1_0_TO_15(d,e,a,b,c);
  419. ROUND1_0_TO_15(c,d,e,a,b);
  420. ROUND1_0_TO_15(b,c,d,e,a);
  421. ROUND1_0_TO_15(a,b,c,d,e);
  422. /* Rounds 16 to 19 unrolled: */
  423. ROUND1_16_TO_19(e,a,b,c,d);
  424. ROUND1_16_TO_19(d,e,a,b,c);
  425. ROUND1_16_TO_19(c,d,e,a,b);
  426. ROUND1_16_TO_19(b,c,d,e,a);
  427. /* Rounds 20 to 39 unrolled: */
  428. ROUND1_20_TO_39(a,b,c,d,e);
  429. ROUND1_20_TO_39(e,a,b,c,d);
  430. ROUND1_20_TO_39(d,e,a,b,c);
  431. ROUND1_20_TO_39(c,d,e,a,b);
  432. ROUND1_20_TO_39(b,c,d,e,a);
  433. ROUND1_20_TO_39(a,b,c,d,e);
  434. ROUND1_20_TO_39(e,a,b,c,d);
  435. ROUND1_20_TO_39(d,e,a,b,c);
  436. ROUND1_20_TO_39(c,d,e,a,b);
  437. ROUND1_20_TO_39(b,c,d,e,a);
  438. ROUND1_20_TO_39(a,b,c,d,e);
  439. ROUND1_20_TO_39(e,a,b,c,d);
  440. ROUND1_20_TO_39(d,e,a,b,c);
  441. ROUND1_20_TO_39(c,d,e,a,b);
  442. ROUND1_20_TO_39(b,c,d,e,a);
  443. ROUND1_20_TO_39(a,b,c,d,e);
  444. ROUND1_20_TO_39(e,a,b,c,d);
  445. ROUND1_20_TO_39(d,e,a,b,c);
  446. ROUND1_20_TO_39(c,d,e,a,b);
  447. ROUND1_20_TO_39(b,c,d,e,a);
  448. /* Rounds 40 to 59 unrolled: */
  449. ROUND1_40_TO_59(a,b,c,d,e);
  450. ROUND1_40_TO_59(e,a,b,c,d);
  451. ROUND1_40_TO_59(d,e,a,b,c);
  452. ROUND1_40_TO_59(c,d,e,a,b);
  453. ROUND1_40_TO_59(b,c,d,e,a);
  454. ROUND1_40_TO_59(a,b,c,d,e);
  455. ROUND1_40_TO_59(e,a,b,c,d);
  456. ROUND1_40_TO_59(d,e,a,b,c);
  457. ROUND1_40_TO_59(c,d,e,a,b);
  458. ROUND1_40_TO_59(b,c,d,e,a);
  459. ROUND1_40_TO_59(a,b,c,d,e);
  460. ROUND1_40_TO_59(e,a,b,c,d);
  461. ROUND1_40_TO_59(d,e,a,b,c);
  462. ROUND1_40_TO_59(c,d,e,a,b);
  463. ROUND1_40_TO_59(b,c,d,e,a);
  464. ROUND1_40_TO_59(a,b,c,d,e);
  465. ROUND1_40_TO_59(e,a,b,c,d);
  466. ROUND1_40_TO_59(d,e,a,b,c);
  467. ROUND1_40_TO_59(c,d,e,a,b);
  468. ROUND1_40_TO_59(b,c,d,e,a);
  469. /* Rounds 60 to 79 unrolled: */
  470. ROUND1_60_TO_79(a,b,c,d,e);
  471. ROUND1_60_TO_79(e,a,b,c,d);
  472. ROUND1_60_TO_79(d,e,a,b,c);
  473. ROUND1_60_TO_79(c,d,e,a,b);
  474. ROUND1_60_TO_79(b,c,d,e,a);
  475. ROUND1_60_TO_79(a,b,c,d,e);
  476. ROUND1_60_TO_79(e,a,b,c,d);
  477. ROUND1_60_TO_79(d,e,a,b,c);
  478. ROUND1_60_TO_79(c,d,e,a,b);
  479. ROUND1_60_TO_79(b,c,d,e,a);
  480. ROUND1_60_TO_79(a,b,c,d,e);
  481. ROUND1_60_TO_79(e,a,b,c,d);
  482. ROUND1_60_TO_79(d,e,a,b,c);
  483. ROUND1_60_TO_79(c,d,e,a,b);
  484. ROUND1_60_TO_79(b,c,d,e,a);
  485. ROUND1_60_TO_79(a,b,c,d,e);
  486. ROUND1_60_TO_79(e,a,b,c,d);
  487. ROUND1_60_TO_79(d,e,a,b,c);
  488. ROUND1_60_TO_79(c,d,e,a,b);
  489. ROUND1_60_TO_79(b,c,d,e,a);
  490. /* Compute the current intermediate hash value */
  491. context->s1.state[0] += a;
  492. context->s1.state[1] += b;
  493. context->s1.state[2] += c;
  494. context->s1.state[3] += d;
  495. context->s1.state[4] += e;
  496. /* Clean up */
  497. a = b = c = d = e = T1 = 0;
  498. }
  499. #else /* SHA2_UNROLL_TRANSFORM */
  500. void SHA1_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
  501. sha_word32 a, b, c, d, e;
  502. sha_word32 T1, *W1;
  503. int j;
  504. W1 = (sha_word32*)context->s1.buffer;
  505. /* Initialize registers with the prev. intermediate value */
  506. a = context->s1.state[0];
  507. b = context->s1.state[1];
  508. c = context->s1.state[2];
  509. d = context->s1.state[3];
  510. e = context->s1.state[4];
  511. j = 0;
  512. do {
  513. #if BYTE_ORDER == LITTLE_ENDIAN
  514. T1 = data[j];
  515. /* Copy data while converting to host byte order */
  516. REVERSE32(*data++, W1[j]);
  517. T1 = ROTL32(5, a) + Ch(b, c, d) + e + K1_0_TO_19 + W1[j];
  518. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  519. T1 = ROTL32(5, a) + Ch(b, c, d) + e + K1_0_TO_19 + (W1[j] = *data++);
  520. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  521. e = d;
  522. d = c;
  523. c = ROTL32(30, b);
  524. b = a;
  525. a = T1;
  526. j++;
  527. } while (j < 16);
  528. do {
  529. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  530. T1 = ROTL32(5, a) + Ch(b,c,d) + e + K1_0_TO_19 + (W1[j&0x0f] = ROTL32(1, T1));
  531. e = d;
  532. d = c;
  533. c = ROTL32(30, b);
  534. b = a;
  535. a = T1;
  536. j++;
  537. } while (j < 20);
  538. do {
  539. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  540. T1 = ROTL32(5, a) + Parity(b,c,d) + e + K1_20_TO_39 + (W1[j&0x0f] = ROTL32(1, T1));
  541. e = d;
  542. d = c;
  543. c = ROTL32(30, b);
  544. b = a;
  545. a = T1;
  546. j++;
  547. } while (j < 40);
  548. do {
  549. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  550. T1 = ROTL32(5, a) + Maj(b,c,d) + e + K1_40_TO_59 + (W1[j&0x0f] = ROTL32(1, T1));
  551. e = d;
  552. d = c;
  553. c = ROTL32(30, b);
  554. b = a;
  555. a = T1;
  556. j++;
  557. } while (j < 60);
  558. do {
  559. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  560. T1 = ROTL32(5, a) + Parity(b,c,d) + e + K1_60_TO_79 + (W1[j&0x0f] = ROTL32(1, T1));
  561. e = d;
  562. d = c;
  563. c = ROTL32(30, b);
  564. b = a;
  565. a = T1;
  566. j++;
  567. } while (j < 80);
  568. /* Compute the current intermediate hash value */
  569. context->s1.state[0] += a;
  570. context->s1.state[1] += b;
  571. context->s1.state[2] += c;
  572. context->s1.state[3] += d;
  573. context->s1.state[4] += e;
  574. /* Clean up */
  575. a = b = c = d = e = T1 = 0;
  576. }
  577. #endif /* SHA2_UNROLL_TRANSFORM */
  578. void SHA1_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
  579. unsigned int freespace, usedspace;
  580. if (len == 0) {
  581. /* Calling with no data is valid - we do nothing */
  582. return;
  583. }
  584. /* Sanity check: */
  585. assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);
  586. usedspace = (unsigned int)((context->s1.bitcount >> 3) % 64);
  587. if (usedspace > 0) {
  588. /* Calculate how much free space is available in the buffer */
  589. freespace = 64 - usedspace;
  590. if (len >= freespace) {
  591. /* Fill the buffer completely and process it */
  592. MEMCPY_BCOPY(&context->s1.buffer[usedspace], data, freespace);
  593. context->s1.bitcount += freespace << 3;
  594. len -= freespace;
  595. data += freespace;
  596. SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
  597. } else {
  598. /* The buffer is not yet full */
  599. MEMCPY_BCOPY(&context->s1.buffer[usedspace], data, len);
  600. context->s1.bitcount += len << 3;
  601. /* Clean up: */
  602. usedspace = freespace = 0;
  603. return;
  604. }
  605. }
  606. while (len >= 64) {
  607. /* Process as many complete blocks as we can */
  608. SHA1_Internal_Transform(context, (sha_word32*)data);
  609. context->s1.bitcount += 512;
  610. len -= 64;
  611. data += 64;
  612. }
  613. if (len > 0) {
  614. /* There's left-overs, so save 'em */
  615. MEMCPY_BCOPY(context->s1.buffer, data, len);
  616. context->s1.bitcount += len << 3;
  617. }
  618. /* Clean up: */
  619. usedspace = freespace = 0;
  620. }
  621. void SHA1_Final(sha_byte digest[], SHA_CTX* context) {
  622. sha_word32 *d = (sha_word32*)digest;
  623. unsigned int usedspace;
  624. /* Sanity check: */
  625. assert(context != (SHA_CTX*)0);
  626. if (digest == (sha_byte*)0) {
  627. /*
  628. * No digest buffer, so we can do nothing
  629. * except clean up and go home
  630. */
  631. MEMSET_BZERO(context, sizeof(*context));
  632. return;
  633. }
  634. usedspace = (unsigned int)((context->s1.bitcount >> 3) % 64);
  635. if (usedspace == 0) {
  636. /* Set-up for the last transform: */
  637. MEMSET_BZERO(context->s1.buffer, 56);
  638. /* Begin padding with a 1 bit: */
  639. *context->s1.buffer = 0x80;
  640. } else {
  641. /* Begin padding with a 1 bit: */
  642. context->s1.buffer[usedspace++] = 0x80;
  643. if (usedspace <= 56) {
  644. /* Set-up for the last transform: */
  645. MEMSET_BZERO(&context->s1.buffer[usedspace], 56 - usedspace);
  646. } else {
  647. if (usedspace < 64) {
  648. MEMSET_BZERO(&context->s1.buffer[usedspace], 64 - usedspace);
  649. }
  650. /* Do second-to-last transform: */
  651. SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
  652. /* And set-up for the last transform: */
  653. MEMSET_BZERO(context->s1.buffer, 56);
  654. }
  655. /* Clean up: */
  656. usedspace = 0;
  657. }
  658. /* Set the bit count: */
  659. #if BYTE_ORDER == LITTLE_ENDIAN
  660. /* Convert FROM host byte order */
  661. REVERSE64(context->s1.bitcount,context->s1.bitcount);
  662. #endif
  663. MEMCPY_BCOPY(&context->s1.buffer[56], &context->s1.bitcount,
  664. sizeof(sha_word64));
  665. /* Final transform: */
  666. SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
  667. /* Save the hash data for output: */
  668. #if BYTE_ORDER == LITTLE_ENDIAN
  669. {
  670. /* Convert TO host byte order */
  671. int j;
  672. for (j = 0; j < (SHA1_DIGEST_LENGTH >> 2); j++) {
  673. REVERSE32(context->s1.state[j],context->s1.state[j]);
  674. *d++ = context->s1.state[j];
  675. }
  676. }
  677. #else
  678. MEMCPY_BCOPY(d, context->s1.state, SHA1_DIGEST_LENGTH);
  679. #endif
  680. /* Clean up: */
  681. MEMSET_BZERO(context, sizeof(*context));
  682. }
  683. char *SHA1_End(SHA_CTX* context, char buffer[]) {
  684. sha_byte digest[SHA1_DIGEST_LENGTH], *d = digest;
  685. int i;
  686. /* Sanity check: */
  687. assert(context != (SHA_CTX*)0);
  688. if (buffer != (char*)0) {
  689. SHA1_Final(digest, context);
  690. for (i = 0; i < SHA1_DIGEST_LENGTH; i++) {
  691. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  692. *buffer++ = sha_hex_digits[*d & 0x0f];
  693. d++;
  694. }
  695. *buffer = (char)0;
  696. } else {
  697. MEMSET_BZERO(context, sizeof(*context));
  698. }
  699. MEMSET_BZERO(digest, SHA1_DIGEST_LENGTH);
  700. return buffer;
  701. }
  702. char* SHA1_Data(const sha_byte* data, size_t len, char digest[SHA1_DIGEST_STRING_LENGTH]) {
  703. SHA_CTX context;
  704. SHA1_Init(&context);
  705. SHA1_Update(&context, data, len);
  706. return SHA1_End(&context, digest);
  707. }
  708. /*** SHA-256: *********************************************************/
  709. void SHA256_Internal_Init(SHA_CTX* context, const sha_word32* ihv) {
  710. /* Sanity check: */
  711. assert(context != (SHA_CTX*)0);
  712. MEMCPY_BCOPY(context->s256.state, ihv, sizeof(sha_word32) * 8);
  713. MEMSET_BZERO(context->s256.buffer, 64);
  714. context->s256.bitcount = 0;
  715. }
  716. void SHA256_Init(SHA_CTX* context) {
  717. SHA256_Internal_Init(context, sha256_initial_hash_value);
  718. }
  719. #ifdef SHA2_UNROLL_TRANSFORM
  720. /* Unrolled SHA-256 round macros: */
  721. #if BYTE_ORDER == LITTLE_ENDIAN
  722. #define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
  723. REVERSE32(*data++, W256[j]); \
  724. T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
  725. K256[j] + W256[j]; \
  726. (d) += T1; \
  727. (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
  728. j++
  729. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  730. #define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
  731. T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
  732. K256[j] + (W256[j] = *data++); \
  733. (d) += T1; \
  734. (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
  735. j++
  736. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  737. #define ROUND256(a,b,c,d,e,f,g,h) \
  738. s0 = W256[(j+1)&0x0f]; \
  739. s0 = sigma0_256(s0); \
  740. s1 = W256[(j+14)&0x0f]; \
  741. s1 = sigma1_256(s1); \
  742. T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \
  743. (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
  744. (d) += T1; \
  745. (h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
  746. j++
  747. void SHA256_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
  748. sha_word32 a, b, c, d, e, f, g, h, s0, s1;
  749. sha_word32 T1, *W256;
  750. int j;
  751. W256 = (sha_word32*)context->s256.buffer;
  752. /* Initialize registers with the prev. intermediate value */
  753. a = context->s256.state[0];
  754. b = context->s256.state[1];
  755. c = context->s256.state[2];
  756. d = context->s256.state[3];
  757. e = context->s256.state[4];
  758. f = context->s256.state[5];
  759. g = context->s256.state[6];
  760. h = context->s256.state[7];
  761. j = 0;
  762. do {
  763. /* Rounds 0 to 15 (unrolled): */
  764. ROUND256_0_TO_15(a,b,c,d,e,f,g,h);
  765. ROUND256_0_TO_15(h,a,b,c,d,e,f,g);
  766. ROUND256_0_TO_15(g,h,a,b,c,d,e,f);
  767. ROUND256_0_TO_15(f,g,h,a,b,c,d,e);
  768. ROUND256_0_TO_15(e,f,g,h,a,b,c,d);
  769. ROUND256_0_TO_15(d,e,f,g,h,a,b,c);
  770. ROUND256_0_TO_15(c,d,e,f,g,h,a,b);
  771. ROUND256_0_TO_15(b,c,d,e,f,g,h,a);
  772. } while (j < 16);
  773. /* Now for the remaining rounds to 64: */
  774. do {
  775. ROUND256(a,b,c,d,e,f,g,h);
  776. ROUND256(h,a,b,c,d,e,f,g);
  777. ROUND256(g,h,a,b,c,d,e,f);
  778. ROUND256(f,g,h,a,b,c,d,e);
  779. ROUND256(e,f,g,h,a,b,c,d);
  780. ROUND256(d,e,f,g,h,a,b,c);
  781. ROUND256(c,d,e,f,g,h,a,b);
  782. ROUND256(b,c,d,e,f,g,h,a);
  783. } while (j < 64);
  784. /* Compute the current intermediate hash value */
  785. context->s256.state[0] += a;
  786. context->s256.state[1] += b;
  787. context->s256.state[2] += c;
  788. context->s256.state[3] += d;
  789. context->s256.state[4] += e;
  790. context->s256.state[5] += f;
  791. context->s256.state[6] += g;
  792. context->s256.state[7] += h;
  793. /* Clean up */
  794. a = b = c = d = e = f = g = h = T1 = 0;
  795. }
  796. #else /* SHA2_UNROLL_TRANSFORM */
  797. void SHA256_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
  798. sha_word32 a, b, c, d, e, f, g, h, s0, s1;
  799. sha_word32 T1, T2, *W256;
  800. int j;
  801. W256 = (sha_word32*)context->s256.buffer;
  802. /* Initialize registers with the prev. intermediate value */
  803. a = context->s256.state[0];
  804. b = context->s256.state[1];
  805. c = context->s256.state[2];
  806. d = context->s256.state[3];
  807. e = context->s256.state[4];
  808. f = context->s256.state[5];
  809. g = context->s256.state[6];
  810. h = context->s256.state[7];
  811. j = 0;
  812. do {
  813. #if BYTE_ORDER == LITTLE_ENDIAN
  814. /* Copy data while converting to host byte order */
  815. REVERSE32(*data++,W256[j]);
  816. /* Apply the SHA-256 compression function to update a..h */
  817. T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
  818. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  819. /* Apply the SHA-256 compression function to update a..h with copy */
  820. T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
  821. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  822. T2 = Sigma0_256(a) + Maj(a, b, c);
  823. h = g;
  824. g = f;
  825. f = e;
  826. e = d + T1;
  827. d = c;
  828. c = b;
  829. b = a;
  830. a = T1 + T2;
  831. j++;
  832. } while (j < 16);
  833. do {
  834. /* Part of the message block expansion: */
  835. s0 = W256[(j+1)&0x0f];
  836. s0 = sigma0_256(s0);
  837. s1 = W256[(j+14)&0x0f];
  838. s1 = sigma1_256(s1);
  839. /* Apply the SHA-256 compression function to update a..h */
  840. T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] +
  841. (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0);
  842. T2 = Sigma0_256(a) + Maj(a, b, c);
  843. h = g;
  844. g = f;
  845. f = e;
  846. e = d + T1;
  847. d = c;
  848. c = b;
  849. b = a;
  850. a = T1 + T2;
  851. j++;
  852. } while (j < 64);
  853. /* Compute the current intermediate hash value */
  854. context->s256.state[0] += a;
  855. context->s256.state[1] += b;
  856. context->s256.state[2] += c;
  857. context->s256.state[3] += d;
  858. context->s256.state[4] += e;
  859. context->s256.state[5] += f;
  860. context->s256.state[6] += g;
  861. context->s256.state[7] += h;
  862. /* Clean up */
  863. a = b = c = d = e = f = g = h = T1 = T2 = 0;
  864. }
  865. #endif /* SHA2_UNROLL_TRANSFORM */
  866. void SHA256_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
  867. unsigned int freespace, usedspace;
  868. if (len == 0) {
  869. /* Calling with no data is valid - we do nothing */
  870. return;
  871. }
  872. /* Sanity check: */
  873. assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);
  874. usedspace = (unsigned int)((context->s256.bitcount >> 3) % 64);
  875. if (usedspace > 0) {
  876. /* Calculate how much free space is available in the buffer */
  877. freespace = 64 - usedspace;
  878. if (len >= freespace) {
  879. /* Fill the buffer completely and process it */
  880. MEMCPY_BCOPY(&context->s256.buffer[usedspace], data, freespace);
  881. context->s256.bitcount += freespace << 3;
  882. len -= freespace;
  883. data += freespace;
  884. SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);
  885. } else {
  886. /* The buffer is not yet full */
  887. MEMCPY_BCOPY(&context->s256.buffer[usedspace], data, len);
  888. context->s256.bitcount += len << 3;
  889. /* Clean up: */
  890. usedspace = freespace = 0;
  891. return;
  892. }
  893. }
  894. while (len >= 64) {
  895. /* Process as many complete blocks as we can */
  896. SHA256_Internal_Transform(context, (sha_word32*)data);
  897. context->s256.bitcount += 512;
  898. len -= 64;
  899. data += 64;
  900. }
  901. if (len > 0) {
  902. /* There's left-overs, so save 'em */
  903. MEMCPY_BCOPY(context->s256.buffer, data, len);
  904. context->s256.bitcount += len << 3;
  905. }
  906. /* Clean up: */
  907. usedspace = freespace = 0;
  908. }
  909. void SHA256_Internal_Last(SHA_CTX* context) {
  910. unsigned int usedspace;
  911. usedspace = (unsigned int)((context->s256.bitcount >> 3) % 64);
  912. #if BYTE_ORDER == LITTLE_ENDIAN
  913. /* Convert FROM host byte order */
  914. REVERSE64(context->s256.bitcount,context->s256.bitcount);
  915. #endif
  916. if (usedspace > 0) {
  917. /* Begin padding with a 1 bit: */
  918. context->s256.buffer[usedspace++] = 0x80;
  919. if (usedspace <= 56) {
  920. /* Set-up for the last transform: */
  921. MEMSET_BZERO(&context->s256.buffer[usedspace], 56 - usedspace);
  922. } else {
  923. if (usedspace < 64) {
  924. MEMSET_BZERO(&context->s256.buffer[usedspace], 64 - usedspace);
  925. }
  926. /* Do second-to-last transform: */
  927. SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);
  928. /* And set-up for the last transform: */
  929. MEMSET_BZERO(context->s256.buffer, 56);
  930. }
  931. /* Clean up: */
  932. usedspace = 0;
  933. } else {
  934. /* Set-up for the last transform: */
  935. MEMSET_BZERO(context->s256.buffer, 56);
  936. /* Begin padding with a 1 bit: */
  937. *context->s256.buffer = 0x80;
  938. }
  939. /* Set the bit count: */
  940. MEMCPY_BCOPY(&context->s256.buffer[56], &context->s256.bitcount,
  941. sizeof(sha_word64));
  942. /* Final transform: */
  943. SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);
  944. }
  945. void SHA256_Final(sha_byte digest[], SHA_CTX* context) {
  946. sha_word32 *d = (sha_word32*)digest;
  947. /* Sanity check: */
  948. assert(context != (SHA_CTX*)0);
  949. /* If no digest buffer is passed, we don't bother doing this: */
  950. if (digest != (sha_byte*)0) {
  951. SHA256_Internal_Last(context);
  952. /* Save the hash data for output: */
  953. #if BYTE_ORDER == LITTLE_ENDIAN
  954. {
  955. /* Convert TO host byte order */
  956. int j;
  957. for (j = 0; j < (SHA256_DIGEST_LENGTH >> 2); j++) {
  958. REVERSE32(context->s256.state[j],context->s256.state[j]);
  959. *d++ = context->s256.state[j];
  960. }
  961. }
  962. #else
  963. MEMCPY_BCOPY(d, context->s256.state, SHA256_DIGEST_LENGTH);
  964. #endif
  965. }
  966. /* Clean up state data: */
  967. MEMSET_BZERO(context, sizeof(*context));
  968. }
  969. char *SHA256_End(SHA_CTX* context, char buffer[]) {
  970. sha_byte digest[SHA256_DIGEST_LENGTH], *d = digest;
  971. int i;
  972. /* Sanity check: */
  973. assert(context != (SHA_CTX*)0);
  974. if (buffer != (char*)0) {
  975. SHA256_Final(digest, context);
  976. for (i = 0; i < SHA256_DIGEST_LENGTH; i++) {
  977. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  978. *buffer++ = sha_hex_digits[*d & 0x0f];
  979. d++;
  980. }
  981. *buffer = (char)0;
  982. } else {
  983. MEMSET_BZERO(context, sizeof(*context));
  984. }
  985. MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH);
  986. return buffer;
  987. }
  988. char* SHA256_Data(const sha_byte* data, size_t len, char digest[SHA256_DIGEST_STRING_LENGTH]) {
  989. SHA_CTX context;
  990. SHA256_Init(&context);
  991. SHA256_Update(&context, data, len);
  992. return SHA256_End(&context, digest);
  993. }
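/*
 * Usage sketch: for a complete message, the streaming interface and
 * the one-shot helper above produce the same result.  For example:
 *
 *   char hex[SHA256_DIGEST_STRING_LENGTH];
 *   SHA_CTX ctx;
 *
 *   SHA256_Init(&ctx);
 *   SHA256_Update(&ctx, (const sha_byte*)"abc", 3);
 *   SHA256_End(&ctx, hex);
 *
 * leaves the 64-character lowercase digest (plus a terminating NUL)
 * in hex, as does the single call
 *
 *   SHA256_Data((const sha_byte*)"abc", 3, hex);
 *
 * The SHA-1/224/384/512 entry points follow the same pattern.
 */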
  994. /*** SHA-224: *********************************************************/
  995. void SHA224_Init(SHA_CTX* context) {
  996. SHA256_Internal_Init(context, sha224_initial_hash_value);
  997. }
  998. void SHA224_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
  999. SHA256_Internal_Transform(context, data);
  1000. }
  1001. void SHA224_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
  1002. SHA256_Update(context, data, len);
  1003. }
  1004. void SHA224_Final(sha_byte digest[], SHA_CTX* context) {
  1005. sha_word32 *d = (sha_word32*)digest;
  1006. /* Sanity check: */
  1007. assert(context != (SHA_CTX*)0);
  1008. /* If no digest buffer is passed, we don't bother doing this: */
  1009. if (digest != (sha_byte*)0) {
  1010. SHA256_Internal_Last(context);
  1011. /* Save the hash data for output: */
  1012. #if BYTE_ORDER == LITTLE_ENDIAN
  1013. {
  1014. /* Convert TO host byte order */
  1015. int j;
  1016. for (j = 0; j < (SHA224_DIGEST_LENGTH >> 2); j++) {
  1017. REVERSE32(context->s256.state[j],context->s256.state[j]);
  1018. *d++ = context->s256.state[j];
  1019. }
  1020. }
  1021. #else
  1022. MEMCPY_BCOPY(d, context->s256.state, SHA224_DIGEST_LENGTH);
  1023. #endif
  1024. }
  1025. /* Clean up state data: */
  1026. MEMSET_BZERO(context, sizeof(*context));
  1027. }
  1028. char *SHA224_End(SHA_CTX* context, char buffer[]) {
  1029. sha_byte digest[SHA224_DIGEST_LENGTH], *d = digest;
  1030. int i;
  1031. /* Sanity check: */
  1032. assert(context != (SHA_CTX*)0);
  1033. if (buffer != (char*)0) {
  1034. SHA224_Final(digest, context);
  1035. for (i = 0; i < SHA224_DIGEST_LENGTH; i++) {
  1036. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1037. *buffer++ = sha_hex_digits[*d & 0x0f];
  1038. d++;
  1039. }
  1040. *buffer = (char)0;
  1041. } else {
  1042. MEMSET_BZERO(context, sizeof(*context));
  1043. }
  1044. MEMSET_BZERO(digest, SHA224_DIGEST_LENGTH);
  1045. return buffer;
  1046. }
  1047. char* SHA224_Data(const sha_byte* data, size_t len, char digest[SHA224_DIGEST_STRING_LENGTH]) {
  1048. SHA_CTX context;
  1049. SHA224_Init(&context);
  1050. SHA224_Update(&context, data, len);
  1051. return SHA224_End(&context, digest);
  1052. }
  1053. /*** SHA-512: *********************************************************/
  1054. void SHA512_Internal_Init(SHA_CTX* context, const sha_word64* ihv) {
  1055. /* Sanity check: */
  1056. assert(context != (SHA_CTX*)0);
  1057. MEMCPY_BCOPY(context->s512.state, ihv, sizeof(sha_word64) * 8);
  1058. MEMSET_BZERO(context->s512.buffer, 128);
  1059. context->s512.bitcount[0] = context->s512.bitcount[1] = 0;
  1060. }
  1061. void SHA512_Init(SHA_CTX* context) {
  1062. SHA512_Internal_Init(context, sha512_initial_hash_value);
  1063. }
  1064. #ifdef SHA2_UNROLL_TRANSFORM
  1065. /* Unrolled SHA-512 round macros: */
  1066. #if BYTE_ORDER == LITTLE_ENDIAN
  1067. #define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
  1068. REVERSE64(*data++, W512[j]); \
  1069. T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
  1070. K512[j] + W512[j]; \
  1071. (d) += T1, \
  1072. (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \
  1073. j++
  1074. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  1075. #define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
  1076. T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
  1077. K512[j] + (W512[j] = *data++); \
  1078. (d) += T1; \
  1079. (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
  1080. j++
  1081. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  1082. #define ROUND512(a,b,c,d,e,f,g,h) \
  1083. s0 = W512[(j+1)&0x0f]; \
  1084. s0 = sigma0_512(s0); \
  1085. s1 = W512[(j+14)&0x0f]; \
  1086. s1 = sigma1_512(s1); \
  1087. T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \
  1088. (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
  1089. (d) += T1; \
  1090. (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
  1091. j++
  1092. void SHA512_Internal_Transform(SHA_CTX* context, const sha_word64* data) {
  1093. sha_word64 a, b, c, d, e, f, g, h, s0, s1;
  1094. sha_word64 T1, *W512 = (sha_word64*)context->s512.buffer;
  1095. int j;
  1096. /* Initialize registers with the prev. intermediate value */
  1097. a = context->s512.state[0];
  1098. b = context->s512.state[1];
  1099. c = context->s512.state[2];
  1100. d = context->s512.state[3];
  1101. e = context->s512.state[4];
  1102. f = context->s512.state[5];
  1103. g = context->s512.state[6];
  1104. h = context->s512.state[7];
  1105. j = 0;
  1106. do {
  1107. ROUND512_0_TO_15(a,b,c,d,e,f,g,h);
  1108. ROUND512_0_TO_15(h,a,b,c,d,e,f,g);
  1109. ROUND512_0_TO_15(g,h,a,b,c,d,e,f);
  1110. ROUND512_0_TO_15(f,g,h,a,b,c,d,e);
  1111. ROUND512_0_TO_15(e,f,g,h,a,b,c,d);
  1112. ROUND512_0_TO_15(d,e,f,g,h,a,b,c);
  1113. ROUND512_0_TO_15(c,d,e,f,g,h,a,b);
  1114. ROUND512_0_TO_15(b,c,d,e,f,g,h,a);
  1115. } while (j < 16);
  1116. /* Now for the remaining rounds up to 79: */
  1117. do {
  1118. ROUND512(a,b,c,d,e,f,g,h);
  1119. ROUND512(h,a,b,c,d,e,f,g);
  1120. ROUND512(g,h,a,b,c,d,e,f);
  1121. ROUND512(f,g,h,a,b,c,d,e);
  1122. ROUND512(e,f,g,h,a,b,c,d);
  1123. ROUND512(d,e,f,g,h,a,b,c);
  1124. ROUND512(c,d,e,f,g,h,a,b);
  1125. ROUND512(b,c,d,e,f,g,h,a);
  1126. } while (j < 80);
  1127. /* Compute the current intermediate hash value */
  1128. context->s512.state[0] += a;
  1129. context->s512.state[1] += b;
  1130. context->s512.state[2] += c;
  1131. context->s512.state[3] += d;
  1132. context->s512.state[4] += e;
  1133. context->s512.state[5] += f;
  1134. context->s512.state[6] += g;
  1135. context->s512.state[7] += h;
  1136. /* Clean up */
  1137. a = b = c = d = e = f = g = h = T1 = 0;
  1138. }
  1139. #else /* SHA2_UNROLL_TRANSFORM */
  1140. void SHA512_Internal_Transform(SHA_CTX* context, const sha_word64* data) {
  1141. sha_word64 a, b, c, d, e, f, g, h, s0, s1;
  1142. sha_word64 T1, T2, *W512 = (sha_word64*)context->s512.buffer;
  1143. int j;
  1144. /* Initialize registers with the prev. intermediate value */
  1145. a = context->s512.state[0];
  1146. b = context->s512.state[1];
  1147. c = context->s512.state[2];
  1148. d = context->s512.state[3];
  1149. e = context->s512.state[4];
  1150. f = context->s512.state[5];
  1151. g = context->s512.state[6];
  1152. h = context->s512.state[7];
  1153. j = 0;
  1154. do {
  1155. #if BYTE_ORDER == LITTLE_ENDIAN
  1156. /* Convert TO host byte order */
  1157. REVERSE64(*data++, W512[j]);
  1158. /* Apply the SHA-512 compression function to update a..h */
  1159. T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
  1160. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  1161. /* Apply the SHA-512 compression function to update a..h with copy */
  1162. T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++);
  1163. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  1164. T2 = Sigma0_512(a) + Maj(a, b, c);
  1165. h = g;
  1166. g = f;
  1167. f = e;
  1168. e = d + T1;
  1169. d = c;
  1170. c = b;
  1171. b = a;
  1172. a = T1 + T2;
  1173. j++;
  1174. } while (j < 16);
  1175. do {
  1176. /* Part of the message block expansion: */
  1177. s0 = W512[(j+1)&0x0f];
  1178. s0 = sigma0_512(s0);
  1179. s1 = W512[(j+14)&0x0f];
  1180. s1 = sigma1_512(s1);
  1181. /* Apply the SHA-512 compression function to update a..h */
  1182. T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] +
  1183. (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0);
  1184. T2 = Sigma0_512(a) + Maj(a, b, c);
  1185. h = g;
  1186. g = f;
  1187. f = e;
  1188. e = d + T1;
  1189. d = c;
  1190. c = b;
  1191. b = a;
  1192. a = T1 + T2;
  1193. j++;
  1194. } while (j < 80);
  1195. /* Compute the current intermediate hash value */
  1196. context->s512.state[0] += a;
  1197. context->s512.state[1] += b;
  1198. context->s512.state[2] += c;
  1199. context->s512.state[3] += d;
  1200. context->s512.state[4] += e;
  1201. context->s512.state[5] += f;
  1202. context->s512.state[6] += g;
  1203. context->s512.state[7] += h;
  1204. /* Clean up */
  1205. a = b = c = d = e = f = g = h = T1 = T2 = 0;
  1206. }
  1207. #endif /* SHA2_UNROLL_TRANSFORM */
  1208. void SHA512_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
  1209. unsigned int freespace, usedspace;
  1210. if (len == 0) {
  1211. /* Calling with no data is valid - we do nothing */
  1212. return;
  1213. }
  1214. /* Sanity check: */
  1215. assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);
  1216. usedspace = (unsigned int)((context->s512.bitcount[0] >> 3) % 128);
  1217. if (usedspace > 0) {
  1218. /* Calculate how much free space is available in the buffer */
  1219. freespace = 128 - usedspace;
  1220. if (len >= freespace) {
  1221. /* Fill the buffer completely and process it */
  1222. MEMCPY_BCOPY(&context->s512.buffer[usedspace], data, freespace);
  1223. ADDINC128(context->s512.bitcount, freespace << 3);
  1224. len -= freespace;
  1225. data += freespace;
  1226. SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);
  1227. } else {
  1228. /* The buffer is not yet full */
  1229. MEMCPY_BCOPY(&context->s512.buffer[usedspace], data, len);
  1230. ADDINC128(context->s512.bitcount, len << 3);
  1231. /* Clean up: */
  1232. usedspace = freespace = 0;
  1233. return;
  1234. }
  1235. }
  1236. while (len >= 128) {
  1237. /* Process as many complete blocks as we can */
  1238. SHA512_Internal_Transform(context, (sha_word64*)data);
  1239. ADDINC128(context->s512.bitcount, 1024);
  1240. len -= 128;
  1241. data += 128;
  1242. }
  1243. if (len > 0) {
  1244. /* There's left-overs, so save 'em */
  1245. MEMCPY_BCOPY(context->s512.buffer, data, len);
  1246. ADDINC128(context->s512.bitcount, len << 3);
  1247. }
  1248. /* Clean up: */
  1249. usedspace = freespace = 0;
  1250. }
  1251. void SHA512_Internal_Last(SHA_CTX* context) {
  1252. unsigned int usedspace;
  1253. usedspace = (unsigned int)((context->s512.bitcount[0] >> 3) % 128);
  1254. #if BYTE_ORDER == LITTLE_ENDIAN
  1255. /* Convert FROM host byte order */
  1256. REVERSE64(context->s512.bitcount[0],context->s512.bitcount[0]);
  1257. REVERSE64(context->s512.bitcount[1],context->s512.bitcount[1]);
  1258. #endif
  1259. if (usedspace > 0) {
  1260. /* Begin padding with a 1 bit: */
  1261. context->s512.buffer[usedspace++] = 0x80;
  1262. if (usedspace <= 112) {
  1263. /* Set-up for the last transform: */
  1264. MEMSET_BZERO(&context->s512.buffer[usedspace], 112 - usedspace);
  1265. } else {
  1266. if (usedspace < 128) {
  1267. MEMSET_BZERO(&context->s512.buffer[usedspace], 128 - usedspace);
  1268. }
  1269. /* Do second-to-last transform: */
  1270. SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);
  1271. /* And set-up for the last transform: */
  1272. MEMSET_BZERO(context->s512.buffer, 112);
  1273. }
  1274. /* Clean up: */
  1275. usedspace = 0;
  1276. } else {
  1277. /* Prepare for final transform: */
  1278. MEMSET_BZERO(context->s512.buffer, 112);
  1279. /* Begin padding with a 1 bit: */
  1280. *context->s512.buffer = 0x80;
  1281. }
  1282. /* Store the length of input data (in bits): */
  1283. MEMCPY_BCOPY(&context->s512.buffer[112], &context->s512.bitcount[1],
  1284. sizeof(sha_word64));
  1285. MEMCPY_BCOPY(&context->s512.buffer[120], &context->s512.bitcount[0],
  1286. sizeof(sha_word64));
  1287. /* Final transform: */
  1288. SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);
  1289. }
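/*
 * Note: unlike SHA-1/224/256, which store a 64-bit length in the last
 * 8 bytes of the final 64-byte block, SHA-384/512 reserve the last 16
 * bytes of the final 128-byte block.  The high-order word of the
 * 128-bit bit count is written at offset 112 and the low-order word
 * at offset 120, so the length field is big-endian end to end.
 */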
  1290. void SHA512_Final(sha_byte digest[], SHA_CTX* context) {
  1291. sha_word64 *d = (sha_word64*)digest;
  1292. /* Sanity check: */
  1293. assert(context != (SHA_CTX*)0);
  1294. /* If no digest buffer is passed, we don't bother doing this: */
  1295. if (digest != (sha_byte*)0) {
  1296. SHA512_Internal_Last(context);
  1297. /* Save the hash data for output: */
  1298. #if BYTE_ORDER == LITTLE_ENDIAN
  1299. {
  1300. /* Convert TO host byte order */
  1301. int j;
  1302. for (j = 0; j < (SHA512_DIGEST_LENGTH >> 3); j++) {
  1303. REVERSE64(context->s512.state[j],context->s512.state[j]);
  1304. *d++ = context->s512.state[j];
  1305. }
  1306. }
  1307. #else
  1308. MEMCPY_BCOPY(d, context->s512.state, SHA512_DIGEST_LENGTH);
  1309. #endif
  1310. }
  1311. /* Zero out state data */
  1312. MEMSET_BZERO(context, sizeof(*context));
  1313. }
  1314. char *SHA512_End(SHA_CTX* context, char buffer[]) {
  1315. sha_byte digest[SHA512_DIGEST_LENGTH], *d = digest;
  1316. int i;
  1317. /* Sanity check: */
  1318. assert(context != (SHA_CTX*)0);
  1319. if (buffer != (char*)0) {
  1320. SHA512_Final(digest, context);
  1321. for (i = 0; i < SHA512_DIGEST_LENGTH; i++) {
  1322. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1323. *buffer++ = sha_hex_digits[*d & 0x0f];
  1324. d++;
  1325. }
  1326. *buffer = (char)0;
  1327. } else {
  1328. MEMSET_BZERO(context, sizeof(*context));
  1329. }
  1330. MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH);
  1331. return buffer;
  1332. }
  1333. char* SHA512_Data(const sha_byte* data, size_t len, char digest[SHA512_DIGEST_STRING_LENGTH]) {
  1334. SHA_CTX context;
  1335. SHA512_Init(&context);
  1336. SHA512_Update(&context, data, len);
  1337. return SHA512_End(&context, digest);
  1338. }
  1339. /*** SHA-384: *********************************************************/
  1340. void SHA384_Init(SHA_CTX* context) {
  1341. SHA512_Internal_Init(context, sha384_initial_hash_value);
  1342. }
  1343. void SHA384_Update(SHA_CTX* context, const sha_byte* data, size_t len) {
  1344. SHA512_Update(context, data, len);
  1345. }
  1346. void SHA384_Final(sha_byte digest[], SHA_CTX* context) {
  1347. sha_word64 *d = (sha_word64*)digest;
  1348. /* Sanity check: */
  1349. assert(context != (SHA_CTX*)0);
  1350. /* If no digest buffer is passed, we don't bother doing this: */
  1351. if (digest != (sha_byte*)0) {
  1352. SHA512_Internal_Last(context);
  1353. /* Save the hash data for output: */
  1354. #if BYTE_ORDER == LITTLE_ENDIAN
  1355. {
  1356. /* Convert TO host byte order */
  1357. int j;
  1358. for (j = 0; j < (SHA384_DIGEST_LENGTH >> 3); j++) {
  1359. REVERSE64(context->s512.state[j],context->s512.state[j]);
  1360. *d++ = context->s512.state[j];
  1361. }
  1362. }
  1363. #else
  1364. MEMCPY_BCOPY(d, context->s512.state, SHA384_DIGEST_LENGTH);
  1365. #endif
  1366. }
  1367. /* Zero out state data */
  1368. MEMSET_BZERO(context, sizeof(*context));
  1369. }
  1370. char *SHA384_End(SHA_CTX* context, char buffer[]) {
  1371. sha_byte digest[SHA384_DIGEST_LENGTH], *d = digest;
  1372. int i;
  1373. /* Sanity check: */
  1374. assert(context != (SHA_CTX*)0);
  1375. if (buffer != (char*)0) {
  1376. SHA384_Final(digest, context);
  1377. for (i = 0; i < SHA384_DIGEST_LENGTH; i++) {
  1378. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1379. *buffer++ = sha_hex_digits[*d & 0x0f];
  1380. d++;
  1381. }
  1382. *buffer = (char)0;
  1383. } else {
  1384. MEMSET_BZERO(context, sizeof(*context));
  1385. }
  1386. MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH);
  1387. return buffer;
  1388. }
  1389. char* SHA384_Data(const sha_byte* data, size_t len, char digest[SHA384_DIGEST_STRING_LENGTH]) {
  1390. SHA_CTX context;
  1391. SHA384_Init(&context);
  1392. SHA384_Update(&context, data, len);
  1393. return SHA384_End(&context, digest);
  1394. }