/*
 * FILE: sha2.c
 * AUTHOR: Aaron D. Gifford
 * http://www.aarongifford.com/computers/sha.html
 *
 * Copyright (c) 2000-2003, Aaron D. Gifford
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the copyright holder nor the names of contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: sha2.c,v 1.4 2004/01/07 22:58:18 adg Exp $
 */
#include <string.h> /* memcpy()/memset() or bcopy()/bzero() */
#include <assert.h> /* assert() */
#include "cm_sha2.h" /* "sha2.h" -> "cm_sha2.h" renamed for CMake */

/*
 * ASSERT NOTE:
 * Some sanity checking code is included using assert(). On my FreeBSD
 * system, this additional code can be removed by compiling with NDEBUG
 * defined. Check your own systems manpage on assert() to see how to
 * compile WITHOUT the sanity checking code on your system.
 *
 * UNROLLED TRANSFORM LOOP NOTE:
 * You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform
 * loop version for the hash transform rounds (defined using macros
 * later in this file). Either define on the command line, for example:
 *
 * cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c
 *
 * or define below:
 *
 * #define SHA2_UNROLL_TRANSFORM
 *
 */

/*** SHA-224/256/384/512 Machine Architecture Definitions *************/
/*
 * BYTE_ORDER NOTE:
 *
 * Please make sure that your system defines BYTE_ORDER. If your
 * architecture is little-endian, make sure it also defines
 * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
 * equivalent.
 *
 * If your system does not define the above, then you can do so by
 * hand like this:
 *
 * #define LITTLE_ENDIAN 1234
 * #define BIG_ENDIAN 4321
 *
 * And for little-endian machines, add:
 *
 * #define BYTE_ORDER LITTLE_ENDIAN
 *
 * Or for big-endian machines:
 *
 * #define BYTE_ORDER BIG_ENDIAN
 *
 * The FreeBSD machine this was written on defines BYTE_ORDER
 * appropriately by including <sys/types.h> (which in turn includes
 * <machine/endian.h> where the appropriate definitions are actually
 * made).
 */
#if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN)
/* CMake modification: use byte order from cmIML. */
# include "cmIML/ABI.h"
# undef BYTE_ORDER
# undef BIG_ENDIAN
# undef LITTLE_ENDIAN
# define BYTE_ORDER cmIML_ABI_ENDIAN_ID
# define BIG_ENDIAN cmIML_ABI_ENDIAN_ID_BIG
# define LITTLE_ENDIAN cmIML_ABI_ENDIAN_ID_LITTLE
#endif
/* CMake modification: use types computed in header. */
typedef cm_sha2_uint8_t sha_byte; /* Exactly 1 byte */
typedef cm_sha2_uint32_t sha_word32; /* Exactly 4 bytes */
typedef cm_sha2_uint64_t sha_word64; /* Exactly 8 bytes */

/*** ENDIAN REVERSAL MACROS *******************************************/
#if BYTE_ORDER == LITTLE_ENDIAN
/* Byte-swap the 32-bit word (w), storing the result in (x). */
#define REVERSE32(w,x) { \
	sha_word32 tmp = (w); \
	tmp = (tmp >> 16) | (tmp << 16); \
	(x) = ((tmp & 0xff00ff00UL) >> 8) | ((tmp & 0x00ff00ffUL) << 8); \
}
/* Byte-swap the 64-bit word (w), storing the result in (x). */
#define REVERSE64(w,x) { \
	sha_word64 tmp = (w); \
	tmp = (tmp >> 32) | (tmp << 32); \
	tmp = ((tmp & 0xff00ff00ff00ff00ULL) >> 8) | \
	      ((tmp & 0x00ff00ff00ff00ffULL) << 8); \
	(x) = ((tmp & 0xffff0000ffff0000ULL) >> 16) | \
	      ((tmp & 0x0000ffff0000ffffULL) << 16); \
}
#endif /* BYTE_ORDER == LITTLE_ENDIAN */

/*
 * Macro for incrementally adding the unsigned 64-bit integer n to the
 * unsigned 128-bit integer (represented using a two-element array of
 * 64-bit words):
 */
#define ADDINC128(w,n) { \
	(w)[0] += (sha_word64)(n); \
	if ((w)[0] < (n)) { \
		(w)[1]++; \
	} \
}

/*
 * Macros for copying blocks of memory and for zeroing out ranges
 * of memory. Using these macros makes it easy to switch from
 * using memset()/memcpy() and using bzero()/bcopy().
 *
 * Please define either SHA2_USE_MEMSET_MEMCPY or define
 * SHA2_USE_BZERO_BCOPY depending on which function set you
 * choose to use:
 */
#if !defined(SHA2_USE_MEMSET_MEMCPY) && !defined(SHA2_USE_BZERO_BCOPY)
/* Default to memset()/memcpy() if no option is specified */
#define SHA2_USE_MEMSET_MEMCPY 1
#endif
#if defined(SHA2_USE_MEMSET_MEMCPY) && defined(SHA2_USE_BZERO_BCOPY)
/* Abort with an error if BOTH options are defined */
#error Define either SHA2_USE_MEMSET_MEMCPY or SHA2_USE_BZERO_BCOPY, not both!
#endif
#ifdef SHA2_USE_MEMSET_MEMCPY
#define MEMSET_BZERO(p,l) memset((p), 0, (l))
#define MEMCPY_BCOPY(d,s,l) memcpy((d), (s), (l))
#endif
#ifdef SHA2_USE_BZERO_BCOPY
#define MEMSET_BZERO(p,l) bzero((p), (l))
#define MEMCPY_BCOPY(d,s,l) bcopy((s), (d), (l))
#endif
/*** THE SIX LOGICAL FUNCTIONS ****************************************/
/*
 * Bit shifting and rotation (used by the six SHA-XYZ logical functions):
 *
 * NOTE: In the original SHA-256/384/512 document, the shift-right
 * function was named R and the rotate-right function was called S.
 * (See: http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf on the
 * web.)
 *
 * The newer NIST FIPS 180-2 document uses a much clearer naming
 * scheme, SHR for shift-right, ROTR for rotate-right, and ROTL for
 * rotate-left. (See:
 * http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
 * on the web.)
 *
 * WARNING: These macros must be used cautiously, since they reference
 * supplied parameters sometimes more than once, and thus could have
 * unexpected side-effects if used without taking this into account.
 */
/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
#define SHR(b,x) ((x) >> (b))
/* 32-bit Rotate-right (used in SHA-256): */
#define ROTR32(b,x) (((x) >> (b)) | ((x) << (32 - (b))))
/* 64-bit Rotate-right (used in SHA-384 and SHA-512): */
#define ROTR64(b,x) (((x) >> (b)) | ((x) << (64 - (b))))
/* 32-bit Rotate-left (used in SHA-1): */
#define ROTL32(b,x) (((x) << (b)) | ((x) >> (32 - (b))))
/* Two logical functions used in SHA-1, SHA-224, SHA-256, SHA-384, and SHA-512: */
#define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
#define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
/* Function used in SHA-1: */
#define Parity(x,y,z) ((x) ^ (y) ^ (z))
/* Four logical functions used in SHA-256: */
#define Sigma0_256(x) (ROTR32(2, (x)) ^ ROTR32(13, (x)) ^ ROTR32(22, (x)))
#define Sigma1_256(x) (ROTR32(6, (x)) ^ ROTR32(11, (x)) ^ ROTR32(25, (x)))
#define sigma0_256(x) (ROTR32(7, (x)) ^ ROTR32(18, (x)) ^ SHR( 3 , (x)))
#define sigma1_256(x) (ROTR32(17, (x)) ^ ROTR32(19, (x)) ^ SHR( 10, (x)))
/* Four of six logical functions used in SHA-384 and SHA-512: */
#define Sigma0_512(x) (ROTR64(28, (x)) ^ ROTR64(34, (x)) ^ ROTR64(39, (x)))
#define Sigma1_512(x) (ROTR64(14, (x)) ^ ROTR64(18, (x)) ^ ROTR64(41, (x)))
#define sigma0_512(x) (ROTR64( 1, (x)) ^ ROTR64( 8, (x)) ^ SHR( 7, (x)))
#define sigma1_512(x) (ROTR64(19, (x)) ^ ROTR64(61, (x)) ^ SHR( 6, (x)))

/*** INTERNAL FUNCTION PROTOTYPES *************************************/
/* SHA-224 and SHA-256: */
void SHA256_Internal_Init(SHA_CTX*, const sha_word32*);
void SHA256_Internal_Last(SHA_CTX*);
void SHA256_Internal_Transform(SHA_CTX*, const sha_word32*);
/* SHA-384 and SHA-512: */
void SHA512_Internal_Init(SHA_CTX*, const sha_word64*);
void SHA512_Internal_Last(SHA_CTX*);
void SHA512_Internal_Transform(SHA_CTX*, const sha_word64*);
/*** SHA2 INITIAL HASH VALUES AND CONSTANTS ***************************/
/* Hash constant words K for SHA-1 (one constant per 20-round group): */
#define K1_0_TO_19 0x5a827999UL
#define K1_20_TO_39 0x6ed9eba1UL
#define K1_40_TO_59 0x8f1bbcdcUL
#define K1_60_TO_79 0xca62c1d6UL
/* Initial hash value H for SHA-1: */
static const sha_word32 sha1_initial_hash_value[5] = {
	0x67452301UL,
	0xefcdab89UL,
	0x98badcfeUL,
	0x10325476UL,
	0xc3d2e1f0UL
};
/* Hash constant words K for SHA-224 and SHA-256 (FIPS 180-2 section 4.2.2): */
static const sha_word32 K256[64] = {
	0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
	0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
	0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
	0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
	0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
	0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
	0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
	0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
	0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
	0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
	0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
	0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
	0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
	0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
	0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
	0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
};
/* Initial hash value H for SHA-224 (FIPS 180-2 change notice, section 5.3.2): */
static const sha_word32 sha224_initial_hash_value[8] = {
	0xc1059ed8UL,
	0x367cd507UL,
	0x3070dd17UL,
	0xf70e5939UL,
	0xffc00b31UL,
	0x68581511UL,
	0x64f98fa7UL,
	0xbefa4fa4UL
};
/* Initial hash value H for SHA-256 (FIPS 180-2 section 5.3.2): */
static const sha_word32 sha256_initial_hash_value[8] = {
	0x6a09e667UL,
	0xbb67ae85UL,
	0x3c6ef372UL,
	0xa54ff53aUL,
	0x510e527fUL,
	0x9b05688cUL,
	0x1f83d9abUL,
	0x5be0cd19UL
};
/* Hash constant words K for SHA-384 and SHA-512 (FIPS 180-2 section 4.2.3): */
static const sha_word64 K512[80] = {
	0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
	0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
	0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
	0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
	0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
	0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
	0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
	0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
	0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
	0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
	0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
	0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
	0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
	0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
	0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
	0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
	0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
	0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
	0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
	0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
	0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
	0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
	0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
	0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
	0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
	0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
	0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
	0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
	0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
	0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
	0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
	0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
	0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
	0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
	0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
	0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
	0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
	0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
	0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
	0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
};
/* Initial hash value H for SHA-384 (FIPS 180-2 section 5.3.3): */
static const sha_word64 sha384_initial_hash_value[8] = {
	0xcbbb9d5dc1059ed8ULL,
	0x629a292a367cd507ULL,
	0x9159015a3070dd17ULL,
	0x152fecd8f70e5939ULL,
	0x67332667ffc00b31ULL,
	0x8eb44a8768581511ULL,
	0xdb0c2e0d64f98fa7ULL,
	0x47b5481dbefa4fa4ULL
};
/* Initial hash value H for SHA-512 (FIPS 180-2 section 5.3.4): */
static const sha_word64 sha512_initial_hash_value[8] = {
	0x6a09e667f3bcc908ULL,
	0xbb67ae8584caa73bULL,
	0x3c6ef372fe94f82bULL,
	0xa54ff53a5f1d36f1ULL,
	0x510e527fade682d1ULL,
	0x9b05688c2b3e6c1fULL,
	0x1f83d9abfb41bd6bULL,
	0x5be0cd19137e2179ULL
};
/*
 * Constant used by SHA224/256/384/512_End() functions for converting the
 * digest to a readable hexadecimal character string:
 */
static const char *sha_hex_digits = "0123456789abcdef";
  327. /*** SHA-1: ***********************************************************/
  328. void SHA1_Init(SHA_CTX* context) {
  329. /* Sanity check: */
  330. assert(context != (SHA_CTX*)0);
  331. MEMCPY_BCOPY(context->s1.state, sha1_initial_hash_value, sizeof(sha_word32) * 5);
  332. MEMSET_BZERO(context->s1.buffer, 64);
  333. context->s1.bitcount = 0;
  334. }
#ifdef SHA2_UNROLL_TRANSFORM

/* Unrolled SHA-1 round macros: */

#if BYTE_ORDER == LITTLE_ENDIAN
/* Rounds 0-15: message words are byte-swapped into host order as they are
 * loaded from *data into the 16-word schedule W1. */
#define ROUND1_0_TO_15(a,b,c,d,e) \
	REVERSE32(*data++, W1[j]); \
	(e) = ROTL32(5, (a)) + Ch((b), (c), (d)) + (e) + \
	K1_0_TO_19 + W1[j]; \
	(b) = ROTL32(30, (b)); \
	j++;
#else /* BYTE_ORDER == LITTLE_ENDIAN */
/* Big-endian: words are already in host order; load while mixing. */
#define ROUND1_0_TO_15(a,b,c,d,e) \
	(e) = ROTL32(5, (a)) + Ch((b), (c), (d)) + (e) + \
	K1_0_TO_19 + ( W1[j] = *data++ ); \
	(b) = ROTL32(30, (b)); \
	j++;
#endif /* BYTE_ORDER == LITTLE_ENDIAN */

/* Rounds 16-79 expand the schedule in place: the (j+13)/(j+8)/(j+2)/j
 * indices, masked with 0x0f, walk a 16-word circular buffer, implementing
 * W[t] = ROTL1(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]). The four macros
 * differ only in the mixing function (Ch/Parity/Maj/Parity) and the
 * round constant. */
#define ROUND1_16_TO_19(a,b,c,d,e) \
	T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
	(e) = ROTL32(5, a) + Ch(b,c,d) + e + K1_0_TO_19 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
	(b) = ROTL32(30, b); \
	j++;
#define ROUND1_20_TO_39(a,b,c,d,e) \
	T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
	(e) = ROTL32(5, a) + Parity(b,c,d) + e + K1_20_TO_39 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
	(b) = ROTL32(30, b); \
	j++;
#define ROUND1_40_TO_59(a,b,c,d,e) \
	T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
	(e) = ROTL32(5, a) + Maj(b,c,d) + e + K1_40_TO_59 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
	(b) = ROTL32(30, b); \
	j++;
#define ROUND1_60_TO_79(a,b,c,d,e) \
	T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
	(e) = ROTL32(5, a) + Parity(b,c,d) + e + K1_60_TO_79 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
	(b) = ROTL32(30, b); \
	j++;
/*
 * SHA-1 block transform (unrolled version): mixes one 64-byte message
 * block, pointed to by `data`, into the running hash in context->s1.state.
 * The round macros advance `data` and `j` as side effects; arguments are
 * rotated between calls instead of shuffling the five working registers.
 */
void SHA1_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
	sha_word32 a, b, c, d, e;
	sha_word32 T1, *W1;
	int j;
	/* The context buffer doubles as the 16-word message schedule. */
	W1 = (sha_word32*)context->s1.buffer;
	/* Initialize registers with the prev. intermediate value */
	a = context->s1.state[0];
	b = context->s1.state[1];
	c = context->s1.state[2];
	d = context->s1.state[3];
	e = context->s1.state[4];
	j = 0;
	/* Rounds 0 to 15 unrolled: */
	ROUND1_0_TO_15(a,b,c,d,e);
	ROUND1_0_TO_15(e,a,b,c,d);
	ROUND1_0_TO_15(d,e,a,b,c);
	ROUND1_0_TO_15(c,d,e,a,b);
	ROUND1_0_TO_15(b,c,d,e,a);
	ROUND1_0_TO_15(a,b,c,d,e);
	ROUND1_0_TO_15(e,a,b,c,d);
	ROUND1_0_TO_15(d,e,a,b,c);
	ROUND1_0_TO_15(c,d,e,a,b);
	ROUND1_0_TO_15(b,c,d,e,a);
	ROUND1_0_TO_15(a,b,c,d,e);
	ROUND1_0_TO_15(e,a,b,c,d);
	ROUND1_0_TO_15(d,e,a,b,c);
	ROUND1_0_TO_15(c,d,e,a,b);
	ROUND1_0_TO_15(b,c,d,e,a);
	ROUND1_0_TO_15(a,b,c,d,e);
	/* Rounds 16 to 19 unrolled: */
	ROUND1_16_TO_19(e,a,b,c,d);
	ROUND1_16_TO_19(d,e,a,b,c);
	ROUND1_16_TO_19(c,d,e,a,b);
	ROUND1_16_TO_19(b,c,d,e,a);
	/* Rounds 20 to 39 unrolled: */
	ROUND1_20_TO_39(a,b,c,d,e);
	ROUND1_20_TO_39(e,a,b,c,d);
	ROUND1_20_TO_39(d,e,a,b,c);
	ROUND1_20_TO_39(c,d,e,a,b);
	ROUND1_20_TO_39(b,c,d,e,a);
	ROUND1_20_TO_39(a,b,c,d,e);
	ROUND1_20_TO_39(e,a,b,c,d);
	ROUND1_20_TO_39(d,e,a,b,c);
	ROUND1_20_TO_39(c,d,e,a,b);
	ROUND1_20_TO_39(b,c,d,e,a);
	ROUND1_20_TO_39(a,b,c,d,e);
	ROUND1_20_TO_39(e,a,b,c,d);
	ROUND1_20_TO_39(d,e,a,b,c);
	ROUND1_20_TO_39(c,d,e,a,b);
	ROUND1_20_TO_39(b,c,d,e,a);
	ROUND1_20_TO_39(a,b,c,d,e);
	ROUND1_20_TO_39(e,a,b,c,d);
	ROUND1_20_TO_39(d,e,a,b,c);
	ROUND1_20_TO_39(c,d,e,a,b);
	ROUND1_20_TO_39(b,c,d,e,a);
	/* Rounds 40 to 59 unrolled: */
	ROUND1_40_TO_59(a,b,c,d,e);
	ROUND1_40_TO_59(e,a,b,c,d);
	ROUND1_40_TO_59(d,e,a,b,c);
	ROUND1_40_TO_59(c,d,e,a,b);
	ROUND1_40_TO_59(b,c,d,e,a);
	ROUND1_40_TO_59(a,b,c,d,e);
	ROUND1_40_TO_59(e,a,b,c,d);
	ROUND1_40_TO_59(d,e,a,b,c);
	ROUND1_40_TO_59(c,d,e,a,b);
	ROUND1_40_TO_59(b,c,d,e,a);
	ROUND1_40_TO_59(a,b,c,d,e);
	ROUND1_40_TO_59(e,a,b,c,d);
	ROUND1_40_TO_59(d,e,a,b,c);
	ROUND1_40_TO_59(c,d,e,a,b);
	ROUND1_40_TO_59(b,c,d,e,a);
	ROUND1_40_TO_59(a,b,c,d,e);
	ROUND1_40_TO_59(e,a,b,c,d);
	ROUND1_40_TO_59(d,e,a,b,c);
	ROUND1_40_TO_59(c,d,e,a,b);
	ROUND1_40_TO_59(b,c,d,e,a);
	/* Rounds 60 to 79 unrolled: */
	ROUND1_60_TO_79(a,b,c,d,e);
	ROUND1_60_TO_79(e,a,b,c,d);
	ROUND1_60_TO_79(d,e,a,b,c);
	ROUND1_60_TO_79(c,d,e,a,b);
	ROUND1_60_TO_79(b,c,d,e,a);
	ROUND1_60_TO_79(a,b,c,d,e);
	ROUND1_60_TO_79(e,a,b,c,d);
	ROUND1_60_TO_79(d,e,a,b,c);
	ROUND1_60_TO_79(c,d,e,a,b);
	ROUND1_60_TO_79(b,c,d,e,a);
	ROUND1_60_TO_79(a,b,c,d,e);
	ROUND1_60_TO_79(e,a,b,c,d);
	ROUND1_60_TO_79(d,e,a,b,c);
	ROUND1_60_TO_79(c,d,e,a,b);
	ROUND1_60_TO_79(b,c,d,e,a);
	ROUND1_60_TO_79(a,b,c,d,e);
	ROUND1_60_TO_79(e,a,b,c,d);
	ROUND1_60_TO_79(d,e,a,b,c);
	ROUND1_60_TO_79(c,d,e,a,b);
	ROUND1_60_TO_79(b,c,d,e,a);
	/* Compute the current intermediate hash value */
	context->s1.state[0] += a;
	context->s1.state[1] += b;
	context->s1.state[2] += c;
	context->s1.state[3] += d;
	context->s1.state[4] += e;
	/* Clean up (NOTE: plain stores like this may be optimized away;
	 * kept for parity with the original code) */
	a = b = c = d = e = T1 = 0;
}
#else /* SHA2_UNROLL_TRANSFORM */
  478. void SHA1_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
  479. sha_word32 a, b, c, d, e;
  480. sha_word32 T1, *W1;
  481. int j;
  482. W1 = (sha_word32*)context->s1.buffer;
  483. /* Initialize registers with the prev. intermediate value */
  484. a = context->s1.state[0];
  485. b = context->s1.state[1];
  486. c = context->s1.state[2];
  487. d = context->s1.state[3];
  488. e = context->s1.state[4];
  489. j = 0;
  490. do {
  491. #if BYTE_ORDER == LITTLE_ENDIAN
  492. T1 = data[j];
  493. /* Copy data while converting to host byte order */
  494. REVERSE32(*data++, W1[j]);
  495. T1 = ROTL32(5, a) + Ch(b, c, d) + e + K1_0_TO_19 + W1[j];
  496. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  497. T1 = ROTL32(5, a) + Ch(b, c, d) + e + K1_0_TO_19 + (W1[j] = *data++);
  498. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  499. e = d;
  500. d = c;
  501. c = ROTL32(30, b);
  502. b = a;
  503. a = T1;
  504. j++;
  505. } while (j < 16);
  506. do {
  507. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  508. T1 = ROTL32(5, a) + Ch(b,c,d) + e + K1_0_TO_19 + (W1[j&0x0f] = ROTL32(1, T1));
  509. e = d;
  510. d = c;
  511. c = ROTL32(30, b);
  512. b = a;
  513. a = T1;
  514. j++;
  515. } while (j < 20);
  516. do {
  517. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  518. T1 = ROTL32(5, a) + Parity(b,c,d) + e + K1_20_TO_39 + (W1[j&0x0f] = ROTL32(1, T1));
  519. e = d;
  520. d = c;
  521. c = ROTL32(30, b);
  522. b = a;
  523. a = T1;
  524. j++;
  525. } while (j < 40);
  526. do {
  527. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  528. T1 = ROTL32(5, a) + Maj(b,c,d) + e + K1_40_TO_59 + (W1[j&0x0f] = ROTL32(1, T1));
  529. e = d;
  530. d = c;
  531. c = ROTL32(30, b);
  532. b = a;
  533. a = T1;
  534. j++;
  535. } while (j < 60);
  536. do {
  537. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  538. T1 = ROTL32(5, a) + Parity(b,c,d) + e + K1_60_TO_79 + (W1[j&0x0f] = ROTL32(1, T1));
  539. e = d;
  540. d = c;
  541. c = ROTL32(30, b);
  542. b = a;
  543. a = T1;
  544. j++;
  545. } while (j < 80);
  546. /* Compute the current intermediate hash value */
  547. context->s1.state[0] += a;
  548. context->s1.state[1] += b;
  549. context->s1.state[2] += c;
  550. context->s1.state[3] += d;
  551. context->s1.state[4] += e;
  552. /* Clean up */
  553. a = b = c = d = e = T1 = 0;
  554. }
  555. #endif /* SHA2_UNROLL_TRANSFORM */
  556. void SHA1_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
  557. unsigned int freespace, usedspace;
  558. if (len == 0) {
  559. /* Calling with no data is valid - we do nothing */
  560. return;
  561. }
  562. /* Sanity check: */
  563. assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);
  564. usedspace = (context->s1.bitcount >> 3) % 64;
  565. if (usedspace > 0) {
  566. /* Calculate how much free space is available in the buffer */
  567. freespace = 64 - usedspace;
  568. if (len >= freespace) {
  569. /* Fill the buffer completely and process it */
  570. MEMCPY_BCOPY(&context->s1.buffer[usedspace], data, freespace);
  571. context->s1.bitcount += freespace << 3;
  572. len -= freespace;
  573. data += freespace;
  574. SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
  575. } else {
  576. /* The buffer is not yet full */
  577. MEMCPY_BCOPY(&context->s1.buffer[usedspace], data, len);
  578. context->s1.bitcount += len << 3;
  579. /* Clean up: */
  580. usedspace = freespace = 0;
  581. return;
  582. }
  583. }
  584. while (len >= 64) {
  585. /* Process as many complete blocks as we can */
  586. SHA1_Internal_Transform(context, (sha_word32*)data);
  587. context->s1.bitcount += 512;
  588. len -= 64;
  589. data += 64;
  590. }
  591. if (len > 0) {
  592. /* There's left-overs, so save 'em */
  593. MEMCPY_BCOPY(context->s1.buffer, data, len);
  594. context->s1.bitcount += len << 3;
  595. }
  596. /* Clean up: */
  597. usedspace = freespace = 0;
  598. }
  599. void SHA1_Final(sha_byte digest[], SHA_CTX* context) {
  600. sha_word32 *d = (sha_word32*)digest;
  601. unsigned int usedspace;
  602. /* Sanity check: */
  603. assert(context != (SHA_CTX*)0);
  604. if (digest == (sha_byte*)0) {
  605. /*
  606. * No digest buffer, so we can do nothing
  607. * except clean up and go home
  608. */
  609. MEMSET_BZERO(context, sizeof(context));
  610. return;
  611. }
  612. usedspace = (context->s1.bitcount >> 3) % 64;
  613. if (usedspace == 0) {
  614. /* Set-up for the last transform: */
  615. MEMSET_BZERO(context->s1.buffer, 56);
  616. /* Begin padding with a 1 bit: */
  617. *context->s1.buffer = 0x80;
  618. } else {
  619. /* Begin padding with a 1 bit: */
  620. context->s1.buffer[usedspace++] = 0x80;
  621. if (usedspace <= 56) {
  622. /* Set-up for the last transform: */
  623. MEMSET_BZERO(&context->s1.buffer[usedspace], 56 - usedspace);
  624. } else {
  625. if (usedspace < 64) {
  626. MEMSET_BZERO(&context->s1.buffer[usedspace], 64 - usedspace);
  627. }
  628. /* Do second-to-last transform: */
  629. SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
  630. /* And set-up for the last transform: */
  631. MEMSET_BZERO(context->s1.buffer, 56);
  632. }
  633. /* Clean up: */
  634. usedspace = 0;
  635. }
  636. /* Set the bit count: */
  637. #if BYTE_ORDER == LITTLE_ENDIAN
  638. /* Convert FROM host byte order */
  639. REVERSE64(context->s1.bitcount,context->s1.bitcount);
  640. #endif
  641. *(sha_word64*)&context->s1.buffer[56] = context->s1.bitcount;
  642. /* Final transform: */
  643. SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
  644. /* Save the hash data for output: */
  645. #if BYTE_ORDER == LITTLE_ENDIAN
  646. {
  647. /* Convert TO host byte order */
  648. int j;
  649. for (j = 0; j < (SHA1_DIGEST_LENGTH >> 2); j++) {
  650. REVERSE32(context->s1.state[j],context->s1.state[j]);
  651. *d++ = context->s1.state[j];
  652. }
  653. }
  654. #else
  655. MEMCPY_BCOPY(d, context->s1.state, SHA1_DIGEST_LENGTH);
  656. #endif
  657. /* Clean up: */
  658. MEMSET_BZERO(context, sizeof(context));
  659. }
  660. char *SHA1_End(SHA_CTX* context, char buffer[]) {
  661. sha_byte digest[SHA1_DIGEST_LENGTH], *d = digest;
  662. int i;
  663. /* Sanity check: */
  664. assert(context != (SHA_CTX*)0);
  665. if (buffer != (char*)0) {
  666. SHA1_Final(digest, context);
  667. for (i = 0; i < SHA1_DIGEST_LENGTH; i++) {
  668. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  669. *buffer++ = sha_hex_digits[*d & 0x0f];
  670. d++;
  671. }
  672. *buffer = (char)0;
  673. } else {
  674. MEMSET_BZERO(context, sizeof(context));
  675. }
  676. MEMSET_BZERO(digest, SHA1_DIGEST_LENGTH);
  677. return buffer;
  678. }
  679. char* SHA1_Data(const sha_byte* data, size_t len, char digest[SHA1_DIGEST_STRING_LENGTH]) {
  680. SHA_CTX context;
  681. SHA1_Init(&context);
  682. SHA1_Update(&context, data, len);
  683. return SHA1_End(&context, digest);
  684. }
  685. /*** SHA-256: *********************************************************/
  686. void SHA256_Internal_Init(SHA_CTX* context, const sha_word32* ihv) {
  687. /* Sanity check: */
  688. assert(context != (SHA_CTX*)0);
  689. MEMCPY_BCOPY(context->s256.state, ihv, sizeof(sha_word32) * 8);
  690. MEMSET_BZERO(context->s256.buffer, 64);
  691. context->s256.bitcount = 0;
  692. }
/* SHA256_Init: start a new SHA-256 computation with the standard
 * (FIPS 180-2) initial hash value. */
void SHA256_Init(SHA_CTX* context) {
	SHA256_Internal_Init(context, sha256_initial_hash_value);
}
#ifdef SHA2_UNROLL_TRANSFORM

/* Unrolled SHA-256 round macros: */

#if BYTE_ORDER == LITTLE_ENDIAN

/* Rounds 0-15: byte-swap one input word into the schedule W256[j] while
 * applying the compression step; advances j as a side effect. */
#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
	REVERSE32(*data++, W256[j]); \
	T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
	     K256[j] + W256[j]; \
	(d) += T1; \
	(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
	j++

#else /* BYTE_ORDER == LITTLE_ENDIAN */

/* Big-endian variant: input words are already in host order, so they are
 * copied into the schedule inline. */
#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h) \
	T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
	     K256[j] + (W256[j] = *data++); \
	(d) += T1; \
	(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
	j++

#endif /* BYTE_ORDER == LITTLE_ENDIAN */

/* Rounds 16-63: message-schedule expansion over a 16-word circular
 * window (indices masked with 0x0f) plus the compression step. */
#define ROUND256(a,b,c,d,e,f,g,h) \
	s0 = W256[(j+1)&0x0f]; \
	s0 = sigma0_256(s0); \
	s1 = W256[(j+14)&0x0f]; \
	s1 = sigma1_256(s1); \
	T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \
	     (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
	(d) += T1; \
	(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
	j++

/* SHA256_Internal_Transform (unrolled): run the 64-round SHA-256
 * compression function on one 64-byte block at 'data', updating
 * context->s256.state in place.  The context's own block buffer is
 * reused as the 16-word schedule, so it is clobbered here. */
void SHA256_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
	sha_word32 a, b, c, d, e, f, g, h, s0, s1;
	sha_word32 T1, *W256;
	int j;

	W256 = (sha_word32*)context->s256.buffer;

	/* Initialize registers with the prev. intermediate value */
	a = context->s256.state[0];
	b = context->s256.state[1];
	c = context->s256.state[2];
	d = context->s256.state[3];
	e = context->s256.state[4];
	f = context->s256.state[5];
	g = context->s256.state[6];
	h = context->s256.state[7];

	j = 0;
	do {
		/* Rounds 0 to 15 (unrolled): */
		/* The working variables are rotated by argument order rather
		 * than by assignment - 8 rounds per pass brings them back
		 * into position. */
		ROUND256_0_TO_15(a,b,c,d,e,f,g,h);
		ROUND256_0_TO_15(h,a,b,c,d,e,f,g);
		ROUND256_0_TO_15(g,h,a,b,c,d,e,f);
		ROUND256_0_TO_15(f,g,h,a,b,c,d,e);
		ROUND256_0_TO_15(e,f,g,h,a,b,c,d);
		ROUND256_0_TO_15(d,e,f,g,h,a,b,c);
		ROUND256_0_TO_15(c,d,e,f,g,h,a,b);
		ROUND256_0_TO_15(b,c,d,e,f,g,h,a);
	} while (j < 16);

	/* Now for the remaining rounds to 64: */
	do {
		ROUND256(a,b,c,d,e,f,g,h);
		ROUND256(h,a,b,c,d,e,f,g);
		ROUND256(g,h,a,b,c,d,e,f);
		ROUND256(f,g,h,a,b,c,d,e);
		ROUND256(e,f,g,h,a,b,c,d);
		ROUND256(d,e,f,g,h,a,b,c);
		ROUND256(c,d,e,f,g,h,a,b);
		ROUND256(b,c,d,e,f,g,h,a);
	} while (j < 64);

	/* Compute the current intermediate hash value */
	context->s256.state[0] += a;
	context->s256.state[1] += b;
	context->s256.state[2] += c;
	context->s256.state[3] += d;
	context->s256.state[4] += e;
	context->s256.state[5] += f;
	context->s256.state[6] += g;
	context->s256.state[7] += h;

	/* Clean up */
	a = b = c = d = e = f = g = h = T1 = 0;
}

#else /* SHA2_UNROLL_TRANSFORM */

/* SHA256_Internal_Transform (looped): same contract as the unrolled
 * version above - compress one 64-byte block into context->s256.state,
 * using the context buffer as the message schedule. */
void SHA256_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
	sha_word32 a, b, c, d, e, f, g, h, s0, s1;
	sha_word32 T1, T2, *W256;
	int j;

	W256 = (sha_word32*)context->s256.buffer;

	/* Initialize registers with the prev. intermediate value */
	a = context->s256.state[0];
	b = context->s256.state[1];
	c = context->s256.state[2];
	d = context->s256.state[3];
	e = context->s256.state[4];
	f = context->s256.state[5];
	g = context->s256.state[6];
	h = context->s256.state[7];

	j = 0;
	do {
#if BYTE_ORDER == LITTLE_ENDIAN
		/* Copy data while converting to host byte order */
		REVERSE32(*data++,W256[j]);
		/* Apply the SHA-256 compression function to update a..h */
		T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
#else /* BYTE_ORDER == LITTLE_ENDIAN */
		/* Apply the SHA-256 compression function to update a..h with copy */
		T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
		T2 = Sigma0_256(a) + Maj(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;
		j++;
	} while (j < 16);

	do {
		/* Part of the message block expansion: */
		s0 = W256[(j+1)&0x0f];
		s0 = sigma0_256(s0);
		s1 = W256[(j+14)&0x0f];
		s1 = sigma1_256(s1);

		/* Apply the SHA-256 compression function to update a..h */
		T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] +
		     (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0);
		T2 = Sigma0_256(a) + Maj(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;
		j++;
	} while (j < 64);

	/* Compute the current intermediate hash value */
	context->s256.state[0] += a;
	context->s256.state[1] += b;
	context->s256.state[2] += c;
	context->s256.state[3] += d;
	context->s256.state[4] += e;
	context->s256.state[5] += f;
	context->s256.state[6] += g;
	context->s256.state[7] += h;

	/* Clean up */
	a = b = c = d = e = f = g = h = T1 = T2 = 0;
}

#endif /* SHA2_UNROLL_TRANSFORM */
/*
 * SHA256_Update: feed 'len' bytes of message data into the hash.  Input
 * is staged through the 64-byte context buffer; each time a full block
 * is available (buffered or directly from 'data') one transform is run.
 */
void SHA256_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
	unsigned int freespace, usedspace;

	if (len == 0) {
		/* Calling with no data is valid - we do nothing */
		return;
	}

	/* Sanity check: */
	assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);

	/* Bytes left over in the buffer from a previous call: */
	usedspace = (context->s256.bitcount >> 3) % 64;
	if (usedspace > 0) {
		/* Calculate how much free space is available in the buffer */
		freespace = 64 - usedspace;

		if (len >= freespace) {
			/* Fill the buffer completely and process it */
			MEMCPY_BCOPY(&context->s256.buffer[usedspace], data, freespace);
			context->s256.bitcount += freespace << 3;
			len -= freespace;
			data += freespace;
			SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);
		} else {
			/* The buffer is not yet full */
			MEMCPY_BCOPY(&context->s256.buffer[usedspace], data, len);
			context->s256.bitcount += len << 3;
			/* Clean up: */
			usedspace = freespace = 0;
			return;
		}
	}
	while (len >= 64) {
		/* Process as many complete blocks as we can */
		/* NOTE(review): 'data' may be unaligned here; the word cast
		 * assumes the platform tolerates that - confirm for new ports. */
		SHA256_Internal_Transform(context, (sha_word32*)data);
		context->s256.bitcount += 512;
		len -= 64;
		data += 64;
	}
	if (len > 0) {
		/* There's left-overs, so save 'em */
		MEMCPY_BCOPY(context->s256.buffer, data, len);
		context->s256.bitcount += len << 3;
	}
	/* Clean up: */
	usedspace = freespace = 0;
}
/*
 * SHA256_Internal_Last: shared finalization for SHA-256/224.  Appends
 * the 0x80 padding byte, zero-fills to the length field, stores the
 * 64-bit message bit count big-endian at buffer[56..63], and runs the
 * final transform(s).  The digest is left in context->s256.state.
 */
void SHA256_Internal_Last(SHA_CTX* context) {
	unsigned int usedspace;

	/* Bytes of message data in the final, partial block: */
	usedspace = (context->s256.bitcount >> 3) % 64;
#if BYTE_ORDER == LITTLE_ENDIAN
	/* Convert FROM host byte order */
	REVERSE64(context->s256.bitcount,context->s256.bitcount);
#endif
	if (usedspace > 0) {
		/* Begin padding with a 1 bit: */
		context->s256.buffer[usedspace++] = 0x80;

		if (usedspace <= 56) {
			/* Set-up for the last transform: */
			MEMSET_BZERO(&context->s256.buffer[usedspace], 56 - usedspace);
		} else {
			/* No room for the length field - spill into an extra block: */
			if (usedspace < 64) {
				MEMSET_BZERO(&context->s256.buffer[usedspace], 64 - usedspace);
			}
			/* Do second-to-last transform: */
			SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);

			/* And set-up for the last transform: */
			MEMSET_BZERO(context->s256.buffer, 56);
		}
		/* Clean up: */
		usedspace = 0;
	} else {
		/* Set-up for the last transform: */
		MEMSET_BZERO(context->s256.buffer, 56);

		/* Begin padding with a 1 bit: */
		*context->s256.buffer = 0x80;
	}
	/* Set the bit count: */
	*(sha_word64*)&context->s256.buffer[56] = context->s256.bitcount;

	/* Final transform: */
	SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);
}
  921. void SHA256_Final(sha_byte digest[], SHA_CTX* context) {
  922. sha_word32 *d = (sha_word32*)digest;
  923. /* Sanity check: */
  924. assert(context != (SHA_CTX*)0);
  925. /* If no digest buffer is passed, we don't bother doing this: */
  926. if (digest != (sha_byte*)0) {
  927. SHA256_Internal_Last(context);
  928. /* Save the hash data for output: */
  929. #if BYTE_ORDER == LITTLE_ENDIAN
  930. {
  931. /* Convert TO host byte order */
  932. int j;
  933. for (j = 0; j < (SHA256_DIGEST_LENGTH >> 2); j++) {
  934. REVERSE32(context->s256.state[j],context->s256.state[j]);
  935. *d++ = context->s256.state[j];
  936. }
  937. }
  938. #else
  939. MEMCPY_BCOPY(d, context->s256.state, SHA256_DIGEST_LENGTH);
  940. #endif
  941. }
  942. /* Clean up state data: */
  943. MEMSET_BZERO(context, sizeof(context));
  944. }
  945. char *SHA256_End(SHA_CTX* context, char buffer[]) {
  946. sha_byte digest[SHA256_DIGEST_LENGTH], *d = digest;
  947. int i;
  948. /* Sanity check: */
  949. assert(context != (SHA_CTX*)0);
  950. if (buffer != (char*)0) {
  951. SHA256_Final(digest, context);
  952. for (i = 0; i < SHA256_DIGEST_LENGTH; i++) {
  953. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  954. *buffer++ = sha_hex_digits[*d & 0x0f];
  955. d++;
  956. }
  957. *buffer = (char)0;
  958. } else {
  959. MEMSET_BZERO(context, sizeof(context));
  960. }
  961. MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH);
  962. return buffer;
  963. }
  964. char* SHA256_Data(const sha_byte* data, size_t len, char digest[SHA256_DIGEST_STRING_LENGTH]) {
  965. SHA_CTX context;
  966. SHA256_Init(&context);
  967. SHA256_Update(&context, data, len);
  968. return SHA256_End(&context, digest);
  969. }
  970. /*** SHA-224: *********************************************************/
/* SHA224_Init: start a new SHA-224 computation; identical machinery to
 * SHA-256 but with the SHA-224 initial hash value. */
void SHA224_Init(SHA_CTX* context) {
	SHA256_Internal_Init(context, sha224_initial_hash_value);
}
/* SHA224_Internal_Transform: SHA-224 uses the SHA-256 compression
 * function unchanged. */
void SHA224_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
	SHA256_Internal_Transform(context, data);
}
/* SHA224_Update: thin wrapper - SHA-224 input processing is exactly
 * SHA-256's. */
void SHA224_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
	SHA256_Update(context, data, len);
}
  980. void SHA224_Final(sha_byte digest[], SHA_CTX* context) {
  981. sha_word32 *d = (sha_word32*)digest;
  982. /* Sanity check: */
  983. assert(context != (SHA_CTX*)0);
  984. /* If no digest buffer is passed, we don't bother doing this: */
  985. if (digest != (sha_byte*)0) {
  986. SHA256_Internal_Last(context);
  987. /* Save the hash data for output: */
  988. #if BYTE_ORDER == LITTLE_ENDIAN
  989. {
  990. /* Convert TO host byte order */
  991. int j;
  992. for (j = 0; j < (SHA224_DIGEST_LENGTH >> 2); j++) {
  993. REVERSE32(context->s256.state[j],context->s256.state[j]);
  994. *d++ = context->s256.state[j];
  995. }
  996. }
  997. #else
  998. MEMCPY_BCOPY(d, context->s256.state, SHA224_DIGEST_LENGTH);
  999. #endif
  1000. }
  1001. /* Clean up state data: */
  1002. MEMSET_BZERO(context, sizeof(context));
  1003. }
  1004. char *SHA224_End(SHA_CTX* context, char buffer[]) {
  1005. sha_byte digest[SHA224_DIGEST_LENGTH], *d = digest;
  1006. int i;
  1007. /* Sanity check: */
  1008. assert(context != (SHA_CTX*)0);
  1009. if (buffer != (char*)0) {
  1010. SHA224_Final(digest, context);
  1011. for (i = 0; i < SHA224_DIGEST_LENGTH; i++) {
  1012. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1013. *buffer++ = sha_hex_digits[*d & 0x0f];
  1014. d++;
  1015. }
  1016. *buffer = (char)0;
  1017. } else {
  1018. MEMSET_BZERO(context, sizeof(context));
  1019. }
  1020. MEMSET_BZERO(digest, SHA224_DIGEST_LENGTH);
  1021. return buffer;
  1022. }
  1023. char* SHA224_Data(const sha_byte* data, size_t len, char digest[SHA224_DIGEST_STRING_LENGTH]) {
  1024. SHA_CTX context;
  1025. SHA224_Init(&context);
  1026. SHA224_Update(&context, data, len);
  1027. return SHA224_End(&context, digest);
  1028. }
  1029. /*** SHA-512: *********************************************************/
  1030. void SHA512_Internal_Init(SHA_CTX* context, const sha_word64* ihv) {
  1031. /* Sanity check: */
  1032. assert(context != (SHA_CTX*)0);
  1033. MEMCPY_BCOPY(context->s512.state, ihv, sizeof(sha_word64) * 8);
  1034. MEMSET_BZERO(context->s512.buffer, 128);
  1035. context->s512.bitcount[0] = context->s512.bitcount[1] = 0;
  1036. }
/* SHA512_Init: start a new SHA-512 computation with the standard
 * (FIPS 180-2) initial hash value. */
void SHA512_Init(SHA_CTX* context) {
	SHA512_Internal_Init(context, sha512_initial_hash_value);
}
#ifdef SHA2_UNROLL_TRANSFORM

/* Unrolled SHA-512 round macros: */

#if BYTE_ORDER == LITTLE_ENDIAN

/* Rounds 0-15: byte-swap one 64-bit input word into the schedule
 * W512[j] while applying the compression step; advances j.  (The last
 * three steps are chained with comma operators - behavior matches the
 * semicolon form used elsewhere.) */
#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
	REVERSE64(*data++, W512[j]); \
	T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
	     K512[j] + W512[j]; \
	(d) += T1, \
	(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \
	j++

#else /* BYTE_ORDER == LITTLE_ENDIAN */

/* Big-endian variant: input words are already in host order. */
#define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
	T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
	     K512[j] + (W512[j] = *data++); \
	(d) += T1; \
	(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
	j++

#endif /* BYTE_ORDER == LITTLE_ENDIAN */

/* Rounds 16-79: message-schedule expansion over a 16-word circular
 * window (indices masked with 0x0f) plus the compression step. */
#define ROUND512(a,b,c,d,e,f,g,h) \
	s0 = W512[(j+1)&0x0f]; \
	s0 = sigma0_512(s0); \
	s1 = W512[(j+14)&0x0f]; \
	s1 = sigma1_512(s1); \
	T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \
	     (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
	(d) += T1; \
	(h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
	j++

/* SHA512_Internal_Transform (unrolled): run the 80-round SHA-512
 * compression function on one 128-byte block at 'data', updating
 * context->s512.state in place.  The context's block buffer doubles as
 * the 16-word schedule, so it is clobbered here. */
void SHA512_Internal_Transform(SHA_CTX* context, const sha_word64* data) {
	sha_word64 a, b, c, d, e, f, g, h, s0, s1;
	sha_word64 T1, *W512 = (sha_word64*)context->s512.buffer;
	int j;

	/* Initialize registers with the prev. intermediate value */
	a = context->s512.state[0];
	b = context->s512.state[1];
	c = context->s512.state[2];
	d = context->s512.state[3];
	e = context->s512.state[4];
	f = context->s512.state[5];
	g = context->s512.state[6];
	h = context->s512.state[7];

	j = 0;
	do {
		/* Working variables rotate by argument order; 8 rounds per
		 * pass brings them back into position. */
		ROUND512_0_TO_15(a,b,c,d,e,f,g,h);
		ROUND512_0_TO_15(h,a,b,c,d,e,f,g);
		ROUND512_0_TO_15(g,h,a,b,c,d,e,f);
		ROUND512_0_TO_15(f,g,h,a,b,c,d,e);
		ROUND512_0_TO_15(e,f,g,h,a,b,c,d);
		ROUND512_0_TO_15(d,e,f,g,h,a,b,c);
		ROUND512_0_TO_15(c,d,e,f,g,h,a,b);
		ROUND512_0_TO_15(b,c,d,e,f,g,h,a);
	} while (j < 16);

	/* Now for the remaining rounds up to 79: */
	do {
		ROUND512(a,b,c,d,e,f,g,h);
		ROUND512(h,a,b,c,d,e,f,g);
		ROUND512(g,h,a,b,c,d,e,f);
		ROUND512(f,g,h,a,b,c,d,e);
		ROUND512(e,f,g,h,a,b,c,d);
		ROUND512(d,e,f,g,h,a,b,c);
		ROUND512(c,d,e,f,g,h,a,b);
		ROUND512(b,c,d,e,f,g,h,a);
	} while (j < 80);

	/* Compute the current intermediate hash value */
	context->s512.state[0] += a;
	context->s512.state[1] += b;
	context->s512.state[2] += c;
	context->s512.state[3] += d;
	context->s512.state[4] += e;
	context->s512.state[5] += f;
	context->s512.state[6] += g;
	context->s512.state[7] += h;

	/* Clean up */
	a = b = c = d = e = f = g = h = T1 = 0;
}

#else /* SHA2_UNROLL_TRANSFORM */

/* SHA512_Internal_Transform (looped): same contract as the unrolled
 * version above - compress one 128-byte block into context->s512.state,
 * using the context buffer as the message schedule. */
void SHA512_Internal_Transform(SHA_CTX* context, const sha_word64* data) {
	sha_word64 a, b, c, d, e, f, g, h, s0, s1;
	sha_word64 T1, T2, *W512 = (sha_word64*)context->s512.buffer;
	int j;

	/* Initialize registers with the prev. intermediate value */
	a = context->s512.state[0];
	b = context->s512.state[1];
	c = context->s512.state[2];
	d = context->s512.state[3];
	e = context->s512.state[4];
	f = context->s512.state[5];
	g = context->s512.state[6];
	h = context->s512.state[7];

	j = 0;
	do {
#if BYTE_ORDER == LITTLE_ENDIAN
		/* Convert TO host byte order */
		REVERSE64(*data++, W512[j]);
		/* Apply the SHA-512 compression function to update a..h */
		T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
#else /* BYTE_ORDER == LITTLE_ENDIAN */
		/* Apply the SHA-512 compression function to update a..h with copy */
		T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++);
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
		T2 = Sigma0_512(a) + Maj(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;
		j++;
	} while (j < 16);

	do {
		/* Part of the message block expansion: */
		s0 = W512[(j+1)&0x0f];
		s0 = sigma0_512(s0);
		s1 = W512[(j+14)&0x0f];
		s1 = sigma1_512(s1);

		/* Apply the SHA-512 compression function to update a..h */
		T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] +
		     (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0);
		T2 = Sigma0_512(a) + Maj(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;
		j++;
	} while (j < 80);

	/* Compute the current intermediate hash value */
	context->s512.state[0] += a;
	context->s512.state[1] += b;
	context->s512.state[2] += c;
	context->s512.state[3] += d;
	context->s512.state[4] += e;
	context->s512.state[5] += f;
	context->s512.state[6] += g;
	context->s512.state[7] += h;

	/* Clean up */
	a = b = c = d = e = f = g = h = T1 = T2 = 0;
}

#endif /* SHA2_UNROLL_TRANSFORM */
/*
 * SHA512_Update: feed 'len' bytes of message data into the hash.  Input
 * is staged through the 128-byte context buffer; the 128-bit bit count
 * is maintained with ADDINC128.
 */
void SHA512_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
	unsigned int freespace, usedspace;

	if (len == 0) {
		/* Calling with no data is valid - we do nothing */
		return;
	}

	/* Sanity check: */
	assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);

	/* Bytes left over in the buffer from a previous call: */
	usedspace = (context->s512.bitcount[0] >> 3) % 128;
	if (usedspace > 0) {
		/* Calculate how much free space is available in the buffer */
		freespace = 128 - usedspace;

		if (len >= freespace) {
			/* Fill the buffer completely and process it */
			MEMCPY_BCOPY(&context->s512.buffer[usedspace], data, freespace);
			ADDINC128(context->s512.bitcount, freespace << 3);
			len -= freespace;
			data += freespace;
			SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);
		} else {
			/* The buffer is not yet full */
			MEMCPY_BCOPY(&context->s512.buffer[usedspace], data, len);
			ADDINC128(context->s512.bitcount, len << 3);
			/* Clean up: */
			usedspace = freespace = 0;
			return;
		}
	}
	while (len >= 128) {
		/* Process as many complete blocks as we can */
		/* NOTE(review): 'data' may be unaligned here; the word cast
		 * assumes the platform tolerates that - confirm for new ports. */
		SHA512_Internal_Transform(context, (sha_word64*)data);
		ADDINC128(context->s512.bitcount, 1024);
		len -= 128;
		data += 128;
	}
	if (len > 0) {
		/* There's left-overs, so save 'em */
		MEMCPY_BCOPY(context->s512.buffer, data, len);
		ADDINC128(context->s512.bitcount, len << 3);
	}
	/* Clean up: */
	usedspace = freespace = 0;
}
/*
 * SHA512_Internal_Last: shared finalization for SHA-512/384.  Appends
 * the 0x80 padding byte, zero-fills to the length field, stores the
 * 128-bit message bit count big-endian at buffer[112..127] (high word
 * first), and runs the final transform(s).  The digest is left in
 * context->s512.state.
 */
void SHA512_Internal_Last(SHA_CTX* context) {
	unsigned int usedspace;

	/* Bytes of message data in the final, partial block: */
	usedspace = (context->s512.bitcount[0] >> 3) % 128;
#if BYTE_ORDER == LITTLE_ENDIAN
	/* Convert FROM host byte order */
	REVERSE64(context->s512.bitcount[0],context->s512.bitcount[0]);
	REVERSE64(context->s512.bitcount[1],context->s512.bitcount[1]);
#endif
	if (usedspace > 0) {
		/* Begin padding with a 1 bit: */
		context->s512.buffer[usedspace++] = 0x80;

		if (usedspace <= 112) {
			/* Set-up for the last transform: */
			MEMSET_BZERO(&context->s512.buffer[usedspace], 112 - usedspace);
		} else {
			/* No room for the length field - spill into an extra block: */
			if (usedspace < 128) {
				MEMSET_BZERO(&context->s512.buffer[usedspace], 128 - usedspace);
			}
			/* Do second-to-last transform: */
			SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);

			/* And set-up for the last transform: */
			MEMSET_BZERO(context->s512.buffer, 112);
		}
		/* Clean up: */
		usedspace = 0;
	} else {
		/* Prepare for final transform: */
		MEMSET_BZERO(context->s512.buffer, 112);

		/* Begin padding with a 1 bit: */
		*context->s512.buffer = 0x80;
	}
	/* Store the length of input data (in bits): */
	*(sha_word64*)&context->s512.buffer[112] = context->s512.bitcount[1];
	*(sha_word64*)&context->s512.buffer[120] = context->s512.bitcount[0];

	/* Final transform: */
	SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);
}
  1264. void SHA512_Final(sha_byte digest[], SHA_CTX* context) {
  1265. sha_word64 *d = (sha_word64*)digest;
  1266. /* Sanity check: */
  1267. assert(context != (SHA_CTX*)0);
  1268. /* If no digest buffer is passed, we don't bother doing this: */
  1269. if (digest != (sha_byte*)0) {
  1270. SHA512_Internal_Last(context);
  1271. /* Save the hash data for output: */
  1272. #if BYTE_ORDER == LITTLE_ENDIAN
  1273. {
  1274. /* Convert TO host byte order */
  1275. int j;
  1276. for (j = 0; j < (SHA512_DIGEST_LENGTH >> 3); j++) {
  1277. REVERSE64(context->s512.state[j],context->s512.state[j]);
  1278. *d++ = context->s512.state[j];
  1279. }
  1280. }
  1281. #else
  1282. MEMCPY_BCOPY(d, context->s512.state, SHA512_DIGEST_LENGTH);
  1283. #endif
  1284. }
  1285. /* Zero out state data */
  1286. MEMSET_BZERO(context, sizeof(context));
  1287. }
  1288. char *SHA512_End(SHA_CTX* context, char buffer[]) {
  1289. sha_byte digest[SHA512_DIGEST_LENGTH], *d = digest;
  1290. int i;
  1291. /* Sanity check: */
  1292. assert(context != (SHA_CTX*)0);
  1293. if (buffer != (char*)0) {
  1294. SHA512_Final(digest, context);
  1295. for (i = 0; i < SHA512_DIGEST_LENGTH; i++) {
  1296. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1297. *buffer++ = sha_hex_digits[*d & 0x0f];
  1298. d++;
  1299. }
  1300. *buffer = (char)0;
  1301. } else {
  1302. MEMSET_BZERO(context, sizeof(context));
  1303. }
  1304. MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH);
  1305. return buffer;
  1306. }
  1307. char* SHA512_Data(const sha_byte* data, size_t len, char digest[SHA512_DIGEST_STRING_LENGTH]) {
  1308. SHA_CTX context;
  1309. SHA512_Init(&context);
  1310. SHA512_Update(&context, data, len);
  1311. return SHA512_End(&context, digest);
  1312. }
  1313. /*** SHA-384: *********************************************************/
/* SHA384_Init: start a new SHA-384 computation; identical machinery to
 * SHA-512 but with the SHA-384 initial hash value. */
void SHA384_Init(SHA_CTX* context) {
	SHA512_Internal_Init(context, sha384_initial_hash_value);
}
/* SHA384_Update: thin wrapper - SHA-384 input processing is exactly
 * SHA-512's. */
void SHA384_Update(SHA_CTX* context, const sha_byte* data, size_t len) {
	SHA512_Update(context, data, len);
}
  1320. void SHA384_Final(sha_byte digest[], SHA_CTX* context) {
  1321. sha_word64 *d = (sha_word64*)digest;
  1322. /* Sanity check: */
  1323. assert(context != (SHA_CTX*)0);
  1324. /* If no digest buffer is passed, we don't bother doing this: */
  1325. if (digest != (sha_byte*)0) {
  1326. SHA512_Internal_Last(context);
  1327. /* Save the hash data for output: */
  1328. #if BYTE_ORDER == LITTLE_ENDIAN
  1329. {
  1330. /* Convert TO host byte order */
  1331. int j;
  1332. for (j = 0; j < (SHA384_DIGEST_LENGTH >> 3); j++) {
  1333. REVERSE64(context->s512.state[j],context->s512.state[j]);
  1334. *d++ = context->s512.state[j];
  1335. }
  1336. }
  1337. #else
  1338. MEMCPY_BCOPY(d, context->s512.state, SHA384_DIGEST_LENGTH);
  1339. #endif
  1340. }
  1341. /* Zero out state data */
  1342. MEMSET_BZERO(context, sizeof(context));
  1343. }
  1344. char *SHA384_End(SHA_CTX* context, char buffer[]) {
  1345. sha_byte digest[SHA384_DIGEST_LENGTH], *d = digest;
  1346. int i;
  1347. /* Sanity check: */
  1348. assert(context != (SHA_CTX*)0);
  1349. if (buffer != (char*)0) {
  1350. SHA384_Final(digest, context);
  1351. for (i = 0; i < SHA384_DIGEST_LENGTH; i++) {
  1352. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1353. *buffer++ = sha_hex_digits[*d & 0x0f];
  1354. d++;
  1355. }
  1356. *buffer = (char)0;
  1357. } else {
  1358. MEMSET_BZERO(context, sizeof(context));
  1359. }
  1360. MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH);
  1361. return buffer;
  1362. }
  1363. char* SHA384_Data(const sha_byte* data, size_t len, char digest[SHA384_DIGEST_STRING_LENGTH]) {
  1364. SHA_CTX context;
  1365. SHA384_Init(&context);
  1366. SHA384_Update(&context, data, len);
  1367. return SHA384_End(&context, digest);
  1368. }