/* cm_sha2.c */
  1. /*
  2. * FILE: sha2.c
  3. * AUTHOR: Aaron D. Gifford
  4. * http://www.aarongifford.com/computers/sha.html
  5. *
  6. * Copyright (c) 2000-2003, Aaron D. Gifford
  7. * All rights reserved.
  8. *
  9. * Redistribution and use in source and binary forms, with or without
  10. * modification, are permitted provided that the following conditions
  11. * are met:
  12. * 1. Redistributions of source code must retain the above copyright
  13. * notice, this list of conditions and the following disclaimer.
  14. * 2. Redistributions in binary form must reproduce the above copyright
  15. * notice, this list of conditions and the following disclaimer in the
  16. * documentation and/or other materials provided with the distribution.
  17. * 3. Neither the name of the copyright holder nor the names of contributors
  18. * may be used to endorse or promote products derived from this software
  19. * without specific prior written permission.
  20. *
  21. * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTOR(S) ``AS IS'' AND
  22. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  23. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  24. * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTOR(S) BE LIABLE
  25. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  26. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  27. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  28. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  29. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  30. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  31. * SUCH DAMAGE.
  32. *
  33. * $Id: sha2.c,v 1.4 2004/01/07 22:58:18 adg Exp $
  34. */
  35. #include <string.h> /* memcpy()/memset() or bcopy()/bzero() */
  36. #include <assert.h> /* assert() */
  37. #include "cm_sha2.h" /* "sha2.h" -> "cm_sha2.h" renamed for CMake */
  38. /*
  39. * ASSERT NOTE:
  40. * Some sanity checking code is included using assert(). On my FreeBSD
  41. * system, this additional code can be removed by compiling with NDEBUG
  42. * defined. Check your own systems manpage on assert() to see how to
  43. * compile WITHOUT the sanity checking code on your system.
  44. *
  45. * UNROLLED TRANSFORM LOOP NOTE:
  46. * You can define SHA2_UNROLL_TRANSFORM to use the unrolled transform
  47. * loop version for the hash transform rounds (defined using macros
  48. * later in this file). Either define on the command line, for example:
  49. *
  50. * cc -DSHA2_UNROLL_TRANSFORM -o sha2 sha2.c sha2prog.c
  51. *
  52. * or define below:
  53. *
  54. * #define SHA2_UNROLL_TRANSFORM
  55. *
  56. */
  57. /*** SHA-224/256/384/512 Machine Architecture Definitions *************/
  58. /*
  59. * BYTE_ORDER NOTE:
  60. *
  61. * Please make sure that your system defines BYTE_ORDER. If your
  62. * architecture is little-endian, make sure it also defines
  63. * LITTLE_ENDIAN and that the two (BYTE_ORDER and LITTLE_ENDIAN) are
* equivalent.
  65. *
  66. * If your system does not define the above, then you can do so by
  67. * hand like this:
  68. *
  69. * #define LITTLE_ENDIAN 1234
  70. * #define BIG_ENDIAN 4321
  71. *
  72. * And for little-endian machines, add:
  73. *
  74. * #define BYTE_ORDER LITTLE_ENDIAN
  75. *
  76. * Or for big-endian machines:
  77. *
  78. * #define BYTE_ORDER BIG_ENDIAN
  79. *
  80. * The FreeBSD machine this was written on defines BYTE_ORDER
  81. * appropriately by including <sys/types.h> (which in turn includes
  82. * <machine/endian.h> where the appropriate definitions are actually
  83. * made).
  84. */
  85. #if !defined(BYTE_ORDER) || (BYTE_ORDER != LITTLE_ENDIAN && BYTE_ORDER != BIG_ENDIAN)
  86. /* CMake modification: use byte order from cmIML. */
  87. # include "cmIML/ABI.h"
  88. # undef BYTE_ORDER
  89. # undef BIG_ENDIAN
  90. # undef LITTLE_ENDIAN
  91. # define BYTE_ORDER cmIML_ABI_ENDIAN_ID
  92. # define BIG_ENDIAN cmIML_ABI_ENDIAN_ID_BIG
  93. # define LITTLE_ENDIAN cmIML_ABI_ENDIAN_ID_LITTLE
  94. #endif
  95. /* CMake modification: use types computed in header. */
  96. typedef cm_sha2_uint8_t sha_byte; /* Exactly 1 byte */
  97. typedef cm_sha2_uint32_t sha_word32; /* Exactly 4 bytes */
  98. typedef cm_sha2_uint64_t sha_word64; /* Exactly 8 bytes */
  99. #define SHA_UINT32_C(x) cmIML_INT_UINT32_C(x)
  100. #define SHA_UINT64_C(x) cmIML_INT_UINT64_C(x)
  101. #if defined(__BORLANDC__)
  102. # pragma warn -8004 /* variable assigned value that is never used */
  103. #endif
  104. /*** ENDIAN REVERSAL MACROS *******************************************/
  105. #if BYTE_ORDER == LITTLE_ENDIAN
  106. #define REVERSE32(w,x) { \
  107. sha_word32 tmp = (w); \
  108. tmp = (tmp >> 16) | (tmp << 16); \
  109. (x) = ((tmp & SHA_UINT32_C(0xff00ff00)) >> 8) | \
  110. ((tmp & SHA_UINT32_C(0x00ff00ff)) << 8); \
  111. }
  112. #define REVERSE64(w,x) { \
  113. sha_word64 tmp = (w); \
  114. tmp = (tmp >> 32) | (tmp << 32); \
  115. tmp = ((tmp & SHA_UINT64_C(0xff00ff00ff00ff00)) >> 8) | \
  116. ((tmp & SHA_UINT64_C(0x00ff00ff00ff00ff)) << 8); \
  117. (x) = ((tmp & SHA_UINT64_C(0xffff0000ffff0000)) >> 16) | \
  118. ((tmp & SHA_UINT64_C(0x0000ffff0000ffff)) << 16); \
  119. }
  120. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  121. /*
  122. * Macro for incrementally adding the unsigned 64-bit integer n to the
  123. * unsigned 128-bit integer (represented using a two-element array of
  124. * 64-bit words):
  125. */
  126. #define ADDINC128(w,n) { \
  127. (w)[0] += (sha_word64)(n); \
  128. if ((w)[0] < (n)) { \
  129. (w)[1]++; \
  130. } \
  131. }
  132. /*
  133. * Macros for copying blocks of memory and for zeroing out ranges
  134. * of memory. Using these macros makes it easy to switch from
  135. * using memset()/memcpy() and using bzero()/bcopy().
  136. *
  137. * Please define either SHA2_USE_MEMSET_MEMCPY or define
  138. * SHA2_USE_BZERO_BCOPY depending on which function set you
  139. * choose to use:
  140. */
  141. #if !defined(SHA2_USE_MEMSET_MEMCPY) && !defined(SHA2_USE_BZERO_BCOPY)
  142. /* Default to memset()/memcpy() if no option is specified */
  143. #define SHA2_USE_MEMSET_MEMCPY 1
  144. #endif
  145. #if defined(SHA2_USE_MEMSET_MEMCPY) && defined(SHA2_USE_BZERO_BCOPY)
  146. /* Abort with an error if BOTH options are defined */
  147. #error Define either SHA2_USE_MEMSET_MEMCPY or SHA2_USE_BZERO_BCOPY, not both!
  148. #endif
  149. #ifdef SHA2_USE_MEMSET_MEMCPY
  150. #define MEMSET_BZERO(p,l) memset((p), 0, (l))
  151. #define MEMCPY_BCOPY(d,s,l) memcpy((d), (s), (l))
  152. #endif
  153. #ifdef SHA2_USE_BZERO_BCOPY
  154. #define MEMSET_BZERO(p,l) bzero((p), (l))
  155. #define MEMCPY_BCOPY(d,s,l) bcopy((s), (d), (l))
  156. #endif
  157. /*** THE SIX LOGICAL FUNCTIONS ****************************************/
  158. /*
  159. * Bit shifting and rotation (used by the six SHA-XYZ logical functions:
  160. *
  161. * NOTE: In the original SHA-256/384/512 document, the shift-right
  162. * function was named R and the rotate-right function was called S.
  163. * (See: http://csrc.nist.gov/cryptval/shs/sha256-384-512.pdf on the
  164. * web.)
  165. *
  166. * The newer NIST FIPS 180-2 document uses a much clearer naming
  167. * scheme, SHR for shift-right, ROTR for rotate-right, and ROTL for
  168. * rotate-left. (See:
  169. * http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
  170. * on the web.)
  171. *
  172. * WARNING: These macros must be used cautiously, since they reference
  173. * supplied parameters sometimes more than once, and thus could have
  174. * unexpected side-effects if used without taking this into account.
  175. */
  176. /* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
  177. #define SHR(b,x) ((x) >> (b))
  178. /* 32-bit Rotate-right (used in SHA-256): */
  179. #define ROTR32(b,x) (((x) >> (b)) | ((x) << (32 - (b))))
  180. /* 64-bit Rotate-right (used in SHA-384 and SHA-512): */
  181. #define ROTR64(b,x) (((x) >> (b)) | ((x) << (64 - (b))))
  182. /* 32-bit Rotate-left (used in SHA-1): */
  183. #define ROTL32(b,x) (((x) << (b)) | ((x) >> (32 - (b))))
/* Two logical functions used in SHA-1, SHA-224, SHA-256, SHA-384, and SHA-512: */
  185. #define Ch(x,y,z) (((x) & (y)) ^ ((~(x)) & (z)))
  186. #define Maj(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
  187. /* Function used in SHA-1: */
  188. #define Parity(x,y,z) ((x) ^ (y) ^ (z))
  189. /* Four logical functions used in SHA-256: */
  190. #define Sigma0_256(x) (ROTR32(2, (x)) ^ ROTR32(13, (x)) ^ ROTR32(22, (x)))
  191. #define Sigma1_256(x) (ROTR32(6, (x)) ^ ROTR32(11, (x)) ^ ROTR32(25, (x)))
  192. #define sigma0_256(x) (ROTR32(7, (x)) ^ ROTR32(18, (x)) ^ SHR( 3 , (x)))
  193. #define sigma1_256(x) (ROTR32(17, (x)) ^ ROTR32(19, (x)) ^ SHR( 10, (x)))
  194. /* Four of six logical functions used in SHA-384 and SHA-512: */
  195. #define Sigma0_512(x) (ROTR64(28, (x)) ^ ROTR64(34, (x)) ^ ROTR64(39, (x)))
  196. #define Sigma1_512(x) (ROTR64(14, (x)) ^ ROTR64(18, (x)) ^ ROTR64(41, (x)))
  197. #define sigma0_512(x) (ROTR64( 1, (x)) ^ ROTR64( 8, (x)) ^ SHR( 7, (x)))
  198. #define sigma1_512(x) (ROTR64(19, (x)) ^ ROTR64(61, (x)) ^ SHR( 6, (x)))
  199. /*** INTERNAL FUNCTION PROTOTYPES *************************************/
  200. /* SHA-224 and SHA-256: */
  201. void SHA256_Internal_Init(SHA_CTX*, const sha_word32*);
  202. void SHA256_Internal_Last(SHA_CTX*);
  203. void SHA256_Internal_Transform(SHA_CTX*, const sha_word32*);
  204. /* SHA-384 and SHA-512: */
  205. void SHA512_Internal_Init(SHA_CTX*, const sha_word64*);
  206. void SHA512_Internal_Last(SHA_CTX*);
  207. void SHA512_Internal_Transform(SHA_CTX*, const sha_word64*);
  208. /*** SHA2 INITIAL HASH VALUES AND CONSTANTS ***************************/
  209. /* Hash constant words K for SHA-1: */
  210. #define K1_0_TO_19 SHA_UINT32_C(0x5a827999)
  211. #define K1_20_TO_39 SHA_UINT32_C(0x6ed9eba1)
  212. #define K1_40_TO_59 SHA_UINT32_C(0x8f1bbcdc)
  213. #define K1_60_TO_79 SHA_UINT32_C(0xca62c1d6)
  214. /* Initial hash value H for SHA-1: */
  215. static const sha_word32 sha1_initial_hash_value[5] = {
  216. SHA_UINT32_C(0x67452301),
  217. SHA_UINT32_C(0xefcdab89),
  218. SHA_UINT32_C(0x98badcfe),
  219. SHA_UINT32_C(0x10325476),
  220. SHA_UINT32_C(0xc3d2e1f0)
  221. };
  222. /* Hash constant words K for SHA-224 and SHA-256: */
  223. static const sha_word32 K256[64] = {
  224. SHA_UINT32_C(0x428a2f98), SHA_UINT32_C(0x71374491),
  225. SHA_UINT32_C(0xb5c0fbcf), SHA_UINT32_C(0xe9b5dba5),
  226. SHA_UINT32_C(0x3956c25b), SHA_UINT32_C(0x59f111f1),
  227. SHA_UINT32_C(0x923f82a4), SHA_UINT32_C(0xab1c5ed5),
  228. SHA_UINT32_C(0xd807aa98), SHA_UINT32_C(0x12835b01),
  229. SHA_UINT32_C(0x243185be), SHA_UINT32_C(0x550c7dc3),
  230. SHA_UINT32_C(0x72be5d74), SHA_UINT32_C(0x80deb1fe),
  231. SHA_UINT32_C(0x9bdc06a7), SHA_UINT32_C(0xc19bf174),
  232. SHA_UINT32_C(0xe49b69c1), SHA_UINT32_C(0xefbe4786),
  233. SHA_UINT32_C(0x0fc19dc6), SHA_UINT32_C(0x240ca1cc),
  234. SHA_UINT32_C(0x2de92c6f), SHA_UINT32_C(0x4a7484aa),
  235. SHA_UINT32_C(0x5cb0a9dc), SHA_UINT32_C(0x76f988da),
  236. SHA_UINT32_C(0x983e5152), SHA_UINT32_C(0xa831c66d),
  237. SHA_UINT32_C(0xb00327c8), SHA_UINT32_C(0xbf597fc7),
  238. SHA_UINT32_C(0xc6e00bf3), SHA_UINT32_C(0xd5a79147),
  239. SHA_UINT32_C(0x06ca6351), SHA_UINT32_C(0x14292967),
  240. SHA_UINT32_C(0x27b70a85), SHA_UINT32_C(0x2e1b2138),
  241. SHA_UINT32_C(0x4d2c6dfc), SHA_UINT32_C(0x53380d13),
  242. SHA_UINT32_C(0x650a7354), SHA_UINT32_C(0x766a0abb),
  243. SHA_UINT32_C(0x81c2c92e), SHA_UINT32_C(0x92722c85),
  244. SHA_UINT32_C(0xa2bfe8a1), SHA_UINT32_C(0xa81a664b),
  245. SHA_UINT32_C(0xc24b8b70), SHA_UINT32_C(0xc76c51a3),
  246. SHA_UINT32_C(0xd192e819), SHA_UINT32_C(0xd6990624),
  247. SHA_UINT32_C(0xf40e3585), SHA_UINT32_C(0x106aa070),
  248. SHA_UINT32_C(0x19a4c116), SHA_UINT32_C(0x1e376c08),
  249. SHA_UINT32_C(0x2748774c), SHA_UINT32_C(0x34b0bcb5),
  250. SHA_UINT32_C(0x391c0cb3), SHA_UINT32_C(0x4ed8aa4a),
  251. SHA_UINT32_C(0x5b9cca4f), SHA_UINT32_C(0x682e6ff3),
  252. SHA_UINT32_C(0x748f82ee), SHA_UINT32_C(0x78a5636f),
  253. SHA_UINT32_C(0x84c87814), SHA_UINT32_C(0x8cc70208),
  254. SHA_UINT32_C(0x90befffa), SHA_UINT32_C(0xa4506ceb),
  255. SHA_UINT32_C(0xbef9a3f7), SHA_UINT32_C(0xc67178f2)
  256. };
  257. /* Initial hash value H for SHA-224: */
  258. static const sha_word32 sha224_initial_hash_value[8] = {
  259. SHA_UINT32_C(0xc1059ed8),
  260. SHA_UINT32_C(0x367cd507),
  261. SHA_UINT32_C(0x3070dd17),
  262. SHA_UINT32_C(0xf70e5939),
  263. SHA_UINT32_C(0xffc00b31),
  264. SHA_UINT32_C(0x68581511),
  265. SHA_UINT32_C(0x64f98fa7),
  266. SHA_UINT32_C(0xbefa4fa4)
  267. };
  268. /* Initial hash value H for SHA-256: */
  269. static const sha_word32 sha256_initial_hash_value[8] = {
  270. SHA_UINT32_C(0x6a09e667),
  271. SHA_UINT32_C(0xbb67ae85),
  272. SHA_UINT32_C(0x3c6ef372),
  273. SHA_UINT32_C(0xa54ff53a),
  274. SHA_UINT32_C(0x510e527f),
  275. SHA_UINT32_C(0x9b05688c),
  276. SHA_UINT32_C(0x1f83d9ab),
  277. SHA_UINT32_C(0x5be0cd19)
  278. };
  279. /* Hash constant words K for SHA-384 and SHA-512: */
  280. static const sha_word64 K512[80] = {
  281. SHA_UINT64_C(0x428a2f98d728ae22), SHA_UINT64_C(0x7137449123ef65cd),
  282. SHA_UINT64_C(0xb5c0fbcfec4d3b2f), SHA_UINT64_C(0xe9b5dba58189dbbc),
  283. SHA_UINT64_C(0x3956c25bf348b538), SHA_UINT64_C(0x59f111f1b605d019),
  284. SHA_UINT64_C(0x923f82a4af194f9b), SHA_UINT64_C(0xab1c5ed5da6d8118),
  285. SHA_UINT64_C(0xd807aa98a3030242), SHA_UINT64_C(0x12835b0145706fbe),
  286. SHA_UINT64_C(0x243185be4ee4b28c), SHA_UINT64_C(0x550c7dc3d5ffb4e2),
  287. SHA_UINT64_C(0x72be5d74f27b896f), SHA_UINT64_C(0x80deb1fe3b1696b1),
  288. SHA_UINT64_C(0x9bdc06a725c71235), SHA_UINT64_C(0xc19bf174cf692694),
  289. SHA_UINT64_C(0xe49b69c19ef14ad2), SHA_UINT64_C(0xefbe4786384f25e3),
  290. SHA_UINT64_C(0x0fc19dc68b8cd5b5), SHA_UINT64_C(0x240ca1cc77ac9c65),
  291. SHA_UINT64_C(0x2de92c6f592b0275), SHA_UINT64_C(0x4a7484aa6ea6e483),
  292. SHA_UINT64_C(0x5cb0a9dcbd41fbd4), SHA_UINT64_C(0x76f988da831153b5),
  293. SHA_UINT64_C(0x983e5152ee66dfab), SHA_UINT64_C(0xa831c66d2db43210),
  294. SHA_UINT64_C(0xb00327c898fb213f), SHA_UINT64_C(0xbf597fc7beef0ee4),
  295. SHA_UINT64_C(0xc6e00bf33da88fc2), SHA_UINT64_C(0xd5a79147930aa725),
  296. SHA_UINT64_C(0x06ca6351e003826f), SHA_UINT64_C(0x142929670a0e6e70),
  297. SHA_UINT64_C(0x27b70a8546d22ffc), SHA_UINT64_C(0x2e1b21385c26c926),
  298. SHA_UINT64_C(0x4d2c6dfc5ac42aed), SHA_UINT64_C(0x53380d139d95b3df),
  299. SHA_UINT64_C(0x650a73548baf63de), SHA_UINT64_C(0x766a0abb3c77b2a8),
  300. SHA_UINT64_C(0x81c2c92e47edaee6), SHA_UINT64_C(0x92722c851482353b),
  301. SHA_UINT64_C(0xa2bfe8a14cf10364), SHA_UINT64_C(0xa81a664bbc423001),
  302. SHA_UINT64_C(0xc24b8b70d0f89791), SHA_UINT64_C(0xc76c51a30654be30),
  303. SHA_UINT64_C(0xd192e819d6ef5218), SHA_UINT64_C(0xd69906245565a910),
  304. SHA_UINT64_C(0xf40e35855771202a), SHA_UINT64_C(0x106aa07032bbd1b8),
  305. SHA_UINT64_C(0x19a4c116b8d2d0c8), SHA_UINT64_C(0x1e376c085141ab53),
  306. SHA_UINT64_C(0x2748774cdf8eeb99), SHA_UINT64_C(0x34b0bcb5e19b48a8),
  307. SHA_UINT64_C(0x391c0cb3c5c95a63), SHA_UINT64_C(0x4ed8aa4ae3418acb),
  308. SHA_UINT64_C(0x5b9cca4f7763e373), SHA_UINT64_C(0x682e6ff3d6b2b8a3),
  309. SHA_UINT64_C(0x748f82ee5defb2fc), SHA_UINT64_C(0x78a5636f43172f60),
  310. SHA_UINT64_C(0x84c87814a1f0ab72), SHA_UINT64_C(0x8cc702081a6439ec),
  311. SHA_UINT64_C(0x90befffa23631e28), SHA_UINT64_C(0xa4506cebde82bde9),
  312. SHA_UINT64_C(0xbef9a3f7b2c67915), SHA_UINT64_C(0xc67178f2e372532b),
  313. SHA_UINT64_C(0xca273eceea26619c), SHA_UINT64_C(0xd186b8c721c0c207),
  314. SHA_UINT64_C(0xeada7dd6cde0eb1e), SHA_UINT64_C(0xf57d4f7fee6ed178),
  315. SHA_UINT64_C(0x06f067aa72176fba), SHA_UINT64_C(0x0a637dc5a2c898a6),
  316. SHA_UINT64_C(0x113f9804bef90dae), SHA_UINT64_C(0x1b710b35131c471b),
  317. SHA_UINT64_C(0x28db77f523047d84), SHA_UINT64_C(0x32caab7b40c72493),
  318. SHA_UINT64_C(0x3c9ebe0a15c9bebc), SHA_UINT64_C(0x431d67c49c100d4c),
  319. SHA_UINT64_C(0x4cc5d4becb3e42b6), SHA_UINT64_C(0x597f299cfc657e2a),
  320. SHA_UINT64_C(0x5fcb6fab3ad6faec), SHA_UINT64_C(0x6c44198c4a475817)
  321. };
  322. /* Initial hash value H for SHA-384 */
  323. static const sha_word64 sha384_initial_hash_value[8] = {
  324. SHA_UINT64_C(0xcbbb9d5dc1059ed8),
  325. SHA_UINT64_C(0x629a292a367cd507),
  326. SHA_UINT64_C(0x9159015a3070dd17),
  327. SHA_UINT64_C(0x152fecd8f70e5939),
  328. SHA_UINT64_C(0x67332667ffc00b31),
  329. SHA_UINT64_C(0x8eb44a8768581511),
  330. SHA_UINT64_C(0xdb0c2e0d64f98fa7),
  331. SHA_UINT64_C(0x47b5481dbefa4fa4)
  332. };
  333. /* Initial hash value H for SHA-512 */
  334. static const sha_word64 sha512_initial_hash_value[8] = {
  335. SHA_UINT64_C(0x6a09e667f3bcc908),
  336. SHA_UINT64_C(0xbb67ae8584caa73b),
  337. SHA_UINT64_C(0x3c6ef372fe94f82b),
  338. SHA_UINT64_C(0xa54ff53a5f1d36f1),
  339. SHA_UINT64_C(0x510e527fade682d1),
  340. SHA_UINT64_C(0x9b05688c2b3e6c1f),
  341. SHA_UINT64_C(0x1f83d9abfb41bd6b),
  342. SHA_UINT64_C(0x5be0cd19137e2179)
  343. };
  344. /*
  345. * Constant used by SHA224/256/384/512_End() functions for converting the
  346. * digest to a readable hexadecimal character string:
  347. */
  348. static const char *sha_hex_digits = "0123456789abcdef";
  349. /*** SHA-1: ***********************************************************/
  350. void SHA1_Init(SHA_CTX* context) {
  351. /* Sanity check: */
  352. assert(context != (SHA_CTX*)0);
  353. MEMCPY_BCOPY(context->s1.state, sha1_initial_hash_value, sizeof(sha_word32) * 5);
  354. MEMSET_BZERO(context->s1.buffer, 64);
  355. context->s1.bitcount = 0;
  356. }
#ifdef SHA2_UNROLL_TRANSFORM

/* Unrolled SHA-1 round macros: */

#if BYTE_ORDER == LITTLE_ENDIAN

/* Rounds 0-15: the message schedule W1 is filled straight from the
 * input block, byte-swapped into host order on little-endian hosts.
 * Each macro consumes one input word (data++) and advances j. */
#define ROUND1_0_TO_15(a,b,c,d,e) \
	REVERSE32(*data++, W1[j]); \
	(e) = ROTL32(5, (a)) + Ch((b), (c), (d)) + (e) + \
	      K1_0_TO_19 + W1[j]; \
	(b) = ROTL32(30, (b)); \
	j++;

#else /* BYTE_ORDER == LITTLE_ENDIAN */

/* Big-endian hosts can copy the input word without swapping. */
#define ROUND1_0_TO_15(a,b,c,d,e) \
	(e) = ROTL32(5, (a)) + Ch((b), (c), (d)) + (e) + \
	      K1_0_TO_19 + ( W1[j] = *data++ ); \
	(b) = ROTL32(30, (b)); \
	j++;

#endif /* BYTE_ORDER == LITTLE_ENDIAN */

/* Rounds 16-79 derive each schedule word from four earlier words,
 * kept in a 16-entry circular buffer (hence the &0x0f masks).
 * The four macro variants differ only in the round function
 * (Ch/Parity/Maj/Parity) and the round constant K1_*. */
#define ROUND1_16_TO_19(a,b,c,d,e) \
	T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
	(e) = ROTL32(5, a) + Ch(b,c,d) + e + K1_0_TO_19 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
	(b) = ROTL32(30, b); \
	j++;

#define ROUND1_20_TO_39(a,b,c,d,e) \
	T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
	(e) = ROTL32(5, a) + Parity(b,c,d) + e + K1_20_TO_39 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
	(b) = ROTL32(30, b); \
	j++;

#define ROUND1_40_TO_59(a,b,c,d,e) \
	T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
	(e) = ROTL32(5, a) + Maj(b,c,d) + e + K1_40_TO_59 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
	(b) = ROTL32(30, b); \
	j++;

#define ROUND1_60_TO_79(a,b,c,d,e) \
	T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f]; \
	(e) = ROTL32(5, a) + Parity(b,c,d) + e + K1_60_TO_79 + ( W1[j&0x0f] = ROTL32(1, T1) ); \
	(b) = ROTL32(30, b); \
	j++;

/*
 * SHA-1 compression function (fully unrolled variant).  Consumes one
 * 512-bit block at `data` and folds it into context->s1.state.
 *
 * NOTE(review): W1 aliases context->s1.buffer, so the message schedule
 * is built in the context's own buffer; the 16-round rotation of the
 * (a,b,c,d,e) argument pattern replaces the register shuffle of the
 * rolled version.
 */
void SHA1_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
	sha_word32 a, b, c, d, e;
	sha_word32 T1, *W1;
	int j;

	W1 = (sha_word32*)context->s1.buffer;

	/* Initialize registers with the prev. intermediate value */
	a = context->s1.state[0];
	b = context->s1.state[1];
	c = context->s1.state[2];
	d = context->s1.state[3];
	e = context->s1.state[4];

	j = 0;

	/* Rounds 0 to 15 unrolled: */
	ROUND1_0_TO_15(a,b,c,d,e);
	ROUND1_0_TO_15(e,a,b,c,d);
	ROUND1_0_TO_15(d,e,a,b,c);
	ROUND1_0_TO_15(c,d,e,a,b);
	ROUND1_0_TO_15(b,c,d,e,a);
	ROUND1_0_TO_15(a,b,c,d,e);
	ROUND1_0_TO_15(e,a,b,c,d);
	ROUND1_0_TO_15(d,e,a,b,c);
	ROUND1_0_TO_15(c,d,e,a,b);
	ROUND1_0_TO_15(b,c,d,e,a);
	ROUND1_0_TO_15(a,b,c,d,e);
	ROUND1_0_TO_15(e,a,b,c,d);
	ROUND1_0_TO_15(d,e,a,b,c);
	ROUND1_0_TO_15(c,d,e,a,b);
	ROUND1_0_TO_15(b,c,d,e,a);
	ROUND1_0_TO_15(a,b,c,d,e);

	/* Rounds 16 to 19 unrolled: */
	ROUND1_16_TO_19(e,a,b,c,d);
	ROUND1_16_TO_19(d,e,a,b,c);
	ROUND1_16_TO_19(c,d,e,a,b);
	ROUND1_16_TO_19(b,c,d,e,a);

	/* Rounds 20 to 39 unrolled: */
	ROUND1_20_TO_39(a,b,c,d,e);
	ROUND1_20_TO_39(e,a,b,c,d);
	ROUND1_20_TO_39(d,e,a,b,c);
	ROUND1_20_TO_39(c,d,e,a,b);
	ROUND1_20_TO_39(b,c,d,e,a);
	ROUND1_20_TO_39(a,b,c,d,e);
	ROUND1_20_TO_39(e,a,b,c,d);
	ROUND1_20_TO_39(d,e,a,b,c);
	ROUND1_20_TO_39(c,d,e,a,b);
	ROUND1_20_TO_39(b,c,d,e,a);
	ROUND1_20_TO_39(a,b,c,d,e);
	ROUND1_20_TO_39(e,a,b,c,d);
	ROUND1_20_TO_39(d,e,a,b,c);
	ROUND1_20_TO_39(c,d,e,a,b);
	ROUND1_20_TO_39(b,c,d,e,a);
	ROUND1_20_TO_39(a,b,c,d,e);
	ROUND1_20_TO_39(e,a,b,c,d);
	ROUND1_20_TO_39(d,e,a,b,c);
	ROUND1_20_TO_39(c,d,e,a,b);
	ROUND1_20_TO_39(b,c,d,e,a);

	/* Rounds 40 to 59 unrolled: */
	ROUND1_40_TO_59(a,b,c,d,e);
	ROUND1_40_TO_59(e,a,b,c,d);
	ROUND1_40_TO_59(d,e,a,b,c);
	ROUND1_40_TO_59(c,d,e,a,b);
	ROUND1_40_TO_59(b,c,d,e,a);
	ROUND1_40_TO_59(a,b,c,d,e);
	ROUND1_40_TO_59(e,a,b,c,d);
	ROUND1_40_TO_59(d,e,a,b,c);
	ROUND1_40_TO_59(c,d,e,a,b);
	ROUND1_40_TO_59(b,c,d,e,a);
	ROUND1_40_TO_59(a,b,c,d,e);
	ROUND1_40_TO_59(e,a,b,c,d);
	ROUND1_40_TO_59(d,e,a,b,c);
	ROUND1_40_TO_59(c,d,e,a,b);
	ROUND1_40_TO_59(b,c,d,e,a);
	ROUND1_40_TO_59(a,b,c,d,e);
	ROUND1_40_TO_59(e,a,b,c,d);
	ROUND1_40_TO_59(d,e,a,b,c);
	ROUND1_40_TO_59(c,d,e,a,b);
	ROUND1_40_TO_59(b,c,d,e,a);

	/* Rounds 60 to 79 unrolled: */
	ROUND1_60_TO_79(a,b,c,d,e);
	ROUND1_60_TO_79(e,a,b,c,d);
	ROUND1_60_TO_79(d,e,a,b,c);
	ROUND1_60_TO_79(c,d,e,a,b);
	ROUND1_60_TO_79(b,c,d,e,a);
	ROUND1_60_TO_79(a,b,c,d,e);
	ROUND1_60_TO_79(e,a,b,c,d);
	ROUND1_60_TO_79(d,e,a,b,c);
	ROUND1_60_TO_79(c,d,e,a,b);
	ROUND1_60_TO_79(b,c,d,e,a);
	ROUND1_60_TO_79(a,b,c,d,e);
	ROUND1_60_TO_79(e,a,b,c,d);
	ROUND1_60_TO_79(d,e,a,b,c);
	ROUND1_60_TO_79(c,d,e,a,b);
	ROUND1_60_TO_79(b,c,d,e,a);
	ROUND1_60_TO_79(a,b,c,d,e);
	ROUND1_60_TO_79(e,a,b,c,d);
	ROUND1_60_TO_79(d,e,a,b,c);
	ROUND1_60_TO_79(c,d,e,a,b);
	ROUND1_60_TO_79(b,c,d,e,a);

	/* Compute the current intermediate hash value */
	context->s1.state[0] += a;
	context->s1.state[1] += b;
	context->s1.state[2] += c;
	context->s1.state[3] += d;
	context->s1.state[4] += e;

	/* Clean up (best-effort scrub of working values) */
	a = b = c = d = e = T1 = 0;
}
  499. #else /* SHA2_UNROLL_TRANSFORM */
  500. void SHA1_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
  501. sha_word32 a, b, c, d, e;
  502. sha_word32 T1, *W1;
  503. int j;
  504. W1 = (sha_word32*)context->s1.buffer;
  505. /* Initialize registers with the prev. intermediate value */
  506. a = context->s1.state[0];
  507. b = context->s1.state[1];
  508. c = context->s1.state[2];
  509. d = context->s1.state[3];
  510. e = context->s1.state[4];
  511. j = 0;
  512. do {
  513. #if BYTE_ORDER == LITTLE_ENDIAN
  514. T1 = data[j];
  515. /* Copy data while converting to host byte order */
  516. REVERSE32(*data++, W1[j]);
  517. T1 = ROTL32(5, a) + Ch(b, c, d) + e + K1_0_TO_19 + W1[j];
  518. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  519. T1 = ROTL32(5, a) + Ch(b, c, d) + e + K1_0_TO_19 + (W1[j] = *data++);
  520. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  521. e = d;
  522. d = c;
  523. c = ROTL32(30, b);
  524. b = a;
  525. a = T1;
  526. j++;
  527. } while (j < 16);
  528. do {
  529. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  530. T1 = ROTL32(5, a) + Ch(b,c,d) + e + K1_0_TO_19 + (W1[j&0x0f] = ROTL32(1, T1));
  531. e = d;
  532. d = c;
  533. c = ROTL32(30, b);
  534. b = a;
  535. a = T1;
  536. j++;
  537. } while (j < 20);
  538. do {
  539. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  540. T1 = ROTL32(5, a) + Parity(b,c,d) + e + K1_20_TO_39 + (W1[j&0x0f] = ROTL32(1, T1));
  541. e = d;
  542. d = c;
  543. c = ROTL32(30, b);
  544. b = a;
  545. a = T1;
  546. j++;
  547. } while (j < 40);
  548. do {
  549. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  550. T1 = ROTL32(5, a) + Maj(b,c,d) + e + K1_40_TO_59 + (W1[j&0x0f] = ROTL32(1, T1));
  551. e = d;
  552. d = c;
  553. c = ROTL32(30, b);
  554. b = a;
  555. a = T1;
  556. j++;
  557. } while (j < 60);
  558. do {
  559. T1 = W1[(j+13)&0x0f] ^ W1[(j+8)&0x0f] ^ W1[(j+2)&0x0f] ^ W1[j&0x0f];
  560. T1 = ROTL32(5, a) + Parity(b,c,d) + e + K1_60_TO_79 + (W1[j&0x0f] = ROTL32(1, T1));
  561. e = d;
  562. d = c;
  563. c = ROTL32(30, b);
  564. b = a;
  565. a = T1;
  566. j++;
  567. } while (j < 80);
  568. /* Compute the current intermediate hash value */
  569. context->s1.state[0] += a;
  570. context->s1.state[1] += b;
  571. context->s1.state[2] += c;
  572. context->s1.state[3] += d;
  573. context->s1.state[4] += e;
  574. /* Clean up */
  575. a = b = c = d = e = T1 = 0;
  576. }
  577. #endif /* SHA2_UNROLL_TRANSFORM */
/*
 * Absorb `len` bytes of `data` into the SHA-1 context.
 *
 * Partial input is accumulated in context->s1.buffer; the compression
 * function runs once for every complete 64-byte block.  bitcount tracks
 * the total message length in bits, as required for final padding.
 */
void SHA1_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
	unsigned int	freespace, usedspace;

	if (len == 0) {
		/* Calling with no data is valid - we do nothing */
		return;
	}

	/* Sanity check: */
	assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);

	/* Bytes already queued in the block buffer (bitcount is in bits) */
	usedspace = (context->s1.bitcount >> 3) % 64;
	if (usedspace > 0) {
		/* Calculate how much free space is available in the buffer */
		freespace = 64 - usedspace;

		if (len >= freespace) {
			/* Fill the buffer completely and process it */
			MEMCPY_BCOPY(&context->s1.buffer[usedspace], data, freespace);
			context->s1.bitcount += freespace << 3;
			len -= freespace;
			data += freespace;
			SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
		} else {
			/* The buffer is not yet full */
			MEMCPY_BCOPY(&context->s1.buffer[usedspace], data, len);
			context->s1.bitcount += len << 3;
			/* Clean up: */
			usedspace = freespace = 0;
			return;
		}
	}
	while (len >= 64) {
		/* Process as many complete blocks as we can.
		 * NOTE(review): casting `data` to sha_word32* assumes suitable
		 * alignment of the caller's buffer — confirm for this platform. */
		SHA1_Internal_Transform(context, (sha_word32*)data);
		context->s1.bitcount += 512;
		len -= 64;
		data += 64;
	}
	if (len > 0) {
		/* There's left-overs, so save 'em */
		MEMCPY_BCOPY(context->s1.buffer, data, len);
		context->s1.bitcount += len << 3;
	}
	/* Clean up: */
	usedspace = freespace = 0;
}
  621. void SHA1_Final(sha_byte digest[], SHA_CTX* context) {
  622. sha_word32 *d = (sha_word32*)digest;
  623. unsigned int usedspace;
  624. /* Sanity check: */
  625. assert(context != (SHA_CTX*)0);
  626. if (digest == (sha_byte*)0) {
  627. /*
  628. * No digest buffer, so we can do nothing
  629. * except clean up and go home
  630. */
  631. MEMSET_BZERO(context, sizeof(context));
  632. return;
  633. }
  634. usedspace = (context->s1.bitcount >> 3) % 64;
  635. if (usedspace == 0) {
  636. /* Set-up for the last transform: */
  637. MEMSET_BZERO(context->s1.buffer, 56);
  638. /* Begin padding with a 1 bit: */
  639. *context->s1.buffer = 0x80;
  640. } else {
  641. /* Begin padding with a 1 bit: */
  642. context->s1.buffer[usedspace++] = 0x80;
  643. if (usedspace <= 56) {
  644. /* Set-up for the last transform: */
  645. MEMSET_BZERO(&context->s1.buffer[usedspace], 56 - usedspace);
  646. } else {
  647. if (usedspace < 64) {
  648. MEMSET_BZERO(&context->s1.buffer[usedspace], 64 - usedspace);
  649. }
  650. /* Do second-to-last transform: */
  651. SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
  652. /* And set-up for the last transform: */
  653. MEMSET_BZERO(context->s1.buffer, 56);
  654. }
  655. /* Clean up: */
  656. usedspace = 0;
  657. }
  658. /* Set the bit count: */
  659. #if BYTE_ORDER == LITTLE_ENDIAN
  660. /* Convert FROM host byte order */
  661. REVERSE64(context->s1.bitcount,context->s1.bitcount);
  662. #endif
  663. *(sha_word64*)&context->s1.buffer[56] = context->s1.bitcount;
  664. /* Final transform: */
  665. SHA1_Internal_Transform(context, (sha_word32*)context->s1.buffer);
  666. /* Save the hash data for output: */
  667. #if BYTE_ORDER == LITTLE_ENDIAN
  668. {
  669. /* Convert TO host byte order */
  670. int j;
  671. for (j = 0; j < (SHA1_DIGEST_LENGTH >> 2); j++) {
  672. REVERSE32(context->s1.state[j],context->s1.state[j]);
  673. *d++ = context->s1.state[j];
  674. }
  675. }
  676. #else
  677. MEMCPY_BCOPY(d, context->s1.state, SHA1_DIGEST_LENGTH);
  678. #endif
  679. /* Clean up: */
  680. MEMSET_BZERO(context, sizeof(context));
  681. }
  682. char *SHA1_End(SHA_CTX* context, char buffer[]) {
  683. sha_byte digest[SHA1_DIGEST_LENGTH], *d = digest;
  684. int i;
  685. /* Sanity check: */
  686. assert(context != (SHA_CTX*)0);
  687. if (buffer != (char*)0) {
  688. SHA1_Final(digest, context);
  689. for (i = 0; i < SHA1_DIGEST_LENGTH; i++) {
  690. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  691. *buffer++ = sha_hex_digits[*d & 0x0f];
  692. d++;
  693. }
  694. *buffer = (char)0;
  695. } else {
  696. MEMSET_BZERO(context, sizeof(context));
  697. }
  698. MEMSET_BZERO(digest, SHA1_DIGEST_LENGTH);
  699. return buffer;
  700. }
  701. char* SHA1_Data(const sha_byte* data, size_t len, char digest[SHA1_DIGEST_STRING_LENGTH]) {
  702. SHA_CTX context;
  703. SHA1_Init(&context);
  704. SHA1_Update(&context, data, len);
  705. return SHA1_End(&context, digest);
  706. }
  707. /*** SHA-256: *********************************************************/
  708. void SHA256_Internal_Init(SHA_CTX* context, const sha_word32* ihv) {
  709. /* Sanity check: */
  710. assert(context != (SHA_CTX*)0);
  711. MEMCPY_BCOPY(context->s256.state, ihv, sizeof(sha_word32) * 8);
  712. MEMSET_BZERO(context->s256.buffer, 64);
  713. context->s256.bitcount = 0;
  714. }
  715. void SHA256_Init(SHA_CTX* context) {
  716. SHA256_Internal_Init(context, sha256_initial_hash_value);
  717. }
#ifdef SHA2_UNROLL_TRANSFORM

/* Unrolled SHA-256 round macros: */

#if BYTE_ORDER == LITTLE_ENDIAN

/* Rounds 0-15: byte-swap the next input word into the message schedule
 * W256[j], then apply one compression round to the register window. */
#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h)	\
	REVERSE32(*data++, W256[j]); \
	T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
             K256[j] + W256[j]; \
	(d) += T1; \
	(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
	j++

#else /* BYTE_ORDER == LITTLE_ENDIAN */

/* Big-endian hosts: the input is already in host order, copy directly. */
#define ROUND256_0_TO_15(a,b,c,d,e,f,g,h)	\
	T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + \
	     K256[j] + (W256[j] = *data++); \
	(d) += T1; \
	(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
	j++

#endif /* BYTE_ORDER == LITTLE_ENDIAN */

/* Rounds 16-63: extend the schedule in place (W256 is a 16-word circular
 * window, indexed mod 16) and apply one compression round. */
#define ROUND256(a,b,c,d,e,f,g,h)	\
	s0 = W256[(j+1)&0x0f]; \
	s0 = sigma0_256(s0); \
	s1 = W256[(j+14)&0x0f]; \
	s1 = sigma1_256(s1); \
	T1 = (h) + Sigma1_256(e) + Ch((e), (f), (g)) + K256[j] + \
	     (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0); \
	(d) += T1; \
	(h) = T1 + Sigma0_256(a) + Maj((a), (b), (c)); \
	j++
/*
 * SHA-256 compression function (unrolled variant): consume one 64-byte
 * block from `data` and fold it into context->s256.state.
 * The message schedule W256 aliases the context's block buffer.
 */
void SHA256_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
	sha_word32	a, b, c, d, e, f, g, h, s0, s1;
	sha_word32	T1, *W256;
	int		j;

	W256 = (sha_word32*)context->s256.buffer;

	/* Initialize registers with the prev. intermediate value */
	a = context->s256.state[0];
	b = context->s256.state[1];
	c = context->s256.state[2];
	d = context->s256.state[3];
	e = context->s256.state[4];
	f = context->s256.state[5];
	g = context->s256.state[6];
	h = context->s256.state[7];

	j = 0;
	do {
		/* Rounds 0 to 15 (unrolled): each macro rotates the
		 * register window one position, so eight macros per pass. */
		ROUND256_0_TO_15(a,b,c,d,e,f,g,h);
		ROUND256_0_TO_15(h,a,b,c,d,e,f,g);
		ROUND256_0_TO_15(g,h,a,b,c,d,e,f);
		ROUND256_0_TO_15(f,g,h,a,b,c,d,e);
		ROUND256_0_TO_15(e,f,g,h,a,b,c,d);
		ROUND256_0_TO_15(d,e,f,g,h,a,b,c);
		ROUND256_0_TO_15(c,d,e,f,g,h,a,b);
		ROUND256_0_TO_15(b,c,d,e,f,g,h,a);
	} while (j < 16);

	/* Now for the remaining rounds to 64: */
	do {
		ROUND256(a,b,c,d,e,f,g,h);
		ROUND256(h,a,b,c,d,e,f,g);
		ROUND256(g,h,a,b,c,d,e,f);
		ROUND256(f,g,h,a,b,c,d,e);
		ROUND256(e,f,g,h,a,b,c,d);
		ROUND256(d,e,f,g,h,a,b,c);
		ROUND256(c,d,e,f,g,h,a,b);
		ROUND256(b,c,d,e,f,g,h,a);
	} while (j < 64);

	/* Compute the current intermediate hash value */
	context->s256.state[0] += a;
	context->s256.state[1] += b;
	context->s256.state[2] += c;
	context->s256.state[3] += d;
	context->s256.state[4] += e;
	context->s256.state[5] += f;
	context->s256.state[6] += g;
	context->s256.state[7] += h;

	/* Clean up (best-effort register scrub; may be optimized away) */
	a = b = c = d = e = f = g = h = T1 = 0;
}
  795. #else /* SHA2_UNROLL_TRANSFORM */
/*
 * SHA-256 compression function (rolled variant): consume one 64-byte
 * block from `data` and fold it into context->s256.state.
 * The 16-word message schedule W256 aliases the context's block buffer
 * and is used as a circular window (indices taken mod 16) for rounds 16+.
 */
void SHA256_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
	sha_word32	a, b, c, d, e, f, g, h, s0, s1;
	sha_word32	T1, T2, *W256;
	int		j;

	W256 = (sha_word32*)context->s256.buffer;

	/* Initialize registers with the prev. intermediate value */
	a = context->s256.state[0];
	b = context->s256.state[1];
	c = context->s256.state[2];
	d = context->s256.state[3];
	e = context->s256.state[4];
	f = context->s256.state[5];
	g = context->s256.state[6];
	h = context->s256.state[7];

	j = 0;
	do {
#if BYTE_ORDER == LITTLE_ENDIAN
		/* Copy data while converting to host byte order */
		REVERSE32(*data++,W256[j]);
		/* Apply the SHA-256 compression function to update a..h */
		T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + W256[j];
#else /* BYTE_ORDER == LITTLE_ENDIAN */
		/* Apply the SHA-256 compression function to update a..h with copy */
		T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] + (W256[j] = *data++);
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
		T2 = Sigma0_256(a) + Maj(a, b, c);
		/* Rotate the eight working registers down one position */
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;

		j++;
	} while (j < 16);

	do {
		/* Part of the message block expansion: */
		s0 = W256[(j+1)&0x0f];
		s0 = sigma0_256(s0);
		s1 = W256[(j+14)&0x0f];
		s1 = sigma1_256(s1);

		/* Apply the SHA-256 compression function to update a..h */
		T1 = h + Sigma1_256(e) + Ch(e, f, g) + K256[j] +
		     (W256[j&0x0f] += s1 + W256[(j+9)&0x0f] + s0);
		T2 = Sigma0_256(a) + Maj(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;

		j++;
	} while (j < 64);

	/* Compute the current intermediate hash value */
	context->s256.state[0] += a;
	context->s256.state[1] += b;
	context->s256.state[2] += c;
	context->s256.state[3] += d;
	context->s256.state[4] += e;
	context->s256.state[5] += f;
	context->s256.state[6] += g;
	context->s256.state[7] += h;

	/* Clean up (best-effort register scrub; may be optimized away) */
	a = b = c = d = e = f = g = h = T1 = T2 = 0;
}
  864. #endif /* SHA2_UNROLL_TRANSFORM */
/*
 * Absorb `len` bytes of `data` into the SHA-256 context.
 *
 * Partial input is accumulated in context->s256.buffer; the compression
 * function runs once for every complete 64-byte block.  bitcount tracks
 * the total message length in bits for final padding.
 */
void SHA256_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
	unsigned int	freespace, usedspace;

	if (len == 0) {
		/* Calling with no data is valid - we do nothing */
		return;
	}

	/* Sanity check: */
	assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);

	/* Bytes already queued in the block buffer (bitcount is in bits) */
	usedspace = (context->s256.bitcount >> 3) % 64;
	if (usedspace > 0) {
		/* Calculate how much free space is available in the buffer */
		freespace = 64 - usedspace;

		if (len >= freespace) {
			/* Fill the buffer completely and process it */
			MEMCPY_BCOPY(&context->s256.buffer[usedspace], data, freespace);
			context->s256.bitcount += freespace << 3;
			len -= freespace;
			data += freespace;
			SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);
		} else {
			/* The buffer is not yet full */
			MEMCPY_BCOPY(&context->s256.buffer[usedspace], data, len);
			context->s256.bitcount += len << 3;
			/* Clean up: */
			usedspace = freespace = 0;
			return;
		}
	}
	while (len >= 64) {
		/* Process as many complete blocks as we can.
		 * NOTE(review): the sha_word32* cast assumes `data` is suitably
		 * aligned — confirm for this platform. */
		SHA256_Internal_Transform(context, (sha_word32*)data);
		context->s256.bitcount += 512;
		len -= 64;
		data += 64;
	}
	if (len > 0) {
		/* There's left-overs, so save 'em */
		MEMCPY_BCOPY(context->s256.buffer, data, len);
		context->s256.bitcount += len << 3;
	}
	/* Clean up: */
	usedspace = freespace = 0;
}
/*
 * Apply the final SHA-256 padding (0x80 byte, zeros, 64-bit big-endian
 * bit count in the last 8 bytes) and run the last transform(s).
 * Shared by SHA256_Final and SHA224_Final.
 */
void SHA256_Internal_Last(SHA_CTX* context) {
	unsigned int	usedspace;

	usedspace = (context->s256.bitcount >> 3) % 64;
#if BYTE_ORDER == LITTLE_ENDIAN
	/* Convert FROM host byte order */
	REVERSE64(context->s256.bitcount,context->s256.bitcount);
#endif
	if (usedspace > 0) {
		/* Begin padding with a 1 bit: */
		context->s256.buffer[usedspace++] = 0x80;

		if (usedspace <= 56) {
			/* Set-up for the last transform: */
			MEMSET_BZERO(&context->s256.buffer[usedspace], 56 - usedspace);
		} else {
			/* No room for the length field in this block: pad it
			 * out, process it, then build an all-padding block. */
			if (usedspace < 64) {
				MEMSET_BZERO(&context->s256.buffer[usedspace], 64 - usedspace);
			}
			/* Do second-to-last transform: */
			SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);

			/* And set-up for the last transform: */
			MEMSET_BZERO(context->s256.buffer, 56);
		}
		/* Clean up: */
		usedspace = 0;
	} else {
		/* Set-up for the last transform: */
		MEMSET_BZERO(context->s256.buffer, 56);

		/* Begin padding with a 1 bit: */
		*context->s256.buffer = 0x80;
	}
	/* Set the bit count (already byte-swapped to big-endian above) */
	*(sha_word64*)&context->s256.buffer[56] = context->s256.bitcount;

	/* Final transform: */
	SHA256_Internal_Transform(context, (sha_word32*)context->s256.buffer);
}
  943. void SHA256_Final(sha_byte digest[], SHA_CTX* context) {
  944. sha_word32 *d = (sha_word32*)digest;
  945. /* Sanity check: */
  946. assert(context != (SHA_CTX*)0);
  947. /* If no digest buffer is passed, we don't bother doing this: */
  948. if (digest != (sha_byte*)0) {
  949. SHA256_Internal_Last(context);
  950. /* Save the hash data for output: */
  951. #if BYTE_ORDER == LITTLE_ENDIAN
  952. {
  953. /* Convert TO host byte order */
  954. int j;
  955. for (j = 0; j < (SHA256_DIGEST_LENGTH >> 2); j++) {
  956. REVERSE32(context->s256.state[j],context->s256.state[j]);
  957. *d++ = context->s256.state[j];
  958. }
  959. }
  960. #else
  961. MEMCPY_BCOPY(d, context->s256.state, SHA256_DIGEST_LENGTH);
  962. #endif
  963. }
  964. /* Clean up state data: */
  965. MEMSET_BZERO(context, sizeof(context));
  966. }
  967. char *SHA256_End(SHA_CTX* context, char buffer[]) {
  968. sha_byte digest[SHA256_DIGEST_LENGTH], *d = digest;
  969. int i;
  970. /* Sanity check: */
  971. assert(context != (SHA_CTX*)0);
  972. if (buffer != (char*)0) {
  973. SHA256_Final(digest, context);
  974. for (i = 0; i < SHA256_DIGEST_LENGTH; i++) {
  975. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  976. *buffer++ = sha_hex_digits[*d & 0x0f];
  977. d++;
  978. }
  979. *buffer = (char)0;
  980. } else {
  981. MEMSET_BZERO(context, sizeof(context));
  982. }
  983. MEMSET_BZERO(digest, SHA256_DIGEST_LENGTH);
  984. return buffer;
  985. }
  986. char* SHA256_Data(const sha_byte* data, size_t len, char digest[SHA256_DIGEST_STRING_LENGTH]) {
  987. SHA_CTX context;
  988. SHA256_Init(&context);
  989. SHA256_Update(&context, data, len);
  990. return SHA256_End(&context, digest);
  991. }
  992. /*** SHA-224: *********************************************************/
  993. void SHA224_Init(SHA_CTX* context) {
  994. SHA256_Internal_Init(context, sha224_initial_hash_value);
  995. }
  996. void SHA224_Internal_Transform(SHA_CTX* context, const sha_word32* data) {
  997. SHA256_Internal_Transform(context, data);
  998. }
  999. void SHA224_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
  1000. SHA256_Update(context, data, len);
  1001. }
  1002. void SHA224_Final(sha_byte digest[], SHA_CTX* context) {
  1003. sha_word32 *d = (sha_word32*)digest;
  1004. /* Sanity check: */
  1005. assert(context != (SHA_CTX*)0);
  1006. /* If no digest buffer is passed, we don't bother doing this: */
  1007. if (digest != (sha_byte*)0) {
  1008. SHA256_Internal_Last(context);
  1009. /* Save the hash data for output: */
  1010. #if BYTE_ORDER == LITTLE_ENDIAN
  1011. {
  1012. /* Convert TO host byte order */
  1013. int j;
  1014. for (j = 0; j < (SHA224_DIGEST_LENGTH >> 2); j++) {
  1015. REVERSE32(context->s256.state[j],context->s256.state[j]);
  1016. *d++ = context->s256.state[j];
  1017. }
  1018. }
  1019. #else
  1020. MEMCPY_BCOPY(d, context->s256.state, SHA224_DIGEST_LENGTH);
  1021. #endif
  1022. }
  1023. /* Clean up state data: */
  1024. MEMSET_BZERO(context, sizeof(context));
  1025. }
  1026. char *SHA224_End(SHA_CTX* context, char buffer[]) {
  1027. sha_byte digest[SHA224_DIGEST_LENGTH], *d = digest;
  1028. int i;
  1029. /* Sanity check: */
  1030. assert(context != (SHA_CTX*)0);
  1031. if (buffer != (char*)0) {
  1032. SHA224_Final(digest, context);
  1033. for (i = 0; i < SHA224_DIGEST_LENGTH; i++) {
  1034. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1035. *buffer++ = sha_hex_digits[*d & 0x0f];
  1036. d++;
  1037. }
  1038. *buffer = (char)0;
  1039. } else {
  1040. MEMSET_BZERO(context, sizeof(context));
  1041. }
  1042. MEMSET_BZERO(digest, SHA224_DIGEST_LENGTH);
  1043. return buffer;
  1044. }
  1045. char* SHA224_Data(const sha_byte* data, size_t len, char digest[SHA224_DIGEST_STRING_LENGTH]) {
  1046. SHA_CTX context;
  1047. SHA224_Init(&context);
  1048. SHA224_Update(&context, data, len);
  1049. return SHA224_End(&context, digest);
  1050. }
  1051. /*** SHA-512: *********************************************************/
  1052. void SHA512_Internal_Init(SHA_CTX* context, const sha_word64* ihv) {
  1053. /* Sanity check: */
  1054. assert(context != (SHA_CTX*)0);
  1055. MEMCPY_BCOPY(context->s512.state, ihv, sizeof(sha_word64) * 8);
  1056. MEMSET_BZERO(context->s512.buffer, 128);
  1057. context->s512.bitcount[0] = context->s512.bitcount[1] = 0;
  1058. }
  1059. void SHA512_Init(SHA_CTX* context) {
  1060. SHA512_Internal_Init(context, sha512_initial_hash_value);
  1061. }
  1062. #ifdef SHA2_UNROLL_TRANSFORM
  1063. /* Unrolled SHA-512 round macros: */
  1064. #if BYTE_ORDER == LITTLE_ENDIAN
  1065. #define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
  1066. REVERSE64(*data++, W512[j]); \
  1067. T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
  1068. K512[j] + W512[j]; \
  1069. (d) += T1, \
  1070. (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)), \
  1071. j++
  1072. #else /* BYTE_ORDER == LITTLE_ENDIAN */
  1073. #define ROUND512_0_TO_15(a,b,c,d,e,f,g,h) \
  1074. T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + \
  1075. K512[j] + (W512[j] = *data++); \
  1076. (d) += T1; \
  1077. (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
  1078. j++
  1079. #endif /* BYTE_ORDER == LITTLE_ENDIAN */
  1080. #define ROUND512(a,b,c,d,e,f,g,h) \
  1081. s0 = W512[(j+1)&0x0f]; \
  1082. s0 = sigma0_512(s0); \
  1083. s1 = W512[(j+14)&0x0f]; \
  1084. s1 = sigma1_512(s1); \
  1085. T1 = (h) + Sigma1_512(e) + Ch((e), (f), (g)) + K512[j] + \
  1086. (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0); \
  1087. (d) += T1; \
  1088. (h) = T1 + Sigma0_512(a) + Maj((a), (b), (c)); \
  1089. j++
/*
 * SHA-512 compression function (unrolled variant): consume one 128-byte
 * block from `data` and fold it into context->s512.state.
 * The message schedule W512 aliases the context's block buffer.
 */
void SHA512_Internal_Transform(SHA_CTX* context, const sha_word64* data) {
	sha_word64	a, b, c, d, e, f, g, h, s0, s1;
	sha_word64	T1, *W512 = (sha_word64*)context->s512.buffer;
	int		j;

	/* Initialize registers with the prev. intermediate value */
	a = context->s512.state[0];
	b = context->s512.state[1];
	c = context->s512.state[2];
	d = context->s512.state[3];
	e = context->s512.state[4];
	f = context->s512.state[5];
	g = context->s512.state[6];
	h = context->s512.state[7];

	j = 0;
	do {
		/* Rounds 0-15: each macro rotates the register window one
		 * position, so eight macros per loop pass. */
		ROUND512_0_TO_15(a,b,c,d,e,f,g,h);
		ROUND512_0_TO_15(h,a,b,c,d,e,f,g);
		ROUND512_0_TO_15(g,h,a,b,c,d,e,f);
		ROUND512_0_TO_15(f,g,h,a,b,c,d,e);
		ROUND512_0_TO_15(e,f,g,h,a,b,c,d);
		ROUND512_0_TO_15(d,e,f,g,h,a,b,c);
		ROUND512_0_TO_15(c,d,e,f,g,h,a,b);
		ROUND512_0_TO_15(b,c,d,e,f,g,h,a);
	} while (j < 16);

	/* Now for the remaining rounds up to 79: */
	do {
		ROUND512(a,b,c,d,e,f,g,h);
		ROUND512(h,a,b,c,d,e,f,g);
		ROUND512(g,h,a,b,c,d,e,f);
		ROUND512(f,g,h,a,b,c,d,e);
		ROUND512(e,f,g,h,a,b,c,d);
		ROUND512(d,e,f,g,h,a,b,c);
		ROUND512(c,d,e,f,g,h,a,b);
		ROUND512(b,c,d,e,f,g,h,a);
	} while (j < 80);

	/* Compute the current intermediate hash value */
	context->s512.state[0] += a;
	context->s512.state[1] += b;
	context->s512.state[2] += c;
	context->s512.state[3] += d;
	context->s512.state[4] += e;
	context->s512.state[5] += f;
	context->s512.state[6] += g;
	context->s512.state[7] += h;

	/* Clean up (best-effort register scrub; may be optimized away) */
	a = b = c = d = e = f = g = h = T1 = 0;
}
  1137. #else /* SHA2_UNROLL_TRANSFORM */
/*
 * SHA-512 compression function (rolled variant): consume one 128-byte
 * block from `data` and fold it into context->s512.state.
 * The 16-word message schedule W512 aliases the context's block buffer
 * and is used as a circular window (indices taken mod 16) for rounds 16+.
 */
void SHA512_Internal_Transform(SHA_CTX* context, const sha_word64* data) {
	sha_word64	a, b, c, d, e, f, g, h, s0, s1;
	sha_word64	T1, T2, *W512 = (sha_word64*)context->s512.buffer;
	int		j;

	/* Initialize registers with the prev. intermediate value */
	a = context->s512.state[0];
	b = context->s512.state[1];
	c = context->s512.state[2];
	d = context->s512.state[3];
	e = context->s512.state[4];
	f = context->s512.state[5];
	g = context->s512.state[6];
	h = context->s512.state[7];

	j = 0;
	do {
#if BYTE_ORDER == LITTLE_ENDIAN
		/* Convert TO host byte order */
		REVERSE64(*data++, W512[j]);
		/* Apply the SHA-512 compression function to update a..h */
		T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + W512[j];
#else /* BYTE_ORDER == LITTLE_ENDIAN */
		/* Apply the SHA-512 compression function to update a..h with copy */
		T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] + (W512[j] = *data++);
#endif /* BYTE_ORDER == LITTLE_ENDIAN */
		T2 = Sigma0_512(a) + Maj(a, b, c);
		/* Rotate the eight working registers down one position */
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;

		j++;
	} while (j < 16);

	do {
		/* Part of the message block expansion: */
		s0 = W512[(j+1)&0x0f];
		s0 = sigma0_512(s0);
		s1 = W512[(j+14)&0x0f];
		s1 = sigma1_512(s1);

		/* Apply the SHA-512 compression function to update a..h */
		T1 = h + Sigma1_512(e) + Ch(e, f, g) + K512[j] +
		     (W512[j&0x0f] += s1 + W512[(j+9)&0x0f] + s0);
		T2 = Sigma0_512(a) + Maj(a, b, c);
		h = g;
		g = f;
		f = e;
		e = d + T1;
		d = c;
		c = b;
		b = a;
		a = T1 + T2;

		j++;
	} while (j < 80);

	/* Compute the current intermediate hash value */
	context->s512.state[0] += a;
	context->s512.state[1] += b;
	context->s512.state[2] += c;
	context->s512.state[3] += d;
	context->s512.state[4] += e;
	context->s512.state[5] += f;
	context->s512.state[6] += g;
	context->s512.state[7] += h;

	/* Clean up (best-effort register scrub; may be optimized away) */
	a = b = c = d = e = f = g = h = T1 = T2 = 0;
}
  1205. #endif /* SHA2_UNROLL_TRANSFORM */
/*
 * Absorb `len` bytes of `data` into the SHA-512 context.
 *
 * Partial input is accumulated in context->s512.buffer; the compression
 * function runs once for every complete 128-byte block.  The 128-bit
 * message length (in bits) is maintained via ADDINC128 across the two
 * bitcount words.
 */
void SHA512_Update(SHA_CTX* context, const sha_byte *data, size_t len) {
	unsigned int	freespace, usedspace;

	if (len == 0) {
		/* Calling with no data is valid - we do nothing */
		return;
	}

	/* Sanity check: */
	assert(context != (SHA_CTX*)0 && data != (sha_byte*)0);

	/* Bytes already queued in the block buffer (bitcount[0] is the
	 * low word of the 128-bit bit counter) */
	usedspace = (context->s512.bitcount[0] >> 3) % 128;
	if (usedspace > 0) {
		/* Calculate how much free space is available in the buffer */
		freespace = 128 - usedspace;

		if (len >= freespace) {
			/* Fill the buffer completely and process it */
			MEMCPY_BCOPY(&context->s512.buffer[usedspace], data, freespace);
			ADDINC128(context->s512.bitcount, freespace << 3);
			len -= freespace;
			data += freespace;
			SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);
		} else {
			/* The buffer is not yet full */
			MEMCPY_BCOPY(&context->s512.buffer[usedspace], data, len);
			ADDINC128(context->s512.bitcount, len << 3);
			/* Clean up: */
			usedspace = freespace = 0;
			return;
		}
	}
	while (len >= 128) {
		/* Process as many complete blocks as we can.
		 * NOTE(review): the sha_word64* cast assumes `data` is suitably
		 * aligned — confirm for this platform. */
		SHA512_Internal_Transform(context, (sha_word64*)data);
		ADDINC128(context->s512.bitcount, 1024);
		len -= 128;
		data += 128;
	}
	if (len > 0) {
		/* There's left-overs, so save 'em */
		MEMCPY_BCOPY(context->s512.buffer, data, len);
		ADDINC128(context->s512.bitcount, len << 3);
	}
	/* Clean up: */
	usedspace = freespace = 0;
}
/*
 * Apply the final SHA-512 padding (0x80 byte, zeros, 128-bit big-endian
 * bit count in the last 16 bytes) and run the last transform(s).
 * Shared by SHA512_Final and SHA384_Final.
 */
void SHA512_Internal_Last(SHA_CTX* context) {
	unsigned int	usedspace;

	usedspace = (context->s512.bitcount[0] >> 3) % 128;
#if BYTE_ORDER == LITTLE_ENDIAN
	/* Convert FROM host byte order */
	REVERSE64(context->s512.bitcount[0],context->s512.bitcount[0]);
	REVERSE64(context->s512.bitcount[1],context->s512.bitcount[1]);
#endif
	if (usedspace > 0) {
		/* Begin padding with a 1 bit: */
		context->s512.buffer[usedspace++] = 0x80;

		if (usedspace <= 112) {
			/* Set-up for the last transform: */
			MEMSET_BZERO(&context->s512.buffer[usedspace], 112 - usedspace);
		} else {
			/* No room for the length field in this block: pad it
			 * out, process it, then build an all-padding block. */
			if (usedspace < 128) {
				MEMSET_BZERO(&context->s512.buffer[usedspace], 128 - usedspace);
			}
			/* Do second-to-last transform: */
			SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);

			/* And set-up for the last transform: */
			MEMSET_BZERO(context->s512.buffer, 112);
		}
		/* Clean up: */
		usedspace = 0;
	} else {
		/* Prepare for final transform: */
		MEMSET_BZERO(context->s512.buffer, 112);

		/* Begin padding with a 1 bit: */
		*context->s512.buffer = 0x80;
	}
	/* Store the length of input data (in bits): high word first,
	 * values already byte-swapped to big-endian above */
	*(sha_word64*)&context->s512.buffer[112] = context->s512.bitcount[1];
	*(sha_word64*)&context->s512.buffer[120] = context->s512.bitcount[0];

	/* Final transform: */
	SHA512_Internal_Transform(context, (sha_word64*)context->s512.buffer);
}
  1286. void SHA512_Final(sha_byte digest[], SHA_CTX* context) {
  1287. sha_word64 *d = (sha_word64*)digest;
  1288. /* Sanity check: */
  1289. assert(context != (SHA_CTX*)0);
  1290. /* If no digest buffer is passed, we don't bother doing this: */
  1291. if (digest != (sha_byte*)0) {
  1292. SHA512_Internal_Last(context);
  1293. /* Save the hash data for output: */
  1294. #if BYTE_ORDER == LITTLE_ENDIAN
  1295. {
  1296. /* Convert TO host byte order */
  1297. int j;
  1298. for (j = 0; j < (SHA512_DIGEST_LENGTH >> 3); j++) {
  1299. REVERSE64(context->s512.state[j],context->s512.state[j]);
  1300. *d++ = context->s512.state[j];
  1301. }
  1302. }
  1303. #else
  1304. MEMCPY_BCOPY(d, context->s512.state, SHA512_DIGEST_LENGTH);
  1305. #endif
  1306. }
  1307. /* Zero out state data */
  1308. MEMSET_BZERO(context, sizeof(context));
  1309. }
  1310. char *SHA512_End(SHA_CTX* context, char buffer[]) {
  1311. sha_byte digest[SHA512_DIGEST_LENGTH], *d = digest;
  1312. int i;
  1313. /* Sanity check: */
  1314. assert(context != (SHA_CTX*)0);
  1315. if (buffer != (char*)0) {
  1316. SHA512_Final(digest, context);
  1317. for (i = 0; i < SHA512_DIGEST_LENGTH; i++) {
  1318. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1319. *buffer++ = sha_hex_digits[*d & 0x0f];
  1320. d++;
  1321. }
  1322. *buffer = (char)0;
  1323. } else {
  1324. MEMSET_BZERO(context, sizeof(context));
  1325. }
  1326. MEMSET_BZERO(digest, SHA512_DIGEST_LENGTH);
  1327. return buffer;
  1328. }
  1329. char* SHA512_Data(const sha_byte* data, size_t len, char digest[SHA512_DIGEST_STRING_LENGTH]) {
  1330. SHA_CTX context;
  1331. SHA512_Init(&context);
  1332. SHA512_Update(&context, data, len);
  1333. return SHA512_End(&context, digest);
  1334. }
  1335. /*** SHA-384: *********************************************************/
  1336. void SHA384_Init(SHA_CTX* context) {
  1337. SHA512_Internal_Init(context, sha384_initial_hash_value);
  1338. }
  1339. void SHA384_Update(SHA_CTX* context, const sha_byte* data, size_t len) {
  1340. SHA512_Update(context, data, len);
  1341. }
  1342. void SHA384_Final(sha_byte digest[], SHA_CTX* context) {
  1343. sha_word64 *d = (sha_word64*)digest;
  1344. /* Sanity check: */
  1345. assert(context != (SHA_CTX*)0);
  1346. /* If no digest buffer is passed, we don't bother doing this: */
  1347. if (digest != (sha_byte*)0) {
  1348. SHA512_Internal_Last(context);
  1349. /* Save the hash data for output: */
  1350. #if BYTE_ORDER == LITTLE_ENDIAN
  1351. {
  1352. /* Convert TO host byte order */
  1353. int j;
  1354. for (j = 0; j < (SHA384_DIGEST_LENGTH >> 3); j++) {
  1355. REVERSE64(context->s512.state[j],context->s512.state[j]);
  1356. *d++ = context->s512.state[j];
  1357. }
  1358. }
  1359. #else
  1360. MEMCPY_BCOPY(d, context->s512.state, SHA384_DIGEST_LENGTH);
  1361. #endif
  1362. }
  1363. /* Zero out state data */
  1364. MEMSET_BZERO(context, sizeof(context));
  1365. }
  1366. char *SHA384_End(SHA_CTX* context, char buffer[]) {
  1367. sha_byte digest[SHA384_DIGEST_LENGTH], *d = digest;
  1368. int i;
  1369. /* Sanity check: */
  1370. assert(context != (SHA_CTX*)0);
  1371. if (buffer != (char*)0) {
  1372. SHA384_Final(digest, context);
  1373. for (i = 0; i < SHA384_DIGEST_LENGTH; i++) {
  1374. *buffer++ = sha_hex_digits[(*d & 0xf0) >> 4];
  1375. *buffer++ = sha_hex_digits[*d & 0x0f];
  1376. d++;
  1377. }
  1378. *buffer = (char)0;
  1379. } else {
  1380. MEMSET_BZERO(context, sizeof(context));
  1381. }
  1382. MEMSET_BZERO(digest, SHA384_DIGEST_LENGTH);
  1383. return buffer;
  1384. }
  1385. char* SHA384_Data(const sha_byte* data, size_t len, char digest[SHA384_DIGEST_STRING_LENGTH]) {
  1386. SHA_CTX context;
  1387. SHA384_Init(&context);
  1388. SHA384_Update(&context, data, len);
  1389. return SHA384_End(&context, digest);
  1390. }