#! /usr/bin/env perl
# This file is dual-licensed, meaning that you can use it under your
# choice of either of the following two licenses:
#
# Copyright 2023 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You can obtain
# a copy in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# or
#
# Copyright (c) 2023, Christoph Müllner <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# The generated code of this file depends on the following RISC-V extensions:
# - RV64I
# - RISC-V Vector ('V') with VLEN >= 128
# - RISC-V Vector Cryptography Bit-manipulation extension ('Zvkb')
# - RISC-V Vector Carryless Multiplication extension ('Zvbc')
use strict;
use warnings;

use FindBin qw($Bin);
use lib "$Bin";
use lib "$Bin/../../perlasm";
use riscv;

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
my $output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
my $flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;

$output and open STDOUT,">$output";

my $code=<<___;
.text
___
################################################################################
# void gcm_init_rv64i_zvkb_zvbc(u128 Htable[16], const u64 H[2]);
#
# input:  H: 128-bit H - secret parameter E(K, 0^128)
# output: Htable: Preprocessed key data for gcm_gmult_rv64i_zvkb_zvbc and
#         gcm_ghash_rv64i_zvkb_zvbc
{
my ($Htable,$H,$TMP0,$TMP1,$TMP2) = ("a0","a1","t0","t1","t2");
my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");

$code .= <<___;
.p2align 3
.globl gcm_init_rv64i_zvkb_zvbc
.type gcm_init_rv64i_zvkb_zvbc,\@function
gcm_init_rv64i_zvkb_zvbc:
    # Load/store data in reverse order.
    # This is needed as a part of endianness swap.
    add $H, $H, 8
    li $TMP0, -8
    li $TMP1, 63
    la $TMP2, Lpolymod

    @{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, tu, mu

    @{[vlse64_v $V1, $H, $TMP0]} # vlse64.v v1, (a1), t0
    @{[vle64_v $V2, $TMP2]} # vle64.v v2, (t2)

    # Shift one left and get the carry bits.
    @{[vsrl_vx $V3, $V1, $TMP1]} # vsrl.vx v3, v1, t1
    @{[vsll_vi $V1, $V1, 1]} # vsll.vi v1, v1, 1

    # Use the fact that the polynomial degree is no more than 128,
    # i.e. only the LSB of the upper half can be set.
    # Thanks to this we don't need to do the full reduction here.
    # Instead simply subtract the reduction polynomial.
    # This idea was taken from the x86 ghash implementation in OpenSSL.
    # (A scalar sketch of this preprocessing follows this function.)
    @{[vslideup_vi $V4, $V3, 1]} # vslideup.vi v4, v3, 1
    @{[vslidedown_vi $V3, $V3, 1]} # vslidedown.vi v3, v3, 1

    @{[vmv_v_i $V0, 2]} # vmv.v.i v0, 2
    @{[vor_vv_v0t $V1, $V1, $V4]} # vor.vv v1, v1, v4, v0.t

    # Need to set the mask to 3, if the carry bit is set.
    @{[vmv_v_v $V0, $V3]} # vmv.v.v v0, v3
    @{[vmv_v_i $V3, 0]} # vmv.v.i v3, 0
    @{[vmerge_vim $V3, $V3, 3]} # vmerge.vim v3, v3, 3, v0
    @{[vmv_v_v $V0, $V3]} # vmv.v.v v0, v3

    @{[vxor_vv_v0t $V1, $V1, $V2]} # vxor.vv v1, v1, v2, v0.t

    @{[vse64_v $V1, $Htable]} # vse64.v v1, (a0)
    ret
.size gcm_init_rv64i_zvkb_zvbc,.-gcm_init_rv64i_zvkb_zvbc
___
}
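
# Illustrative scalar sketch of the preprocessing done by
# gcm_init_rv64i_zvkb_zvbc above: treating (H[0] << 64) | H[1] as one 128-bit
# integer, shift it left by one bit and, if a bit is carried out of the top,
# XOR in the 128-bit constant formed by the two dwords stored at Lpolymod
# (0xc200000000000000 as the upper half). This sub is never called by the
# generator; the helper name and the use of Math::BigInt are choices made for
# this sketch only.
use Math::BigInt;

sub ghash_init_sketch {
    my ($h) = @_;                                     # Math::BigInt, 0 <= $h < 2^128
    my $mask128 = (Math::BigInt->new(1) << 128) - 1;
    my $poly = (Math::BigInt->from_hex("c200000000000000") << 64) +
               Math::BigInt->from_hex("0000000000000001");
    my $carry = $h >> 127;                            # bit shifted out at the top
    $h = ($h << 1) & $mask128;                        # H <<= 1 (mod 2^128)
    $h ^= $poly if $carry;                            # "subtract" the reduction polynomial
    return $h;                                        # value written to Htable
}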
################################################################################
# void gcm_gmult_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16]);
#
# input:  Xi: current hash value
#         Htable: preprocessed H
# output: Xi: next hash value Xi = (Xi * H mod f)
{
my ($Xi,$Htable,$TMP0,$TMP1,$TMP2,$TMP3,$TMP4) = ("a0","a1","t0","t1","t2","t3","t4");
my ($V0,$V1,$V2,$V3,$V4,$V5,$V6) = ("v0","v1","v2","v3","v4","v5","v6");

$code .= <<___;
.text
.p2align 3
.globl gcm_gmult_rv64i_zvkb_zvbc
.type gcm_gmult_rv64i_zvkb_zvbc,\@function
gcm_gmult_rv64i_zvkb_zvbc:
    ld $TMP0, ($Htable)
    ld $TMP1, 8($Htable)
    li $TMP2, 63
    la $TMP3, Lpolymod
    ld $TMP3, 8($TMP3)

    # Load/store data in reverse order.
    # This is needed as a part of endianness swap.
    add $Xi, $Xi, 8
    li $TMP4, -8

    @{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, tu, mu

    @{[vlse64_v $V5, $Xi, $TMP4]} # vlse64.v v5, (a0), t4
    @{[vrev8_v $V5, $V5]} # vrev8.v v5, v5

    # Multiplication

    # Do two 64x64 multiplications in one go to save some time
    # and simplify things.
    # (See the scalar carryless-multiplication sketch after this function.)
    # A = a1a0 (t1, t0)
    # B = b1b0 (v5)
    # C = c1c0 (256 bit)
    # c1 = a1b1 + (a0b1)h + (a1b0)h
    # c0 = a0b0 + ((a0b1)l << 64) + ((a1b0)l << 64)

    # v1 = (a0b1)l,(a0b0)l
    @{[vclmul_vx $V1, $V5, $TMP0]} # vclmul.vx v1, v5, t0
    # v3 = (a0b1)h,(a0b0)h
    @{[vclmulh_vx $V3, $V5, $TMP0]} # vclmulh.vx v3, v5, t0

    # v4 = (a1b1)l,(a1b0)l
    @{[vclmul_vx $V4, $V5, $TMP1]} # vclmul.vx v4, v5, t1
    # v2 = (a1b1)h,(a1b0)h
    @{[vclmulh_vx $V2, $V5, $TMP1]} # vclmulh.vx v2, v5, t1

    # Is there a better way to do this?
    # Would need to swap the order of elements within a vector register.
    @{[vslideup_vi $V5, $V3, 1]} # vslideup.vi v5, v3, 1
    @{[vslideup_vi $V6, $V4, 1]} # vslideup.vi v6, v4, 1
    @{[vslidedown_vi $V3, $V3, 1]} # vslidedown.vi v3, v3, 1
    @{[vslidedown_vi $V4, $V4, 1]} # vslidedown.vi v4, v4, 1

    @{[vmv_v_i $V0, 1]} # vmv.v.i v0, 1
    # v2 += (a0b1)h
    @{[vxor_vv_v0t $V2, $V2, $V3]} # vxor.vv v2, v2, v3, v0.t
    # v2 += (a1b1)l
    @{[vxor_vv_v0t $V2, $V2, $V4]} # vxor.vv v2, v2, v4, v0.t

    @{[vmv_v_i $V0, 2]} # vmv.v.i v0, 2
    # v1 += (a0b0)h,0
    @{[vxor_vv_v0t $V1, $V1, $V5]} # vxor.vv v1, v1, v5, v0.t
    # v1 += (a1b0)l,0
    @{[vxor_vv_v0t $V1, $V1, $V6]} # vxor.vv v1, v1, v6, v0.t

    # Now the 256-bit product is stored in (v2,v1):
    # v1 = (a0b1)l + (a0b0)h + (a1b0)l, (a0b0)l
    # v2 = (a1b1)h, (a1b0)h + (a0b1)h + (a1b1)l

    # Reduction
    # Let C := A*B = c3,c2,c1,c0 = v2[1],v2[0],v1[1],v1[0]
    # This is a slight variation of Gueron's Montgomery reduction.
    # The difference is that the order of some operations has been changed
    # to make better use of the vclmul(h) instructions.

    # First step:
    # c1 += (c0 * P)l
    # vmv.v.i v0, 2 (mask v0 still contains 2 from above)
    @{[vslideup_vi_v0t $V3, $V1, 1]} # vslideup.vi v3, v1, 1, v0.t
    @{[vclmul_vx_v0t $V3, $V3, $TMP3]} # vclmul.vx v3, v3, t3, v0.t
    @{[vxor_vv_v0t $V1, $V1, $V3]} # vxor.vv v1, v1, v3, v0.t

    # Second step:
    # D = d1,d0 is the final result
    # We want:
    # m1 = c1 + (c1 * P)h
    # m0 = (c1 * P)l + (c0 * P)h + c0
    # d1 = c3 + m1
    # d0 = c2 + m0

    # v3 = (c1 * P)l, 0
    @{[vclmul_vx_v0t $V3, $V1, $TMP3]} # vclmul.vx v3, v1, t3, v0.t
    # v4 = (c1 * P)h, (c0 * P)h
    @{[vclmulh_vx $V4, $V1, $TMP3]} # vclmulh.vx v4, v1, t3

    @{[vmv_v_i $V0, 1]} # vmv.v.i v0, 1
    @{[vslidedown_vi $V3, $V3, 1]} # vslidedown.vi v3, v3, 1

    @{[vxor_vv $V1, $V1, $V4]} # vxor.vv v1, v1, v4
    @{[vxor_vv_v0t $V1, $V1, $V3]} # vxor.vv v1, v1, v3, v0.t

    # XOR the folded lower half into the upper part of the product
    @{[vxor_vv $V2, $V2, $V1]} # vxor.vv v2, v2, v1

    @{[vrev8_v $V2, $V2]} # vrev8.v v2, v2
    @{[vsse64_v $V2, $Xi, $TMP4]} # vsse64.v v2, (a0), t4
    ret
.size gcm_gmult_rv64i_zvkb_zvbc,.-gcm_gmult_rv64i_zvkb_zvbc
___
}
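
# Illustrative scalar model of a 64x64 carryless multiplication, the operation
# that vclmul.vx (low 64 bits of each product) and vclmulh.vx (high 64 bits)
# perform per element in gcm_gmult_rv64i_zvkb_zvbc above. This sub is never
# called by the generator; it assumes a 64-bit Perl build and the helper name
# is a choice made for this sketch only.
sub clmul64_sketch {
    my ($a, $b) = @_;                             # 64-bit unsigned inputs
    my ($hi, $lo) = (0, 0);
    for my $i (0 .. 63) {
        next unless ($b >> $i) & 1;               # for every set bit of b ...
        $lo ^= ($a << $i) & 0xffffffffffffffff;   # ... XOR in the low 64 bits of a*x^i
        $hi ^= $a >> (64 - $i) if $i;             # ... and the bits shifted out of the low half
    }
    return ($hi, $lo);                            # 128-bit carryless product (high, low)
}
# gcm_gmult_rv64i_zvkb_zvbc combines four such 64x64 products (a0b0, a0b1,
# a1b0, a1b1) into the 256-bit product described in its comments and then
# reduces it; a sketch of that reduction follows gcm_ghash_rv64i_zvkb_zvbc
# below.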
################################################################################
# void gcm_ghash_rv64i_zvkb_zvbc(u64 Xi[2], const u128 Htable[16],
#                                const u8 *inp, size_t len);
#
# input:  Xi: current hash value
#         Htable: preprocessed H
#         inp: pointer to input data
#         len: length of input data in bytes (multiple of block size)
# output: Xi: hash value after processing all input blocks
{
my ($Xi,$Htable,$inp,$len,$TMP0,$TMP1,$TMP2,$TMP3,$M8,$TMP5,$TMP6) = ("a0","a1","a2","a3","t0","t1","t2","t3","t4","t5","t6");
my ($V0,$V1,$V2,$V3,$V4,$V5,$V6,$Vinp) = ("v0","v1","v2","v3","v4","v5","v6","v7");

$code .= <<___;
.p2align 3
.globl gcm_ghash_rv64i_zvkb_zvbc
.type gcm_ghash_rv64i_zvkb_zvbc,\@function
gcm_ghash_rv64i_zvkb_zvbc:
    ld $TMP0, ($Htable)
    ld $TMP1, 8($Htable)
    li $TMP2, 63
    la $TMP3, Lpolymod
    ld $TMP3, 8($TMP3)

    # Load/store data in reverse order.
    # This is needed as a part of endianness swap.
    add $Xi, $Xi, 8
    add $inp, $inp, 8
    li $M8, -8

    @{[vsetivli__x0_2_e64_m1_tu_mu]} # vsetivli x0, 2, e64, m1, tu, mu

    @{[vlse64_v $V5, $Xi, $M8]} # vlse64.v v5, (a0), t4

Lstep:
    # Read input data
    @{[vlse64_v $Vinp, $inp, $M8]} # vlse64.v v7, (a2), t4
    add $inp, $inp, 16
    add $len, $len, -16
    # XOR them into Xi
    @{[vxor_vv $V5, $V5, $Vinp]} # vxor.vv v5, v5, v7

    @{[vrev8_v $V5, $V5]} # vrev8.v v5, v5

    # Multiplication

    # Do two 64x64 multiplications in one go to save some time
    # and simplify things.
    # A = a1a0 (t1, t0)
    # B = b1b0 (v5)
    # C = c1c0 (256 bit)
    # c1 = a1b1 + (a0b1)h + (a1b0)h
    # c0 = a0b0 + ((a0b1)l << 64) + ((a1b0)l << 64)

    # v1 = (a0b1)l,(a0b0)l
    @{[vclmul_vx $V1, $V5, $TMP0]} # vclmul.vx v1, v5, t0
    # v3 = (a0b1)h,(a0b0)h
    @{[vclmulh_vx $V3, $V5, $TMP0]} # vclmulh.vx v3, v5, t0

    # v4 = (a1b1)l,(a1b0)l
    @{[vclmul_vx $V4, $V5, $TMP1]} # vclmul.vx v4, v5, t1
    # v2 = (a1b1)h,(a1b0)h
    @{[vclmulh_vx $V2, $V5, $TMP1]} # vclmulh.vx v2, v5, t1

    # Is there a better way to do this?
    # Would need to swap the order of elements within a vector register.
    @{[vslideup_vi $V5, $V3, 1]} # vslideup.vi v5, v3, 1
    @{[vslideup_vi $V6, $V4, 1]} # vslideup.vi v6, v4, 1
    @{[vslidedown_vi $V3, $V3, 1]} # vslidedown.vi v3, v3, 1
    @{[vslidedown_vi $V4, $V4, 1]} # vslidedown.vi v4, v4, 1

    @{[vmv_v_i $V0, 1]} # vmv.v.i v0, 1
    # v2 += (a0b1)h
    @{[vxor_vv_v0t $V2, $V2, $V3]} # vxor.vv v2, v2, v3, v0.t
    # v2 += (a1b1)l
    @{[vxor_vv_v0t $V2, $V2, $V4]} # vxor.vv v2, v2, v4, v0.t

    @{[vmv_v_i $V0, 2]} # vmv.v.i v0, 2
    # v1 += (a0b0)h,0
    @{[vxor_vv_v0t $V1, $V1, $V5]} # vxor.vv v1, v1, v5, v0.t
    # v1 += (a1b0)l,0
    @{[vxor_vv_v0t $V1, $V1, $V6]} # vxor.vv v1, v1, v6, v0.t

    # Now the 256-bit product is stored in (v2,v1):
    # v1 = (a0b1)l + (a0b0)h + (a1b0)l, (a0b0)l
    # v2 = (a1b1)h, (a1b0)h + (a0b1)h + (a1b1)l

    # Reduction
    # Let C := A*B = c3,c2,c1,c0 = v2[1],v2[0],v1[1],v1[0]
    # This is a slight variation of Gueron's Montgomery reduction.
    # The difference is that the order of some operations has been changed
    # to make better use of the vclmul(h) instructions.
    # (A scalar sketch of this reduction follows this function.)

    # First step:
    # c1 += (c0 * P)l
    # vmv.v.i v0, 2 (mask v0 still contains 2 from above)
    @{[vslideup_vi_v0t $V3, $V1, 1]} # vslideup.vi v3, v1, 1, v0.t
    @{[vclmul_vx_v0t $V3, $V3, $TMP3]} # vclmul.vx v3, v3, t3, v0.t
    @{[vxor_vv_v0t $V1, $V1, $V3]} # vxor.vv v1, v1, v3, v0.t

    # Second step:
    # D = d1,d0 is the final result
    # We want:
    # m1 = c1 + (c1 * P)h
    # m0 = (c1 * P)l + (c0 * P)h + c0
    # d1 = c3 + m1
    # d0 = c2 + m0

    # v3 = (c1 * P)l, 0
    @{[vclmul_vx_v0t $V3, $V1, $TMP3]} # vclmul.vx v3, v1, t3, v0.t
    # v4 = (c1 * P)h, (c0 * P)h
    @{[vclmulh_vx $V4, $V1, $TMP3]} # vclmulh.vx v4, v1, t3

    @{[vmv_v_i $V0, 1]} # vmv.v.i v0, 1
    @{[vslidedown_vi $V3, $V3, 1]} # vslidedown.vi v3, v3, 1

    @{[vxor_vv $V1, $V1, $V4]} # vxor.vv v1, v1, v4
    @{[vxor_vv_v0t $V1, $V1, $V3]} # vxor.vv v1, v1, v3, v0.t

    # XOR the folded lower half into the upper part of the product
    @{[vxor_vv $V2, $V2, $V1]} # vxor.vv v2, v2, v1

    @{[vrev8_v $V5, $V2]} # vrev8.v v5, v2

    bnez $len, Lstep

    @{[vsse64_v $V5, $Xi, $M8]} # vsse64.v v5, (a0), t4
    ret
.size gcm_ghash_rv64i_zvkb_zvbc,.-gcm_ghash_rv64i_zvkb_zvbc
___
}
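
# Illustrative scalar model of the reduction described in the comment blocks
# above: fold the 256-bit carryless product (c3,c2,c1,c0) back to 128 bits
# using P = 0xc200000000000000, the second dword of Lpolymod (the value the
# functions above load via "ld t3, 8(t3)"). It relies on clmul64_sketch()
# defined earlier, is never called by the generator, and its name is a choice
# made for this sketch only.
sub ghash_reduce_sketch {
    my ($c3, $c2, $c1, $c0) = @_;              # 64-bit dwords, most significant first
    my $P = 0xc200000000000000;
    # First step: c1 += (c0 * P)l
    my (undef, $l0) = clmul64_sketch($c0, $P);
    $c1 ^= $l0;
    # Second step:
    #   m1 = c1 + (c1 * P)h
    #   m0 = (c1 * P)l + (c0 * P)h + c0
    #   d1 = c3 + m1,  d0 = c2 + m0
    my ($h1, $l1) = clmul64_sketch($c1, $P);
    my ($h0, undef) = clmul64_sketch($c0, $P);
    my $d1 = $c3 ^ $c1 ^ $h1;
    my $d0 = $c2 ^ $c0 ^ $l1 ^ $h0;
    return ($d1, $d0);                         # reduced 128-bit result (high, low)
}
# In these terms the per-block loop in gcm_ghash_rv64i_zvkb_zvbc is simply:
# for every 16 bytes of input, Xi ^= block, then Xi = reduce(Xi x H).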

$code .= <<___;
.p2align 4
Lpolymod:
    .dword 0x0000000000000001
    .dword 0xc200000000000000
.size Lpolymod,.-Lpolymod
___

print $code;

close STDOUT or die "error closing STDOUT: $!";