#! /usr/bin/env perl
# Author: Min Zhou <[email protected]>
# Copyright 2023-2025 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

use strict;

my $code;

# Here is the scalar register layout for LoongArch.
my ($zero,$ra,$tp,$sp,$fp)=map("\$r$_",(0..3,22));
my ($a0,$a1,$a2,$a3,$a4,$a5,$a6,$a7)=map("\$r$_",(4..11));
my ($t0,$t1,$t2,$t3,$t4,$t5,$t6,$t7,$t8,$x)=map("\$r$_",(12..21));
my ($s0,$s1,$s2,$s3,$s4,$s5,$s6,$s7,$s8)=map("\$r$_",(23..31));

# The saved floating-point registers in the LP64D ABI. In LoongArch
# with vector extension, the low 64 bits of a vector register alias with
# the corresponding FPR. So we must save and restore the corresponding
# FPR if we'll write into a vector register. The ABI only requires
# saving and restoring the FPR (i.e. 64 bits of the corresponding vector
# register), not the entire vector register.
my ($fs0,$fs1,$fs2,$fs3,$fs4,$fs5,$fs6,$fs7)=map("\$f$_",(24..31));

# Here is the 128-bit vector register layout for LSX extension.
my ($vr0,$vr1,$vr2,$vr3,$vr4,$vr5,$vr6,$vr7,$vr8,$vr9,$vr10,
    $vr11,$vr12,$vr13,$vr14,$vr15,$vr16,$vr17,$vr18,$vr19,
    $vr20,$vr21,$vr22,$vr23,$vr24,$vr25,$vr26,$vr27,$vr28,
    $vr29,$vr30,$vr31)=map("\$vr$_",(0..31));

# Here is the 256-bit vector register layout for LASX extension.
my ($xr0,$xr1,$xr2,$xr3,$xr4,$xr5,$xr6,$xr7,$xr8,$xr9,$xr10,
    $xr11,$xr12,$xr13,$xr14,$xr15,$xr16,$xr17,$xr18,$xr19,
    $xr20,$xr21,$xr22,$xr23,$xr24,$xr25,$xr26,$xr27,$xr28,
    $xr29,$xr30,$xr31)=map("\$xr$_",(0..31));

# $output is the last argument if it looks like a file (it has an extension)
my $output;
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
open STDOUT,">$output";

# Input parameter block
my ($out, $inp, $len, $key, $counter) = ($a0, $a1, $a2, $a3, $a4);
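
# For reference, the routine generated below is expected to match the C
# prototype OpenSSL uses for this function (as declared in its internal
# chacha header):
#   void ChaCha20_ctr32(unsigned char *out, const unsigned char *inp,
#                       size_t len, const unsigned int key[8],
#                       const unsigned int counter[4]);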
$code .= <<EOF;
#include "loongarch_arch.h"
.text
.extern OPENSSL_loongarch_hwcap_P
.align 6
.Lsigma:
.ascii "expand 32-byte k"
.Linc8x:
.long 0,1,2,3,4,5,6,7
.Linc4x:
.long 0,1,2,3
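# Note: .Lsigma above is the standard ChaCha constant "expand 32-byte k",
# while .Linc8x and .Linc4x hold per-lane block-counter offsets that the
# 8x and 4x vector paths add to the broadcast counter so each lane works
# on its own block.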
.globl ChaCha20_ctr32
.type ChaCha20_ctr32 function
.align 6
ChaCha20_ctr32:
# $a0 = arg #1 (out pointer)
# $a1 = arg #2 (inp pointer)
# $a2 = arg #3 (len)
# $a3 = arg #4 (key array)
# $a4 = arg #5 (counter array)
beqz $len,.Lno_data
ori $t3,$zero,64
la.global $t0,OPENSSL_loongarch_hwcap_P
ld.w $t0,$t0,0
bleu $len,$t3,.LChaCha20_1x # goto 1x when len <= 64
andi $t0,$t0,LOONGARCH_HWCAP_LASX | LOONGARCH_HWCAP_LSX
beqz $t0,.LChaCha20_1x
addi.d $sp,$sp,-64
fst.d $fs0,$sp,0
fst.d $fs1,$sp,8
fst.d $fs2,$sp,16
fst.d $fs3,$sp,24
fst.d $fs4,$sp,32
fst.d $fs5,$sp,40
fst.d $fs6,$sp,48
fst.d $fs7,$sp,56
andi $t1,$t0,LOONGARCH_HWCAP_LASX
bnez $t1,.LChaCha20_8x
b .LChaCha20_4x
EOF
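
# Dispatch summary for the code emitted above: requests of at most 64 bytes,
# or CPUs without LSX/LASX, take the scalar .LChaCha20_1x path; otherwise
# fs0-fs7 are spilled first (their low 64 bits alias the vector registers)
# and control branches to .LChaCha20_8x when LASX is available, else to
# .LChaCha20_4x.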
########################################################################
# Scalar code path that handles all lengths.
{
# Load the initial states in array @x[*] and update directly
my @x = ($t0, $t1, $t2, $t3, $t4, $t5, $t6, $t7,
         $s0, $s1, $s2, $s3, $s4, $s5, $s6, $s7);

sub ROUND {
my ($a0,$b0,$c0,$d0) = @_;
my ($a1,$b1,$c1,$d1) = map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2) = map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3) = map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

$code .= <<EOF;
add.w @x[$a0],@x[$a0],@x[$b0]
xor @x[$d0],@x[$d0],@x[$a0]
rotri.w @x[$d0],@x[$d0],16 # rotate left 16 bits
add.w @x[$a1],@x[$a1],@x[$b1]
xor @x[$d1],@x[$d1],@x[$a1]
rotri.w @x[$d1],@x[$d1],16
add.w @x[$c0],@x[$c0],@x[$d0]
xor @x[$b0],@x[$b0],@x[$c0]
rotri.w @x[$b0],@x[$b0],20 # rotate left 12 bits
add.w @x[$c1],@x[$c1],@x[$d1]
xor @x[$b1],@x[$b1],@x[$c1]
rotri.w @x[$b1],@x[$b1],20
add.w @x[$a0],@x[$a0],@x[$b0]
xor @x[$d0],@x[$d0],@x[$a0]
rotri.w @x[$d0],@x[$d0],24 # rotate left 8 bits
add.w @x[$a1],@x[$a1],@x[$b1]
xor @x[$d1],@x[$d1],@x[$a1]
rotri.w @x[$d1],@x[$d1],24
add.w @x[$c0],@x[$c0],@x[$d0]
xor @x[$b0],@x[$b0],@x[$c0]
rotri.w @x[$b0],@x[$b0],25 # rotate left 7 bits
add.w @x[$c1],@x[$c1],@x[$d1]
xor @x[$b1],@x[$b1],@x[$c1]
rotri.w @x[$b1],@x[$b1],25
add.w @x[$a2],@x[$a2],@x[$b2]
xor @x[$d2],@x[$d2],@x[$a2]
rotri.w @x[$d2],@x[$d2],16
add.w @x[$a3],@x[$a3],@x[$b3]
xor @x[$d3],@x[$d3],@x[$a3]
rotri.w @x[$d3],@x[$d3],16
add.w @x[$c2],@x[$c2],@x[$d2]
xor @x[$b2],@x[$b2],@x[$c2]
rotri.w @x[$b2],@x[$b2],20
add.w @x[$c3],@x[$c3],@x[$d3]
xor @x[$b3],@x[$b3],@x[$c3]
rotri.w @x[$b3],@x[$b3],20
add.w @x[$a2],@x[$a2],@x[$b2]
xor @x[$d2],@x[$d2],@x[$a2]
rotri.w @x[$d2],@x[$d2],24
add.w @x[$a3],@x[$a3],@x[$b3]
xor @x[$d3],@x[$d3],@x[$a3]
rotri.w @x[$d3],@x[$d3],24
add.w @x[$c2],@x[$c2],@x[$d2]
xor @x[$b2],@x[$b2],@x[$c2]
rotri.w @x[$b2],@x[$b2],25
add.w @x[$c3],@x[$c3],@x[$d3]
xor @x[$b3],@x[$b3],@x[$c3]
rotri.w @x[$b3],@x[$b3],25
EOF
}
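
# The map(($_&~3)+(($_+1)&3), ...) lines above rotate the column indices, so
# ROUND(0, 4, 8, 12) emits the four column quarter-rounds (0,4,8,12),
# (1,5,9,13), (2,6,10,14), (3,7,11,15) and ROUND(0, 5, 10, 15) the four
# diagonal quarter-rounds (0,5,10,15), (1,6,11,12), (2,7,8,13), (3,4,9,14).
# For readers cross-checking the emitted rotri.w amounts, here is a
# plain-Perl model of one quarter-round (illustrative only, never called by
# the generator; the name quarter_round_ref is ours):
sub quarter_round_ref {
my ($a, $b, $c, $d) = @_; # four 32-bit state words
my $rotl = sub { my ($v, $n) = @_; (($v << $n) | ($v >> (32 - $n))) & 0xffffffff };
$a = ($a + $b) & 0xffffffff; $d = $rotl->($d ^ $a, 16); # rotri.w by 16
$c = ($c + $d) & 0xffffffff; $b = $rotl->($b ^ $c, 12); # rotri.w by 20
$a = ($a + $b) & 0xffffffff; $d = $rotl->($d ^ $a, 8);  # rotri.w by 24
$c = ($c + $d) & 0xffffffff; $b = $rotl->($b ^ $c, 7);  # rotri.w by 25
return ($a, $b, $c, $d);
}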
$code .= <<EOF;
.align 6
.LChaCha20_1x:
addi.d $sp,$sp,-256
st.d $s0,$sp,0
st.d $s1,$sp,8
st.d $s2,$sp,16
st.d $s3,$sp,24
st.d $s4,$sp,32
st.d $s5,$sp,40
st.d $s6,$sp,48
st.d $s7,$sp,56
st.d $s8,$sp,64
# Save the initial block counter in $s8
ld.w $s8,$counter,0
b .Loop_outer_1x
.align 5
.Loop_outer_1x:
# Load constants
la.local $t8,.Lsigma
ld.w @x[0],$t8,0 # 'expa'
ld.w @x[1],$t8,4 # 'nd 3'
ld.w @x[2],$t8,8 # '2-by'
ld.w @x[3],$t8,12 # 'te k'
# Load key
ld.w @x[4],$key,4*0
ld.w @x[5],$key,4*1
ld.w @x[6],$key,4*2
ld.w @x[7],$key,4*3
ld.w @x[8],$key,4*4
ld.w @x[9],$key,4*5
ld.w @x[10],$key,4*6
ld.w @x[11],$key,4*7
# Load block counter
move @x[12],$s8
# Load nonce
ld.w @x[13],$counter,4*1
ld.w @x[14],$counter,4*2
ld.w @x[15],$counter,4*3
# Update states in \@x[*] for 20 rounds
ori $t8,$zero,10
b .Loop_1x
.align 5
.Loop_1x:
EOF
&ROUND (0, 4, 8, 12);
&ROUND (0, 5, 10, 15);
$code .= <<EOF;
addi.w $t8,$t8,-1
bnez $t8,.Loop_1x
# Get the final states by adding the initial states
la.local $t8,.Lsigma
ld.w $a7,$t8,4*0
ld.w $a6,$t8,4*1
ld.w $a5,$t8,4*2
add.w @x[0],@x[0],$a7
add.w @x[1],@x[1],$a6
add.w @x[2],@x[2],$a5
ld.w $a7,$t8,4*3
add.w @x[3],@x[3],$a7
ld.w $t8,$key,4*0
ld.w $a7,$key,4*1
ld.w $a6,$key,4*2
ld.w $a5,$key,4*3
add.w @x[4],@x[4],$t8
add.w @x[5],@x[5],$a7
add.w @x[6],@x[6],$a6
add.w @x[7],@x[7],$a5
ld.w $t8,$key,4*4
ld.w $a7,$key,4*5
ld.w $a6,$key,4*6
ld.w $a5,$key,4*7
add.w @x[8],@x[8],$t8
add.w @x[9],@x[9],$a7
add.w @x[10],@x[10],$a6
add.w @x[11],@x[11],$a5
add.w @x[12],@x[12],$s8
ld.w $t8,$counter,4*1
ld.w $a7,$counter,4*2
ld.w $a6,$counter,4*3
add.w @x[13],@x[13],$t8
add.w @x[14],@x[14],$a7
add.w @x[15],@x[15],$a6
ori $t8,$zero,64
bltu $len,$t8,.Ltail_1x
# Get the encrypted message by XORing the states with the plaintext
ld.w $t8,$inp,4*0
ld.w $a7,$inp,4*1
ld.w $a6,$inp,4*2
ld.w $a5,$inp,4*3
xor $t8,$t8,@x[0]
xor $a7,$a7,@x[1]
xor $a6,$a6,@x[2]
xor $a5,$a5,@x[3]
st.w $t8,$out,4*0
st.w $a7,$out,4*1
st.w $a6,$out,4*2
st.w $a5,$out,4*3
ld.w $t8,$inp,4*4
ld.w $a7,$inp,4*5
ld.w $a6,$inp,4*6
ld.w $a5,$inp,4*7
xor $t8,$t8,@x[4]
xor $a7,$a7,@x[5]
xor $a6,$a6,@x[6]
xor $a5,$a5,@x[7]
st.w $t8,$out,4*4
st.w $a7,$out,4*5
st.w $a6,$out,4*6
st.w $a5,$out,4*7
ld.w $t8,$inp,4*8
ld.w $a7,$inp,4*9
ld.w $a6,$inp,4*10
ld.w $a5,$inp,4*11
xor $t8,$t8,@x[8]
xor $a7,$a7,@x[9]
xor $a6,$a6,@x[10]
xor $a5,$a5,@x[11]
st.w $t8,$out,4*8
st.w $a7,$out,4*9
st.w $a6,$out,4*10
st.w $a5,$out,4*11
ld.w $t8,$inp,4*12
ld.w $a7,$inp,4*13
ld.w $a6,$inp,4*14
ld.w $a5,$inp,4*15
xor $t8,$t8,@x[12]
xor $a7,$a7,@x[13]
xor $a6,$a6,@x[14]
xor $a5,$a5,@x[15]
st.w $t8,$out,4*12
st.w $a7,$out,4*13
st.w $a6,$out,4*14
st.w $a5,$out,4*15
addi.d $len,$len,-64
beqz $len,.Ldone_1x
addi.d $inp,$inp,64
addi.d $out,$out,64
addi.w $s8,$s8,1
b .Loop_outer_1x
.align 4
.Ltail_1x:
# Handle the tail for 1x (1 <= tail_len <= 63)
addi.d $a7,$sp,72
st.w @x[0],$a7,4*0
st.w @x[1],$a7,4*1
st.w @x[2],$a7,4*2
st.w @x[3],$a7,4*3
st.w @x[4],$a7,4*4
st.w @x[5],$a7,4*5
st.w @x[6],$a7,4*6
st.w @x[7],$a7,4*7
st.w @x[8],$a7,4*8
st.w @x[9],$a7,4*9
st.w @x[10],$a7,4*10
st.w @x[11],$a7,4*11
st.w @x[12],$a7,4*12
st.w @x[13],$a7,4*13
st.w @x[14],$a7,4*14
st.w @x[15],$a7,4*15
move $t8,$zero
.Loop_tail_1x:
# XOR the input with the states byte by byte
ldx.bu $a6,$inp,$t8
ldx.bu $a5,$a7,$t8
xor $a6,$a6,$a5
stx.b $a6,$out,$t8
addi.w $t8,$t8,1
addi.d $len,$len,-1
bnez $len,.Loop_tail_1x
b .Ldone_1x
.Ldone_1x:
ld.d $s0,$sp,0
ld.d $s1,$sp,8
ld.d $s2,$sp,16
ld.d $s3,$sp,24
ld.d $s4,$sp,32
ld.d $s5,$sp,40
ld.d $s6,$sp,48
ld.d $s7,$sp,56
ld.d $s8,$sp,64
addi.d $sp,$sp,256
b .Lend
EOF
}
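
# The 1x path above uses a 256-byte stack frame: s0-s8 are saved at offsets
# 0..64 and, for a partial final block, the 64 bytes of keystream are staged
# at offset 72 before being XORed with the input byte by byte.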
########################################################################
# 128-bit LSX code path that handles all lengths.
{
# Load the initial states in array @x[*] and update directly.
my @x = ($vr0, $vr1, $vr2, $vr3, $vr4, $vr5, $vr6, $vr7,
         $vr8, $vr9, $vr10, $vr11, $vr12, $vr13, $vr14, $vr15);

# Save the initial states in array @y[*]
my @y = ($vr16, $vr17, $vr18, $vr19, $vr20, $vr21, $vr22, $vr23,
         $vr24, $vr25, $vr26, $vr27, $vr28, $vr29, $vr30, $vr31);

sub ROUND_4x {
my ($a0,$b0,$c0,$d0) = @_;
my ($a1,$b1,$c1,$d1) = map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2) = map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3) = map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

$code .= <<EOF;
vadd.w @x[$a0],@x[$a0],@x[$b0]
vxor.v @x[$d0],@x[$d0],@x[$a0]
vrotri.w @x[$d0],@x[$d0],16 # rotate left 16 bits
vadd.w @x[$a1],@x[$a1],@x[$b1]
vxor.v @x[$d1],@x[$d1],@x[$a1]
vrotri.w @x[$d1],@x[$d1],16
vadd.w @x[$c0],@x[$c0],@x[$d0]
vxor.v @x[$b0],@x[$b0],@x[$c0]
vrotri.w @x[$b0],@x[$b0],20 # rotate left 12 bits
vadd.w @x[$c1],@x[$c1],@x[$d1]
vxor.v @x[$b1],@x[$b1],@x[$c1]
vrotri.w @x[$b1],@x[$b1],20
vadd.w @x[$a0],@x[$a0],@x[$b0]
vxor.v @x[$d0],@x[$d0],@x[$a0]
vrotri.w @x[$d0],@x[$d0],24 # rotate left 8 bits
vadd.w @x[$a1],@x[$a1],@x[$b1]
vxor.v @x[$d1],@x[$d1],@x[$a1]
vrotri.w @x[$d1],@x[$d1],24
vadd.w @x[$c0],@x[$c0],@x[$d0]
vxor.v @x[$b0],@x[$b0],@x[$c0]
vrotri.w @x[$b0],@x[$b0],25 # rotate left 7 bits
vadd.w @x[$c1],@x[$c1],@x[$d1]
vxor.v @x[$b1],@x[$b1],@x[$c1]
vrotri.w @x[$b1],@x[$b1],25
vadd.w @x[$a2],@x[$a2],@x[$b2]
vxor.v @x[$d2],@x[$d2],@x[$a2]
vrotri.w @x[$d2],@x[$d2],16
vadd.w @x[$a3],@x[$a3],@x[$b3]
vxor.v @x[$d3],@x[$d3],@x[$a3]
vrotri.w @x[$d3],@x[$d3],16
vadd.w @x[$c2],@x[$c2],@x[$d2]
vxor.v @x[$b2],@x[$b2],@x[$c2]
vrotri.w @x[$b2],@x[$b2],20
vadd.w @x[$c3],@x[$c3],@x[$d3]
vxor.v @x[$b3],@x[$b3],@x[$c3]
vrotri.w @x[$b3],@x[$b3],20
vadd.w @x[$a2],@x[$a2],@x[$b2]
vxor.v @x[$d2],@x[$d2],@x[$a2]
vrotri.w @x[$d2],@x[$d2],24
vadd.w @x[$a3],@x[$a3],@x[$b3]
vxor.v @x[$d3],@x[$d3],@x[$a3]
vrotri.w @x[$d3],@x[$d3],24
vadd.w @x[$c2],@x[$c2],@x[$d2]
vxor.v @x[$b2],@x[$b2],@x[$c2]
vrotri.w @x[$b2],@x[$b2],25
vadd.w @x[$c3],@x[$c3],@x[$d3]
vxor.v @x[$b3],@x[$b3],@x[$c3]
vrotri.w @x[$b3],@x[$b3],25
EOF
}
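
# ROUND_4x follows the same quarter-round schedule as the scalar ROUND above;
# each vadd.w/vxor.v/vrotri.w simply operates on four 32-bit lanes at once,
# so one pass advances four independent ChaCha blocks.  ROUND_8x further down
# does the same with eight lanes per 256-bit LASX register.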
$code .= <<EOF;
.align 6
.LChaCha20_4x:
addi.d $sp,$sp,-128
# Save the initial block counter in $t4
ld.w $t4,$counter,0
b .Loop_outer_4x
.align 5
.Loop_outer_4x:
# Load constant
la.local $t8,.Lsigma
vldrepl.w @x[0],$t8,4*0 # 'expa'
vldrepl.w @x[1],$t8,4*1 # 'nd 3'
vldrepl.w @x[2],$t8,4*2 # '2-by'
vldrepl.w @x[3],$t8,4*3 # 'te k'
# Load key
vldrepl.w @x[4],$key,4*0
vldrepl.w @x[5],$key,4*1
vldrepl.w @x[6],$key,4*2
vldrepl.w @x[7],$key,4*3
vldrepl.w @x[8],$key,4*4
vldrepl.w @x[9],$key,4*5
vldrepl.w @x[10],$key,4*6
vldrepl.w @x[11],$key,4*7
# Load block counter
vreplgr2vr.w @x[12],$t4
# Load nonce
vldrepl.w @x[13],$counter,4*1
vldrepl.w @x[14],$counter,4*2
vldrepl.w @x[15],$counter,4*3
# Get the correct block counter for each block
la.local $t8,.Linc4x
vld @y[0],$t8,0
vadd.w @x[12],@x[12],@y[0]
# Copy the initial states from \@x[*] to \@y[*]
vori.b @y[0],@x[0],0
vori.b @y[1],@x[1],0
vori.b @y[2],@x[2],0
vori.b @y[3],@x[3],0
vori.b @y[4],@x[4],0
vori.b @y[5],@x[5],0
vori.b @y[6],@x[6],0
vori.b @y[7],@x[7],0
vori.b @y[8],@x[8],0
vori.b @y[9],@x[9],0
vori.b @y[10],@x[10],0
vori.b @y[11],@x[11],0
vori.b @y[12],@x[12],0
vori.b @y[13],@x[13],0
vori.b @y[14],@x[14],0
vori.b @y[15],@x[15],0
# Update states in \@x[*] for 20 rounds
ori $t8,$zero,10
b .Loop_4x
.align 5
.Loop_4x:
EOF
&ROUND_4x (0, 4, 8, 12);
&ROUND_4x (0, 5, 10, 15);
$code .= <<EOF;
addi.w $t8,$t8,-1
bnez $t8,.Loop_4x
# Get the final states by adding the initial states
vadd.w @x[0],@x[0],@y[0]
vadd.w @x[1],@x[1],@y[1]
vadd.w @x[2],@x[2],@y[2]
vadd.w @x[3],@x[3],@y[3]
vadd.w @x[4],@x[4],@y[4]
vadd.w @x[5],@x[5],@y[5]
vadd.w @x[6],@x[6],@y[6]
vadd.w @x[7],@x[7],@y[7]
vadd.w @x[8],@x[8],@y[8]
vadd.w @x[9],@x[9],@y[9]
vadd.w @x[10],@x[10],@y[10]
vadd.w @x[11],@x[11],@y[11]
vadd.w @x[12],@x[12],@y[12]
vadd.w @x[13],@x[13],@y[13]
vadd.w @x[14],@x[14],@y[14]
vadd.w @x[15],@x[15],@y[15]
# Get the transpose of \@x[*] and save them in \@x[*]
vilvl.w @y[0],@x[1],@x[0]
vilvh.w @y[1],@x[1],@x[0]
vilvl.w @y[2],@x[3],@x[2]
vilvh.w @y[3],@x[3],@x[2]
vilvl.w @y[4],@x[5],@x[4]
vilvh.w @y[5],@x[5],@x[4]
vilvl.w @y[6],@x[7],@x[6]
vilvh.w @y[7],@x[7],@x[6]
vilvl.w @y[8],@x[9],@x[8]
vilvh.w @y[9],@x[9],@x[8]
vilvl.w @y[10],@x[11],@x[10]
vilvh.w @y[11],@x[11],@x[10]
vilvl.w @y[12],@x[13],@x[12]
vilvh.w @y[13],@x[13],@x[12]
vilvl.w @y[14],@x[15],@x[14]
vilvh.w @y[15],@x[15],@x[14]
vilvl.d @x[0],@y[2],@y[0]
vilvh.d @x[1],@y[2],@y[0]
vilvl.d @x[2],@y[3],@y[1]
vilvh.d @x[3],@y[3],@y[1]
vilvl.d @x[4],@y[6],@y[4]
vilvh.d @x[5],@y[6],@y[4]
vilvl.d @x[6],@y[7],@y[5]
vilvh.d @x[7],@y[7],@y[5]
vilvl.d @x[8],@y[10],@y[8]
vilvh.d @x[9],@y[10],@y[8]
vilvl.d @x[10],@y[11],@y[9]
vilvh.d @x[11],@y[11],@y[9]
vilvl.d @x[12],@y[14],@y[12]
vilvh.d @x[13],@y[14],@y[12]
vilvl.d @x[14],@y[15],@y[13]
vilvh.d @x[15],@y[15],@y[13]
EOF
# Adjust the order of elements in @x[*] for ease of use.
@x = (@x[0],@x[4],@x[8],@x[12],@x[1],@x[5],@x[9],@x[13],
      @x[2],@x[6],@x[10],@x[14],@x[3],@x[7],@x[11],@x[15]);
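# After the word/doubleword interleaves above, @x[0..3] hold words 0-3 of
# blocks 0-3 respectively, @x[4..7] words 4-7, and so on; with the
# reordering just done, @x[4*k .. 4*k+3] is the contiguous 64-byte keystream
# for block k, so the stores below can walk the output buffer sequentially.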
$code .= <<EOF;
ori $t8,$zero,64*4
bltu $len,$t8,.Ltail_4x
# Get the encrypted message by XORing the states with the plaintext
vld @y[0],$inp,16*0
vld @y[1],$inp,16*1
vld @y[2],$inp,16*2
vld @y[3],$inp,16*3
vxor.v @y[0],@y[0],@x[0]
vxor.v @y[1],@y[1],@x[1]
vxor.v @y[2],@y[2],@x[2]
vxor.v @y[3],@y[3],@x[3]
vst @y[0],$out,16*0
vst @y[1],$out,16*1
vst @y[2],$out,16*2
vst @y[3],$out,16*3
vld @y[0],$inp,16*4
vld @y[1],$inp,16*5
vld @y[2],$inp,16*6
vld @y[3],$inp,16*7
vxor.v @y[0],@y[0],@x[4]
vxor.v @y[1],@y[1],@x[5]
vxor.v @y[2],@y[2],@x[6]
vxor.v @y[3],@y[3],@x[7]
vst @y[0],$out,16*4
vst @y[1],$out,16*5
vst @y[2],$out,16*6
vst @y[3],$out,16*7
vld @y[0],$inp,16*8
vld @y[1],$inp,16*9
vld @y[2],$inp,16*10
vld @y[3],$inp,16*11
vxor.v @y[0],@y[0],@x[8]
vxor.v @y[1],@y[1],@x[9]
vxor.v @y[2],@y[2],@x[10]
vxor.v @y[3],@y[3],@x[11]
vst @y[0],$out,16*8
vst @y[1],$out,16*9
vst @y[2],$out,16*10
vst @y[3],$out,16*11
vld @y[0],$inp,16*12
vld @y[1],$inp,16*13
vld @y[2],$inp,16*14
vld @y[3],$inp,16*15
vxor.v @y[0],@y[0],@x[12]
vxor.v @y[1],@y[1],@x[13]
vxor.v @y[2],@y[2],@x[14]
vxor.v @y[3],@y[3],@x[15]
vst @y[0],$out,16*12
vst @y[1],$out,16*13
vst @y[2],$out,16*14
vst @y[3],$out,16*15
addi.d $len,$len,-64*4
beqz $len,.Ldone_4x
addi.d $inp,$inp,64*4
addi.d $out,$out,64*4
addi.w $t4,$t4,4
b .Loop_outer_4x
.Ltail_4x:
# Handle the tail for 4x (1 <= tail_len <= 255)
ori $t8,$zero,192
bgeu $len,$t8,.L192_or_more4x
ori $t8,$zero,128
bgeu $len,$t8,.L128_or_more4x
ori $t8,$zero,64
bgeu $len,$t8,.L64_or_more4x
vst @x[0],$sp,16*0
vst @x[1],$sp,16*1
vst @x[2],$sp,16*2
vst @x[3],$sp,16*3
move $t8,$zero
b .Loop_tail_4x
.align 5
.L64_or_more4x:
vld @y[0],$inp,16*0
vld @y[1],$inp,16*1
vld @y[2],$inp,16*2
vld @y[3],$inp,16*3
vxor.v @y[0],@y[0],@x[0]
vxor.v @y[1],@y[1],@x[1]
vxor.v @y[2],@y[2],@x[2]
vxor.v @y[3],@y[3],@x[3]
vst @y[0],$out,16*0
vst @y[1],$out,16*1
vst @y[2],$out,16*2
vst @y[3],$out,16*3
addi.d $len,$len,-64
beqz $len,.Ldone_4x
addi.d $inp,$inp,64
addi.d $out,$out,64
vst @x[4],$sp,16*0
vst @x[5],$sp,16*1
vst @x[6],$sp,16*2
vst @x[7],$sp,16*3
move $t8,$zero
b .Loop_tail_4x
.align 5
.L128_or_more4x:
vld @y[0],$inp,16*0
vld @y[1],$inp,16*1
vld @y[2],$inp,16*2
vld @y[3],$inp,16*3
vxor.v @y[0],@y[0],@x[0]
vxor.v @y[1],@y[1],@x[1]
vxor.v @y[2],@y[2],@x[2]
vxor.v @y[3],@y[3],@x[3]
vst @y[0],$out,16*0
vst @y[1],$out,16*1
vst @y[2],$out,16*2
vst @y[3],$out,16*3
vld @y[0],$inp,16*4
vld @y[1],$inp,16*5
vld @y[2],$inp,16*6
vld @y[3],$inp,16*7
vxor.v @y[0],@y[0],@x[4]
vxor.v @y[1],@y[1],@x[5]
vxor.v @y[2],@y[2],@x[6]
vxor.v @y[3],@y[3],@x[7]
vst @y[0],$out,16*4
vst @y[1],$out,16*5
vst @y[2],$out,16*6
vst @y[3],$out,16*7
addi.d $len,$len,-128
beqz $len,.Ldone_4x
addi.d $inp,$inp,128
addi.d $out,$out,128
vst @x[8],$sp,16*0
vst @x[9],$sp,16*1
vst @x[10],$sp,16*2
vst @x[11],$sp,16*3
move $t8,$zero
b .Loop_tail_4x
.align 5
.L192_or_more4x:
vld @y[0],$inp,16*0
vld @y[1],$inp,16*1
vld @y[2],$inp,16*2
vld @y[3],$inp,16*3
vxor.v @y[0],@y[0],@x[0]
vxor.v @y[1],@y[1],@x[1]
vxor.v @y[2],@y[2],@x[2]
vxor.v @y[3],@y[3],@x[3]
vst @y[0],$out,16*0
vst @y[1],$out,16*1
vst @y[2],$out,16*2
vst @y[3],$out,16*3
vld @y[0],$inp,16*4
vld @y[1],$inp,16*5
vld @y[2],$inp,16*6
vld @y[3],$inp,16*7
vxor.v @y[0],@y[0],@x[4]
vxor.v @y[1],@y[1],@x[5]
vxor.v @y[2],@y[2],@x[6]
vxor.v @y[3],@y[3],@x[7]
vst @y[0],$out,16*4
vst @y[1],$out,16*5
vst @y[2],$out,16*6
vst @y[3],$out,16*7
vld @y[0],$inp,16*8
vld @y[1],$inp,16*9
vld @y[2],$inp,16*10
vld @y[3],$inp,16*11
vxor.v @y[0],@y[0],@x[8]
vxor.v @y[1],@y[1],@x[9]
vxor.v @y[2],@y[2],@x[10]
vxor.v @y[3],@y[3],@x[11]
vst @y[0],$out,16*8
vst @y[1],$out,16*9
vst @y[2],$out,16*10
vst @y[3],$out,16*11
addi.d $len,$len,-192
beqz $len,.Ldone_4x
addi.d $inp,$inp,192
addi.d $out,$out,192
vst @x[12],$sp,16*0
vst @x[13],$sp,16*1
vst @x[14],$sp,16*2
vst @x[15],$sp,16*3
move $t8,$zero
b .Loop_tail_4x
.Loop_tail_4x:
# XOR the input with the states byte by byte
ldx.bu $t5,$inp,$t8
ldx.bu $t6,$sp,$t8
xor $t5,$t5,$t6
stx.b $t5,$out,$t8
addi.w $t8,$t8,1
addi.d $len,$len,-1
bnez $len,.Loop_tail_4x
b .Ldone_4x
.Ldone_4x:
addi.d $sp,$sp,128
b .Lrestore_saved_fpr
EOF
}
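
# The 4x path keeps a 128-byte scratch frame only for the tail case: up to
# 64 bytes of leftover keystream are stored on the stack and then XORed with
# the remaining input byte by byte.  The 8x path below handles its tail the
# same way using two 32-byte stores.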
########################################################################
# 256-bit LASX code path that handles all lengths.
{
# Load the initial states in array @x[*] and update directly.
my @x = ($xr0, $xr1, $xr2, $xr3, $xr4, $xr5, $xr6, $xr7,
         $xr8, $xr9, $xr10, $xr11, $xr12, $xr13, $xr14, $xr15);

# Save the initial states in array @y[*]
my @y = ($xr16, $xr17, $xr18, $xr19, $xr20, $xr21, $xr22, $xr23,
         $xr24, $xr25, $xr26, $xr27, $xr28, $xr29, $xr30, $xr31);

sub ROUND_8x {
my ($a0,$b0,$c0,$d0) = @_;
my ($a1,$b1,$c1,$d1) = map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2) = map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3) = map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));

$code .= <<EOF;
xvadd.w @x[$a0],@x[$a0],@x[$b0]
xvxor.v @x[$d0],@x[$d0],@x[$a0]
xvrotri.w @x[$d0],@x[$d0],16 # rotate left 16 bits
xvadd.w @x[$a1],@x[$a1],@x[$b1]
xvxor.v @x[$d1],@x[$d1],@x[$a1]
xvrotri.w @x[$d1],@x[$d1],16
xvadd.w @x[$c0],@x[$c0],@x[$d0]
xvxor.v @x[$b0],@x[$b0],@x[$c0]
xvrotri.w @x[$b0],@x[$b0],20 # rotate left 12 bits
xvadd.w @x[$c1],@x[$c1],@x[$d1]
xvxor.v @x[$b1],@x[$b1],@x[$c1]
xvrotri.w @x[$b1],@x[$b1],20
xvadd.w @x[$a0],@x[$a0],@x[$b0]
xvxor.v @x[$d0],@x[$d0],@x[$a0]
xvrotri.w @x[$d0],@x[$d0],24 # rotate left 8 bits
xvadd.w @x[$a1],@x[$a1],@x[$b1]
xvxor.v @x[$d1],@x[$d1],@x[$a1]
xvrotri.w @x[$d1],@x[$d1],24
xvadd.w @x[$c0],@x[$c0],@x[$d0]
xvxor.v @x[$b0],@x[$b0],@x[$c0]
xvrotri.w @x[$b0],@x[$b0],25 # rotate left 7 bits
xvadd.w @x[$c1],@x[$c1],@x[$d1]
xvxor.v @x[$b1],@x[$b1],@x[$c1]
xvrotri.w @x[$b1],@x[$b1],25
xvadd.w @x[$a2],@x[$a2],@x[$b2]
xvxor.v @x[$d2],@x[$d2],@x[$a2]
xvrotri.w @x[$d2],@x[$d2],16
xvadd.w @x[$a3],@x[$a3],@x[$b3]
xvxor.v @x[$d3],@x[$d3],@x[$a3]
xvrotri.w @x[$d3],@x[$d3],16
xvadd.w @x[$c2],@x[$c2],@x[$d2]
xvxor.v @x[$b2],@x[$b2],@x[$c2]
xvrotri.w @x[$b2],@x[$b2],20
xvadd.w @x[$c3],@x[$c3],@x[$d3]
xvxor.v @x[$b3],@x[$b3],@x[$c3]
xvrotri.w @x[$b3],@x[$b3],20
xvadd.w @x[$a2],@x[$a2],@x[$b2]
xvxor.v @x[$d2],@x[$d2],@x[$a2]
xvrotri.w @x[$d2],@x[$d2],24
xvadd.w @x[$a3],@x[$a3],@x[$b3]
xvxor.v @x[$d3],@x[$d3],@x[$a3]
xvrotri.w @x[$d3],@x[$d3],24
xvadd.w @x[$c2],@x[$c2],@x[$d2]
xvxor.v @x[$b2],@x[$b2],@x[$c2]
xvrotri.w @x[$b2],@x[$b2],25
xvadd.w @x[$c3],@x[$c3],@x[$d3]
xvxor.v @x[$b3],@x[$b3],@x[$c3]
xvrotri.w @x[$b3],@x[$b3],25
EOF
}

$code .= <<EOF;
.align 6
.LChaCha20_8x:
addi.d $sp,$sp,-128
# Save the initial block counter in $t4
ld.w $t4,$counter,0
b .Loop_outer_8x
.align 5
.Loop_outer_8x:
# Load constant
la.local $t8,.Lsigma
xvldrepl.w @x[0],$t8,4*0 # 'expa'
xvldrepl.w @x[1],$t8,4*1 # 'nd 3'
xvldrepl.w @x[2],$t8,4*2 # '2-by'
xvldrepl.w @x[3],$t8,4*3 # 'te k'
# Load key
xvldrepl.w @x[4],$key,4*0
xvldrepl.w @x[5],$key,4*1
xvldrepl.w @x[6],$key,4*2
xvldrepl.w @x[7],$key,4*3
xvldrepl.w @x[8],$key,4*4
xvldrepl.w @x[9],$key,4*5
xvldrepl.w @x[10],$key,4*6
xvldrepl.w @x[11],$key,4*7
# Load block counter
xvreplgr2vr.w @x[12],$t4
# Load nonce
xvldrepl.w @x[13],$counter,4*1
xvldrepl.w @x[14],$counter,4*2
xvldrepl.w @x[15],$counter,4*3
# Get the correct block counter for each block
la.local $t8,.Linc8x
xvld @y[0],$t8,0
xvadd.w @x[12],@x[12],@y[0]
# Copy the initial states from \@x[*] to \@y[*]
xvori.b @y[0],@x[0],0
xvori.b @y[1],@x[1],0
xvori.b @y[2],@x[2],0
xvori.b @y[3],@x[3],0
xvori.b @y[4],@x[4],0
xvori.b @y[5],@x[5],0
xvori.b @y[6],@x[6],0
xvori.b @y[7],@x[7],0
xvori.b @y[8],@x[8],0
xvori.b @y[9],@x[9],0
xvori.b @y[10],@x[10],0
xvori.b @y[11],@x[11],0
xvori.b @y[12],@x[12],0
xvori.b @y[13],@x[13],0
xvori.b @y[14],@x[14],0
xvori.b @y[15],@x[15],0
# Update states in \@x[*] for 20 rounds
ori $t8,$zero,10
b .Loop_8x
.align 5
.Loop_8x:
EOF
&ROUND_8x (0, 4, 8, 12);
&ROUND_8x (0, 5, 10, 15);
$code .= <<EOF;
addi.w $t8,$t8,-1
bnez $t8,.Loop_8x
# Get the final states by adding the initial states
xvadd.w @x[0],@x[0],@y[0]
xvadd.w @x[1],@x[1],@y[1]
xvadd.w @x[2],@x[2],@y[2]
xvadd.w @x[3],@x[3],@y[3]
xvadd.w @x[4],@x[4],@y[4]
xvadd.w @x[5],@x[5],@y[5]
xvadd.w @x[6],@x[6],@y[6]
xvadd.w @x[7],@x[7],@y[7]
xvadd.w @x[8],@x[8],@y[8]
xvadd.w @x[9],@x[9],@y[9]
xvadd.w @x[10],@x[10],@y[10]
xvadd.w @x[11],@x[11],@y[11]
xvadd.w @x[12],@x[12],@y[12]
xvadd.w @x[13],@x[13],@y[13]
xvadd.w @x[14],@x[14],@y[14]
xvadd.w @x[15],@x[15],@y[15]
# Get the transpose of \@x[*] and save them in \@y[*]
xvilvl.w @y[0],@x[1],@x[0]
xvilvh.w @y[1],@x[1],@x[0]
xvilvl.w @y[2],@x[3],@x[2]
xvilvh.w @y[3],@x[3],@x[2]
xvilvl.w @y[4],@x[5],@x[4]
xvilvh.w @y[5],@x[5],@x[4]
xvilvl.w @y[6],@x[7],@x[6]
xvilvh.w @y[7],@x[7],@x[6]
xvilvl.w @y[8],@x[9],@x[8]
xvilvh.w @y[9],@x[9],@x[8]
xvilvl.w @y[10],@x[11],@x[10]
xvilvh.w @y[11],@x[11],@x[10]
xvilvl.w @y[12],@x[13],@x[12]
xvilvh.w @y[13],@x[13],@x[12]
xvilvl.w @y[14],@x[15],@x[14]
xvilvh.w @y[15],@x[15],@x[14]
xvilvl.d @x[0],@y[2],@y[0]
xvilvh.d @x[1],@y[2],@y[0]
xvilvl.d @x[2],@y[3],@y[1]
xvilvh.d @x[3],@y[3],@y[1]
xvilvl.d @x[4],@y[6],@y[4]
xvilvh.d @x[5],@y[6],@y[4]
xvilvl.d @x[6],@y[7],@y[5]
xvilvh.d @x[7],@y[7],@y[5]
xvilvl.d @x[8],@y[10],@y[8]
xvilvh.d @x[9],@y[10],@y[8]
xvilvl.d @x[10],@y[11],@y[9]
xvilvh.d @x[11],@y[11],@y[9]
xvilvl.d @x[12],@y[14],@y[12]
xvilvh.d @x[13],@y[14],@y[12]
xvilvl.d @x[14],@y[15],@y[13]
xvilvh.d @x[15],@y[15],@y[13]
xvori.b @y[0],@x[4],0
xvpermi.q @y[0],@x[0],0x20
xvori.b @y[1],@x[5],0
xvpermi.q @y[1],@x[1],0x20
xvori.b @y[2],@x[6],0
xvpermi.q @y[2],@x[2],0x20
xvori.b @y[3],@x[7],0
xvpermi.q @y[3],@x[3],0x20
xvori.b @y[4],@x[4],0
xvpermi.q @y[4],@x[0],0x31
xvori.b @y[5],@x[5],0
xvpermi.q @y[5],@x[1],0x31
xvori.b @y[6],@x[6],0
xvpermi.q @y[6],@x[2],0x31
xvori.b @y[7],@x[7],0
xvpermi.q @y[7],@x[3],0x31
xvori.b @y[8],@x[12],0
xvpermi.q @y[8],@x[8],0x20
xvori.b @y[9],@x[13],0
xvpermi.q @y[9],@x[9],0x20
xvori.b @y[10],@x[14],0
xvpermi.q @y[10],@x[10],0x20
xvori.b @y[11],@x[15],0
xvpermi.q @y[11],@x[11],0x20
xvori.b @y[12],@x[12],0
xvpermi.q @y[12],@x[8],0x31
xvori.b @y[13],@x[13],0
xvpermi.q @y[13],@x[9],0x31
xvori.b @y[14],@x[14],0
xvpermi.q @y[14],@x[10],0x31
xvori.b @y[15],@x[15],0
xvpermi.q @y[15],@x[11],0x31
EOF
# Adjust the order of elements in @y[*] for ease of use.
@y = (@y[0],@y[8],@y[1],@y[9],@y[2],@y[10],@y[3],@y[11],
      @y[4],@y[12],@y[5],@y[13],@y[6],@y[14],@y[7],@y[15]);
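# After the xvpermi.q merges above, each @y register carries 32 contiguous
# keystream bytes of one of the eight blocks (the 0x20/0x31 selectors pick
# the matching low/high 128-bit halves); with the reordering just done,
# @y[2*k] and @y[2*k+1] form the 64-byte keystream for block k, matching the
# sequential 32-byte loads, XORs and stores below.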
$code .= <<EOF;
ori $t8,$zero,64*8
bltu $len,$t8,.Ltail_8x
# Get the encrypted message by XORing the states with the plaintext
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
xvld @x[0],$inp,32*8
xvld @x[1],$inp,32*9
xvld @x[2],$inp,32*10
xvld @x[3],$inp,32*11
xvxor.v @x[0],@x[0],@y[8]
xvxor.v @x[1],@x[1],@y[9]
xvxor.v @x[2],@x[2],@y[10]
xvxor.v @x[3],@x[3],@y[11]
xvst @x[0],$out,32*8
xvst @x[1],$out,32*9
xvst @x[2],$out,32*10
xvst @x[3],$out,32*11
xvld @x[0],$inp,32*12
xvld @x[1],$inp,32*13
xvld @x[2],$inp,32*14
xvld @x[3],$inp,32*15
xvxor.v @x[0],@x[0],@y[12]
xvxor.v @x[1],@x[1],@y[13]
xvxor.v @x[2],@x[2],@y[14]
xvxor.v @x[3],@x[3],@y[15]
xvst @x[0],$out,32*12
xvst @x[1],$out,32*13
xvst @x[2],$out,32*14
xvst @x[3],$out,32*15
addi.d $len,$len,-64*8
beqz $len,.Ldone_8x
addi.d $inp,$inp,64*8
addi.d $out,$out,64*8
addi.w $t4,$t4,8
b .Loop_outer_8x
.Ltail_8x:
# Handle the tail for 8x (1 <= tail_len <= 511)
ori $t8,$zero,448
bgeu $len,$t8,.L448_or_more8x
ori $t8,$zero,384
bgeu $len,$t8,.L384_or_more8x
ori $t8,$zero,320
bgeu $len,$t8,.L320_or_more8x
ori $t8,$zero,256
bgeu $len,$t8,.L256_or_more8x
ori $t8,$zero,192
bgeu $len,$t8,.L192_or_more8x
ori $t8,$zero,128
bgeu $len,$t8,.L128_or_more8x
ori $t8,$zero,64
bgeu $len,$t8,.L64_or_more8x
xvst @y[0],$sp,32*0
xvst @y[1],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L64_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
addi.d $len,$len,-64
beqz $len,.Ldone_8x
addi.d $inp,$inp,64
addi.d $out,$out,64
xvst @y[2],$sp,32*0
xvst @y[3],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L128_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
addi.d $len,$len,-128
beqz $len,.Ldone_8x
addi.d $inp,$inp,128
addi.d $out,$out,128
xvst @y[4],$sp,32*0
xvst @y[5],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L192_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
addi.d $len,$len,-192
beqz $len,.Ldone_8x
addi.d $inp,$inp,192
addi.d $out,$out,192
xvst @y[6],$sp,32*0
xvst @y[7],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L256_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
addi.d $len,$len,-256
beqz $len,.Ldone_8x
addi.d $inp,$inp,256
addi.d $out,$out,256
xvst @y[8],$sp,32*0
xvst @y[9],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L320_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
xvld @x[0],$inp,32*8
xvld @x[1],$inp,32*9
xvxor.v @x[0],@x[0],@y[8]
xvxor.v @x[1],@x[1],@y[9]
xvst @x[0],$out,32*8
xvst @x[1],$out,32*9
addi.d $len,$len,-320
beqz $len,.Ldone_8x
addi.d $inp,$inp,320
addi.d $out,$out,320
xvst @y[10],$sp,32*0
xvst @y[11],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L384_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
xvld @x[0],$inp,32*8
xvld @x[1],$inp,32*9
xvld @x[2],$inp,32*10
xvld @x[3],$inp,32*11
xvxor.v @x[0],@x[0],@y[8]
xvxor.v @x[1],@x[1],@y[9]
xvxor.v @x[2],@x[2],@y[10]
xvxor.v @x[3],@x[3],@y[11]
xvst @x[0],$out,32*8
xvst @x[1],$out,32*9
xvst @x[2],$out,32*10
xvst @x[3],$out,32*11
addi.d $len,$len,-384
beqz $len,.Ldone_8x
addi.d $inp,$inp,384
addi.d $out,$out,384
xvst @y[12],$sp,32*0
xvst @y[13],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.align 5
.L448_or_more8x:
xvld @x[0],$inp,32*0
xvld @x[1],$inp,32*1
xvld @x[2],$inp,32*2
xvld @x[3],$inp,32*3
xvxor.v @x[0],@x[0],@y[0]
xvxor.v @x[1],@x[1],@y[1]
xvxor.v @x[2],@x[2],@y[2]
xvxor.v @x[3],@x[3],@y[3]
xvst @x[0],$out,32*0
xvst @x[1],$out,32*1
xvst @x[2],$out,32*2
xvst @x[3],$out,32*3
xvld @x[0],$inp,32*4
xvld @x[1],$inp,32*5
xvld @x[2],$inp,32*6
xvld @x[3],$inp,32*7
xvxor.v @x[0],@x[0],@y[4]
xvxor.v @x[1],@x[1],@y[5]
xvxor.v @x[2],@x[2],@y[6]
xvxor.v @x[3],@x[3],@y[7]
xvst @x[0],$out,32*4
xvst @x[1],$out,32*5
xvst @x[2],$out,32*6
xvst @x[3],$out,32*7
xvld @x[0],$inp,32*8
xvld @x[1],$inp,32*9
xvld @x[2],$inp,32*10
xvld @x[3],$inp,32*11
xvxor.v @x[0],@x[0],@y[8]
xvxor.v @x[1],@x[1],@y[9]
xvxor.v @x[2],@x[2],@y[10]
xvxor.v @x[3],@x[3],@y[11]
xvst @x[0],$out,32*8
xvst @x[1],$out,32*9
xvst @x[2],$out,32*10
xvst @x[3],$out,32*11
xvld @x[0],$inp,32*12
xvld @x[1],$inp,32*13
xvxor.v @x[0],@x[0],@y[12]
xvxor.v @x[1],@x[1],@y[13]
xvst @x[0],$out,32*12
xvst @x[1],$out,32*13
addi.d $len,$len,-448
beqz $len,.Ldone_8x
addi.d $inp,$inp,448
addi.d $out,$out,448
xvst @y[14],$sp,32*0
xvst @y[15],$sp,32*1
move $t8,$zero
b .Loop_tail_8x
.Loop_tail_8x:
# XOR the input with the states byte by byte
ldx.bu $t5,$inp,$t8
ldx.bu $t6,$sp,$t8
xor $t5,$t5,$t6
stx.b $t5,$out,$t8
addi.w $t8,$t8,1
addi.d $len,$len,-1
bnez $len,.Loop_tail_8x
b .Ldone_8x
.Ldone_8x:
addi.d $sp,$sp,128
b .Lrestore_saved_fpr
EOF
}
$code .= <<EOF;
.Lrestore_saved_fpr:
fld.d $fs0,$sp,0
fld.d $fs1,$sp,8
fld.d $fs2,$sp,16
fld.d $fs3,$sp,24
fld.d $fs4,$sp,32
fld.d $fs5,$sp,40
fld.d $fs6,$sp,48
fld.d $fs7,$sp,56
addi.d $sp,$sp,64
.Lno_data:
.Lend:
jr $ra
.size ChaCha20_ctr32,.-ChaCha20_ctr32
EOF

$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;