050-v5.16-02-mips-bpf-Add-eBPF-JIT-for-32-bit-MIPS.patch 89 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078
  1. From: Johan Almbladh <[email protected]>
  2. Date: Tue, 5 Oct 2021 18:54:04 +0200
  3. Subject: [PATCH] mips: bpf: Add eBPF JIT for 32-bit MIPS
  4. This is an implementation of an eBPF JIT for 32-bit MIPS I-V and MIPS32.
  5. The implementation supports all 32-bit and 64-bit ALU and JMP operations,
  6. including the recently-added atomics. 64-bit div/mod and 64-bit atomics
  7. are implemented using function calls to math64 and atomic64 functions,
  8. respectively. All 32-bit operations are implemented natively by the JIT,
  9. except if the CPU lacks ll/sc instructions.
  10. Register mapping
  11. ================
  12. All 64-bit eBPF registers are mapped to native 32-bit MIPS register pairs,
  13. and the JIT does not use any stack scratch space for register swapping. This means
  14. that all eBPF register data is kept in CPU registers all the time, and
  15. this simplifies the register management a lot. It also reduces the JIT's
  16. pressure on temporary registers since we do not have to move data around.
  17. Native register pairs are ordered according to CPU endianness, following
  18. the O32 calling convention for passing 64-bit arguments and return values.
  19. The eBPF return value, arguments and callee-saved registers are mapped to
  20. their native MIPS equivalents.
  21. Since the 32 highest bits in the eBPF FP (frame pointer) register are
  22. always zero, only one general-purpose register is actually needed for the
  23. mapping. The MIPS fp register is used for this purpose. The high bits are
  24. mapped to MIPS register r0. This saves us one CPU register, which is much
  25. needed for temporaries, while still allowing us to treat the R10 (FP)
  26. register just like any other eBPF register in the JIT.
  27. The MIPS gp (global pointer) and at (assembler temporary) registers are
  28. used as internal temporary registers for constant blinding. CPU registers
  29. t6-t9 are used internally by the JIT when constructing more complex 64-bit
  30. operations. This is precisely what is needed - two registers to store an
  31. operand value, and two more as scratch registers when performing the
  32. operation.
  33. The register mapping is shown below.
  34. R0 - $v1, $v0 return value
  35. R1 - $a1, $a0 argument 1, passed in registers
  36. R2 - $a3, $a2 argument 2, passed in registers
  37. R3 - $t1, $t0 argument 3, passed on stack
  38. R4 - $t3, $t2 argument 4, passed on stack
  39. R5 - $t5, $t4 argument 5, passed on stack
  40. R6 - $s1, $s0 callee-saved
  41. R7 - $s3, $s2 callee-saved
  42. R8 - $s5, $s4 callee-saved
  43. R9 - $s7, $s6 callee-saved
  44. FP - $r0, $fp 32-bit frame pointer
  45. AX - $gp, $at constant-blinding
  46. $t6 - $t9 unallocated, JIT temporaries
  47. Jump offsets
  48. ============
  49. The JIT tries to map all conditional JMP operations to MIPS conditional
  50. PC-relative branches. The MIPS branch offset field is 18 bits, in bytes,
  51. which is equivalent to the eBPF 16-bit instruction offset. However, since
  52. the JIT may emit more than one CPU instruction per eBPF instruction, the
  53. field width may overflow. If that happens, the JIT converts the long
  54. conditional jump to a short PC-relative branch with the condition
  55. inverted, jumping over a long unconditional absolute jmp (j).
  56. This conversion will change the instruction offset mapping used for jumps,
  57. and may in turn result in more branch offset overflows. The JIT therefore
  58. dry-runs the translation until no more branches are converted and the
  59. offsets do not change anymore. There is an upper bound on this of course,
  60. and if the JIT hits that limit, the last two iterations are run with all
  61. branches being converted.
  62. Tail call count
  63. ===============
  64. The current tail call count is stored in the 16-byte area of the caller's
  65. stack frame that is reserved for the callee in the o32 ABI. The value is
  66. initialized in the prologue, and propagated to the tail-callee by skipping
  67. the initialization instructions when emitting the tail call.
  68. Signed-off-by: Johan Almbladh <[email protected]>
  69. ---
  70. create mode 100644 arch/mips/net/bpf_jit_comp.c
  71. create mode 100644 arch/mips/net/bpf_jit_comp.h
  72. create mode 100644 arch/mips/net/bpf_jit_comp32.c
  73. --- a/arch/mips/net/Makefile
  74. +++ b/arch/mips/net/Makefile
  75. @@ -2,4 +2,9 @@
  76. # MIPS networking code
  77. obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
  78. -obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
  79. +
  80. +ifeq ($(CONFIG_32BIT),y)
  81. + obj-$(CONFIG_MIPS_EBPF_JIT) += bpf_jit_comp.o bpf_jit_comp32.o
  82. +else
  83. + obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
  84. +endif
  85. --- /dev/null
  86. +++ b/arch/mips/net/bpf_jit_comp.c
  87. @@ -0,0 +1,1020 @@
  88. +// SPDX-License-Identifier: GPL-2.0-only
  89. +/*
  90. + * Just-In-Time compiler for eBPF bytecode on MIPS.
  91. + * Implementation of JIT functions common to 32-bit and 64-bit CPUs.
  92. + *
  93. + * Copyright (c) 2021 Anyfi Networks AB.
  94. + * Author: Johan Almbladh <[email protected]>
  95. + *
  96. + * Based on code and ideas from
  97. + * Copyright (c) 2017 Cavium, Inc.
  98. + * Copyright (c) 2017 Shubham Bansal <[email protected]>
  99. + * Copyright (c) 2011 Mircea Gherzan <[email protected]>
  100. + */
  101. +
  102. +/*
  103. + * Code overview
  104. + * =============
  105. + *
  106. + * - bpf_jit_comp.h
  107. + * Common definitions and utilities.
  108. + *
  109. + * - bpf_jit_comp.c
  110. + * Implementation of JIT top-level logic and exported JIT API functions.
  111. + * Implementation of internal operations shared by 32-bit and 64-bit code.
  112. + * JMP and ALU JIT control code, register control code, shared ALU and
  113. + * JMP/JMP32 JIT operations.
  114. + *
  115. + * - bpf_jit_comp32.c
  116. + * Implementation of functions to JIT prologue, epilogue and a single eBPF
  117. + * instruction for 32-bit MIPS CPUs. The functions use shared operations
  118. + * where possible, and implement the rest for 32-bit MIPS such as ALU64
  119. + * operations.
  120. + *
  121. + * - bpf_jit_comp64.c
  122. + * Ditto, for 64-bit MIPS CPUs.
  123. + *
  124. + * Zero and sign extension
  125. + * ========================
  126. + * 32-bit MIPS instructions on 64-bit MIPS registers use sign extension,
  127. + * but the eBPF instruction set mandates zero extension. We let the verifier
  128. + * insert explicit zero-extensions after 32-bit ALU operations, both for
  129. + * 32-bit and 64-bit MIPS JITs. Conditional JMP32 operations on 64-bit MIPS
  130. + * are JITed with sign extensions inserted when so expected.
  131. + *
  132. + * ALU operations
  133. + * ==============
  134. + * ALU operations on 32/64-bit MIPS and ALU64 operations on 64-bit MIPS are
  135. + * JITed in the following steps. ALU64 operations on 32-bit MIPS are more
  136. + * complicated and therefore only processed by special implementations in
  137. + * step (3).
  138. + *
  139. + * 1) valid_alu_i:
  140. + * Determine if an immediate operation can be emitted as such, or if
  141. + * we must fall back to the register version.
  142. + *
  143. + * 2) rewrite_alu_i:
  144. + * Convert BPF operation and immediate value to a canonical form for
  145. + * JITing. In some degenerate cases this form may be a no-op.
  146. + *
  147. + * 3) emit_alu_{i,i64,r,64}:
  148. + * Emit instructions for an ALU or ALU64 immediate or register operation.
  149. + *
  150. + * JMP operations
  151. + * ==============
  152. + * JMP and JMP32 operations require a JIT instruction offset table for
  153. + * translating the jump offset. This table is computed by dry-running the
  154. + * JIT without actually emitting anything. However, the computed PC-relative
  155. + * offset may overflow the 18-bit offset field width of the native MIPS
  156. + * branch instruction. In such cases, the long jump is converted into the
  157. + * following sequence.
  158. + *
  159. + * <branch> !<cond> +2 Inverted PC-relative branch
  160. + * nop Delay slot
  161. + * j <offset> Unconditional absolute long jump
  162. + * nop Delay slot
  163. + *
  164. + * Since this converted sequence alters the offset table, all offsets must
  165. + * be re-calculated. This may in turn trigger new branch conversions, so
  166. + * the process is repeated until no further changes are made. Normally it
  167. + * completes in 1-2 iterations. If JIT_MAX_ITERATIONS should be reached, we
  168. + * fall back to converting every remaining jump operation. The branch
  169. + * conversion is independent of how the JMP or JMP32 condition is JITed.
  170. + *
  171. + * JMP32 and JMP operations are JITed as follows.
  172. + *
  173. + * 1) setup_jmp_{i,r}:
  174. + * Convert jump conditional and offset into a form that can be JITed.
  175. + * This form may be a no-op, a canonical form, or an inverted PC-relative
  176. + * jump if branch conversion is necessary.
  177. + *
  178. + * 2) valid_jmp_i:
  179. + * Determine if an immediate operation can be emitted as such, or if
  180. + * we must fall back to the register version. Applies to JMP32 for 32-bit
  181. + * MIPS, and both JMP and JMP32 for 64-bit MIPS.
  182. + *
  183. + * 3) emit_jmp_{i,i64,r,r64}:
  184. + * Emit instructions for a JMP or JMP32 immediate or register operation.
  185. + *
  186. + * 4) finish_jmp_{i,r}:
  187. + * Emit any instructions needed to finish the jump. This includes a nop
  188. + * for the delay slot if a branch was emitted, and a long absolute jump
  189. + * if the branch was converted.
  190. + */
  191. +
  192. +#include <linux/limits.h>
  193. +#include <linux/bitops.h>
  194. +#include <linux/errno.h>
  195. +#include <linux/filter.h>
  196. +#include <linux/bpf.h>
  197. +#include <linux/slab.h>
  198. +#include <asm/bitops.h>
  199. +#include <asm/cacheflush.h>
  200. +#include <asm/cpu-features.h>
  201. +#include <asm/isa-rev.h>
  202. +#include <asm/uasm.h>
  203. +
  204. +#include "bpf_jit_comp.h"
  205. +
  206. +/* Convenience macros for descriptor access */
  207. +#define CONVERTED(desc) ((desc) & JIT_DESC_CONVERT)
  208. +#define INDEX(desc) ((desc) & ~JIT_DESC_CONVERT)
  209. +
  210. +/*
  211. + * Push registers on the stack, starting at a given depth from the stack
  212. + * pointer and increasing. The next depth to be written is returned.
  213. + */
  214. +int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth)
  215. +{
  216. + int reg;
  217. +
  218. + for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++)
  219. + if (mask & BIT(reg)) {
  220. + if ((excl & BIT(reg)) == 0) {
  221. + if (sizeof(long) == 4)
  222. + emit(ctx, sw, reg, depth, MIPS_R_SP);
  223. + else /* sizeof(long) == 8 */
  224. + emit(ctx, sd, reg, depth, MIPS_R_SP);
  225. + }
  226. + depth += sizeof(long);
  227. + }
  228. +
  229. + ctx->stack_used = max((int)ctx->stack_used, depth);
  230. + return depth;
  231. +}
  232. +
  233. +/*
  234. + * Pop registers from the stack, starting at a given depth from the stack
  235. + * pointer and increasing. The next depth to be read is returned.
  236. + */
  237. +int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth)
  238. +{
  239. + int reg;
  240. +
  241. + for (reg = 0; reg < BITS_PER_BYTE * sizeof(mask); reg++)
  242. + if (mask & BIT(reg)) {
  243. + if ((excl & BIT(reg)) == 0) {
  244. + if (sizeof(long) == 4)
  245. + emit(ctx, lw, reg, depth, MIPS_R_SP);
  246. + else /* sizeof(long) == 8 */
  247. + emit(ctx, ld, reg, depth, MIPS_R_SP);
  248. + }
  249. + depth += sizeof(long);
  250. + }
  251. +
  252. + return depth;
  253. +}
  254. +
  255. +/* Compute the 28-bit jump target address from a BPF program location */
  256. +int get_target(struct jit_context *ctx, u32 loc)
  257. +{
  258. + u32 index = INDEX(ctx->descriptors[loc]);
  259. + unsigned long pc = (unsigned long)&ctx->target[ctx->jit_index];
  260. + unsigned long addr = (unsigned long)&ctx->target[index];
  261. +
  262. + if (!ctx->target)
  263. + return 0;
  264. +
  265. + if ((addr ^ pc) & ~MIPS_JMP_MASK)
  266. + return -1;
  267. +
  268. + return addr & MIPS_JMP_MASK;
  269. +}
  270. +
  271. +/* Compute the PC-relative offset to relative BPF program offset */
  272. +int get_offset(const struct jit_context *ctx, int off)
  273. +{
  274. + return (INDEX(ctx->descriptors[ctx->bpf_index + off]) -
  275. + ctx->jit_index - 1) * sizeof(u32);
  276. +}
  277. +
  278. +/* dst = imm (register width) */
  279. +void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm)
  280. +{
  281. + if (imm >= -0x8000 && imm <= 0x7fff) {
  282. + emit(ctx, addiu, dst, MIPS_R_ZERO, imm);
  283. + } else {
  284. + emit(ctx, lui, dst, (s16)((u32)imm >> 16));
  285. + emit(ctx, ori, dst, dst, (u16)(imm & 0xffff));
  286. + }
  287. + clobber_reg(ctx, dst);
  288. +}
  289. +
  290. +/* dst = src (register width) */
  291. +void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src)
  292. +{
  293. + emit(ctx, ori, dst, src, 0);
  294. + clobber_reg(ctx, dst);
  295. +}
  296. +
  297. +/* Validate ALU immediate range */
  298. +bool valid_alu_i(u8 op, s32 imm)
  299. +{
  300. + switch (BPF_OP(op)) {
  301. + case BPF_NEG:
  302. + case BPF_LSH:
  303. + case BPF_RSH:
  304. + case BPF_ARSH:
  305. + /* All legal eBPF values are valid */
  306. + return true;
  307. + case BPF_ADD:
  308. + /* imm must be 16 bits */
  309. + return imm >= -0x8000 && imm <= 0x7fff;
  310. + case BPF_SUB:
  311. + /* -imm must be 16 bits */
  312. + return imm >= -0x7fff && imm <= 0x8000;
  313. + case BPF_AND:
  314. + case BPF_OR:
  315. + case BPF_XOR:
  316. + /* imm must be 16 bits unsigned */
  317. + return imm >= 0 && imm <= 0xffff;
  318. + case BPF_MUL:
  319. + /* imm must be zero or a positive power of two */
  320. + return imm == 0 || (imm > 0 && is_power_of_2(imm));
  321. + case BPF_DIV:
  322. + case BPF_MOD:
  323. + /* imm must be a 17-bit power of two */
  324. + return (u32)imm <= 0x10000 && is_power_of_2((u32)imm);
  325. + }
  326. + return false;
  327. +}
  328. +
  329. +/* Rewrite ALU immediate operation */
  330. +bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val)
  331. +{
  332. + bool act = true;
  333. +
  334. + switch (BPF_OP(op)) {
  335. + case BPF_LSH:
  336. + case BPF_RSH:
  337. + case BPF_ARSH:
  338. + case BPF_ADD:
  339. + case BPF_SUB:
  340. + case BPF_OR:
  341. + case BPF_XOR:
  342. + /* imm == 0 is a no-op */
  343. + act = imm != 0;
  344. + break;
  345. + case BPF_MUL:
  346. + if (imm == 1) {
  347. + /* dst * 1 is a no-op */
  348. + act = false;
  349. + } else if (imm == 0) {
  350. + /* dst * 0 is dst & 0 */
  351. + op = BPF_AND;
  352. + } else {
  353. + /* dst * (1 << n) is dst << n */
  354. + op = BPF_LSH;
  355. + imm = ilog2(abs(imm));
  356. + }
  357. + break;
  358. + case BPF_DIV:
  359. + if (imm == 1) {
  360. + /* dst / 1 is a no-op */
  361. + act = false;
  362. + } else {
  363. + /* dst / (1 << n) is dst >> n */
  364. + op = BPF_RSH;
  365. + imm = ilog2(imm);
  366. + }
  367. + break;
  368. + case BPF_MOD:
  369. + /* dst % (1 << n) is dst & ((1 << n) - 1) */
  370. + op = BPF_AND;
  371. + imm--;
  372. + break;
  373. + }
  374. +
  375. + *alu = op;
  376. + *val = imm;
  377. + return act;
  378. +}
  379. +
  380. +/* ALU immediate operation (32-bit) */
  381. +void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op)
  382. +{
  383. + switch (BPF_OP(op)) {
  384. + /* dst = -dst */
  385. + case BPF_NEG:
  386. + emit(ctx, subu, dst, MIPS_R_ZERO, dst);
  387. + break;
  388. + /* dst = dst & imm */
  389. + case BPF_AND:
  390. + emit(ctx, andi, dst, dst, (u16)imm);
  391. + break;
  392. + /* dst = dst | imm */
  393. + case BPF_OR:
  394. + emit(ctx, ori, dst, dst, (u16)imm);
  395. + break;
  396. + /* dst = dst ^ imm */
  397. + case BPF_XOR:
  398. + emit(ctx, xori, dst, dst, (u16)imm);
  399. + break;
  400. + /* dst = dst << imm */
  401. + case BPF_LSH:
  402. + emit(ctx, sll, dst, dst, imm);
  403. + break;
  404. + /* dst = dst >> imm */
  405. + case BPF_RSH:
  406. + emit(ctx, srl, dst, dst, imm);
  407. + break;
  408. + /* dst = dst >> imm (arithmetic) */
  409. + case BPF_ARSH:
  410. + emit(ctx, sra, dst, dst, imm);
  411. + break;
  412. + /* dst = dst + imm */
  413. + case BPF_ADD:
  414. + emit(ctx, addiu, dst, dst, imm);
  415. + break;
  416. + /* dst = dst - imm */
  417. + case BPF_SUB:
  418. + emit(ctx, addiu, dst, dst, -imm);
  419. + break;
  420. + }
  421. + clobber_reg(ctx, dst);
  422. +}
  423. +
  424. +/* ALU register operation (32-bit) */
  425. +void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op)
  426. +{
  427. + switch (BPF_OP(op)) {
  428. + /* dst = dst & src */
  429. + case BPF_AND:
  430. + emit(ctx, and, dst, dst, src);
  431. + break;
  432. + /* dst = dst | src */
  433. + case BPF_OR:
  434. + emit(ctx, or, dst, dst, src);
  435. + break;
  436. + /* dst = dst ^ src */
  437. + case BPF_XOR:
  438. + emit(ctx, xor, dst, dst, src);
  439. + break;
  440. + /* dst = dst << src */
  441. + case BPF_LSH:
  442. + emit(ctx, sllv, dst, dst, src);
  443. + break;
  444. + /* dst = dst >> src */
  445. + case BPF_RSH:
  446. + emit(ctx, srlv, dst, dst, src);
  447. + break;
  448. + /* dst = dst >> src (arithmetic) */
  449. + case BPF_ARSH:
  450. + emit(ctx, srav, dst, dst, src);
  451. + break;
  452. + /* dst = dst + src */
  453. + case BPF_ADD:
  454. + emit(ctx, addu, dst, dst, src);
  455. + break;
  456. + /* dst = dst - src */
  457. + case BPF_SUB:
  458. + emit(ctx, subu, dst, dst, src);
  459. + break;
  460. + /* dst = dst * src */
  461. + case BPF_MUL:
  462. + if (cpu_has_mips32r1 || cpu_has_mips32r6) {
  463. + emit(ctx, mul, dst, dst, src);
  464. + } else {
  465. + emit(ctx, multu, dst, src);
  466. + emit(ctx, mflo, dst);
  467. + }
  468. + break;
  469. + /* dst = dst / src */
  470. + case BPF_DIV:
  471. + if (cpu_has_mips32r6) {
  472. + emit(ctx, divu_r6, dst, dst, src);
  473. + } else {
  474. + emit(ctx, divu, dst, src);
  475. + emit(ctx, mflo, dst);
  476. + }
  477. + break;
  478. + /* dst = dst % src */
  479. + case BPF_MOD:
  480. + if (cpu_has_mips32r6) {
  481. + emit(ctx, modu, dst, dst, src);
  482. + } else {
  483. + emit(ctx, divu, dst, src);
  484. + emit(ctx, mfhi, dst);
  485. + }
  486. + break;
  487. + }
  488. + clobber_reg(ctx, dst);
  489. +}
  490. +
  491. +/* Atomic read-modify-write (32-bit) */
  492. +void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code)
  493. +{
  494. + emit(ctx, ll, MIPS_R_T9, off, dst);
  495. + switch (code) {
  496. + case BPF_ADD:
  497. + emit(ctx, addu, MIPS_R_T8, MIPS_R_T9, src);
  498. + break;
  499. + case BPF_AND:
  500. + emit(ctx, and, MIPS_R_T8, MIPS_R_T9, src);
  501. + break;
  502. + case BPF_OR:
  503. + emit(ctx, or, MIPS_R_T8, MIPS_R_T9, src);
  504. + break;
  505. + case BPF_XOR:
  506. + emit(ctx, xor, MIPS_R_T8, MIPS_R_T9, src);
  507. + break;
  508. + }
  509. + emit(ctx, sc, MIPS_R_T8, off, dst);
  510. + emit(ctx, beqz, MIPS_R_T8, -16);
  511. + emit(ctx, nop); /* Delay slot */
  512. +}
  513. +
  514. +/* Atomic compare-and-exchange (32-bit) */
  515. +void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off)
  516. +{
  517. + emit(ctx, ll, MIPS_R_T9, off, dst);
  518. + emit(ctx, bne, MIPS_R_T9, res, 12);
  519. + emit(ctx, move, MIPS_R_T8, src); /* Delay slot */
  520. + emit(ctx, sc, MIPS_R_T8, off, dst);
  521. + emit(ctx, beqz, MIPS_R_T8, -20);
  522. + emit(ctx, move, res, MIPS_R_T9); /* Delay slot */
  523. + clobber_reg(ctx, res);
  524. +}
  525. +
  526. +/* Swap bytes and truncate a register word or half word */
  527. +void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width)
  528. +{
  529. + u8 tmp = MIPS_R_T8;
  530. + u8 msk = MIPS_R_T9;
  531. +
  532. + switch (width) {
  533. + /* Swap bytes in a word */
  534. + case 32:
  535. + if (cpu_has_mips32r2 || cpu_has_mips32r6) {
  536. + emit(ctx, wsbh, dst, dst);
  537. + emit(ctx, rotr, dst, dst, 16);
  538. + } else {
  539. + emit(ctx, sll, tmp, dst, 16); /* tmp = dst << 16 */
  540. + emit(ctx, srl, dst, dst, 16); /* dst = dst >> 16 */
  541. + emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */
  542. +
  543. + emit(ctx, lui, msk, 0xff); /* msk = 0x00ff0000 */
  544. + emit(ctx, ori, msk, msk, 0xff); /* msk = msk | 0xff */
  545. +
  546. + emit(ctx, and, tmp, dst, msk); /* tmp = dst & msk */
  547. + emit(ctx, sll, tmp, tmp, 8); /* tmp = tmp << 8 */
  548. + emit(ctx, srl, dst, dst, 8); /* dst = dst >> 8 */
  549. + emit(ctx, and, dst, dst, msk); /* dst = dst & msk */
  550. + emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */
  551. + }
  552. + break;
  553. + /* Swap bytes in a half word */
  554. + case 16:
  555. + if (cpu_has_mips32r2 || cpu_has_mips32r6) {
  556. + emit(ctx, wsbh, dst, dst);
  557. + emit(ctx, andi, dst, dst, 0xffff);
  558. + } else {
  559. + emit(ctx, andi, tmp, dst, 0xff00); /* t = d & 0xff00 */
  560. + emit(ctx, srl, tmp, tmp, 8); /* t = t >> 8 */
  561. + emit(ctx, andi, dst, dst, 0x00ff); /* d = d & 0x00ff */
  562. + emit(ctx, sll, dst, dst, 8); /* d = d << 8 */
  563. + emit(ctx, or, dst, dst, tmp); /* d = d | t */
  564. + }
  565. + break;
  566. + }
  567. + clobber_reg(ctx, dst);
  568. +}
  569. +
  570. +/* Validate jump immediate range */
  571. +bool valid_jmp_i(u8 op, s32 imm)
  572. +{
  573. + switch (op) {
  574. + case JIT_JNOP:
  575. + /* Immediate value not used */
  576. + return true;
  577. + case BPF_JEQ:
  578. + case BPF_JNE:
  579. + /* No immediate operation */
  580. + return false;
  581. + case BPF_JSET:
  582. + case JIT_JNSET:
  583. + /* imm must be 16 bits unsigned */
  584. + return imm >= 0 && imm <= 0xffff;
  585. + case BPF_JGE:
  586. + case BPF_JLT:
  587. + case BPF_JSGE:
  588. + case BPF_JSLT:
  589. + /* imm must be 16 bits */
  590. + return imm >= -0x8000 && imm <= 0x7fff;
  591. + case BPF_JGT:
  592. + case BPF_JLE:
  593. + case BPF_JSGT:
  594. + case BPF_JSLE:
  595. + /* imm + 1 must be 16 bits */
  596. + return imm >= -0x8001 && imm <= 0x7ffe;
  597. + }
  598. + return false;
  599. +}
  600. +
  601. +/* Invert a conditional jump operation */
  602. +static u8 invert_jmp(u8 op)
  603. +{
  604. + switch (op) {
  605. + case BPF_JA: return JIT_JNOP;
  606. + case BPF_JEQ: return BPF_JNE;
  607. + case BPF_JNE: return BPF_JEQ;
  608. + case BPF_JSET: return JIT_JNSET;
  609. + case BPF_JGT: return BPF_JLE;
  610. + case BPF_JGE: return BPF_JLT;
  611. + case BPF_JLT: return BPF_JGE;
  612. + case BPF_JLE: return BPF_JGT;
  613. + case BPF_JSGT: return BPF_JSLE;
  614. + case BPF_JSGE: return BPF_JSLT;
  615. + case BPF_JSLT: return BPF_JSGE;
  616. + case BPF_JSLE: return BPF_JSGT;
  617. + }
  618. + return 0;
  619. +}
  620. +
  621. +/* Prepare a PC-relative jump operation */
  622. +static void setup_jmp(struct jit_context *ctx, u8 bpf_op,
  623. + s16 bpf_off, u8 *jit_op, s32 *jit_off)
  624. +{
  625. + u32 *descp = &ctx->descriptors[ctx->bpf_index];
  626. + int op = bpf_op;
  627. + int offset = 0;
  628. +
  629. + /* Do not compute offsets on the first pass */
  630. + if (INDEX(*descp) == 0)
  631. + goto done;
  632. +
  633. + /* Skip jumps never taken */
  634. + if (bpf_op == JIT_JNOP)
  635. + goto done;
  636. +
  637. + /* Convert jumps always taken */
  638. + if (bpf_op == BPF_JA)
  639. + *descp |= JIT_DESC_CONVERT;
  640. +
  641. + /*
  642. + * Current ctx->jit_index points to the start of the branch preamble.
  643. + * Since the preamble differs among different branch conditionals,
  644. + * the current index cannot be used to compute the branch offset.
  645. + * Instead, we use the offset table value for the next instruction,
  646. + * which gives the index immediately after the branch delay slot.
  647. + */
  648. + if (!CONVERTED(*descp)) {
  649. + int target = ctx->bpf_index + bpf_off + 1;
  650. + int origin = ctx->bpf_index + 1;
  651. +
  652. + offset = (INDEX(ctx->descriptors[target]) -
  653. + INDEX(ctx->descriptors[origin]) + 1) * sizeof(u32);
  654. + }
  655. +
  656. + /*
  657. + * The PC-relative branch offset field on MIPS is 18 bits signed,
  658. + * so if the computed offset is larger than this we generate an
  659. + * absolute jump that we skip with an inverted conditional branch.
  660. + */
  661. + if (CONVERTED(*descp) || offset < -0x20000 || offset > 0x1ffff) {
  662. + offset = 3 * sizeof(u32);
  663. + op = invert_jmp(bpf_op);
  664. + ctx->changes += !CONVERTED(*descp);
  665. + *descp |= JIT_DESC_CONVERT;
  666. + }
  667. +
  668. +done:
  669. + *jit_off = offset;
  670. + *jit_op = op;
  671. +}
  672. +
  673. +/* Prepare a PC-relative jump operation with immediate conditional */
  674. +void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width,
  675. + u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off)
  676. +{
  677. + bool always = false;
  678. + bool never = false;
  679. +
  680. + switch (bpf_op) {
  681. + case BPF_JEQ:
  682. + case BPF_JNE:
  683. + break;
  684. + case BPF_JSET:
  685. + case BPF_JLT:
  686. + never = imm == 0;
  687. + break;
  688. + case BPF_JGE:
  689. + always = imm == 0;
  690. + break;
  691. + case BPF_JGT:
  692. + never = (u32)imm == U32_MAX;
  693. + break;
  694. + case BPF_JLE:
  695. + always = (u32)imm == U32_MAX;
  696. + break;
  697. + case BPF_JSGT:
  698. + never = imm == S32_MAX && width == 32;
  699. + break;
  700. + case BPF_JSGE:
  701. + always = imm == S32_MIN && width == 32;
  702. + break;
  703. + case BPF_JSLT:
  704. + never = imm == S32_MIN && width == 32;
  705. + break;
  706. + case BPF_JSLE:
  707. + always = imm == S32_MAX && width == 32;
  708. + break;
  709. + }
  710. +
  711. + if (never)
  712. + bpf_op = JIT_JNOP;
  713. + if (always)
  714. + bpf_op = BPF_JA;
  715. + setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off);
  716. +}
  717. +
  718. +/* Prepare a PC-relative jump operation with register conditional */
  719. +void setup_jmp_r(struct jit_context *ctx, bool same_reg,
  720. + u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off)
  721. +{
  722. + switch (bpf_op) {
  723. + case BPF_JSET:
  724. + break;
  725. + case BPF_JEQ:
  726. + case BPF_JGE:
  727. + case BPF_JLE:
  728. + case BPF_JSGE:
  729. + case BPF_JSLE:
  730. + if (same_reg)
  731. + bpf_op = BPF_JA;
  732. + break;
  733. + case BPF_JNE:
  734. + case BPF_JLT:
  735. + case BPF_JGT:
  736. + case BPF_JSGT:
  737. + case BPF_JSLT:
  738. + if (same_reg)
  739. + bpf_op = JIT_JNOP;
  740. + break;
  741. + }
  742. + setup_jmp(ctx, bpf_op, bpf_off, jit_op, jit_off);
  743. +}
  744. +
  745. +/* Finish a PC-relative jump operation */
  746. +int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off)
  747. +{
  748. + /* Emit conditional branch delay slot */
  749. + if (jit_op != JIT_JNOP)
  750. + emit(ctx, nop);
  751. + /*
  752. + * Emit an absolute long jump with delay slot,
  753. + * if the PC-relative branch was converted.
  754. + */
  755. + if (CONVERTED(ctx->descriptors[ctx->bpf_index])) {
  756. + int target = get_target(ctx, ctx->bpf_index + bpf_off + 1);
  757. +
  758. + if (target < 0)
  759. + return -1;
  760. + emit(ctx, j, target);
  761. + emit(ctx, nop);
  762. + }
  763. + return 0;
  764. +}
  765. +
  766. +/* Jump immediate (32-bit) */
  767. +void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op)
  768. +{
  769. + switch (op) {
  770. + /* No-op, used internally for branch optimization */
  771. + case JIT_JNOP:
  772. + break;
  773. + /* PC += off if dst & imm */
  774. + case BPF_JSET:
  775. + emit(ctx, andi, MIPS_R_T9, dst, (u16)imm);
  776. + emit(ctx, bnez, MIPS_R_T9, off);
  777. + break;
  778. + /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */
  779. + case JIT_JNSET:
  780. + emit(ctx, andi, MIPS_R_T9, dst, (u16)imm);
  781. + emit(ctx, beqz, MIPS_R_T9, off);
  782. + break;
  783. + /* PC += off if dst > imm */
  784. + case BPF_JGT:
  785. + emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1);
  786. + emit(ctx, beqz, MIPS_R_T9, off);
  787. + break;
  788. + /* PC += off if dst >= imm */
  789. + case BPF_JGE:
  790. + emit(ctx, sltiu, MIPS_R_T9, dst, imm);
  791. + emit(ctx, beqz, MIPS_R_T9, off);
  792. + break;
  793. + /* PC += off if dst < imm */
  794. + case BPF_JLT:
  795. + emit(ctx, sltiu, MIPS_R_T9, dst, imm);
  796. + emit(ctx, bnez, MIPS_R_T9, off);
  797. + break;
  798. + /* PC += off if dst <= imm */
  799. + case BPF_JLE:
  800. + emit(ctx, sltiu, MIPS_R_T9, dst, imm + 1);
  801. + emit(ctx, bnez, MIPS_R_T9, off);
  802. + break;
  803. + /* PC += off if dst > imm (signed) */
  804. + case BPF_JSGT:
  805. + emit(ctx, slti, MIPS_R_T9, dst, imm + 1);
  806. + emit(ctx, beqz, MIPS_R_T9, off);
  807. + break;
  808. + /* PC += off if dst >= imm (signed) */
  809. + case BPF_JSGE:
  810. + emit(ctx, slti, MIPS_R_T9, dst, imm);
  811. + emit(ctx, beqz, MIPS_R_T9, off);
  812. + break;
  813. + /* PC += off if dst < imm (signed) */
  814. + case BPF_JSLT:
  815. + emit(ctx, slti, MIPS_R_T9, dst, imm);
  816. + emit(ctx, bnez, MIPS_R_T9, off);
  817. + break;
  818. + /* PC += off if dst <= imm (signed) */
  819. + case BPF_JSLE:
  820. + emit(ctx, slti, MIPS_R_T9, dst, imm + 1);
  821. + emit(ctx, bnez, MIPS_R_T9, off);
  822. + break;
  823. + }
  824. +}
  825. +
  826. +/* Jump register (32-bit) */
  827. +void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op)
  828. +{
  829. + switch (op) {
  830. + /* No-op, used internally for branch optimization */
  831. + case JIT_JNOP:
  832. + break;
  833. + /* PC += off if dst == src */
  834. + case BPF_JEQ:
  835. + emit(ctx, beq, dst, src, off);
  836. + break;
  837. + /* PC += off if dst != src */
  838. + case BPF_JNE:
  839. + emit(ctx, bne, dst, src, off);
  840. + break;
  841. + /* PC += off if dst & src */
  842. + case BPF_JSET:
  843. + emit(ctx, and, MIPS_R_T9, dst, src);
  844. + emit(ctx, bnez, MIPS_R_T9, off);
  845. + break;
  846. + /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */
  847. + case JIT_JNSET:
  848. + emit(ctx, and, MIPS_R_T9, dst, src);
  849. + emit(ctx, beqz, MIPS_R_T9, off);
  850. + break;
  851. + /* PC += off if dst > src */
  852. + case BPF_JGT:
  853. + emit(ctx, sltu, MIPS_R_T9, src, dst);
  854. + emit(ctx, bnez, MIPS_R_T9, off);
  855. + break;
  856. + /* PC += off if dst >= src */
  857. + case BPF_JGE:
  858. + emit(ctx, sltu, MIPS_R_T9, dst, src);
  859. + emit(ctx, beqz, MIPS_R_T9, off);
  860. + break;
  861. + /* PC += off if dst < src */
  862. + case BPF_JLT:
  863. + emit(ctx, sltu, MIPS_R_T9, dst, src);
  864. + emit(ctx, bnez, MIPS_R_T9, off);
  865. + break;
  866. + /* PC += off if dst <= src */
  867. + case BPF_JLE:
  868. + emit(ctx, sltu, MIPS_R_T9, src, dst);
  869. + emit(ctx, beqz, MIPS_R_T9, off);
  870. + break;
  871. + /* PC += off if dst > src (signed) */
  872. + case BPF_JSGT:
  873. + emit(ctx, slt, MIPS_R_T9, src, dst);
  874. + emit(ctx, bnez, MIPS_R_T9, off);
  875. + break;
  876. + /* PC += off if dst >= src (signed) */
  877. + case BPF_JSGE:
  878. + emit(ctx, slt, MIPS_R_T9, dst, src);
  879. + emit(ctx, beqz, MIPS_R_T9, off);
  880. + break;
  881. + /* PC += off if dst < src (signed) */
  882. + case BPF_JSLT:
  883. + emit(ctx, slt, MIPS_R_T9, dst, src);
  884. + emit(ctx, bnez, MIPS_R_T9, off);
  885. + break;
  886. + /* PC += off if dst <= src (signed) */
  887. + case BPF_JSLE:
  888. + emit(ctx, slt, MIPS_R_T9, src, dst);
  889. + emit(ctx, beqz, MIPS_R_T9, off);
  890. + break;
  891. + }
  892. +}
  893. +
  894. +/* Jump always */
  895. +int emit_ja(struct jit_context *ctx, s16 off)
  896. +{
  897. + int target = get_target(ctx, ctx->bpf_index + off + 1);
  898. +
  899. + if (target < 0)
  900. + return -1;
  901. + emit(ctx, j, target);
  902. + emit(ctx, nop);
  903. + return 0;
  904. +}
  905. +
  906. +/* Jump to epilogue */
  907. +int emit_exit(struct jit_context *ctx)
  908. +{
  909. + int target = get_target(ctx, ctx->program->len);
  910. +
  911. + if (target < 0)
  912. + return -1;
  913. + emit(ctx, j, target);
  914. + emit(ctx, nop);
  915. + return 0;
  916. +}
  917. +
  918. +/* Build the program body from eBPF bytecode */
  919. +static int build_body(struct jit_context *ctx)
  920. +{
  921. + const struct bpf_prog *prog = ctx->program;
  922. + unsigned int i;
  923. +
  924. + ctx->stack_used = 0;
  925. + for (i = 0; i < prog->len; i++) {
  926. + const struct bpf_insn *insn = &prog->insnsi[i];
  927. + u32 *descp = &ctx->descriptors[i];
  928. + int ret;
  929. +
  930. + access_reg(ctx, insn->src_reg);
  931. + access_reg(ctx, insn->dst_reg);
  932. +
  933. + ctx->bpf_index = i;
  934. + if (ctx->target == NULL) {
  935. + ctx->changes += INDEX(*descp) != ctx->jit_index;
  936. + *descp &= JIT_DESC_CONVERT;
  937. + *descp |= ctx->jit_index;
  938. + }
  939. +
  940. + ret = build_insn(insn, ctx);
  941. + if (ret < 0)
  942. + return ret;
  943. +
  944. + if (ret > 0) {
  945. + i++;
  946. + if (ctx->target == NULL)
  947. + descp[1] = ctx->jit_index;
  948. + }
  949. + }
  950. +
  951. + /* Store the end offset, where the epilogue begins */
  952. + ctx->descriptors[prog->len] = ctx->jit_index;
  953. + return 0;
  954. +}
  955. +
  956. +/* Set the branch conversion flag on all instructions */
  957. +static void set_convert_flag(struct jit_context *ctx, bool enable)
  958. +{
  959. + const struct bpf_prog *prog = ctx->program;
  960. + u32 flag = enable ? JIT_DESC_CONVERT : 0;
  961. + unsigned int i;
  962. +
  963. + for (i = 0; i <= prog->len; i++)
  964. + ctx->descriptors[i] = INDEX(ctx->descriptors[i]) | flag;
  965. +}
  966. +
  967. +static void jit_fill_hole(void *area, unsigned int size)
  968. +{
  969. + u32 *p;
  970. +
  971. + /* We are guaranteed to have aligned memory. */
  972. + for (p = area; size >= sizeof(u32); size -= sizeof(u32))
  973. + uasm_i_break(&p, BRK_BUG); /* Increments p */
  974. +}
  975. +
  976. +bool bpf_jit_needs_zext(void)
  977. +{
  978. + return true;
  979. +}
  980. +
  981. +struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
  982. +{
  983. + struct bpf_prog *tmp, *orig_prog = prog;
  984. + struct bpf_binary_header *header = NULL;
  985. + struct jit_context ctx;
  986. + bool tmp_blinded = false;
  987. + unsigned int tmp_idx;
  988. + unsigned int image_size;
  989. + u8 *image_ptr;
  990. + int tries;
  991. +
  992. + /*
  993. + * If BPF JIT was not enabled then we must fall back to
  994. + * the interpreter.
  995. + */
  996. + if (!prog->jit_requested)
  997. + return orig_prog;
  998. + /*
  999. + * If constant blinding was enabled and we failed during blinding
  1000. + * then we must fall back to the interpreter. Otherwise, we save
  1001. + * the new JITed code.
  1002. + */
  1003. + tmp = bpf_jit_blind_constants(prog);
  1004. + if (IS_ERR(tmp))
  1005. + return orig_prog;
  1006. + if (tmp != prog) {
  1007. + tmp_blinded = true;
  1008. + prog = tmp;
  1009. + }
  1010. +
  1011. + memset(&ctx, 0, sizeof(ctx));
  1012. + ctx.program = prog;
  1013. +
  1014. + /*
  1015. + * If we are not able to allocate memory for descriptors[],
  1016. + * we must fall back to the interpreter
  1017. + */
  1018. + ctx.descriptors = kcalloc(prog->len + 1, sizeof(*ctx.descriptors),
  1019. + GFP_KERNEL);
  1020. + if (ctx.descriptors == NULL)
  1021. + goto out_err;
  1022. +
  1023. + /* First pass discovers used resources */
  1024. + if (build_body(&ctx) < 0)
  1025. + goto out_err;
  1026. + /*
  1027. + * Second pass computes instruction offsets.
  1028. + * If any PC-relative branches are out of range, a sequence of
  1029. + * a PC-relative branch + a jump is generated, and we have to
  1030. + * try again from the beginning to generate the new offsets.
  1031. + * This is done until no additional conversions are necessary.
  1032. + * The last two iterations are done with all branches being
  1033. + * converted, to guarantee offset table convergence within a
  1034. + * fixed number of iterations.
  1035. + */
  1036. + ctx.jit_index = 0;
  1037. + build_prologue(&ctx);
  1038. + tmp_idx = ctx.jit_index;
  1039. +
  1040. + tries = JIT_MAX_ITERATIONS;
  1041. + do {
  1042. + ctx.jit_index = tmp_idx;
  1043. + ctx.changes = 0;
  1044. + if (tries == 2)
  1045. + set_convert_flag(&ctx, true);
  1046. + if (build_body(&ctx) < 0)
  1047. + goto out_err;
  1048. + } while (ctx.changes > 0 && --tries > 0);
  1049. +
  1050. + if (WARN_ONCE(ctx.changes > 0, "JIT offsets failed to converge"))
  1051. + goto out_err;
  1052. +
  1053. + build_epilogue(&ctx, MIPS_R_RA);
  1054. +
  1055. + /* Now we know the size of the structure to make */
  1056. + image_size = sizeof(u32) * ctx.jit_index;
  1057. + header = bpf_jit_binary_alloc(image_size, &image_ptr,
  1058. + sizeof(u32), jit_fill_hole);
  1059. + /*
  1060. + * If we are not able to allocate memory for the structure,
  1061. + * we must fall back to the interpreter
  1062. + */
  1063. + if (header == NULL)
  1064. + goto out_err;
  1065. +
  1066. + /* Actual pass to generate final JIT code */
  1067. + ctx.target = (u32 *)image_ptr;
  1068. + ctx.jit_index = 0;
  1069. +
  1070. + /*
  1071. + * If building the JITed code fails somehow,
  1072. + * we fall back to the interpreter.
  1073. + */
  1074. + build_prologue(&ctx);
  1075. + if (build_body(&ctx) < 0)
  1076. + goto out_err;
  1077. + build_epilogue(&ctx, MIPS_R_RA);
  1078. +
  1079. + /* Populate line info meta data */
  1080. + set_convert_flag(&ctx, false);
  1081. + bpf_prog_fill_jited_linfo(prog, &ctx.descriptors[1]);
  1082. +
  1083. + /* Set as read-only exec and flush instruction cache */
  1084. + bpf_jit_binary_lock_ro(header);
  1085. + flush_icache_range((unsigned long)header,
  1086. + (unsigned long)&ctx.target[ctx.jit_index]);
  1087. +
  1088. + if (bpf_jit_enable > 1)
  1089. + bpf_jit_dump(prog->len, image_size, 2, ctx.target);
  1090. +
  1091. + prog->bpf_func = (void *)ctx.target;
  1092. + prog->jited = 1;
  1093. + prog->jited_len = image_size;
  1094. +
  1095. +out:
  1096. + if (tmp_blinded)
  1097. + bpf_jit_prog_release_other(prog, prog == orig_prog ?
  1098. + tmp : orig_prog);
  1099. + kfree(ctx.descriptors);
  1100. + return prog;
  1101. +
  1102. +out_err:
  1103. + prog = orig_prog;
  1104. + if (header)
  1105. + bpf_jit_binary_free(header);
  1106. + goto out;
  1107. +}
  1108. --- /dev/null
  1109. +++ b/arch/mips/net/bpf_jit_comp.h
  1110. @@ -0,0 +1,211 @@
  1111. +/* SPDX-License-Identifier: GPL-2.0-only */
  1112. +/*
  1113. + * Just-In-Time compiler for eBPF bytecode on 32-bit and 64-bit MIPS.
  1114. + *
  1115. + * Copyright (c) 2021 Anyfi Networks AB.
  1116. + * Author: Johan Almbladh <[email protected]>
  1117. + *
  1118. + * Based on code and ideas from
  1119. + * Copyright (c) 2017 Cavium, Inc.
  1120. + * Copyright (c) 2017 Shubham Bansal <[email protected]>
  1121. + * Copyright (c) 2011 Mircea Gherzan <[email protected]>
  1122. + */
  1123. +
  1124. +#ifndef _BPF_JIT_COMP_H
  1125. +#define _BPF_JIT_COMP_H
  1126. +
  1127. +/* MIPS registers */
  1128. +#define MIPS_R_ZERO 0 /* Const zero */
  1129. +#define MIPS_R_AT 1 /* Asm temp */
  1130. +#define MIPS_R_V0 2 /* Result */
  1131. +#define MIPS_R_V1 3 /* Result */
  1132. +#define MIPS_R_A0 4 /* Argument */
  1133. +#define MIPS_R_A1 5 /* Argument */
  1134. +#define MIPS_R_A2 6 /* Argument */
  1135. +#define MIPS_R_A3 7 /* Argument */
  1136. +#define MIPS_R_A4 8 /* Arg (n64) */
  1137. +#define MIPS_R_A5 9 /* Arg (n64) */
  1138. +#define MIPS_R_A6 10 /* Arg (n64) */
  1139. +#define MIPS_R_A7 11 /* Arg (n64) */
  1140. +#define MIPS_R_T0 8 /* Temp (o32) */
  1141. +#define MIPS_R_T1 9 /* Temp (o32) */
  1142. +#define MIPS_R_T2 10 /* Temp (o32) */
  1143. +#define MIPS_R_T3 11 /* Temp (o32) */
  1144. +#define MIPS_R_T4 12 /* Temporary */
  1145. +#define MIPS_R_T5 13 /* Temporary */
  1146. +#define MIPS_R_T6 14 /* Temporary */
  1147. +#define MIPS_R_T7 15 /* Temporary */
  1148. +#define MIPS_R_S0 16 /* Saved */
  1149. +#define MIPS_R_S1 17 /* Saved */
  1150. +#define MIPS_R_S2 18 /* Saved */
  1151. +#define MIPS_R_S3 19 /* Saved */
  1152. +#define MIPS_R_S4 20 /* Saved */
  1153. +#define MIPS_R_S5 21 /* Saved */
  1154. +#define MIPS_R_S6 22 /* Saved */
  1155. +#define MIPS_R_S7 23 /* Saved */
  1156. +#define MIPS_R_T8 24 /* Temporary */
  1157. +#define MIPS_R_T9 25 /* Temporary */
  1158. +/* MIPS_R_K0 26 Reserved */
  1159. +/* MIPS_R_K1 27 Reserved */
  1160. +#define MIPS_R_GP 28 /* Global ptr */
  1161. +#define MIPS_R_SP 29 /* Stack ptr */
  1162. +#define MIPS_R_FP 30 /* Frame ptr */
  1163. +#define MIPS_R_RA 31 /* Return */
  1164. +
  1165. +/*
  1166. + * Jump address mask for immediate jumps. The four most significant bits
  1167. + * must be equal to PC.
  1168. + */
  1169. +#define MIPS_JMP_MASK 0x0fffffffUL
  1170. +
  1171. +/* Maximum number of iterations in offset table computation */
  1172. +#define JIT_MAX_ITERATIONS 8
  1173. +
  1174. +/*
  1175. + * Jump pseudo-instructions used internally
  1176. + * for branch conversion and branch optimization.
  1177. + */
  1178. +#define JIT_JNSET 0xe0
  1179. +#define JIT_JNOP 0xf0
  1180. +
  1181. +/* Descriptor flag for PC-relative branch conversion */
  1182. +#define JIT_DESC_CONVERT BIT(31)
  1183. +
  1184. +/* JIT context for an eBPF program */
  1185. +struct jit_context {
  1186. + struct bpf_prog *program; /* The eBPF program being JITed */
  1187. + u32 *descriptors; /* eBPF to JITed CPU insn descriptors */
  1188. + u32 *target; /* JITed code buffer */
  1189. + u32 bpf_index; /* Index of current BPF program insn */
  1190. + u32 jit_index; /* Index of current JIT target insn */
  1191. + u32 changes; /* Number of PC-relative branch conv */
  1192. + u32 accessed; /* Bit mask of read eBPF registers */
  1193. + u32 clobbered; /* Bit mask of modified CPU registers */
  1194. + u32 stack_size; /* Total allocated stack size in bytes */
  1195. + u32 saved_size; /* Size of callee-saved registers */
  1196. + u32 stack_used; /* Stack size used for function calls */
  1197. +};
  1198. +
  1199. +/* Emit the instruction if the JIT memory space has been allocated */
  1200. +#define emit(ctx, func, ...) \
  1201. +do { \
  1202. + if ((ctx)->target != NULL) { \
  1203. + u32 *p = &(ctx)->target[ctx->jit_index]; \
  1204. + uasm_i_##func(&p, ##__VA_ARGS__); \
  1205. + } \
  1206. + (ctx)->jit_index++; \
  1207. +} while (0)
  1208. +
  1209. +/*
  1210. + * Mark a BPF register as accessed, it needs to be
  1211. + * initialized by the program if expected, e.g. FP.
  1212. + */
  1213. +static inline void access_reg(struct jit_context *ctx, u8 reg)
  1214. +{
  1215. + ctx->accessed |= BIT(reg);
  1216. +}
  1217. +
  1218. +/*
  1219. + * Mark a CPU register as clobbered, it needs to be
  1220. + * saved/restored by the program if callee-saved.
  1221. + */
  1222. +static inline void clobber_reg(struct jit_context *ctx, u8 reg)
  1223. +{
  1224. + ctx->clobbered |= BIT(reg);
  1225. +}
  1226. +
  1227. +/*
  1228. + * Push registers on the stack, starting at a given depth from the stack
  1229. + * pointer and increasing. The next depth to be written is returned.
  1230. + */
  1231. +int push_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth);
  1232. +
  1233. +/*
  1234. + * Pop registers from the stack, starting at a given depth from the stack
  1235. + * pointer and increasing. The next depth to be read is returned.
  1236. + */
  1237. +int pop_regs(struct jit_context *ctx, u32 mask, u32 excl, int depth);
  1238. +
  1239. +/* Compute the 28-bit jump target address from a BPF program location */
  1240. +int get_target(struct jit_context *ctx, u32 loc);
  1241. +
  1242. +/* Compute the PC-relative offset to relative BPF program offset */
  1243. +int get_offset(const struct jit_context *ctx, int off);
  1244. +
  1245. +/* dst = imm (32-bit) */
  1246. +void emit_mov_i(struct jit_context *ctx, u8 dst, s32 imm);
  1247. +
  1248. +/* dst = src (32-bit) */
  1249. +void emit_mov_r(struct jit_context *ctx, u8 dst, u8 src);
  1250. +
  1251. +/* Validate ALU/ALU64 immediate range */
  1252. +bool valid_alu_i(u8 op, s32 imm);
  1253. +
  1254. +/* Rewrite ALU/ALU64 immediate operation */
  1255. +bool rewrite_alu_i(u8 op, s32 imm, u8 *alu, s32 *val);
  1256. +
  1257. +/* ALU immediate operation (32-bit) */
  1258. +void emit_alu_i(struct jit_context *ctx, u8 dst, s32 imm, u8 op);
  1259. +
  1260. +/* ALU register operation (32-bit) */
  1261. +void emit_alu_r(struct jit_context *ctx, u8 dst, u8 src, u8 op);
  1262. +
  1263. +/* Atomic read-modify-write (32-bit) */
  1264. +void emit_atomic_r(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 code);
  1265. +
  1266. +/* Atomic compare-and-exchange (32-bit) */
  1267. +void emit_cmpxchg_r(struct jit_context *ctx, u8 dst, u8 src, u8 res, s16 off);
  1268. +
  1269. +/* Swap bytes and truncate a register word or half word */
  1270. +void emit_bswap_r(struct jit_context *ctx, u8 dst, u32 width);
  1271. +
  1272. +/* Validate JMP/JMP32 immediate range */
  1273. +bool valid_jmp_i(u8 op, s32 imm);
  1274. +
  1275. +/* Prepare a PC-relative jump operation with immediate conditional */
  1276. +void setup_jmp_i(struct jit_context *ctx, s32 imm, u8 width,
  1277. + u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off);
  1278. +
  1279. +/* Prepare a PC-relative jump operation with register conditional */
  1280. +void setup_jmp_r(struct jit_context *ctx, bool same_reg,
  1281. + u8 bpf_op, s16 bpf_off, u8 *jit_op, s32 *jit_off);
  1282. +
  1283. +/* Finish a PC-relative jump operation */
  1284. +int finish_jmp(struct jit_context *ctx, u8 jit_op, s16 bpf_off);
  1285. +
  1286. +/* Conditional JMP/JMP32 immediate */
  1287. +void emit_jmp_i(struct jit_context *ctx, u8 dst, s32 imm, s32 off, u8 op);
  1288. +
  1289. +/* Conditional JMP/JMP32 register */
  1290. +void emit_jmp_r(struct jit_context *ctx, u8 dst, u8 src, s32 off, u8 op);
  1291. +
  1292. +/* Jump always */
  1293. +int emit_ja(struct jit_context *ctx, s16 off);
  1294. +
  1295. +/* Jump to epilogue */
  1296. +int emit_exit(struct jit_context *ctx);
  1297. +
  1298. +/*
  1299. + * Build program prologue to set up the stack and registers.
  1300. + * This function is implemented separately for 32-bit and 64-bit JITs.
  1301. + */
  1302. +void build_prologue(struct jit_context *ctx);
  1303. +
  1304. +/*
  1305. + * Build the program epilogue to restore the stack and registers.
  1306. + * This function is implemented separately for 32-bit and 64-bit JITs.
  1307. + */
  1308. +void build_epilogue(struct jit_context *ctx, int dest_reg);
  1309. +
  1310. +/*
  1311. + * Convert an eBPF instruction to native instruction, i.e
  1312. + * JITs an eBPF instruction.
  1313. + * Returns :
  1314. + * 0 - Successfully JITed an 8-byte eBPF instruction
  1315. + * >0 - Successfully JITed a 16-byte eBPF instruction
  1316. + * <0 - Failed to JIT.
  1317. + * This function is implemented separately for 32-bit and 64-bit JITs.
  1318. + */
  1319. +int build_insn(const struct bpf_insn *insn, struct jit_context *ctx);
  1320. +
  1321. +#endif /* _BPF_JIT_COMP_H */
  1322. --- /dev/null
  1323. +++ b/arch/mips/net/bpf_jit_comp32.c
  1324. @@ -0,0 +1,1741 @@
  1325. +// SPDX-License-Identifier: GPL-2.0-only
  1326. +/*
  1327. + * Just-In-Time compiler for eBPF bytecode on MIPS.
  1328. + * Implementation of JIT functions for 32-bit CPUs.
  1329. + *
  1330. + * Copyright (c) 2021 Anyfi Networks AB.
  1331. + * Author: Johan Almbladh <[email protected]>
  1332. + *
  1333. + * Based on code and ideas from
  1334. + * Copyright (c) 2017 Cavium, Inc.
  1335. + * Copyright (c) 2017 Shubham Bansal <[email protected]>
  1336. + * Copyright (c) 2011 Mircea Gherzan <[email protected]>
  1337. + */
  1338. +
  1339. +#include <linux/math64.h>
  1340. +#include <linux/errno.h>
  1341. +#include <linux/filter.h>
  1342. +#include <linux/bpf.h>
  1343. +#include <asm/cpu-features.h>
  1344. +#include <asm/isa-rev.h>
  1345. +#include <asm/uasm.h>
  1346. +
  1347. +#include "bpf_jit_comp.h"
  1348. +
  1349. +/* MIPS a4-a7 are not available in the o32 ABI */
  1350. +#undef MIPS_R_A4
  1351. +#undef MIPS_R_A5
  1352. +#undef MIPS_R_A6
  1353. +#undef MIPS_R_A7
  1354. +
  1355. +/* Stack is 8-byte aligned in o32 ABI */
  1356. +#define MIPS_STACK_ALIGNMENT 8
  1357. +
  1358. +/*
  1359. + * The top 16 bytes of a stack frame is reserved for the callee in O32 ABI.
  1360. + * This corresponds to stack space for register arguments a0-a3.
  1361. + */
  1362. +#define JIT_RESERVED_STACK 16
  1363. +
  1364. +/* Temporary 64-bit register used by JIT */
  1365. +#define JIT_REG_TMP MAX_BPF_JIT_REG
  1366. +
  1367. +/*
  1368. + * Number of prologue bytes to skip when doing a tail call.
  1369. + * Tail call count (TCC) initialization (8 bytes) always, plus
  1370. + * R0-to-v0 assignment (4 bytes) if big endian.
  1371. + */
  1372. +#ifdef __BIG_ENDIAN
  1373. +#define JIT_TCALL_SKIP 12
  1374. +#else
  1375. +#define JIT_TCALL_SKIP 8
  1376. +#endif
  1377. +
  1378. +/* CPU registers holding the callee return value */
  1379. +#define JIT_RETURN_REGS \
  1380. + (BIT(MIPS_R_V0) | \
  1381. + BIT(MIPS_R_V1))
  1382. +
  1383. +/* CPU registers arguments passed to callee directly */
  1384. +#define JIT_ARG_REGS \
  1385. + (BIT(MIPS_R_A0) | \
  1386. + BIT(MIPS_R_A1) | \
  1387. + BIT(MIPS_R_A2) | \
  1388. + BIT(MIPS_R_A3))
  1389. +
  1390. +/* CPU register arguments passed to callee on stack */
  1391. +#define JIT_STACK_REGS \
  1392. + (BIT(MIPS_R_T0) | \
  1393. + BIT(MIPS_R_T1) | \
  1394. + BIT(MIPS_R_T2) | \
  1395. + BIT(MIPS_R_T3) | \
  1396. + BIT(MIPS_R_T4) | \
  1397. + BIT(MIPS_R_T5))
  1398. +
  1399. +/* Caller-saved CPU registers */
  1400. +#define JIT_CALLER_REGS \
  1401. + (JIT_RETURN_REGS | \
  1402. + JIT_ARG_REGS | \
  1403. + JIT_STACK_REGS)
  1404. +
  1405. +/* Callee-saved CPU registers */
  1406. +#define JIT_CALLEE_REGS \
  1407. + (BIT(MIPS_R_S0) | \
  1408. + BIT(MIPS_R_S1) | \
  1409. + BIT(MIPS_R_S2) | \
  1410. + BIT(MIPS_R_S3) | \
  1411. + BIT(MIPS_R_S4) | \
  1412. + BIT(MIPS_R_S5) | \
  1413. + BIT(MIPS_R_S6) | \
  1414. + BIT(MIPS_R_S7) | \
  1415. + BIT(MIPS_R_GP) | \
  1416. + BIT(MIPS_R_FP) | \
  1417. + BIT(MIPS_R_RA))
  1418. +
  1419. +/*
  1420. + * Mapping of 64-bit eBPF registers to 32-bit native MIPS registers.
  1421. + *
  1422. + * 1) Native register pairs are ordered according to CPU endianness, following
  1423. + * the MIPS convention for passing 64-bit arguments and return values.
  1424. + * 2) The eBPF return value, arguments and callee-saved registers are mapped
  1425. + * to their native MIPS equivalents.
  1426. + * 3) Since the 32 highest bits in the eBPF FP register are always zero,
  1427. + * only one general-purpose register is actually needed for the mapping.
  1428. + * We use the fp register for this purpose, and map the highest bits to
  1429. + * the MIPS register r0 (zero).
  1430. + * 4) We use the MIPS gp and at registers as internal temporary registers
  1431. + * for constant blinding. The gp register is callee-saved.
  1432. + * 5) One 64-bit temporary register is mapped for use when sign-extending
  1433. + * immediate operands. MIPS registers t6-t9 are available to the JIT
  1434. + * as temporaries when implementing complex 64-bit operations.
  1435. + *
  1436. + * With this scheme all eBPF registers are being mapped to native MIPS
  1437. + * registers without having to use any stack scratch space. The direct
  1438. + * register mapping (2) simplifies the handling of function calls.
  1439. + */
  1440. +static const u8 bpf2mips32[][2] = {
  1441. + /* Return value from in-kernel function, and exit value from eBPF */
  1442. + [BPF_REG_0] = {MIPS_R_V1, MIPS_R_V0},
  1443. + /* Arguments from eBPF program to in-kernel function */
  1444. + [BPF_REG_1] = {MIPS_R_A1, MIPS_R_A0},
  1445. + [BPF_REG_2] = {MIPS_R_A3, MIPS_R_A2},
  1446. + /* Remaining arguments, to be passed on the stack per O32 ABI */
  1447. + [BPF_REG_3] = {MIPS_R_T1, MIPS_R_T0},
  1448. + [BPF_REG_4] = {MIPS_R_T3, MIPS_R_T2},
  1449. + [BPF_REG_5] = {MIPS_R_T5, MIPS_R_T4},
  1450. + /* Callee-saved registers that in-kernel function will preserve */
  1451. + [BPF_REG_6] = {MIPS_R_S1, MIPS_R_S0},
  1452. + [BPF_REG_7] = {MIPS_R_S3, MIPS_R_S2},
  1453. + [BPF_REG_8] = {MIPS_R_S5, MIPS_R_S4},
  1454. + [BPF_REG_9] = {MIPS_R_S7, MIPS_R_S6},
  1455. + /* Read-only frame pointer to access the eBPF stack */
  1456. +#ifdef __BIG_ENDIAN
  1457. + [BPF_REG_FP] = {MIPS_R_FP, MIPS_R_ZERO},
  1458. +#else
  1459. + [BPF_REG_FP] = {MIPS_R_ZERO, MIPS_R_FP},
  1460. +#endif
  1461. + /* Temporary register for blinding constants */
  1462. + [BPF_REG_AX] = {MIPS_R_GP, MIPS_R_AT},
  1463. + /* Temporary register for internal JIT use */
  1464. + [JIT_REG_TMP] = {MIPS_R_T7, MIPS_R_T6},
  1465. +};
  1466. +
  1467. +/* Get low CPU register for a 64-bit eBPF register mapping */
  1468. +static inline u8 lo(const u8 reg[])
  1469. +{
  1470. +#ifdef __BIG_ENDIAN
  1471. + return reg[0];
  1472. +#else
  1473. + return reg[1];
  1474. +#endif
  1475. +}
  1476. +
  1477. +/* Get high CPU register for a 64-bit eBPF register mapping */
  1478. +static inline u8 hi(const u8 reg[])
  1479. +{
  1480. +#ifdef __BIG_ENDIAN
  1481. + return reg[1];
  1482. +#else
  1483. + return reg[0];
  1484. +#endif
  1485. +}
  1486. +
  1487. +/*
  1488. + * Mark a 64-bit CPU register pair as clobbered, it needs to be
  1489. + * saved/restored by the program if callee-saved.
  1490. + */
  1491. +static void clobber_reg64(struct jit_context *ctx, const u8 reg[])
  1492. +{
  1493. + clobber_reg(ctx, reg[0]);
  1494. + clobber_reg(ctx, reg[1]);
  1495. +}
  1496. +
  1497. +/* dst = imm (sign-extended) */
  1498. +static void emit_mov_se_i64(struct jit_context *ctx, const u8 dst[], s32 imm)
  1499. +{
  1500. + emit_mov_i(ctx, lo(dst), imm);
  1501. + if (imm < 0)
  1502. + emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1);
  1503. + else
  1504. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1505. + clobber_reg64(ctx, dst);
  1506. +}
  1507. +
  1508. +/* Zero extension, if verifier does not do it for us */
  1509. +static void emit_zext_ver(struct jit_context *ctx, const u8 dst[])
  1510. +{
  1511. + if (!ctx->program->aux->verifier_zext) {
  1512. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1513. + clobber_reg(ctx, hi(dst));
  1514. + }
  1515. +}
  1516. +
  1517. +/* Load delay slot, if ISA mandates it */
  1518. +static void emit_load_delay(struct jit_context *ctx)
  1519. +{
  1520. + if (!cpu_has_mips_2_3_4_5_r)
  1521. + emit(ctx, nop);
  1522. +}
  1523. +
  1524. +/* ALU immediate operation (64-bit) */
  1525. +static void emit_alu_i64(struct jit_context *ctx,
  1526. + const u8 dst[], s32 imm, u8 op)
  1527. +{
  1528. + u8 src = MIPS_R_T6;
  1529. +
  1530. + /*
  1531. + * ADD/SUB with all but the max negative imm can be handled by
  1532. + * inverting the operation and the imm value, saving one insn.
  1533. + */
  1534. + if (imm > S32_MIN && imm < 0)
  1535. + switch (op) {
  1536. + case BPF_ADD:
  1537. + op = BPF_SUB;
  1538. + imm = -imm;
  1539. + break;
  1540. + case BPF_SUB:
  1541. + op = BPF_ADD;
  1542. + imm = -imm;
  1543. + break;
  1544. + }
  1545. +
  1546. + /* Move immediate to temporary register */
  1547. + emit_mov_i(ctx, src, imm);
  1548. +
  1549. + switch (op) {
  1550. + /* dst = dst + imm */
  1551. + case BPF_ADD:
  1552. + emit(ctx, addu, lo(dst), lo(dst), src);
  1553. + emit(ctx, sltu, MIPS_R_T9, lo(dst), src);
  1554. + emit(ctx, addu, hi(dst), hi(dst), MIPS_R_T9);
  1555. + if (imm < 0)
  1556. + emit(ctx, addiu, hi(dst), hi(dst), -1);
  1557. + break;
  1558. + /* dst = dst - imm */
  1559. + case BPF_SUB:
  1560. + emit(ctx, sltu, MIPS_R_T9, lo(dst), src);
  1561. + emit(ctx, subu, lo(dst), lo(dst), src);
  1562. + emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9);
  1563. + if (imm < 0)
  1564. + emit(ctx, addiu, hi(dst), hi(dst), 1);
  1565. + break;
  1566. + /* dst = dst | imm */
  1567. + case BPF_OR:
  1568. + emit(ctx, or, lo(dst), lo(dst), src);
  1569. + if (imm < 0)
  1570. + emit(ctx, addiu, hi(dst), MIPS_R_ZERO, -1);
  1571. + break;
  1572. + /* dst = dst & imm */
  1573. + case BPF_AND:
  1574. + emit(ctx, and, lo(dst), lo(dst), src);
  1575. + if (imm >= 0)
  1576. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1577. + break;
  1578. + /* dst = dst ^ imm */
  1579. + case BPF_XOR:
  1580. + emit(ctx, xor, lo(dst), lo(dst), src);
  1581. + if (imm < 0) {
  1582. + emit(ctx, subu, hi(dst), MIPS_R_ZERO, hi(dst));
  1583. + emit(ctx, addiu, hi(dst), hi(dst), -1);
  1584. + }
  1585. + break;
  1586. + }
  1587. + clobber_reg64(ctx, dst);
  1588. +}
  1589. +
  1590. +/* ALU register operation (64-bit) */
  1591. +static void emit_alu_r64(struct jit_context *ctx,
  1592. + const u8 dst[], const u8 src[], u8 op)
  1593. +{
  1594. + switch (BPF_OP(op)) {
  1595. + /* dst = dst + src */
  1596. + case BPF_ADD:
  1597. + if (src == dst) {
  1598. + emit(ctx, srl, MIPS_R_T9, lo(dst), 31);
  1599. + emit(ctx, addu, lo(dst), lo(dst), lo(dst));
  1600. + } else {
  1601. + emit(ctx, addu, lo(dst), lo(dst), lo(src));
  1602. + emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src));
  1603. + }
  1604. + emit(ctx, addu, hi(dst), hi(dst), hi(src));
  1605. + emit(ctx, addu, hi(dst), hi(dst), MIPS_R_T9);
  1606. + break;
  1607. + /* dst = dst - src */
  1608. + case BPF_SUB:
  1609. + emit(ctx, sltu, MIPS_R_T9, lo(dst), lo(src));
  1610. + emit(ctx, subu, lo(dst), lo(dst), lo(src));
  1611. + emit(ctx, subu, hi(dst), hi(dst), hi(src));
  1612. + emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9);
  1613. + break;
  1614. + /* dst = dst | src */
  1615. + case BPF_OR:
  1616. + emit(ctx, or, lo(dst), lo(dst), lo(src));
  1617. + emit(ctx, or, hi(dst), hi(dst), hi(src));
  1618. + break;
  1619. + /* dst = dst & src */
  1620. + case BPF_AND:
  1621. + emit(ctx, and, lo(dst), lo(dst), lo(src));
  1622. + emit(ctx, and, hi(dst), hi(dst), hi(src));
  1623. + break;
  1624. + /* dst = dst ^ src */
  1625. + case BPF_XOR:
  1626. + emit(ctx, xor, lo(dst), lo(dst), lo(src));
  1627. + emit(ctx, xor, hi(dst), hi(dst), hi(src));
  1628. + break;
  1629. + }
  1630. + clobber_reg64(ctx, dst);
  1631. +}
  1632. +
  1633. +/* ALU negate (64-bit) */
  1634. +static void emit_neg_i64(struct jit_context *ctx, const u8 dst[])
  1635. +{
  1636. + emit(ctx, sltu, MIPS_R_T9, MIPS_R_ZERO, lo(dst));
  1637. + emit(ctx, subu, lo(dst), MIPS_R_ZERO, lo(dst));
  1638. + emit(ctx, subu, hi(dst), MIPS_R_ZERO, hi(dst));
  1639. + emit(ctx, subu, hi(dst), hi(dst), MIPS_R_T9);
  1640. +
  1641. + clobber_reg64(ctx, dst);
  1642. +}
  1643. +
  1644. +/* ALU shift immediate (64-bit) */
  1645. +static void emit_shift_i64(struct jit_context *ctx,
  1646. + const u8 dst[], u32 imm, u8 op)
  1647. +{
  1648. + switch (BPF_OP(op)) {
  1649. + /* dst = dst << imm */
  1650. + case BPF_LSH:
  1651. + if (imm < 32) {
  1652. + emit(ctx, srl, MIPS_R_T9, lo(dst), 32 - imm);
  1653. + emit(ctx, sll, lo(dst), lo(dst), imm);
  1654. + emit(ctx, sll, hi(dst), hi(dst), imm);
  1655. + emit(ctx, or, hi(dst), hi(dst), MIPS_R_T9);
  1656. + } else {
  1657. + emit(ctx, sll, hi(dst), lo(dst), imm - 32);
  1658. + emit(ctx, move, lo(dst), MIPS_R_ZERO);
  1659. + }
  1660. + break;
  1661. + /* dst = dst >> imm */
  1662. + case BPF_RSH:
  1663. + if (imm < 32) {
  1664. + emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm);
  1665. + emit(ctx, srl, lo(dst), lo(dst), imm);
  1666. + emit(ctx, srl, hi(dst), hi(dst), imm);
  1667. + emit(ctx, or, lo(dst), lo(dst), MIPS_R_T9);
  1668. + } else {
  1669. + emit(ctx, srl, lo(dst), hi(dst), imm - 32);
  1670. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1671. + }
  1672. + break;
  1673. + /* dst = dst >> imm (arithmetic) */
  1674. + case BPF_ARSH:
  1675. + if (imm < 32) {
  1676. + emit(ctx, sll, MIPS_R_T9, hi(dst), 32 - imm);
  1677. + emit(ctx, srl, lo(dst), lo(dst), imm);
  1678. + emit(ctx, sra, hi(dst), hi(dst), imm);
  1679. + emit(ctx, or, lo(dst), lo(dst), MIPS_R_T9);
  1680. + } else {
  1681. + emit(ctx, sra, lo(dst), hi(dst), imm - 32);
  1682. + emit(ctx, sra, hi(dst), hi(dst), 31);
  1683. + }
  1684. + break;
  1685. + }
  1686. + clobber_reg64(ctx, dst);
  1687. +}
  1688. +
  1689. +/* ALU shift register (64-bit) */
  1690. +static void emit_shift_r64(struct jit_context *ctx,
  1691. + const u8 dst[], u8 src, u8 op)
  1692. +{
  1693. + u8 t1 = MIPS_R_T8;
  1694. + u8 t2 = MIPS_R_T9;
  1695. +
  1696. + emit(ctx, andi, t1, src, 32); /* t1 = src & 32 */
  1697. + emit(ctx, beqz, t1, 16); /* PC += 16 if t1 == 0 */
  1698. + emit(ctx, nor, t2, src, MIPS_R_ZERO); /* t2 = ~src (delay slot) */
  1699. +
  1700. + switch (BPF_OP(op)) {
  1701. + /* dst = dst << src */
  1702. + case BPF_LSH:
  1703. + /* Next: shift >= 32 */
  1704. + emit(ctx, sllv, hi(dst), lo(dst), src); /* dh = dl << src */
  1705. + emit(ctx, move, lo(dst), MIPS_R_ZERO); /* dl = 0 */
  1706. + emit(ctx, b, 20); /* PC += 20 */
  1707. + /* +16: shift < 32 */
  1708. + emit(ctx, srl, t1, lo(dst), 1); /* t1 = dl >> 1 */
  1709. + emit(ctx, srlv, t1, t1, t2); /* t1 = t1 >> t2 */
  1710. + emit(ctx, sllv, lo(dst), lo(dst), src); /* dl = dl << src */
  1711. + emit(ctx, sllv, hi(dst), hi(dst), src); /* dh = dh << src */
  1712. + emit(ctx, or, hi(dst), hi(dst), t1); /* dh = dh | t1 */
  1713. + break;
  1714. + /* dst = dst >> src */
  1715. + case BPF_RSH:
  1716. + /* Next: shift >= 32 */
  1717. + emit(ctx, srlv, lo(dst), hi(dst), src); /* dl = dh >> src */
  1718. + emit(ctx, move, hi(dst), MIPS_R_ZERO); /* dh = 0 */
  1719. + emit(ctx, b, 20); /* PC += 20 */
  1720. + /* +16: shift < 32 */
  1721. + emit(ctx, sll, t1, hi(dst), 1); /* t1 = dh << 1 */
  1722. + emit(ctx, sllv, t1, t1, t2); /* t1 = t1 << t2 */
  1723. + emit(ctx, srlv, lo(dst), lo(dst), src); /* dl = dl >> src */
  1724. + emit(ctx, srlv, hi(dst), hi(dst), src); /* dh = dh >> src */
  1725. + emit(ctx, or, lo(dst), lo(dst), t1); /* dl = dl | t1 */
  1726. + break;
  1727. + /* dst = dst >> src (arithmetic) */
  1728. + case BPF_ARSH:
  1729. + /* Next: shift >= 32 */
  1730. + emit(ctx, srav, lo(dst), hi(dst), src); /* dl = dh >>a src */
  1731. + emit(ctx, sra, hi(dst), hi(dst), 31); /* dh = dh >>a 31 */
  1732. + emit(ctx, b, 20); /* PC += 20 */
  1733. + /* +16: shift < 32 */
  1734. + emit(ctx, sll, t1, hi(dst), 1); /* t1 = dh << 1 */
  1735. + emit(ctx, sllv, t1, t1, t2); /* t1 = t1 << t2 */
  1736. + emit(ctx, srlv, lo(dst), lo(dst), src); /* dl = dl >> src */
  1737. + emit(ctx, srav, hi(dst), hi(dst), src); /* dh = dh >>a src */
  1738. + emit(ctx, or, lo(dst), lo(dst), t1); /* dl = dl | t1 */
  1739. + break;
  1740. + }
  1741. +
  1742. + /* +20: Done */
  1743. + clobber_reg64(ctx, dst);
  1744. +}
  1745. +
  1746. +/* ALU mul immediate (64x32-bit) */
  1747. +static void emit_mul_i64(struct jit_context *ctx, const u8 dst[], s32 imm)
  1748. +{
  1749. + u8 src = MIPS_R_T6;
  1750. + u8 tmp = MIPS_R_T9;
  1751. +
  1752. + switch (imm) {
  1753. + /* dst = dst * 1 is a no-op */
  1754. + case 1:
  1755. + break;
  1756. + /* dst = dst * -1 */
  1757. + case -1:
  1758. + emit_neg_i64(ctx, dst);
  1759. + break;
  1760. + case 0:
  1761. + emit_mov_r(ctx, lo(dst), MIPS_R_ZERO);
  1762. + emit_mov_r(ctx, hi(dst), MIPS_R_ZERO);
  1763. + break;
  1764. + /* Full 64x32 multiply */
  1765. + default:
  1766. + /* hi(dst) = hi(dst) * src(imm) */
  1767. + emit_mov_i(ctx, src, imm);
  1768. + if (cpu_has_mips32r1 || cpu_has_mips32r6) {
  1769. + emit(ctx, mul, hi(dst), hi(dst), src);
  1770. + } else {
  1771. + emit(ctx, multu, hi(dst), src);
  1772. + emit(ctx, mflo, hi(dst));
  1773. + }
  1774. +
  1775. + /* hi(dst) = hi(dst) - lo(dst) */
  1776. + if (imm < 0)
  1777. + emit(ctx, subu, hi(dst), hi(dst), lo(dst));
  1778. +
  1779. + /* tmp = lo(dst) * src(imm) >> 32 */
  1780. + /* lo(dst) = lo(dst) * src(imm) */
  1781. + if (cpu_has_mips32r6) {
  1782. + emit(ctx, muhu, tmp, lo(dst), src);
  1783. + emit(ctx, mulu, lo(dst), lo(dst), src);
  1784. + } else {
  1785. + emit(ctx, multu, lo(dst), src);
  1786. + emit(ctx, mflo, lo(dst));
  1787. + emit(ctx, mfhi, tmp);
  1788. + }
  1789. +
  1790. + /* hi(dst) += tmp */
  1791. + emit(ctx, addu, hi(dst), hi(dst), tmp);
  1792. + clobber_reg64(ctx, dst);
  1793. + break;
  1794. + }
  1795. +}
  1796. +
  1797. +/* ALU mul register (64x64-bit) */
  1798. +static void emit_mul_r64(struct jit_context *ctx,
  1799. + const u8 dst[], const u8 src[])
  1800. +{
  1801. + u8 acc = MIPS_R_T8;
  1802. + u8 tmp = MIPS_R_T9;
  1803. +
  1804. + /* acc = hi(dst) * lo(src) */
  1805. + if (cpu_has_mips32r1 || cpu_has_mips32r6) {
  1806. + emit(ctx, mul, acc, hi(dst), lo(src));
  1807. + } else {
  1808. + emit(ctx, multu, hi(dst), lo(src));
  1809. + emit(ctx, mflo, acc);
  1810. + }
  1811. +
  1812. + /* tmp = lo(dst) * hi(src) */
  1813. + if (cpu_has_mips32r1 || cpu_has_mips32r6) {
  1814. + emit(ctx, mul, tmp, lo(dst), hi(src));
  1815. + } else {
  1816. + emit(ctx, multu, lo(dst), hi(src));
  1817. + emit(ctx, mflo, tmp);
  1818. + }
  1819. +
  1820. + /* acc += tmp */
  1821. + emit(ctx, addu, acc, acc, tmp);
  1822. +
  1823. + /* tmp = lo(dst) * lo(src) >> 32 */
  1824. + /* lo(dst) = lo(dst) * lo(src) */
  1825. + if (cpu_has_mips32r6) {
  1826. + emit(ctx, muhu, tmp, lo(dst), lo(src));
  1827. + emit(ctx, mulu, lo(dst), lo(dst), lo(src));
  1828. + } else {
  1829. + emit(ctx, multu, lo(dst), lo(src));
  1830. + emit(ctx, mflo, lo(dst));
  1831. + emit(ctx, mfhi, tmp);
  1832. + }
  1833. +
  1834. + /* hi(dst) = acc + tmp */
  1835. + emit(ctx, addu, hi(dst), acc, tmp);
  1836. + clobber_reg64(ctx, dst);
  1837. +}
  1838. +
  1839. +/* Helper function for 64-bit modulo */
  1840. +static u64 jit_mod64(u64 a, u64 b)
  1841. +{
  1842. + u64 rem;
  1843. +
  1844. + div64_u64_rem(a, b, &rem);
  1845. + return rem;
  1846. +}
  1847. +
  1848. +/* ALU div/mod register (64-bit) */
  1849. +static void emit_divmod_r64(struct jit_context *ctx,
  1850. + const u8 dst[], const u8 src[], u8 op)
  1851. +{
  1852. + const u8 *r0 = bpf2mips32[BPF_REG_0]; /* Mapped to v0-v1 */
  1853. + const u8 *r1 = bpf2mips32[BPF_REG_1]; /* Mapped to a0-a1 */
  1854. + const u8 *r2 = bpf2mips32[BPF_REG_2]; /* Mapped to a2-a3 */
  1855. + int exclude, k;
  1856. + u32 addr = 0;
  1857. +
  1858. + /* Push caller-saved registers on stack */
  1859. + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
  1860. + 0, JIT_RESERVED_STACK);
  1861. +
  1862. + /* Put 64-bit arguments 1 and 2 in registers a0-a3 */
  1863. + for (k = 0; k < 2; k++) {
  1864. + emit(ctx, move, MIPS_R_T9, src[k]);
  1865. + emit(ctx, move, r1[k], dst[k]);
  1866. + emit(ctx, move, r2[k], MIPS_R_T9);
  1867. + }
  1868. +
  1869. + /* Emit function call */
  1870. + switch (BPF_OP(op)) {
  1871. + /* dst = dst / src */
  1872. + case BPF_DIV:
  1873. + addr = (u32)&div64_u64;
  1874. + break;
  1875. + /* dst = dst % src */
  1876. + case BPF_MOD:
  1877. + addr = (u32)&jit_mod64;
  1878. + break;
  1879. + }
  1880. + emit_mov_i(ctx, MIPS_R_T9, addr);
  1881. + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
  1882. + emit(ctx, nop); /* Delay slot */
  1883. +
  1884. + /* Store the 64-bit result in dst */
  1885. + emit(ctx, move, dst[0], r0[0]);
  1886. + emit(ctx, move, dst[1], r0[1]);
  1887. +
  1888. + /* Restore caller-saved registers, excluding the computed result */
  1889. + exclude = BIT(lo(dst)) | BIT(hi(dst));
  1890. + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
  1891. + exclude, JIT_RESERVED_STACK);
  1892. + emit_load_delay(ctx);
  1893. +
  1894. + clobber_reg64(ctx, dst);
  1895. + clobber_reg(ctx, MIPS_R_V0);
  1896. + clobber_reg(ctx, MIPS_R_V1);
  1897. + clobber_reg(ctx, MIPS_R_RA);
  1898. +}
  1899. +
  1900. +/* Swap bytes in a register word */
  1901. +static void emit_swap8_r(struct jit_context *ctx, u8 dst, u8 src, u8 mask)
  1902. +{
  1903. + u8 tmp = MIPS_R_T9;
  1904. +
  1905. + emit(ctx, and, tmp, src, mask); /* tmp = src & 0x00ff00ff */
  1906. + emit(ctx, sll, tmp, tmp, 8); /* tmp = tmp << 8 */
  1907. + emit(ctx, srl, dst, src, 8); /* dst = src >> 8 */
  1908. + emit(ctx, and, dst, dst, mask); /* dst = dst & 0x00ff00ff */
  1909. + emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */
  1910. +}
  1911. +
  1912. +/* Swap half words in a register word */
  1913. +static void emit_swap16_r(struct jit_context *ctx, u8 dst, u8 src)
  1914. +{
  1915. + u8 tmp = MIPS_R_T9;
  1916. +
  1917. + emit(ctx, sll, tmp, src, 16); /* tmp = src << 16 */
  1918. + emit(ctx, srl, dst, src, 16); /* dst = src >> 16 */
  1919. + emit(ctx, or, dst, dst, tmp); /* dst = dst | tmp */
  1920. +}
  1921. +
  1922. +/* Swap bytes and truncate a register double word, word or half word */
  1923. +static void emit_bswap_r64(struct jit_context *ctx, const u8 dst[], u32 width)
  1924. +{
  1925. + u8 tmp = MIPS_R_T8;
  1926. +
  1927. + switch (width) {
  1928. + /* Swap bytes in a double word */
  1929. + case 64:
  1930. + if (cpu_has_mips32r2 || cpu_has_mips32r6) {
  1931. + emit(ctx, rotr, tmp, hi(dst), 16);
  1932. + emit(ctx, rotr, hi(dst), lo(dst), 16);
  1933. + emit(ctx, wsbh, lo(dst), tmp);
  1934. + emit(ctx, wsbh, hi(dst), hi(dst));
  1935. + } else {
  1936. + emit_swap16_r(ctx, tmp, lo(dst));
  1937. + emit_swap16_r(ctx, lo(dst), hi(dst));
  1938. + emit(ctx, move, hi(dst), tmp);
  1939. +
  1940. + emit(ctx, lui, tmp, 0xff); /* tmp = 0x00ff0000 */
  1941. + emit(ctx, ori, tmp, tmp, 0xff); /* tmp = 0x00ff00ff */
  1942. + emit_swap8_r(ctx, lo(dst), lo(dst), tmp);
  1943. + emit_swap8_r(ctx, hi(dst), hi(dst), tmp);
  1944. + }
  1945. + break;
  1946. + /* Swap bytes in a word */
  1947. + /* Swap bytes in a half word */
  1948. + case 32:
  1949. + case 16:
  1950. + emit_bswap_r(ctx, lo(dst), width);
  1951. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1952. + break;
  1953. + }
  1954. + clobber_reg64(ctx, dst);
  1955. +}
  1956. +
  1957. +/* Truncate a register double word, word or half word */
  1958. +static void emit_trunc_r64(struct jit_context *ctx, const u8 dst[], u32 width)
  1959. +{
  1960. + switch (width) {
  1961. + case 64:
  1962. + break;
  1963. + /* Zero-extend a word */
  1964. + case 32:
  1965. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1966. + clobber_reg(ctx, hi(dst));
  1967. + break;
  1968. + /* Zero-extend a half word */
  1969. + case 16:
  1970. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1971. + emit(ctx, andi, lo(dst), lo(dst), 0xffff);
  1972. + clobber_reg64(ctx, dst);
  1973. + break;
  1974. + }
  1975. +}
  1976. +
  1977. +/* Load operation: dst = *(size*)(src + off) */
  1978. +static void emit_ldx(struct jit_context *ctx,
  1979. + const u8 dst[], u8 src, s16 off, u8 size)
  1980. +{
  1981. + switch (size) {
  1982. + /* Load a byte */
  1983. + case BPF_B:
  1984. + emit(ctx, lbu, lo(dst), off, src);
  1985. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1986. + break;
  1987. + /* Load a half word */
  1988. + case BPF_H:
  1989. + emit(ctx, lhu, lo(dst), off, src);
  1990. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1991. + break;
  1992. + /* Load a word */
  1993. + case BPF_W:
  1994. + emit(ctx, lw, lo(dst), off, src);
  1995. + emit(ctx, move, hi(dst), MIPS_R_ZERO);
  1996. + break;
  1997. + /* Load a double word */
  1998. + case BPF_DW:
  1999. + if (dst[1] == src) {
  2000. + emit(ctx, lw, dst[0], off + 4, src);
  2001. + emit(ctx, lw, dst[1], off, src);
  2002. + } else {
  2003. + emit(ctx, lw, dst[1], off, src);
  2004. + emit(ctx, lw, dst[0], off + 4, src);
  2005. + }
  2006. + emit_load_delay(ctx);
  2007. + break;
  2008. + }
  2009. + clobber_reg64(ctx, dst);
  2010. +}
  2011. +
  2012. +/* Store operation: *(size *)(dst + off) = src */
  2013. +static void emit_stx(struct jit_context *ctx,
  2014. + const u8 dst, const u8 src[], s16 off, u8 size)
  2015. +{
  2016. + switch (size) {
  2017. + /* Store a byte */
  2018. + case BPF_B:
  2019. + emit(ctx, sb, lo(src), off, dst);
  2020. + break;
  2021. + /* Store a half word */
  2022. + case BPF_H:
  2023. + emit(ctx, sh, lo(src), off, dst);
  2024. + break;
  2025. + /* Store a word */
  2026. + case BPF_W:
  2027. + emit(ctx, sw, lo(src), off, dst);
  2028. + break;
  2029. + /* Store a double word */
  2030. + case BPF_DW:
  2031. + emit(ctx, sw, src[1], off, dst);
  2032. + emit(ctx, sw, src[0], off + 4, dst);
  2033. + break;
  2034. + }
  2035. +}
  2036. +
  2037. +/* Atomic read-modify-write (32-bit, non-ll/sc fallback) */
  2038. +static void emit_atomic_r32(struct jit_context *ctx,
  2039. + u8 dst, u8 src, s16 off, u8 code)
  2040. +{
  2041. + u32 exclude = 0;
  2042. + u32 addr = 0;
  2043. +
  2044. + /* Push caller-saved registers on stack */
  2045. + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
  2046. + 0, JIT_RESERVED_STACK);
  2047. + /*
  2048. + * Argument 1: dst+off if xchg, otherwise src, passed in register a0
  2049. + * Argument 2: src if xchg, otherwise dst+off, passed in register a1
  2050. + */
  2051. + emit(ctx, move, MIPS_R_T9, dst);
  2052. + emit(ctx, move, MIPS_R_A0, src);
  2053. + emit(ctx, addiu, MIPS_R_A1, MIPS_R_T9, off);
  2054. +
  2055. + /* Emit function call */
  2056. + switch (code) {
  2057. + case BPF_ADD:
  2058. + addr = (u32)&atomic_add;
  2059. + break;
  2060. + case BPF_SUB:
  2061. + addr = (u32)&atomic_sub;
  2062. + break;
  2063. + case BPF_OR:
  2064. + addr = (u32)&atomic_or;
  2065. + break;
  2066. + case BPF_AND:
  2067. + addr = (u32)&atomic_and;
  2068. + break;
  2069. + case BPF_XOR:
  2070. + addr = (u32)&atomic_xor;
  2071. + break;
  2072. + }
  2073. + emit_mov_i(ctx, MIPS_R_T9, addr);
  2074. + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
  2075. + emit(ctx, nop); /* Delay slot */
  2076. +
  2077. + /* Restore caller-saved registers, except any fetched value */
  2078. + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
  2079. + exclude, JIT_RESERVED_STACK);
  2080. + emit_load_delay(ctx);
  2081. + clobber_reg(ctx, MIPS_R_RA);
  2082. +}
  2083. +
  2084. +/* Atomic read-modify-write (64-bit) */
  2085. +static void emit_atomic_r64(struct jit_context *ctx,
  2086. + u8 dst, const u8 src[], s16 off, u8 code)
  2087. +{
  2088. + const u8 *r1 = bpf2mips32[BPF_REG_1]; /* Mapped to a0-a1 */
  2089. + u32 exclude = 0;
  2090. + u32 addr = 0;
  2091. +
  2092. + /* Push caller-saved registers on stack */
  2093. + push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
  2094. + 0, JIT_RESERVED_STACK);
  2095. + /*
  2096. + * Argument 1: 64-bit src, passed in registers a0-a1
  2097. + * Argument 2: 32-bit dst+off, passed in register a2
  2098. + */
  2099. + emit(ctx, move, MIPS_R_T9, dst);
  2100. + emit(ctx, move, r1[0], src[0]);
  2101. + emit(ctx, move, r1[1], src[1]);
  2102. + emit(ctx, addiu, MIPS_R_A2, MIPS_R_T9, off);
  2103. +
  2104. + /* Emit function call */
  2105. + switch (code) {
  2106. + case BPF_ADD:
  2107. + addr = (u32)&atomic64_add;
  2108. + break;
  2109. + case BPF_SUB:
  2110. + addr = (u32)&atomic64_sub;
  2111. + break;
  2112. + case BPF_OR:
  2113. + addr = (u32)&atomic64_or;
  2114. + break;
  2115. + case BPF_AND:
  2116. + addr = (u32)&atomic64_and;
  2117. + break;
  2118. + case BPF_XOR:
  2119. + addr = (u32)&atomic64_xor;
  2120. + break;
  2121. + }
  2122. + emit_mov_i(ctx, MIPS_R_T9, addr);
  2123. + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
  2124. + emit(ctx, nop); /* Delay slot */
  2125. +
  2126. + /* Restore caller-saved registers, except any fetched value */
  2127. + pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS,
  2128. + exclude, JIT_RESERVED_STACK);
  2129. + emit_load_delay(ctx);
  2130. + clobber_reg(ctx, MIPS_R_RA);
  2131. +}
  2132. +
  2133. +/*
  2134. + * Conditional movz or an emulated equivalent.
  2135. + * Note that the rs register may be modified.
  2136. + */
  2137. +static void emit_movz_r(struct jit_context *ctx, u8 rd, u8 rs, u8 rt)
  2138. +{
  2139. + if (cpu_has_mips_2) {
  2140. + emit(ctx, movz, rd, rs, rt); /* rd = rt ? rd : rs */
  2141. + } else if (cpu_has_mips32r6) {
  2142. + if (rs != MIPS_R_ZERO)
  2143. + emit(ctx, seleqz, rs, rs, rt); /* rs = 0 if rt != 0 */
  2144. + emit(ctx, selnez, rd, rd, rt); /* rd = 0 if rt == 0 */
  2145. + if (rs != MIPS_R_ZERO)
  2146. + emit(ctx, or, rd, rd, rs); /* rd = rd | rs */
  2147. + } else {
  2148. + emit(ctx, bnez, rt, 8); /* PC += 8 if rt != 0 */
  2149. + emit(ctx, nop); /* +0: delay slot */
  2150. + emit(ctx, or, rd, rs, MIPS_R_ZERO); /* +4: rd = rs */
  2151. + }
  2152. + clobber_reg(ctx, rd);
  2153. + clobber_reg(ctx, rs);
  2154. +}
  2155. +
  2156. +/*
  2157. + * Conditional movn or an emulated equivalent.
  2158. + * Note that the rs register may be modified.
  2159. + */
  2160. +static void emit_movn_r(struct jit_context *ctx, u8 rd, u8 rs, u8 rt)
  2161. +{
  2162. + if (cpu_has_mips_2) {
  2163. + emit(ctx, movn, rd, rs, rt); /* rd = rt ? rs : rd */
  2164. + } else if (cpu_has_mips32r6) {
  2165. + if (rs != MIPS_R_ZERO)
  2166. + emit(ctx, selnez, rs, rs, rt); /* rs = 0 if rt == 0 */
  2167. + emit(ctx, seleqz, rd, rd, rt); /* rd = 0 if rt != 0 */
  2168. + if (rs != MIPS_R_ZERO)
  2169. + emit(ctx, or, rd, rd, rs); /* rd = rd | rs */
  2170. + } else {
  2171. + emit(ctx, beqz, rt, 8); /* PC += 8 if rt == 0 */
  2172. + emit(ctx, nop); /* +0: delay slot */
  2173. + emit(ctx, or, rd, rs, MIPS_R_ZERO); /* +4: rd = rs */
  2174. + }
  2175. + clobber_reg(ctx, rd);
  2176. + clobber_reg(ctx, rs);
  2177. +}
  2178. +
  2179. +/* Emulation of 64-bit sltiu rd, rs, imm, where imm may be S32_MAX + 1 */
  2180. +static void emit_sltiu_r64(struct jit_context *ctx, u8 rd,
  2181. + const u8 rs[], s64 imm)
  2182. +{
  2183. + u8 tmp = MIPS_R_T9;
  2184. +
  2185. + if (imm < 0) {
  2186. + emit_mov_i(ctx, rd, imm); /* rd = imm */
  2187. + emit(ctx, sltu, rd, lo(rs), rd); /* rd = rsl < rd */
  2188. + emit(ctx, sltiu, tmp, hi(rs), -1); /* tmp = rsh < ~0U */
  2189. + emit(ctx, or, rd, rd, tmp); /* rd = rd | tmp */
  2190. + } else { /* imm >= 0 */
  2191. + if (imm > 0x7fff) {
  2192. + emit_mov_i(ctx, rd, (s32)imm); /* rd = imm */
  2193. + emit(ctx, sltu, rd, lo(rs), rd); /* rd = rsl < rd */
  2194. + } else {
  2195. + emit(ctx, sltiu, rd, lo(rs), imm); /* rd = rsl < imm */
  2196. + }
  2197. + emit_movn_r(ctx, rd, MIPS_R_ZERO, hi(rs)); /* rd = 0 if rsh */
  2198. + }
  2199. +}
  2200. +
  2201. +/* Emulation of 64-bit sltu rd, rs, rt */
  2202. +static void emit_sltu_r64(struct jit_context *ctx, u8 rd,
  2203. + const u8 rs[], const u8 rt[])
  2204. +{
  2205. + u8 tmp = MIPS_R_T9;
  2206. +
  2207. + emit(ctx, sltu, rd, lo(rs), lo(rt)); /* rd = rsl < rtl */
  2208. + emit(ctx, subu, tmp, hi(rs), hi(rt)); /* tmp = rsh - rth */
  2209. + emit_movn_r(ctx, rd, MIPS_R_ZERO, tmp); /* rd = 0 if tmp != 0 */
  2210. + emit(ctx, sltu, tmp, hi(rs), hi(rt)); /* tmp = rsh < rth */
  2211. + emit(ctx, or, rd, rd, tmp); /* rd = rd | tmp */
  2212. +}
  2213. +
  2214. +/* Emulation of 64-bit slti rd, rs, imm, where imm may be S32_MAX + 1 */
  2215. +static void emit_slti_r64(struct jit_context *ctx, u8 rd,
  2216. + const u8 rs[], s64 imm)
  2217. +{
  2218. + u8 t1 = MIPS_R_T8;
  2219. + u8 t2 = MIPS_R_T9;
  2220. + u8 cmp;
  2221. +
  2222. + /*
  2223. + * if ((rs < 0) ^ (imm < 0)) t1 = imm >u rsl
  2224. + * else t1 = rsl <u imm
  2225. + */
  2226. + emit_mov_i(ctx, rd, (s32)imm);
  2227. + emit(ctx, sltu, t1, lo(rs), rd); /* t1 = rsl <u imm */
  2228. + emit(ctx, sltu, t2, rd, lo(rs)); /* t2 = imm <u rsl */
  2229. + emit(ctx, srl, rd, hi(rs), 31); /* rd = rsh >> 31 */
  2230. + if (imm < 0)
  2231. + emit_movz_r(ctx, t1, t2, rd); /* t1 = rd ? t1 : t2 */
  2232. + else
  2233. + emit_movn_r(ctx, t1, t2, rd); /* t1 = rd ? t2 : t1 */
  2234. + /*
  2235. + * if ((imm < 0 && rsh != 0xffffffff) ||
  2236. + * (imm >= 0 && rsh != 0))
  2237. + * t1 = 0
  2238. + */
  2239. + if (imm < 0) {
  2240. + emit(ctx, addiu, rd, hi(rs), 1); /* rd = rsh + 1 */
  2241. + cmp = rd;
  2242. + } else { /* imm >= 0 */
  2243. + cmp = hi(rs);
  2244. + }
  2245. + emit_movn_r(ctx, t1, MIPS_R_ZERO, cmp); /* t1 = 0 if cmp != 0 */
  2246. +
  2247. + /*
  2248. + * if (imm < 0) rd = rsh < -1
  2249. + * else rd = rsh != 0
  2250. + * rd = rd | t1
  2251. + */
  2252. + emit(ctx, slti, rd, hi(rs), imm < 0 ? -1 : 0); /* rd = rsh < hi(imm) */
  2253. + emit(ctx, or, rd, rd, t1); /* rd = rd | t1 */
  2254. +}
  2255. +
  2256. +/* Emulation of 64-bit(slt rd, rs, rt) */
  2257. +static void emit_slt_r64(struct jit_context *ctx, u8 rd,
  2258. + const u8 rs[], const u8 rt[])
  2259. +{
  2260. + u8 t1 = MIPS_R_T7;
  2261. + u8 t2 = MIPS_R_T8;
  2262. + u8 t3 = MIPS_R_T9;
  2263. +
  2264. + /*
  2265. + * if ((rs < 0) ^ (rt < 0)) t1 = rtl <u rsl
  2266. + * else t1 = rsl <u rtl
  2267. + * if (rsh == rth) t1 = 0
  2268. + */
  2269. + emit(ctx, sltu, t1, lo(rs), lo(rt)); /* t1 = rsl <u rtl */
  2270. + emit(ctx, sltu, t2, lo(rt), lo(rs)); /* t2 = rtl <u rsl */
  2271. + emit(ctx, xor, t3, hi(rs), hi(rt)); /* t3 = rsh ^ rth */
  2272. + emit(ctx, srl, rd, t3, 31); /* rd = t3 >> 31 */
  2273. + emit_movn_r(ctx, t1, t2, rd); /* t1 = rd ? t2 : t1 */
  2274. + emit_movn_r(ctx, t1, MIPS_R_ZERO, t3); /* t1 = 0 if t3 != 0 */
  2275. +
  2276. + /* rd = (rsh < rth) | t1 */
  2277. + emit(ctx, slt, rd, hi(rs), hi(rt)); /* rd = rsh <s rth */
  2278. + emit(ctx, or, rd, rd, t1); /* rd = rd | t1 */
  2279. +}
  2280. +
  2281. +/* Jump immediate (64-bit) */
  2282. +static void emit_jmp_i64(struct jit_context *ctx,
  2283. + const u8 dst[], s32 imm, s32 off, u8 op)
  2284. +{
  2285. + u8 tmp = MIPS_R_T6;
  2286. +
  2287. + switch (op) {
  2288. + /* No-op, used internally for branch optimization */
  2289. + case JIT_JNOP:
  2290. + break;
  2291. + /* PC += off if dst == imm */
  2292. + /* PC += off if dst != imm */
  2293. + case BPF_JEQ:
  2294. + case BPF_JNE:
  2295. + if (imm >= -0x7fff && imm <= 0x8000) {
  2296. + emit(ctx, addiu, tmp, lo(dst), -imm);
  2297. + } else if ((u32)imm <= 0xffff) {
  2298. + emit(ctx, xori, tmp, lo(dst), imm);
  2299. + } else { /* Register fallback */
  2300. + emit_mov_i(ctx, tmp, imm);
  2301. + emit(ctx, xor, tmp, lo(dst), tmp);
  2302. + }
  2303. + if (imm < 0) { /* Compare sign extension */
  2304. + emit(ctx, addu, MIPS_R_T9, hi(dst), 1);
  2305. + emit(ctx, or, tmp, tmp, MIPS_R_T9);
  2306. + } else { /* Compare zero extension */
  2307. + emit(ctx, or, tmp, tmp, hi(dst));
  2308. + }
  2309. + if (op == BPF_JEQ)
  2310. + emit(ctx, beqz, tmp, off);
  2311. + else /* BPF_JNE */
  2312. + emit(ctx, bnez, tmp, off);
  2313. + break;
  2314. + /* PC += off if dst & imm */
  2315. + /* PC += off if (dst & imm) == 0 (not in BPF, used for long jumps) */
  2316. + case BPF_JSET:
  2317. + case JIT_JNSET:
  2318. + if ((u32)imm <= 0xffff) {
  2319. + emit(ctx, andi, tmp, lo(dst), imm);
  2320. + } else { /* Register fallback */
  2321. + emit_mov_i(ctx, tmp, imm);
  2322. + emit(ctx, and, tmp, lo(dst), tmp);
  2323. + }
  2324. + if (imm < 0) /* Sign-extension pulls in high word */
  2325. + emit(ctx, or, tmp, tmp, hi(dst));
  2326. + if (op == BPF_JSET)
  2327. + emit(ctx, bnez, tmp, off);
  2328. + else /* JIT_JNSET */
  2329. + emit(ctx, beqz, tmp, off);
  2330. + break;
  2331. + /* PC += off if dst > imm */
  2332. + case BPF_JGT:
  2333. + emit_sltiu_r64(ctx, tmp, dst, (s64)imm + 1);
  2334. + emit(ctx, beqz, tmp, off);
  2335. + break;
  2336. + /* PC += off if dst >= imm */
  2337. + case BPF_JGE:
  2338. + emit_sltiu_r64(ctx, tmp, dst, imm);
  2339. + emit(ctx, beqz, tmp, off);
  2340. + break;
  2341. + /* PC += off if dst < imm */
  2342. + case BPF_JLT:
  2343. + emit_sltiu_r64(ctx, tmp, dst, imm);
  2344. + emit(ctx, bnez, tmp, off);
  2345. + break;
  2346. + /* PC += off if dst <= imm */
  2347. + case BPF_JLE:
  2348. + emit_sltiu_r64(ctx, tmp, dst, (s64)imm + 1);
  2349. + emit(ctx, bnez, tmp, off);
  2350. + break;
  2351. + /* PC += off if dst > imm (signed) */
  2352. + case BPF_JSGT:
  2353. + emit_slti_r64(ctx, tmp, dst, (s64)imm + 1);
  2354. + emit(ctx, beqz, tmp, off);
  2355. + break;
  2356. + /* PC += off if dst >= imm (signed) */
  2357. + case BPF_JSGE:
  2358. + emit_slti_r64(ctx, tmp, dst, imm);
  2359. + emit(ctx, beqz, tmp, off);
  2360. + break;
  2361. + /* PC += off if dst < imm (signed) */
  2362. + case BPF_JSLT:
  2363. + emit_slti_r64(ctx, tmp, dst, imm);
  2364. + emit(ctx, bnez, tmp, off);
  2365. + break;
  2366. + /* PC += off if dst <= imm (signed) */
  2367. + case BPF_JSLE:
  2368. + emit_slti_r64(ctx, tmp, dst, (s64)imm + 1);
  2369. + emit(ctx, bnez, tmp, off);
  2370. + break;
  2371. + }
  2372. +}
  2373. +
  2374. +/* Jump register (64-bit) */
  2375. +static void emit_jmp_r64(struct jit_context *ctx,
  2376. + const u8 dst[], const u8 src[], s32 off, u8 op)
  2377. +{
  2378. + u8 t1 = MIPS_R_T6;
  2379. + u8 t2 = MIPS_R_T7;
  2380. +
  2381. + switch (op) {
  2382. + /* No-op, used internally for branch optimization */
  2383. + case JIT_JNOP:
  2384. + break;
  2385. + /* PC += off if dst == src */
  2386. + /* PC += off if dst != src */
  2387. + case BPF_JEQ:
  2388. + case BPF_JNE:
  2389. + emit(ctx, subu, t1, lo(dst), lo(src));
  2390. + emit(ctx, subu, t2, hi(dst), hi(src));
  2391. + emit(ctx, or, t1, t1, t2);
  2392. + if (op == BPF_JEQ)
  2393. + emit(ctx, beqz, t1, off);
  2394. + else /* BPF_JNE */
  2395. + emit(ctx, bnez, t1, off);
  2396. + break;
  2397. + /* PC += off if dst & src */
  2398. + /* PC += off if (dst & src) == 0 (not in BPF, used for long jumps) */
  2399. + case BPF_JSET:
  2400. + case JIT_JNSET:
  2401. + emit(ctx, and, t1, lo(dst), lo(src));
  2402. + emit(ctx, and, t2, hi(dst), hi(src));
  2403. + emit(ctx, or, t1, t1, t2);
  2404. + if (op == BPF_JSET)
  2405. + emit(ctx, bnez, t1, off);
  2406. + else /* JIT_JNSET */
  2407. + emit(ctx, beqz, t1, off);
  2408. + break;
  2409. + /* PC += off if dst > src */
  2410. + case BPF_JGT:
  2411. + emit_sltu_r64(ctx, t1, src, dst);
  2412. + emit(ctx, bnez, t1, off);
  2413. + break;
  2414. + /* PC += off if dst >= src */
  2415. + case BPF_JGE:
  2416. + emit_sltu_r64(ctx, t1, dst, src);
  2417. + emit(ctx, beqz, t1, off);
  2418. + break;
  2419. + /* PC += off if dst < src */
  2420. + case BPF_JLT:
  2421. + emit_sltu_r64(ctx, t1, dst, src);
  2422. + emit(ctx, bnez, t1, off);
  2423. + break;
  2424. + /* PC += off if dst <= src */
  2425. + case BPF_JLE:
  2426. + emit_sltu_r64(ctx, t1, src, dst);
  2427. + emit(ctx, beqz, t1, off);
  2428. + break;
  2429. + /* PC += off if dst > src (signed) */
  2430. + case BPF_JSGT:
  2431. + emit_slt_r64(ctx, t1, src, dst);
  2432. + emit(ctx, bnez, t1, off);
  2433. + break;
  2434. + /* PC += off if dst >= src (signed) */
  2435. + case BPF_JSGE:
  2436. + emit_slt_r64(ctx, t1, dst, src);
  2437. + emit(ctx, beqz, t1, off);
  2438. + break;
  2439. + /* PC += off if dst < src (signed) */
  2440. + case BPF_JSLT:
  2441. + emit_slt_r64(ctx, t1, dst, src);
  2442. + emit(ctx, bnez, t1, off);
  2443. + break;
  2444. + /* PC += off if dst <= src (signed) */
  2445. + case BPF_JSLE:
  2446. + emit_slt_r64(ctx, t1, src, dst);
  2447. + emit(ctx, beqz, t1, off);
  2448. + break;
  2449. + }
  2450. +}
  2451. +
  2452. +/* Function call */
  2453. +static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn)
  2454. +{
  2455. + bool fixed;
  2456. + u64 addr;
  2457. +
  2458. + /* Decode the call address */
  2459. + if (bpf_jit_get_func_addr(ctx->program, insn, false,
  2460. + &addr, &fixed) < 0)
  2461. + return -1;
  2462. + if (!fixed)
  2463. + return -1;
  2464. +
  2465. + /* Push stack arguments */
  2466. + push_regs(ctx, JIT_STACK_REGS, 0, JIT_RESERVED_STACK);
  2467. +
  2468. + /* Emit function call */
  2469. + emit_mov_i(ctx, MIPS_R_T9, addr);
  2470. + emit(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
  2471. + emit(ctx, nop); /* Delay slot */
  2472. +
  2473. + clobber_reg(ctx, MIPS_R_RA);
  2474. + clobber_reg(ctx, MIPS_R_V0);
  2475. + clobber_reg(ctx, MIPS_R_V1);
  2476. + return 0;
  2477. +}
  2478. +
  2479. +/* Function tail call */
  2480. +static int emit_tail_call(struct jit_context *ctx)
  2481. +{
  2482. + u8 ary = lo(bpf2mips32[BPF_REG_2]);
  2483. + u8 ind = lo(bpf2mips32[BPF_REG_3]);
  2484. + u8 t1 = MIPS_R_T8;
  2485. + u8 t2 = MIPS_R_T9;
  2486. + int off;
  2487. +
  2488. + /*
  2489. + * Tail call:
  2490. + * eBPF R1 - function argument (context ptr), passed in a0-a1
  2491. + * eBPF R2 - ptr to object with array of function entry points
  2492. + * eBPF R3 - array index of function to be called
  2493. + * stack[sz] - remaining tail call count, initialized in prologue
  2494. + */
  2495. +
  2496. + /* if (ind >= ary->map.max_entries) goto out */
  2497. + off = offsetof(struct bpf_array, map.max_entries);
  2498. + if (off > 0x7fff)
  2499. + return -1;
  2500. + emit(ctx, lw, t1, off, ary); /* t1 = ary->map.max_entries */
  2501. + emit_load_delay(ctx); /* Load delay slot */
  2502. + emit(ctx, sltu, t1, ind, t1); /* t1 = ind < t1 */
  2503. + emit(ctx, beqz, t1, get_offset(ctx, 1)); /* PC += off(1) if t1 == 0 */
  2504. + /* (next insn delay slot) */
  2505. + /* if (TCC-- <= 0) goto out */
  2506. + emit(ctx, lw, t2, ctx->stack_size, MIPS_R_SP); /* t2 = *(SP + size) */
  2507. + emit_load_delay(ctx); /* Load delay slot */
  2508. + emit(ctx, blez, t2, get_offset(ctx, 1)); /* PC += off(1) if t2 <= 0 */
  2509. + emit(ctx, addiu, t2, t2, -1); /* t2-- (delay slot) */
  2510. + emit(ctx, sw, t2, ctx->stack_size, MIPS_R_SP); /* *(SP + size) = t2 */
  2511. +
  2512. + /* prog = ary->ptrs[ind] */
  2513. + off = offsetof(struct bpf_array, ptrs);
  2514. + if (off > 0x7fff)
  2515. + return -1;
  2516. + emit(ctx, sll, t1, ind, 2); /* t1 = ind << 2 */
  2517. + emit(ctx, addu, t1, t1, ary); /* t1 += ary */
  2518. + emit(ctx, lw, t2, off, t1); /* t2 = *(t1 + off) */
  2519. + emit_load_delay(ctx); /* Load delay slot */
  2520. +
  2521. + /* if (prog == 0) goto out */
  2522. + emit(ctx, beqz, t2, get_offset(ctx, 1)); /* PC += off(1) if t2 == 0 */
  2523. + emit(ctx, nop); /* Delay slot */
  2524. +
  2525. + /* func = prog->bpf_func + 8 (prologue skip offset) */
  2526. + off = offsetof(struct bpf_prog, bpf_func);
  2527. + if (off > 0x7fff)
  2528. + return -1;
  2529. + emit(ctx, lw, t1, off, t2); /* t1 = *(t2 + off) */
  2530. + emit_load_delay(ctx); /* Load delay slot */
  2531. + emit(ctx, addiu, t1, t1, JIT_TCALL_SKIP); /* t1 += skip (8 or 12) */
  2532. +
  2533. + /* goto func */
  2534. + build_epilogue(ctx, t1);
  2535. + return 0;
  2536. +}
  2537. +
  2538. +/*
  2539. + * Stack frame layout for a JITed program (stack grows down).
  2540. + *
  2541. + * Higher address : Caller's stack frame :
  2542. + * :----------------------------:
  2543. + * : 64-bit eBPF args r3-r5 :
  2544. + * :----------------------------:
  2545. + * : Reserved / tail call count :
  2546. + * +============================+ <--- MIPS sp before call
  2547. + * | Callee-saved registers, |
  2548. + * | including RA and FP |
  2549. + * +----------------------------+ <--- eBPF FP (MIPS zero,fp)
  2550. + * | Local eBPF variables |
  2551. + * | allocated by program |
  2552. + * +----------------------------+
  2553. + * | Reserved for caller-saved |
  2554. + * | registers |
  2555. + * +----------------------------+
  2556. + * | Reserved for 64-bit eBPF |
  2557. + * | args r3-r5 & args passed |
  2558. + * | on stack in kernel calls |
  2559. + * Lower address +============================+ <--- MIPS sp
  2560. + */
  2561. +
  2562. +/* Build program prologue to set up the stack and registers */
  2563. +void build_prologue(struct jit_context *ctx)
  2564. +{
  2565. + const u8 *r1 = bpf2mips32[BPF_REG_1];
  2566. + const u8 *fp = bpf2mips32[BPF_REG_FP];
  2567. + int stack, saved, locals, reserved;
  2568. +
  2569. + /*
  2570. + * The first two instructions initialize TCC in the reserved (for us)
  2571. + * 16-byte area in the parent's stack frame. On a tail call, the
  2572. + * calling function jumps into the prologue after these instructions.
  2573. + */
  2574. + emit(ctx, ori, MIPS_R_T9, MIPS_R_ZERO,
  2575. + min(MAX_TAIL_CALL_CNT + 1, 0xffff));
  2576. + emit(ctx, sw, MIPS_R_T9, 0, MIPS_R_SP);
  2577. +
  2578. + /*
  2579. + * Register eBPF R1 contains the 32-bit context pointer argument.
  2580. + * A 32-bit argument is always passed in MIPS register a0, regardless
  2581. + * of CPU endianness. Initialize R1 accordingly and zero-extend.
  2582. + */
  2583. +#ifdef __BIG_ENDIAN
  2584. + emit(ctx, move, lo(r1), MIPS_R_A0);
  2585. +#endif
  2586. +
  2587. + /* === Entry-point for tail calls === */
  2588. +
  2589. + /* Zero-extend the 32-bit argument */
  2590. + emit(ctx, move, hi(r1), MIPS_R_ZERO);
  2591. +
  2592. + /* If the eBPF frame pointer was accessed it must be saved */
  2593. + if (ctx->accessed & BIT(BPF_REG_FP))
  2594. + clobber_reg64(ctx, fp);
  2595. +
  2596. + /* Compute the stack space needed for callee-saved registers */
  2597. + saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u32);
  2598. + saved = ALIGN(saved, MIPS_STACK_ALIGNMENT);
  2599. +
  2600. + /* Stack space used by eBPF program local data */
  2601. + locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT);
  2602. +
  2603. + /*
  2604. + * If we are emitting function calls, reserve extra stack space for
  2605. + * caller-saved registers and function arguments passed on the stack.
  2606. + * The required space is computed automatically during resource
  2607. + * usage discovery (pass 1).
  2608. + */
  2609. + reserved = ctx->stack_used;
  2610. +
  2611. + /* Allocate the stack frame */
  2612. + stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT);
  2613. + emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, -stack);
  2614. +
  2615. + /* Store callee-saved registers on stack */
  2616. + push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved);
  2617. +
  2618. + /* Initialize the eBPF frame pointer if accessed */
  2619. + if (ctx->accessed & BIT(BPF_REG_FP))
  2620. + emit(ctx, addiu, lo(fp), MIPS_R_SP, stack - saved);
  2621. +
  2622. + ctx->saved_size = saved;
  2623. + ctx->stack_size = stack;
  2624. +}
  2625. +
  2626. +/* Build the program epilogue to restore the stack and registers */
  2627. +void build_epilogue(struct jit_context *ctx, int dest_reg)
  2628. +{
  2629. + /* Restore callee-saved registers from stack */
  2630. + pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0,
  2631. + ctx->stack_size - ctx->saved_size);
  2632. + /*
  2633. + * A 32-bit return value is always passed in MIPS register v0,
  2634. + * but on big-endian targets the low part of R0 is mapped to v1.
  2635. + */
  2636. +#ifdef __BIG_ENDIAN
  2637. + emit(ctx, move, MIPS_R_V0, MIPS_R_V1);
  2638. +#endif
  2639. +
  2640. + /* Jump to the return address and adjust the stack pointer */
  2641. + emit(ctx, jr, dest_reg);
  2642. + emit(ctx, addiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size);
  2643. +}
  2644. +
  2645. +/* Build one eBPF instruction */
  2646. +int build_insn(const struct bpf_insn *insn, struct jit_context *ctx)
  2647. +{
  2648. + const u8 *dst = bpf2mips32[insn->dst_reg];
  2649. + const u8 *src = bpf2mips32[insn->src_reg];
  2650. + const u8 *tmp = bpf2mips32[JIT_REG_TMP];
  2651. + u8 code = insn->code;
  2652. + s16 off = insn->off;
  2653. + s32 imm = insn->imm;
  2654. + s32 val, rel;
  2655. + u8 alu, jmp;
  2656. +
  2657. + switch (code) {
  2658. + /* ALU operations */
  2659. + /* dst = imm */
  2660. + case BPF_ALU | BPF_MOV | BPF_K:
  2661. + emit_mov_i(ctx, lo(dst), imm);
  2662. + emit_zext_ver(ctx, dst);
  2663. + break;
  2664. + /* dst = src */
  2665. + case BPF_ALU | BPF_MOV | BPF_X:
  2666. + if (imm == 1) {
  2667. + /* Special mov32 for zext */
  2668. + emit_mov_i(ctx, hi(dst), 0);
  2669. + } else {
  2670. + emit_mov_r(ctx, lo(dst), lo(src));
  2671. + emit_zext_ver(ctx, dst);
  2672. + }
  2673. + break;
  2674. + /* dst = -dst */
  2675. + case BPF_ALU | BPF_NEG:
  2676. + emit_alu_i(ctx, lo(dst), 0, BPF_NEG);
  2677. + emit_zext_ver(ctx, dst);
  2678. + break;
  2679. + /* dst = dst & imm */
  2680. + /* dst = dst | imm */
  2681. + /* dst = dst ^ imm */
  2682. + /* dst = dst << imm */
  2683. + /* dst = dst >> imm */
  2684. + /* dst = dst >> imm (arithmetic) */
  2685. + /* dst = dst + imm */
  2686. + /* dst = dst - imm */
  2687. + /* dst = dst * imm */
  2688. + /* dst = dst / imm */
  2689. + /* dst = dst % imm */
  2690. + case BPF_ALU | BPF_OR | BPF_K:
  2691. + case BPF_ALU | BPF_AND | BPF_K:
  2692. + case BPF_ALU | BPF_XOR | BPF_K:
  2693. + case BPF_ALU | BPF_LSH | BPF_K:
  2694. + case BPF_ALU | BPF_RSH | BPF_K:
  2695. + case BPF_ALU | BPF_ARSH | BPF_K:
  2696. + case BPF_ALU | BPF_ADD | BPF_K:
  2697. + case BPF_ALU | BPF_SUB | BPF_K:
  2698. + case BPF_ALU | BPF_MUL | BPF_K:
  2699. + case BPF_ALU | BPF_DIV | BPF_K:
  2700. + case BPF_ALU | BPF_MOD | BPF_K:
  2701. + if (!valid_alu_i(BPF_OP(code), imm)) {
  2702. + emit_mov_i(ctx, MIPS_R_T6, imm);
  2703. + emit_alu_r(ctx, lo(dst), MIPS_R_T6, BPF_OP(code));
  2704. + } else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
  2705. + emit_alu_i(ctx, lo(dst), val, alu);
  2706. + }
  2707. + emit_zext_ver(ctx, dst);
  2708. + break;
  2709. + /* dst = dst & src */
  2710. + /* dst = dst | src */
  2711. + /* dst = dst ^ src */
  2712. + /* dst = dst << src */
  2713. + /* dst = dst >> src */
  2714. + /* dst = dst >> src (arithmetic) */
  2715. + /* dst = dst + src */
  2716. + /* dst = dst - src */
  2717. + /* dst = dst * src */
  2718. + /* dst = dst / src */
  2719. + /* dst = dst % src */
  2720. + case BPF_ALU | BPF_AND | BPF_X:
  2721. + case BPF_ALU | BPF_OR | BPF_X:
  2722. + case BPF_ALU | BPF_XOR | BPF_X:
  2723. + case BPF_ALU | BPF_LSH | BPF_X:
  2724. + case BPF_ALU | BPF_RSH | BPF_X:
  2725. + case BPF_ALU | BPF_ARSH | BPF_X:
  2726. + case BPF_ALU | BPF_ADD | BPF_X:
  2727. + case BPF_ALU | BPF_SUB | BPF_X:
  2728. + case BPF_ALU | BPF_MUL | BPF_X:
  2729. + case BPF_ALU | BPF_DIV | BPF_X:
  2730. + case BPF_ALU | BPF_MOD | BPF_X:
  2731. + emit_alu_r(ctx, lo(dst), lo(src), BPF_OP(code));
  2732. + emit_zext_ver(ctx, dst);
  2733. + break;
  2734. + /* dst = imm (64-bit) */
  2735. + case BPF_ALU64 | BPF_MOV | BPF_K:
  2736. + emit_mov_se_i64(ctx, dst, imm);
  2737. + break;
  2738. + /* dst = src (64-bit) */
  2739. + case BPF_ALU64 | BPF_MOV | BPF_X:
  2740. + emit_mov_r(ctx, lo(dst), lo(src));
  2741. + emit_mov_r(ctx, hi(dst), hi(src));
  2742. + break;
  2743. + /* dst = -dst (64-bit) */
  2744. + case BPF_ALU64 | BPF_NEG:
  2745. + emit_neg_i64(ctx, dst);
  2746. + break;
  2747. + /* dst = dst & imm (64-bit) */
  2748. + case BPF_ALU64 | BPF_AND | BPF_K:
  2749. + emit_alu_i64(ctx, dst, imm, BPF_OP(code));
  2750. + break;
  2751. + /* dst = dst | imm (64-bit) */
  2752. + /* dst = dst ^ imm (64-bit) */
  2753. + /* dst = dst + imm (64-bit) */
  2754. + /* dst = dst - imm (64-bit) */
  2755. + case BPF_ALU64 | BPF_OR | BPF_K:
  2756. + case BPF_ALU64 | BPF_XOR | BPF_K:
  2757. + case BPF_ALU64 | BPF_ADD | BPF_K:
  2758. + case BPF_ALU64 | BPF_SUB | BPF_K:
  2759. + if (imm)
  2760. + emit_alu_i64(ctx, dst, imm, BPF_OP(code));
  2761. + break;
  2762. + /* dst = dst << imm (64-bit) */
  2763. + /* dst = dst >> imm (64-bit) */
  2764. + /* dst = dst >> imm (64-bit, arithmetic) */
  2765. + case BPF_ALU64 | BPF_LSH | BPF_K:
  2766. + case BPF_ALU64 | BPF_RSH | BPF_K:
  2767. + case BPF_ALU64 | BPF_ARSH | BPF_K:
  2768. + if (imm)
  2769. + emit_shift_i64(ctx, dst, imm, BPF_OP(code));
  2770. + break;
  2771. + /* dst = dst * imm (64-bit) */
  2772. + case BPF_ALU64 | BPF_MUL | BPF_K:
  2773. + emit_mul_i64(ctx, dst, imm);
  2774. + break;
  2775. + /* dst = dst / imm (64-bit) */
  2776. + /* dst = dst % imm (64-bit) */
  2777. + case BPF_ALU64 | BPF_DIV | BPF_K:
  2778. + case BPF_ALU64 | BPF_MOD | BPF_K:
  2779. + /*
  2780. + * Sign-extend the immediate value into a temporary register,
  2781. + * and then do the operation on this register.
  2782. + */
  2783. + emit_mov_se_i64(ctx, tmp, imm);
  2784. + emit_divmod_r64(ctx, dst, tmp, BPF_OP(code));
  2785. + break;
  2786. + /* dst = dst & src (64-bit) */
  2787. + /* dst = dst | src (64-bit) */
  2788. + /* dst = dst ^ src (64-bit) */
  2789. + /* dst = dst + src (64-bit) */
  2790. + /* dst = dst - src (64-bit) */
  2791. + case BPF_ALU64 | BPF_AND | BPF_X:
  2792. + case BPF_ALU64 | BPF_OR | BPF_X:
  2793. + case BPF_ALU64 | BPF_XOR | BPF_X:
  2794. + case BPF_ALU64 | BPF_ADD | BPF_X:
  2795. + case BPF_ALU64 | BPF_SUB | BPF_X:
  2796. + emit_alu_r64(ctx, dst, src, BPF_OP(code));
  2797. + break;
  2798. + /* dst = dst << src (64-bit) */
  2799. + /* dst = dst >> src (64-bit) */
  2800. + /* dst = dst >> src (64-bit, arithmetic) */
  2801. + case BPF_ALU64 | BPF_LSH | BPF_X:
  2802. + case BPF_ALU64 | BPF_RSH | BPF_X:
  2803. + case BPF_ALU64 | BPF_ARSH | BPF_X:
  2804. + emit_shift_r64(ctx, dst, lo(src), BPF_OP(code));
  2805. + break;
  2806. + /* dst = dst * src (64-bit) */
  2807. + case BPF_ALU64 | BPF_MUL | BPF_X:
  2808. + emit_mul_r64(ctx, dst, src);
  2809. + break;
  2810. + /* dst = dst / src (64-bit) */
  2811. + /* dst = dst % src (64-bit) */
  2812. + case BPF_ALU64 | BPF_DIV | BPF_X:
  2813. + case BPF_ALU64 | BPF_MOD | BPF_X:
  2814. + emit_divmod_r64(ctx, dst, src, BPF_OP(code));
  2815. + break;
  2816. + /* dst = htole(dst) */
  2817. + /* dst = htobe(dst) */
  2818. + case BPF_ALU | BPF_END | BPF_FROM_LE:
  2819. + case BPF_ALU | BPF_END | BPF_FROM_BE:
  2820. + if (BPF_SRC(code) ==
  2821. +#ifdef __BIG_ENDIAN
  2822. + BPF_FROM_LE
  2823. +#else
  2824. + BPF_FROM_BE
  2825. +#endif
  2826. + )
  2827. + emit_bswap_r64(ctx, dst, imm);
  2828. + else
  2829. + emit_trunc_r64(ctx, dst, imm);
  2830. + break;
  2831. + /* dst = imm64 */
  2832. + case BPF_LD | BPF_IMM | BPF_DW:
  2833. + emit_mov_i(ctx, lo(dst), imm);
  2834. + emit_mov_i(ctx, hi(dst), insn[1].imm);
  2835. + return 1;
  2836. + /* LDX: dst = *(size *)(src + off) */
  2837. + case BPF_LDX | BPF_MEM | BPF_W:
  2838. + case BPF_LDX | BPF_MEM | BPF_H:
  2839. + case BPF_LDX | BPF_MEM | BPF_B:
  2840. + case BPF_LDX | BPF_MEM | BPF_DW:
  2841. + emit_ldx(ctx, dst, lo(src), off, BPF_SIZE(code));
  2842. + break;
  2843. + /* ST: *(size *)(dst + off) = imm */
  2844. + case BPF_ST | BPF_MEM | BPF_W:
  2845. + case BPF_ST | BPF_MEM | BPF_H:
  2846. + case BPF_ST | BPF_MEM | BPF_B:
  2847. + case BPF_ST | BPF_MEM | BPF_DW:
  2848. + switch (BPF_SIZE(code)) {
  2849. + case BPF_DW:
  2850. + /* Sign-extend immediate value into temporary reg */
  2851. + emit_mov_se_i64(ctx, tmp, imm);
  2852. + break;
  2853. + case BPF_W:
  2854. + case BPF_H:
  2855. + case BPF_B:
  2856. + emit_mov_i(ctx, lo(tmp), imm);
  2857. + break;
  2858. + }
  2859. + emit_stx(ctx, lo(dst), tmp, off, BPF_SIZE(code));
  2860. + break;
  2861. + /* STX: *(size *)(dst + off) = src */
  2862. + case BPF_STX | BPF_MEM | BPF_W:
  2863. + case BPF_STX | BPF_MEM | BPF_H:
  2864. + case BPF_STX | BPF_MEM | BPF_B:
  2865. + case BPF_STX | BPF_MEM | BPF_DW:
  2866. + emit_stx(ctx, lo(dst), src, off, BPF_SIZE(code));
  2867. + break;
  2868. + /* Speculation barrier */
  2869. + case BPF_ST | BPF_NOSPEC:
  2870. + break;
  2871. + /* Atomics */
  2872. + case BPF_STX | BPF_XADD | BPF_W:
  2873. + switch (imm) {
  2874. + case BPF_ADD:
  2875. + case BPF_AND:
  2876. + case BPF_OR:
  2877. + case BPF_XOR:
  2878. + if (cpu_has_llsc)
  2879. + emit_atomic_r(ctx, lo(dst), lo(src), off, imm);
  2880. + else /* Non-ll/sc fallback */
  2881. + emit_atomic_r32(ctx, lo(dst), lo(src),
  2882. + off, imm);
  2883. + break;
  2884. + default:
  2885. + goto notyet;
  2886. + }
  2887. + break;
  2888. + /* Atomics (64-bit) */
  2889. + case BPF_STX | BPF_XADD | BPF_DW:
  2890. + switch (imm) {
  2891. + case BPF_ADD:
  2892. + case BPF_AND:
  2893. + case BPF_OR:
  2894. + case BPF_XOR:
  2895. + emit_atomic_r64(ctx, lo(dst), src, off, imm);
  2896. + break;
  2897. + default:
  2898. + goto notyet;
  2899. + }
  2900. + break;
  2901. + /* PC += off if dst == src */
  2902. + /* PC += off if dst != src */
  2903. + /* PC += off if dst & src */
  2904. + /* PC += off if dst > src */
  2905. + /* PC += off if dst >= src */
  2906. + /* PC += off if dst < src */
  2907. + /* PC += off if dst <= src */
  2908. + /* PC += off if dst > src (signed) */
  2909. + /* PC += off if dst >= src (signed) */
  2910. + /* PC += off if dst < src (signed) */
  2911. + /* PC += off if dst <= src (signed) */
  2912. + case BPF_JMP32 | BPF_JEQ | BPF_X:
  2913. + case BPF_JMP32 | BPF_JNE | BPF_X:
  2914. + case BPF_JMP32 | BPF_JSET | BPF_X:
  2915. + case BPF_JMP32 | BPF_JGT | BPF_X:
  2916. + case BPF_JMP32 | BPF_JGE | BPF_X:
  2917. + case BPF_JMP32 | BPF_JLT | BPF_X:
  2918. + case BPF_JMP32 | BPF_JLE | BPF_X:
  2919. + case BPF_JMP32 | BPF_JSGT | BPF_X:
  2920. + case BPF_JMP32 | BPF_JSGE | BPF_X:
  2921. + case BPF_JMP32 | BPF_JSLT | BPF_X:
  2922. + case BPF_JMP32 | BPF_JSLE | BPF_X:
  2923. + if (off == 0)
  2924. + break;
  2925. + setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
  2926. + emit_jmp_r(ctx, lo(dst), lo(src), rel, jmp);
  2927. + if (finish_jmp(ctx, jmp, off) < 0)
  2928. + goto toofar;
  2929. + break;
  2930. + /* PC += off if dst == imm */
  2931. + /* PC += off if dst != imm */
  2932. + /* PC += off if dst & imm */
  2933. + /* PC += off if dst > imm */
  2934. + /* PC += off if dst >= imm */
  2935. + /* PC += off if dst < imm */
  2936. + /* PC += off if dst <= imm */
  2937. + /* PC += off if dst > imm (signed) */
  2938. + /* PC += off if dst >= imm (signed) */
  2939. + /* PC += off if dst < imm (signed) */
  2940. + /* PC += off if dst <= imm (signed) */
  2941. + case BPF_JMP32 | BPF_JEQ | BPF_K:
  2942. + case BPF_JMP32 | BPF_JNE | BPF_K:
  2943. + case BPF_JMP32 | BPF_JSET | BPF_K:
  2944. + case BPF_JMP32 | BPF_JGT | BPF_K:
  2945. + case BPF_JMP32 | BPF_JGE | BPF_K:
  2946. + case BPF_JMP32 | BPF_JLT | BPF_K:
  2947. + case BPF_JMP32 | BPF_JLE | BPF_K:
  2948. + case BPF_JMP32 | BPF_JSGT | BPF_K:
  2949. + case BPF_JMP32 | BPF_JSGE | BPF_K:
  2950. + case BPF_JMP32 | BPF_JSLT | BPF_K:
  2951. + case BPF_JMP32 | BPF_JSLE | BPF_K:
  2952. + if (off == 0)
  2953. + break;
  2954. + setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
  2955. + if (valid_jmp_i(jmp, imm)) {
  2956. + emit_jmp_i(ctx, lo(dst), imm, rel, jmp);
  2957. + } else {
  2958. + /* Move large immediate to register */
  2959. + emit_mov_i(ctx, MIPS_R_T6, imm);
  2960. + emit_jmp_r(ctx, lo(dst), MIPS_R_T6, rel, jmp);
  2961. + }
  2962. + if (finish_jmp(ctx, jmp, off) < 0)
  2963. + goto toofar;
  2964. + break;
  2965. + /* PC += off if dst == src */
  2966. + /* PC += off if dst != src */
  2967. + /* PC += off if dst & src */
  2968. + /* PC += off if dst > src */
  2969. + /* PC += off if dst >= src */
  2970. + /* PC += off if dst < src */
  2971. + /* PC += off if dst <= src */
  2972. + /* PC += off if dst > src (signed) */
  2973. + /* PC += off if dst >= src (signed) */
  2974. + /* PC += off if dst < src (signed) */
  2975. + /* PC += off if dst <= src (signed) */
  2976. + case BPF_JMP | BPF_JEQ | BPF_X:
  2977. + case BPF_JMP | BPF_JNE | BPF_X:
  2978. + case BPF_JMP | BPF_JSET | BPF_X:
  2979. + case BPF_JMP | BPF_JGT | BPF_X:
  2980. + case BPF_JMP | BPF_JGE | BPF_X:
  2981. + case BPF_JMP | BPF_JLT | BPF_X:
  2982. + case BPF_JMP | BPF_JLE | BPF_X:
  2983. + case BPF_JMP | BPF_JSGT | BPF_X:
  2984. + case BPF_JMP | BPF_JSGE | BPF_X:
  2985. + case BPF_JMP | BPF_JSLT | BPF_X:
  2986. + case BPF_JMP | BPF_JSLE | BPF_X:
  2987. + if (off == 0)
  2988. + break;
  2989. + setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
  2990. + emit_jmp_r64(ctx, dst, src, rel, jmp);
  2991. + if (finish_jmp(ctx, jmp, off) < 0)
  2992. + goto toofar;
  2993. + break;
  2994. + /* PC += off if dst == imm */
  2995. + /* PC += off if dst != imm */
  2996. + /* PC += off if dst & imm */
  2997. + /* PC += off if dst > imm */
  2998. + /* PC += off if dst >= imm */
  2999. + /* PC += off if dst < imm */
  3000. + /* PC += off if dst <= imm */
  3001. + /* PC += off if dst > imm (signed) */
  3002. + /* PC += off if dst >= imm (signed) */
  3003. + /* PC += off if dst < imm (signed) */
  3004. + /* PC += off if dst <= imm (signed) */
  3005. + case BPF_JMP | BPF_JEQ | BPF_K:
  3006. + case BPF_JMP | BPF_JNE | BPF_K:
  3007. + case BPF_JMP | BPF_JSET | BPF_K:
  3008. + case BPF_JMP | BPF_JGT | BPF_K:
  3009. + case BPF_JMP | BPF_JGE | BPF_K:
  3010. + case BPF_JMP | BPF_JLT | BPF_K:
  3011. + case BPF_JMP | BPF_JLE | BPF_K:
  3012. + case BPF_JMP | BPF_JSGT | BPF_K:
  3013. + case BPF_JMP | BPF_JSGE | BPF_K:
  3014. + case BPF_JMP | BPF_JSLT | BPF_K:
  3015. + case BPF_JMP | BPF_JSLE | BPF_K:
  3016. + if (off == 0)
  3017. + break;
  3018. + setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel);
  3019. + emit_jmp_i64(ctx, dst, imm, rel, jmp);
  3020. + if (finish_jmp(ctx, jmp, off) < 0)
  3021. + goto toofar;
  3022. + break;
  3023. + /* PC += off */
  3024. + case BPF_JMP | BPF_JA:
  3025. + if (off == 0)
  3026. + break;
  3027. + if (emit_ja(ctx, off) < 0)
  3028. + goto toofar;
  3029. + break;
  3030. + /* Tail call */
  3031. + case BPF_JMP | BPF_TAIL_CALL:
  3032. + if (emit_tail_call(ctx) < 0)
  3033. + goto invalid;
  3034. + break;
  3035. + /* Function call */
  3036. + case BPF_JMP | BPF_CALL:
  3037. + if (emit_call(ctx, insn) < 0)
  3038. + goto invalid;
  3039. + break;
  3040. + /* Function return */
  3041. + case BPF_JMP | BPF_EXIT:
  3042. + /*
  3043. + * Optimization: when last instruction is EXIT
  3044. + * simply continue to epilogue.
  3045. + */
  3046. + if (ctx->bpf_index == ctx->program->len - 1)
  3047. + break;
  3048. + if (emit_exit(ctx) < 0)
  3049. + goto toofar;
  3050. + break;
  3051. +
  3052. + default:
  3053. +invalid:
  3054. + pr_err_once("unknown opcode %02x\n", code);
  3055. + return -EINVAL;
  3056. +notyet:
  3057. + pr_info_once("*** NOT YET: opcode %02x ***\n", code);
  3058. + return -EFAULT;
  3059. +toofar:
  3060. + pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n",
  3061. + ctx->bpf_index, code);
  3062. + return -E2BIG;
  3063. + }
  3064. + return 0;
  3065. +}