050-v5.16-03-mips-bpf-Add-new-eBPF-JIT-for-64-bit-MIPS.patch

From: Johan Almbladh <[email protected]>
Date: Tue, 5 Oct 2021 18:54:05 +0200
Subject: [PATCH] mips: bpf: Add new eBPF JIT for 64-bit MIPS

This is an implementation of an eBPF JIT for 64-bit MIPS III-V and
MIPS64r1-r6. It uses the same framework introduced by the 32-bit JIT.

Signed-off-by: Johan Almbladh <[email protected]>
---
 create mode 100644 arch/mips/net/bpf_jit_comp64.c
--- /dev/null
+++ b/arch/mips/net/bpf_jit_comp64.c
@@ -0,0 +1,991 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Just-In-Time compiler for eBPF bytecode on MIPS.
+ * Implementation of JIT functions for 64-bit CPUs.
+ *
+ * Copyright (c) 2021 Anyfi Networks AB.
+ * Author: Johan Almbladh <[email protected]>
+ *
+ * Based on code and ideas from
+ * Copyright (c) 2017 Cavium, Inc.
+ * Copyright (c) 2017 Shubham Bansal <[email protected]>
+ * Copyright (c) 2011 Mircea Gherzan <[email protected]>
+ */
+
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <asm/cpu-features.h>
+#include <asm/isa-rev.h>
+#include <asm/uasm.h>
+
+#include "bpf_jit_comp.h"
+
+/* MIPS t0-t3 are not available in the n64 ABI */
+#undef MIPS_R_T0
+#undef MIPS_R_T1
+#undef MIPS_R_T2
+#undef MIPS_R_T3
+
+/* Stack is 16-byte aligned in n64 ABI */
+#define MIPS_STACK_ALIGNMENT 16
+
+/* Extra 64-bit eBPF registers used by JIT */
+#define JIT_REG_TC (MAX_BPF_JIT_REG + 0)
+#define JIT_REG_ZX (MAX_BPF_JIT_REG + 1)
+
+/* Number of prologue bytes to skip when doing a tail call */
+#define JIT_TCALL_SKIP 4
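+
+/*
+ * Note: 4 bytes is one MIPS instruction. The skipped instruction is the
+ * addiu in build_prologue() that seeds the tail call count, so a tail
+ * call re-enters the callee with the caller's remaining count instead
+ * of a fresh one.
+ */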
+
+/* Callee-saved CPU registers that the JIT must preserve */
+#define JIT_CALLEE_REGS \
+	(BIT(MIPS_R_S0) | \
+	 BIT(MIPS_R_S1) | \
+	 BIT(MIPS_R_S2) | \
+	 BIT(MIPS_R_S3) | \
+	 BIT(MIPS_R_S4) | \
+	 BIT(MIPS_R_S5) | \
+	 BIT(MIPS_R_S6) | \
+	 BIT(MIPS_R_S7) | \
+	 BIT(MIPS_R_GP) | \
+	 BIT(MIPS_R_FP) | \
+	 BIT(MIPS_R_RA))
+
+/* Caller-saved CPU registers available for JIT use */
+#define JIT_CALLER_REGS \
+	(BIT(MIPS_R_A5) | \
+	 BIT(MIPS_R_A6) | \
+	 BIT(MIPS_R_A7))
+
+/*
+ * Mapping of 64-bit eBPF registers to 64-bit native MIPS registers.
+ * MIPS registers t4 - t7 may be used by the JIT as temporary registers.
+ * MIPS registers t8 - t9 are reserved for single-register common functions.
+ */
+static const u8 bpf2mips64[] = {
+	/* Return value from in-kernel function, and exit value from eBPF */
+	[BPF_REG_0] = MIPS_R_V0,
+	/* Arguments from eBPF program to in-kernel function */
+	[BPF_REG_1] = MIPS_R_A0,
+	[BPF_REG_2] = MIPS_R_A1,
+	[BPF_REG_3] = MIPS_R_A2,
+	[BPF_REG_4] = MIPS_R_A3,
+	[BPF_REG_5] = MIPS_R_A4,
+	/* Callee-saved registers that in-kernel function will preserve */
+	[BPF_REG_6] = MIPS_R_S0,
+	[BPF_REG_7] = MIPS_R_S1,
+	[BPF_REG_8] = MIPS_R_S2,
+	[BPF_REG_9] = MIPS_R_S3,
+	/* Read-only frame pointer to access the eBPF stack */
+	[BPF_REG_FP] = MIPS_R_FP,
+	/* Temporary register for blinding constants */
+	[BPF_REG_AX] = MIPS_R_AT,
+	/* Tail call count register, caller-saved */
+	[JIT_REG_TC] = MIPS_R_A5,
+	/* Constant for register zero-extension */
+	[JIT_REG_ZX] = MIPS_R_V1,
+};
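+
+/*
+ * With this mapping, eBPF argument registers R1-R5 already sit in the
+ * n64 argument registers a0-a4, so emit_call() below can branch to a
+ * kernel helper without moving any arguments, and BPF_REG_0 lands in
+ * v0, where the n64 ABI returns values.
+ */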
+
+/*
+ * MIPS 32-bit operations on 64-bit registers generate a sign-extended
+ * result. However, the eBPF ISA mandates zero-extension, so we rely on the
+ * verifier to add that for us (emit_zext_ver). In addition, ALU arithmetic
+ * operations, right shift and byte swap require properly sign-extended
+ * operands or the result is unpredictable. We emit explicit sign-extensions
+ * in those cases.
+ */
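+
+/*
+ * Example: with canonical (sign-extended) operands, addu of 0x7fffffff
+ * and 1 yields 0xffffffff80000000, while eBPF ALU32 semantics require
+ * 0x0000000080000000 in the destination. Hence 32-bit results are
+ * zero-extended, and 32-bit operands of arithmetic, right shifts and
+ * byte swaps are first sign-extended back to canonical form.
+ */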
+
+/* Sign extension */
+static void emit_sext(struct jit_context *ctx, u8 dst, u8 src)
+{
+	emit(ctx, sll, dst, src, 0);
+	clobber_reg(ctx, dst);
+}
+
+/* Zero extension */
+static void emit_zext(struct jit_context *ctx, u8 dst)
+{
+	if (cpu_has_mips64r2 || cpu_has_mips64r6) {
+		emit(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+	} else {
+		emit(ctx, and, dst, dst, bpf2mips64[JIT_REG_ZX]);
+		access_reg(ctx, JIT_REG_ZX); /* We need the ZX register */
+	}
+	clobber_reg(ctx, dst);
+}
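+
+/*
+ * On CPUs without dinsu (pre-r2), zero-extension masks with the ZX
+ * register, which build_prologue() initializes to 0x00000000ffffffff
+ * via daddiu zx, zero, -1 followed by dsrl32 zx, zx, 0. Keeping the
+ * constant in a register makes each zext a single 'and' instead of a
+ * shift pair.
+ */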
+
+/* Zero extension, if verifier does not do it for us */
+static void emit_zext_ver(struct jit_context *ctx, u8 dst)
+{
+	if (!ctx->program->aux->verifier_zext)
+		emit_zext(ctx, dst);
+}
+
+/* dst = imm (64-bit) */
+static void emit_mov_i64(struct jit_context *ctx, u8 dst, u64 imm64)
+{
+	if (imm64 >= 0xffffffffffff8000ULL || imm64 < 0x8000ULL) {
+		emit(ctx, daddiu, dst, MIPS_R_ZERO, (s16)imm64);
+	} else if (imm64 >= 0xffffffff80000000ULL ||
+		   (imm64 < 0x80000000 && imm64 > 0xffff)) {
+		emit(ctx, lui, dst, (s16)(imm64 >> 16));
+		emit(ctx, ori, dst, dst, (u16)imm64 & 0xffff);
+	} else {
+		u8 acc = MIPS_R_ZERO;
+		int k;
+
+		for (k = 0; k < 4; k++) {
+			u16 half = imm64 >> (48 - 16 * k);
+
+			if (acc == dst)
+				emit(ctx, dsll, dst, dst, 16);
+
+			if (half) {
+				emit(ctx, ori, dst, acc, half);
+				acc = dst;
+			}
+		}
+	}
+	clobber_reg(ctx, dst);
+}
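+
+/*
+ * For illustration: imm64 = 0x0123456789abcdef takes the last path and
+ * emits ori dst, zero, 0x0123, then three rounds of dsll dst, dst, 16
+ * followed by ori with the next half word (0x4567, 0x89ab, 0xcdef),
+ * seven instructions in total. All-zero half words skip their ori but
+ * still get their shift once dst holds live bits.
+ */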
+
+/* ALU immediate operation (64-bit) */
+static void emit_alu_i64(struct jit_context *ctx, u8 dst, s32 imm, u8 op)
+{
+	switch (BPF_OP(op)) {
+	/* dst = dst | imm */
+	case BPF_OR:
+		emit(ctx, ori, dst, dst, (u16)imm);
+		break;
+	/* dst = dst ^ imm */
+	case BPF_XOR:
+		emit(ctx, xori, dst, dst, (u16)imm);
+		break;
+	/* dst = -dst */
+	case BPF_NEG:
+		emit(ctx, dsubu, dst, MIPS_R_ZERO, dst);
+		break;
+	/* dst = dst << imm */
+	case BPF_LSH:
+		emit(ctx, dsll_safe, dst, dst, imm);
+		break;
+	/* dst = dst >> imm */
+	case BPF_RSH:
+		emit(ctx, dsrl_safe, dst, dst, imm);
+		break;
+	/* dst = dst >> imm (arithmetic) */
+	case BPF_ARSH:
+		emit(ctx, dsra_safe, dst, dst, imm);
+		break;
+	/* dst = dst + imm */
+	case BPF_ADD:
+		emit(ctx, daddiu, dst, dst, imm);
+		break;
+	/* dst = dst - imm */
+	case BPF_SUB:
+		emit(ctx, daddiu, dst, dst, -imm);
+		break;
+	default:
+		/* Width-generic operations */
+		emit_alu_i(ctx, dst, imm, op);
+	}
+	clobber_reg(ctx, dst);
+}
+
+/* ALU register operation (64-bit) */
+static void emit_alu_r64(struct jit_context *ctx, u8 dst, u8 src, u8 op)
+{
+	switch (BPF_OP(op)) {
+	/* dst = dst << src */
+	case BPF_LSH:
+		emit(ctx, dsllv, dst, dst, src);
+		break;
+	/* dst = dst >> src */
+	case BPF_RSH:
+		emit(ctx, dsrlv, dst, dst, src);
+		break;
+	/* dst = dst >> src (arithmetic) */
+	case BPF_ARSH:
+		emit(ctx, dsrav, dst, dst, src);
+		break;
+	/* dst = dst + src */
+	case BPF_ADD:
+		emit(ctx, daddu, dst, dst, src);
+		break;
+	/* dst = dst - src */
+	case BPF_SUB:
+		emit(ctx, dsubu, dst, dst, src);
+		break;
+	/* dst = dst * src */
+	case BPF_MUL:
+		if (cpu_has_mips64r6) {
+			emit(ctx, dmulu, dst, dst, src);
+		} else {
+			emit(ctx, dmultu, dst, src);
+			emit(ctx, mflo, dst);
+		}
+		break;
+	/* dst = dst / src */
+	case BPF_DIV:
+		if (cpu_has_mips64r6) {
+			emit(ctx, ddivu_r6, dst, dst, src);
+		} else {
+			emit(ctx, ddivu, dst, src);
+			emit(ctx, mflo, dst);
+		}
+		break;
+	/* dst = dst % src */
+	case BPF_MOD:
+		if (cpu_has_mips64r6) {
+			emit(ctx, dmodu, dst, dst, src);
+		} else {
+			emit(ctx, ddivu, dst, src);
+			emit(ctx, mfhi, dst);
+		}
+		break;
+	default:
+		/* Width-generic operations */
+		emit_alu_r(ctx, dst, src, op);
+	}
+	clobber_reg(ctx, dst);
+}
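+
+/*
+ * Pre-r6 cores return multiply and divide results in the HI/LO register
+ * pair: mflo picks up the product or quotient, mfhi the remainder.
+ * MIPS64r6 dropped HI/LO in favor of the three-operand dmulu, ddivu_r6
+ * and dmodu forms used above.
+ */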
+
+/* Swap sub words in a register double word */
+static void emit_swap_r64(struct jit_context *ctx, u8 dst, u8 mask, u32 bits)
+{
+	u8 tmp = MIPS_R_T9;
+
+	emit(ctx, and, tmp, dst, mask);  /* tmp = dst & mask */
+	emit(ctx, dsll, tmp, tmp, bits); /* tmp = tmp << bits */
+	emit(ctx, dsrl, dst, dst, bits); /* dst = dst >> bits */
+	emit(ctx, and, dst, dst, mask);  /* dst = dst & mask */
+	emit(ctx, or, dst, dst, tmp);    /* dst = dst | tmp */
+}
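+
+/*
+ * Example: with the mask register holding 0x0000ffff0000ffff and
+ * bits = 16, this exchanges the half words within each word of dst.
+ * emit_bswap_r64() below composes such steps (16-bit, then 8-bit with
+ * mask 0x00ff00ff00ff00ff) to byte-swap a double word on cores
+ * without dsbh/dshd.
+ */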
  274. +
  275. +/* Swap bytes and truncate a register double word, word or half word */
  276. +static void emit_bswap_r64(struct jit_context *ctx, u8 dst, u32 width)
  277. +{
  278. + switch (width) {
  279. + /* Swap bytes in a double word */
  280. + case 64:
  281. + if (cpu_has_mips64r2 || cpu_has_mips64r6) {
  282. + emit(ctx, dsbh, dst, dst);
  283. + emit(ctx, dshd, dst, dst);
  284. + } else {
  285. + u8 t1 = MIPS_R_T6;
  286. + u8 t2 = MIPS_R_T7;
  287. +
  288. + emit(ctx, dsll32, t2, dst, 0); /* t2 = dst << 32 */
  289. + emit(ctx, dsrl32, dst, dst, 0); /* dst = dst >> 32 */
  290. + emit(ctx, or, dst, dst, t2); /* dst = dst | t2 */
  291. +
  292. + emit(ctx, ori, t2, MIPS_R_ZERO, 0xffff);
  293. + emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */
  294. + emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */
  295. + emit_swap_r64(ctx, dst, t1, 16);/* dst = swap16(dst) */
  296. +
  297. + emit(ctx, lui, t2, 0xff); /* t2 = 0x00ff0000 */
  298. + emit(ctx, ori, t2, t2, 0xff); /* t2 = t2 | 0x00ff */
  299. + emit(ctx, dsll32, t1, t2, 0); /* t1 = t2 << 32 */
  300. + emit(ctx, or, t1, t1, t2); /* t1 = t1 | t2 */
  301. + emit_swap_r64(ctx, dst, t1, 8); /* dst = swap8(dst) */
  302. + }
  303. + break;
  304. + /* Swap bytes in a half word */
  305. + /* Swap bytes in a word */
  306. + case 32:
  307. + case 16:
  308. + emit_sext(ctx, dst, dst);
  309. + emit_bswap_r(ctx, dst, width);
  310. + if (cpu_has_mips64r2 || cpu_has_mips64r6)
  311. + emit_zext(ctx, dst);
  312. + break;
  313. + }
  314. + clobber_reg(ctx, dst);
  315. +}
+
+/* Truncate a register double word, word or half word */
+static void emit_trunc_r64(struct jit_context *ctx, u8 dst, u32 width)
+{
+	switch (width) {
+	case 64:
+		break;
+	/* Zero-extend a word */
+	case 32:
+		emit_zext(ctx, dst);
+		break;
+	/* Zero-extend a half word */
+	case 16:
+		emit(ctx, andi, dst, dst, 0xffff);
+		break;
+	}
+	clobber_reg(ctx, dst);
+}
+
+/* Load operation: dst = *(size*)(src + off) */
+static void emit_ldx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
+{
+	switch (size) {
+	/* Load a byte */
+	case BPF_B:
+		emit(ctx, lbu, dst, off, src);
+		break;
+	/* Load a half word */
+	case BPF_H:
+		emit(ctx, lhu, dst, off, src);
+		break;
+	/* Load a word */
+	case BPF_W:
+		emit(ctx, lwu, dst, off, src);
+		break;
+	/* Load a double word */
+	case BPF_DW:
+		emit(ctx, ld, dst, off, src);
+		break;
+	}
+	clobber_reg(ctx, dst);
+}
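+
+/*
+ * Note: BPF_W uses lwu, the zero-extending word load, because eBPF
+ * defines 32-bit loads as zero-extended; the natural MIPS lw would
+ * sign-extend into the upper half of the 64-bit register.
+ */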
+
+/* Store operation: *(size *)(dst + off) = src */
+static void emit_stx(struct jit_context *ctx, u8 dst, u8 src, s16 off, u8 size)
+{
+	switch (size) {
+	/* Store a byte */
+	case BPF_B:
+		emit(ctx, sb, src, off, dst);
+		break;
+	/* Store a half word */
+	case BPF_H:
+		emit(ctx, sh, src, off, dst);
+		break;
+	/* Store a word */
+	case BPF_W:
+		emit(ctx, sw, src, off, dst);
+		break;
+	/* Store a double word */
+	case BPF_DW:
+		emit(ctx, sd, src, off, dst);
+		break;
+	}
+}
+
+/* Atomic read-modify-write */
+static void emit_atomic_r64(struct jit_context *ctx,
+			    u8 dst, u8 src, s16 off, u8 code)
+{
+	u8 t1 = MIPS_R_T6;
+	u8 t2 = MIPS_R_T7;
+
+	emit(ctx, lld, t1, off, dst);
+	switch (code) {
+	case BPF_ADD:
+		emit(ctx, daddu, t2, t1, src);
+		break;
+	case BPF_AND:
+		emit(ctx, and, t2, t1, src);
+		break;
+	case BPF_OR:
+		emit(ctx, or, t2, t1, src);
+		break;
+	case BPF_XOR:
+		emit(ctx, xor, t2, t1, src);
+		break;
+	}
+	emit(ctx, scd, t2, off, dst);
+	emit(ctx, beqz, t2, -16);
+	emit(ctx, nop); /* Delay slot */
+}
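+
+/*
+ * The lld/scd pair makes the read-modify-write atomic: scd stores t2
+ * only if the location is untouched since the lld, and leaves t2 zero
+ * on failure. The beqz offset of -16 bytes (four instructions) points
+ * back at the lld, so the sequence retries until the store succeeds.
+ */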
+
+/* Function call */
+static int emit_call(struct jit_context *ctx, const struct bpf_insn *insn)
+{
+	u8 zx = bpf2mips64[JIT_REG_ZX];
+	u8 tmp = MIPS_R_T6;
+	bool fixed;
+	u64 addr;
+
+	/* Decode the call address */
+	if (bpf_jit_get_func_addr(ctx->program, insn, false,
+				  &addr, &fixed) < 0)
+		return -1;
+	if (!fixed)
+		return -1;
+
+	/* Push caller-saved registers on stack */
+	push_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);
+
+	/* Emit function call */
+	emit_mov_i64(ctx, tmp, addr);
+	emit(ctx, jalr, MIPS_R_RA, tmp);
+	emit(ctx, nop); /* Delay slot */
+
+	/* Restore caller-saved registers */
+	pop_regs(ctx, ctx->clobbered & JIT_CALLER_REGS, 0, 0);
+
+	/* Re-initialize the JIT zero-extension register if accessed */
+	if (ctx->accessed & BIT(JIT_REG_ZX)) {
+		emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
+		emit(ctx, dsrl32, zx, zx, 0);
+	}
+
+	clobber_reg(ctx, MIPS_R_RA);
+	clobber_reg(ctx, MIPS_R_V0);
+	clobber_reg(ctx, MIPS_R_V1);
+	return 0;
+}
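+
+/*
+ * The ZX constant lives in v1, a caller-saved register the callee may
+ * clobber, so it is rebuilt after every helper call; ra, v0 and v1 are
+ * marked clobbered for the same reason.
+ */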
+
+/* Function tail call */
+static int emit_tail_call(struct jit_context *ctx)
+{
+	u8 ary = bpf2mips64[BPF_REG_2];
+	u8 ind = bpf2mips64[BPF_REG_3];
+	u8 tcc = bpf2mips64[JIT_REG_TC];
+	u8 tmp = MIPS_R_T6;
+	int off;
+
+	/*
+	 * Tail call:
+	 * eBPF R1 - function argument (context ptr), passed in a0-a1
+	 * eBPF R2 - ptr to object with array of function entry points
+	 * eBPF R3 - array index of function to be called
+	 */
+
+	/* if (ind >= ary->map.max_entries) goto out */
+	off = offsetof(struct bpf_array, map.max_entries);
+	if (off > 0x7fff)
+		return -1;
+	emit(ctx, lwu, tmp, off, ary);            /* tmp = ary->map.max_entries */
+	emit(ctx, sltu, tmp, ind, tmp);           /* tmp = ind < tmp */
+	emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0 */
+
+	/* if (--TCC < 0) goto out */
+	emit(ctx, daddiu, tcc, tcc, -1);          /* tcc-- (delay slot) */
+	emit(ctx, bltz, tcc, get_offset(ctx, 1)); /* PC += off(1) if tcc < 0 */
+						  /* (next insn delay slot) */
+	/* prog = ary->ptrs[ind] */
+	off = offsetof(struct bpf_array, ptrs);
+	if (off > 0x7fff)
+		return -1;
+	emit(ctx, dsll, tmp, ind, 3);             /* tmp = ind << 3 */
+	emit(ctx, daddu, tmp, tmp, ary);          /* tmp += ary */
+	emit(ctx, ld, tmp, off, tmp);             /* tmp = *(tmp + off) */
+
+	/* if (prog == 0) goto out */
+	emit(ctx, beqz, tmp, get_offset(ctx, 1)); /* PC += off(1) if tmp == 0 */
+	emit(ctx, nop);                           /* Delay slot */
+
+	/* func = prog->bpf_func + JIT_TCALL_SKIP (prologue skip offset) */
+	off = offsetof(struct bpf_prog, bpf_func);
+	if (off > 0x7fff)
+		return -1;
+	emit(ctx, ld, tmp, off, tmp);                /* tmp = *(tmp + off) */
+	emit(ctx, daddiu, tmp, tmp, JIT_TCALL_SKIP); /* tmp += skip (4) */
+
+	/* goto func */
+	build_epilogue(ctx, tmp);
+	access_reg(ctx, JIT_REG_TC);
+	return 0;
+}
+
+/*
+ * Stack frame layout for a JITed program (stack grows down).
+ *
+ * Higher address  : Previous stack frame      :
+ *                 +===========================+ <--- MIPS sp before call
+ *                 | Callee-saved registers,   |
+ *                 | including RA and FP       |
+ *                 +---------------------------+ <--- eBPF FP (MIPS fp)
+ *                 | Local eBPF variables      |
+ *                 | allocated by program      |
+ *                 +---------------------------+
+ *                 | Reserved for caller-saved |
+ *                 | registers                 |
+ * Lower address   +===========================+ <--- MIPS sp
+ */
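+
+/*
+ * Worked example (hypothetical program): if pass 1 finds s0, s1, fp and
+ * ra clobbered, saved = 4 * 8 = 32 bytes; a stack_depth of 40 makes
+ * locals align up to 48; add the caller-saved area discovered in
+ * ctx->stack_used and round the total up to the 16-byte n64 alignment.
+ */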
+
+/* Build program prologue to set up the stack and registers */
+void build_prologue(struct jit_context *ctx)
+{
+	u8 fp = bpf2mips64[BPF_REG_FP];
+	u8 tc = bpf2mips64[JIT_REG_TC];
+	u8 zx = bpf2mips64[JIT_REG_ZX];
+	int stack, saved, locals, reserved;
+
+	/*
+	 * The first instruction initializes the tail call count register.
+	 * On a tail call, the calling function jumps into the prologue
+	 * after this instruction.
+	 */
+	emit(ctx, addiu, tc, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT + 1, 0xffff));
+
+	/* === Entry-point for tail calls === */
+
+	/*
+	 * If the eBPF frame pointer and tail call count registers were
+	 * accessed they must be preserved. Mark them as clobbered here
+	 * to save and restore them on the stack as needed.
+	 */
+	if (ctx->accessed & BIT(BPF_REG_FP))
+		clobber_reg(ctx, fp);
+	if (ctx->accessed & BIT(JIT_REG_TC))
+		clobber_reg(ctx, tc);
+	if (ctx->accessed & BIT(JIT_REG_ZX))
+		clobber_reg(ctx, zx);
+
+	/* Compute the stack space needed for callee-saved registers */
+	saved = hweight32(ctx->clobbered & JIT_CALLEE_REGS) * sizeof(u64);
+	saved = ALIGN(saved, MIPS_STACK_ALIGNMENT);
+
+	/* Stack space used by eBPF program local data */
+	locals = ALIGN(ctx->program->aux->stack_depth, MIPS_STACK_ALIGNMENT);
+
+	/*
+	 * If we are emitting function calls, reserve extra stack space for
+	 * caller-saved registers needed by the JIT. The required space is
+	 * computed automatically during resource usage discovery (pass 1).
+	 */
+	reserved = ctx->stack_used;
+
+	/* Allocate the stack frame */
+	stack = ALIGN(saved + locals + reserved, MIPS_STACK_ALIGNMENT);
+	if (stack)
+		emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack);
+
+	/* Store callee-saved registers on stack */
+	push_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0, stack - saved);
+
+	/* Initialize the eBPF frame pointer if accessed */
+	if (ctx->accessed & BIT(BPF_REG_FP))
+		emit(ctx, daddiu, fp, MIPS_R_SP, stack - saved);
+
+	/* Initialize the eBPF JIT zero-extension register if accessed */
+	if (ctx->accessed & BIT(JIT_REG_ZX)) {
+		emit(ctx, daddiu, zx, MIPS_R_ZERO, -1);
+		emit(ctx, dsrl32, zx, zx, 0);
+	}
+
+	ctx->saved_size = saved;
+	ctx->stack_size = stack;
+}
+
+/* Build the program epilogue to restore the stack and registers */
+void build_epilogue(struct jit_context *ctx, int dest_reg)
+{
+	/* Restore callee-saved registers from stack */
+	pop_regs(ctx, ctx->clobbered & JIT_CALLEE_REGS, 0,
+		 ctx->stack_size - ctx->saved_size);
+
+	/* Release the stack frame */
+	if (ctx->stack_size)
+		emit(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, ctx->stack_size);
+
+	/* Jump to return address and sign-extend the 32-bit return value */
+	emit(ctx, jr, dest_reg);
+	emit(ctx, sll, MIPS_R_V0, MIPS_R_V0, 0); /* Delay slot */
+}
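+
+/*
+ * The delay-slot sll doubles as the ABI fixup: the n64 calling
+ * convention expects 32-bit return values sign-extended in v0, and
+ * sll v0, v0, 0 produces exactly that canonical form at no extra cost.
+ */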
+
+/* Build one eBPF instruction */
+int build_insn(const struct bpf_insn *insn, struct jit_context *ctx)
+{
+	u8 dst = bpf2mips64[insn->dst_reg];
+	u8 src = bpf2mips64[insn->src_reg];
+	u8 code = insn->code;
+	s16 off = insn->off;
+	s32 imm = insn->imm;
+	s32 val, rel;
+	u8 alu, jmp;
+
+	switch (code) {
+	/* ALU operations */
+	/* dst = imm */
+	case BPF_ALU | BPF_MOV | BPF_K:
+		emit_mov_i(ctx, dst, imm);
+		emit_zext_ver(ctx, dst);
+		break;
+	/* dst = src */
+	case BPF_ALU | BPF_MOV | BPF_X:
+		if (imm == 1) {
+			/* Special mov32 for zext */
+			emit_zext(ctx, dst);
+		} else {
+			emit_mov_r(ctx, dst, src);
+			emit_zext_ver(ctx, dst);
+		}
+		break;
+	/* dst = -dst */
+	case BPF_ALU | BPF_NEG:
+		emit_sext(ctx, dst, dst);
+		emit_alu_i(ctx, dst, 0, BPF_NEG);
+		emit_zext_ver(ctx, dst);
+		break;
+	/* dst = dst & imm */
+	/* dst = dst | imm */
+	/* dst = dst ^ imm */
+	/* dst = dst << imm */
+	case BPF_ALU | BPF_OR | BPF_K:
+	case BPF_ALU | BPF_AND | BPF_K:
+	case BPF_ALU | BPF_XOR | BPF_K:
+	case BPF_ALU | BPF_LSH | BPF_K:
+		if (!valid_alu_i(BPF_OP(code), imm)) {
+			emit_mov_i(ctx, MIPS_R_T4, imm);
+			emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
+		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
+			emit_alu_i(ctx, dst, val, alu);
+		}
+		emit_zext_ver(ctx, dst);
+		break;
+	/* dst = dst >> imm */
+	/* dst = dst >> imm (arithmetic) */
+	/* dst = dst + imm */
+	/* dst = dst - imm */
+	/* dst = dst * imm */
+	/* dst = dst / imm */
+	/* dst = dst % imm */
+	case BPF_ALU | BPF_RSH | BPF_K:
+	case BPF_ALU | BPF_ARSH | BPF_K:
+	case BPF_ALU | BPF_ADD | BPF_K:
+	case BPF_ALU | BPF_SUB | BPF_K:
+	case BPF_ALU | BPF_MUL | BPF_K:
+	case BPF_ALU | BPF_DIV | BPF_K:
+	case BPF_ALU | BPF_MOD | BPF_K:
+		if (!valid_alu_i(BPF_OP(code), imm)) {
+			emit_sext(ctx, dst, dst);
+			emit_mov_i(ctx, MIPS_R_T4, imm);
+			emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
+		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
+			emit_sext(ctx, dst, dst);
+			emit_alu_i(ctx, dst, val, alu);
+		}
+		emit_zext_ver(ctx, dst);
+		break;
+	/* dst = dst & src */
+	/* dst = dst | src */
+	/* dst = dst ^ src */
+	/* dst = dst << src */
+	case BPF_ALU | BPF_AND | BPF_X:
+	case BPF_ALU | BPF_OR | BPF_X:
+	case BPF_ALU | BPF_XOR | BPF_X:
+	case BPF_ALU | BPF_LSH | BPF_X:
+		emit_alu_r(ctx, dst, src, BPF_OP(code));
+		emit_zext_ver(ctx, dst);
+		break;
+	/* dst = dst >> src */
+	/* dst = dst >> src (arithmetic) */
+	/* dst = dst + src */
+	/* dst = dst - src */
+	/* dst = dst * src */
+	/* dst = dst / src */
+	/* dst = dst % src */
+	case BPF_ALU | BPF_RSH | BPF_X:
+	case BPF_ALU | BPF_ARSH | BPF_X:
+	case BPF_ALU | BPF_ADD | BPF_X:
+	case BPF_ALU | BPF_SUB | BPF_X:
+	case BPF_ALU | BPF_MUL | BPF_X:
+	case BPF_ALU | BPF_DIV | BPF_X:
+	case BPF_ALU | BPF_MOD | BPF_X:
+		emit_sext(ctx, dst, dst);
+		emit_sext(ctx, MIPS_R_T4, src);
+		emit_alu_r(ctx, dst, MIPS_R_T4, BPF_OP(code));
+		emit_zext_ver(ctx, dst);
+		break;
+	/* dst = imm (64-bit) */
+	case BPF_ALU64 | BPF_MOV | BPF_K:
+		emit_mov_i(ctx, dst, imm);
+		break;
+	/* dst = src (64-bit) */
+	case BPF_ALU64 | BPF_MOV | BPF_X:
+		emit_mov_r(ctx, dst, src);
+		break;
+	/* dst = -dst (64-bit) */
+	case BPF_ALU64 | BPF_NEG:
+		emit_alu_i64(ctx, dst, 0, BPF_NEG);
+		break;
+	/* dst = dst & imm (64-bit) */
+	/* dst = dst | imm (64-bit) */
+	/* dst = dst ^ imm (64-bit) */
+	/* dst = dst << imm (64-bit) */
+	/* dst = dst >> imm (64-bit) */
+	/* dst = dst >> imm (64-bit, arithmetic) */
+	/* dst = dst + imm (64-bit) */
+	/* dst = dst - imm (64-bit) */
+	/* dst = dst * imm (64-bit) */
+	/* dst = dst / imm (64-bit) */
+	/* dst = dst % imm (64-bit) */
+	case BPF_ALU64 | BPF_AND | BPF_K:
+	case BPF_ALU64 | BPF_OR | BPF_K:
+	case BPF_ALU64 | BPF_XOR | BPF_K:
+	case BPF_ALU64 | BPF_LSH | BPF_K:
+	case BPF_ALU64 | BPF_RSH | BPF_K:
+	case BPF_ALU64 | BPF_ARSH | BPF_K:
+	case BPF_ALU64 | BPF_ADD | BPF_K:
+	case BPF_ALU64 | BPF_SUB | BPF_K:
+	case BPF_ALU64 | BPF_MUL | BPF_K:
+	case BPF_ALU64 | BPF_DIV | BPF_K:
+	case BPF_ALU64 | BPF_MOD | BPF_K:
+		if (!valid_alu_i(BPF_OP(code), imm)) {
+			emit_mov_i(ctx, MIPS_R_T4, imm);
+			emit_alu_r64(ctx, dst, MIPS_R_T4, BPF_OP(code));
+		} else if (rewrite_alu_i(BPF_OP(code), imm, &alu, &val)) {
+			emit_alu_i64(ctx, dst, val, alu);
+		}
+		break;
+	/* dst = dst & src (64-bit) */
+	/* dst = dst | src (64-bit) */
+	/* dst = dst ^ src (64-bit) */
+	/* dst = dst << src (64-bit) */
+	/* dst = dst >> src (64-bit) */
+	/* dst = dst >> src (64-bit, arithmetic) */
+	/* dst = dst + src (64-bit) */
+	/* dst = dst - src (64-bit) */
+	/* dst = dst * src (64-bit) */
+	/* dst = dst / src (64-bit) */
+	/* dst = dst % src (64-bit) */
+	case BPF_ALU64 | BPF_AND | BPF_X:
+	case BPF_ALU64 | BPF_OR | BPF_X:
+	case BPF_ALU64 | BPF_XOR | BPF_X:
+	case BPF_ALU64 | BPF_LSH | BPF_X:
+	case BPF_ALU64 | BPF_RSH | BPF_X:
+	case BPF_ALU64 | BPF_ARSH | BPF_X:
+	case BPF_ALU64 | BPF_ADD | BPF_X:
+	case BPF_ALU64 | BPF_SUB | BPF_X:
+	case BPF_ALU64 | BPF_MUL | BPF_X:
+	case BPF_ALU64 | BPF_DIV | BPF_X:
+	case BPF_ALU64 | BPF_MOD | BPF_X:
+		emit_alu_r64(ctx, dst, src, BPF_OP(code));
+		break;
+	/* dst = htole(dst) */
+	/* dst = htobe(dst) */
+	case BPF_ALU | BPF_END | BPF_FROM_LE:
+	case BPF_ALU | BPF_END | BPF_FROM_BE:
+		if (BPF_SRC(code) ==
+#ifdef __BIG_ENDIAN
+		    BPF_FROM_LE
+#else
+		    BPF_FROM_BE
+#endif
+		    )
+			emit_bswap_r64(ctx, dst, imm);
+		else
+			emit_trunc_r64(ctx, dst, imm);
+		break;
+	/* dst = imm64 */
+	case BPF_LD | BPF_IMM | BPF_DW:
+		emit_mov_i64(ctx, dst, (u32)imm | ((u64)insn[1].imm << 32));
+		return 1;
+	/* LDX: dst = *(size *)(src + off) */
+	case BPF_LDX | BPF_MEM | BPF_W:
+	case BPF_LDX | BPF_MEM | BPF_H:
+	case BPF_LDX | BPF_MEM | BPF_B:
+	case BPF_LDX | BPF_MEM | BPF_DW:
+		emit_ldx(ctx, dst, src, off, BPF_SIZE(code));
+		break;
+	/* ST: *(size *)(dst + off) = imm */
+	case BPF_ST | BPF_MEM | BPF_W:
+	case BPF_ST | BPF_MEM | BPF_H:
+	case BPF_ST | BPF_MEM | BPF_B:
+	case BPF_ST | BPF_MEM | BPF_DW:
+		emit_mov_i(ctx, MIPS_R_T4, imm);
+		emit_stx(ctx, dst, MIPS_R_T4, off, BPF_SIZE(code));
+		break;
+	/* STX: *(size *)(dst + off) = src */
+	case BPF_STX | BPF_MEM | BPF_W:
+	case BPF_STX | BPF_MEM | BPF_H:
+	case BPF_STX | BPF_MEM | BPF_B:
+	case BPF_STX | BPF_MEM | BPF_DW:
+		emit_stx(ctx, dst, src, off, BPF_SIZE(code));
+		break;
+	/* Speculation barrier */
+	case BPF_ST | BPF_NOSPEC:
+		break;
+	/* Atomics */
+	case BPF_STX | BPF_XADD | BPF_W:
+	case BPF_STX | BPF_XADD | BPF_DW:
+		switch (imm) {
+		case BPF_ADD:
+		case BPF_AND:
+		case BPF_OR:
+		case BPF_XOR:
+			if (BPF_SIZE(code) == BPF_DW) {
+				emit_atomic_r64(ctx, dst, src, off, imm);
+			} else { /* 32-bit, no fetch */
+				emit_sext(ctx, MIPS_R_T4, src);
+				emit_atomic_r(ctx, dst, MIPS_R_T4, off, imm);
+			}
+			break;
+		default:
+			goto notyet;
+		}
+		break;
+	/* PC += off if dst == src */
+	/* PC += off if dst != src */
+	/* PC += off if dst & src */
+	/* PC += off if dst > src */
+	/* PC += off if dst >= src */
+	/* PC += off if dst < src */
+	/* PC += off if dst <= src */
+	/* PC += off if dst > src (signed) */
+	/* PC += off if dst >= src (signed) */
+	/* PC += off if dst < src (signed) */
+	/* PC += off if dst <= src (signed) */
+	case BPF_JMP32 | BPF_JEQ | BPF_X:
+	case BPF_JMP32 | BPF_JNE | BPF_X:
+	case BPF_JMP32 | BPF_JSET | BPF_X:
+	case BPF_JMP32 | BPF_JGT | BPF_X:
+	case BPF_JMP32 | BPF_JGE | BPF_X:
+	case BPF_JMP32 | BPF_JLT | BPF_X:
+	case BPF_JMP32 | BPF_JLE | BPF_X:
+	case BPF_JMP32 | BPF_JSGT | BPF_X:
+	case BPF_JMP32 | BPF_JSGE | BPF_X:
+	case BPF_JMP32 | BPF_JSLT | BPF_X:
+	case BPF_JMP32 | BPF_JSLE | BPF_X:
+		if (off == 0)
+			break;
+		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
+		emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
+		emit_sext(ctx, MIPS_R_T5, src); /* Sign-extended src */
+		emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
+		if (finish_jmp(ctx, jmp, off) < 0)
+			goto toofar;
+		break;
+	/* PC += off if dst == imm */
+	/* PC += off if dst != imm */
+	/* PC += off if dst & imm */
+	/* PC += off if dst > imm */
+	/* PC += off if dst >= imm */
+	/* PC += off if dst < imm */
+	/* PC += off if dst <= imm */
+	/* PC += off if dst > imm (signed) */
+	/* PC += off if dst >= imm (signed) */
+	/* PC += off if dst < imm (signed) */
+	/* PC += off if dst <= imm (signed) */
+	case BPF_JMP32 | BPF_JEQ | BPF_K:
+	case BPF_JMP32 | BPF_JNE | BPF_K:
+	case BPF_JMP32 | BPF_JSET | BPF_K:
+	case BPF_JMP32 | BPF_JGT | BPF_K:
+	case BPF_JMP32 | BPF_JGE | BPF_K:
+	case BPF_JMP32 | BPF_JLT | BPF_K:
+	case BPF_JMP32 | BPF_JLE | BPF_K:
+	case BPF_JMP32 | BPF_JSGT | BPF_K:
+	case BPF_JMP32 | BPF_JSGE | BPF_K:
+	case BPF_JMP32 | BPF_JSLT | BPF_K:
+	case BPF_JMP32 | BPF_JSLE | BPF_K:
+		if (off == 0)
+			break;
+		setup_jmp_i(ctx, imm, 32, BPF_OP(code), off, &jmp, &rel);
+		emit_sext(ctx, MIPS_R_T4, dst); /* Sign-extended dst */
+		if (valid_jmp_i(jmp, imm)) {
+			emit_jmp_i(ctx, MIPS_R_T4, imm, rel, jmp);
+		} else {
+			/* Move large immediate to register, sign-extended */
+			emit_mov_i(ctx, MIPS_R_T5, imm);
+			emit_jmp_r(ctx, MIPS_R_T4, MIPS_R_T5, rel, jmp);
+		}
+		if (finish_jmp(ctx, jmp, off) < 0)
+			goto toofar;
+		break;
+	/* PC += off if dst == src */
+	/* PC += off if dst != src */
+	/* PC += off if dst & src */
+	/* PC += off if dst > src */
+	/* PC += off if dst >= src */
+	/* PC += off if dst < src */
+	/* PC += off if dst <= src */
+	/* PC += off if dst > src (signed) */
+	/* PC += off if dst >= src (signed) */
+	/* PC += off if dst < src (signed) */
+	/* PC += off if dst <= src (signed) */
+	case BPF_JMP | BPF_JEQ | BPF_X:
+	case BPF_JMP | BPF_JNE | BPF_X:
+	case BPF_JMP | BPF_JSET | BPF_X:
+	case BPF_JMP | BPF_JGT | BPF_X:
+	case BPF_JMP | BPF_JGE | BPF_X:
+	case BPF_JMP | BPF_JLT | BPF_X:
+	case BPF_JMP | BPF_JLE | BPF_X:
+	case BPF_JMP | BPF_JSGT | BPF_X:
+	case BPF_JMP | BPF_JSGE | BPF_X:
+	case BPF_JMP | BPF_JSLT | BPF_X:
+	case BPF_JMP | BPF_JSLE | BPF_X:
+		if (off == 0)
+			break;
+		setup_jmp_r(ctx, dst == src, BPF_OP(code), off, &jmp, &rel);
+		emit_jmp_r(ctx, dst, src, rel, jmp);
+		if (finish_jmp(ctx, jmp, off) < 0)
+			goto toofar;
+		break;
+	/* PC += off if dst == imm */
+	/* PC += off if dst != imm */
+	/* PC += off if dst & imm */
+	/* PC += off if dst > imm */
+	/* PC += off if dst >= imm */
+	/* PC += off if dst < imm */
+	/* PC += off if dst <= imm */
+	/* PC += off if dst > imm (signed) */
+	/* PC += off if dst >= imm (signed) */
+	/* PC += off if dst < imm (signed) */
+	/* PC += off if dst <= imm (signed) */
+	case BPF_JMP | BPF_JEQ | BPF_K:
+	case BPF_JMP | BPF_JNE | BPF_K:
+	case BPF_JMP | BPF_JSET | BPF_K:
+	case BPF_JMP | BPF_JGT | BPF_K:
+	case BPF_JMP | BPF_JGE | BPF_K:
+	case BPF_JMP | BPF_JLT | BPF_K:
+	case BPF_JMP | BPF_JLE | BPF_K:
+	case BPF_JMP | BPF_JSGT | BPF_K:
+	case BPF_JMP | BPF_JSGE | BPF_K:
+	case BPF_JMP | BPF_JSLT | BPF_K:
+	case BPF_JMP | BPF_JSLE | BPF_K:
+		if (off == 0)
+			break;
+		setup_jmp_i(ctx, imm, 64, BPF_OP(code), off, &jmp, &rel);
+		if (valid_jmp_i(jmp, imm)) {
+			emit_jmp_i(ctx, dst, imm, rel, jmp);
+		} else {
+			/* Move large immediate to register */
+			emit_mov_i(ctx, MIPS_R_T4, imm);
+			emit_jmp_r(ctx, dst, MIPS_R_T4, rel, jmp);
+		}
+		if (finish_jmp(ctx, jmp, off) < 0)
+			goto toofar;
+		break;
+	/* PC += off */
+	case BPF_JMP | BPF_JA:
+		if (off == 0)
+			break;
+		if (emit_ja(ctx, off) < 0)
+			goto toofar;
+		break;
+	/* Tail call */
+	case BPF_JMP | BPF_TAIL_CALL:
+		if (emit_tail_call(ctx) < 0)
+			goto invalid;
+		break;
+	/* Function call */
+	case BPF_JMP | BPF_CALL:
+		if (emit_call(ctx, insn) < 0)
+			goto invalid;
+		break;
+	/* Function return */
+	case BPF_JMP | BPF_EXIT:
+		/*
+		 * Optimization: when last instruction is EXIT
+		 * simply continue to epilogue.
+		 */
+		if (ctx->bpf_index == ctx->program->len - 1)
+			break;
+		if (emit_exit(ctx) < 0)
+			goto toofar;
+		break;
+
+	default:
+invalid:
+		pr_err_once("unknown opcode %02x\n", code);
+		return -EINVAL;
+notyet:
+		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
+		return -EFAULT;
+toofar:
+		pr_info_once("*** TOO FAR: jump at %u opcode %02x ***\n",
+			     ctx->bpf_index, code);
+		return -E2BIG;
+	}
+	return 0;
+}