050-v5.16-06-mips-bpf-Remove-old-BPF-JIT-implementations.patch

  1. From: Johan Almbladh <[email protected]>
  2. Date: Tue, 5 Oct 2021 18:54:08 +0200
  3. Subject: [PATCH] mips: bpf: Remove old BPF JIT implementations
  4. This patch removes the old 32-bit cBPF and 64-bit eBPF JIT implementations.
  5. They are replaced by a new eBPF implementation that supports both 32-bit
  6. and 64-bit MIPS CPUs.
  7. Signed-off-by: Johan Almbladh <[email protected]>
  8. ---
  9. delete mode 100644 arch/mips/net/bpf_jit.c
  10. delete mode 100644 arch/mips/net/bpf_jit.h
  11. delete mode 100644 arch/mips/net/bpf_jit_asm.S
  12. delete mode 100644 arch/mips/net/ebpf_jit.c
  13. --- a/arch/mips/net/bpf_jit.h
  14. +++ /dev/null
  15. @@ -1,81 +0,0 @@
  16. -/* SPDX-License-Identifier: GPL-2.0-only */
  17. -/*
  18. - * Just-In-Time compiler for BPF filters on MIPS
  19. - *
  20. - * Copyright (c) 2014 Imagination Technologies Ltd.
  21. - * Author: Markos Chandras <[email protected]>
  22. - */
  23. -
  24. -#ifndef BPF_JIT_MIPS_OP_H
  25. -#define BPF_JIT_MIPS_OP_H
  26. -
  27. -/* Registers used by JIT */
  28. -#define MIPS_R_ZERO 0
  29. -#define MIPS_R_V0 2
  30. -#define MIPS_R_A0 4
  31. -#define MIPS_R_A1 5
  32. -#define MIPS_R_T4 12
  33. -#define MIPS_R_T5 13
  34. -#define MIPS_R_T6 14
  35. -#define MIPS_R_T7 15
  36. -#define MIPS_R_S0 16
  37. -#define MIPS_R_S1 17
  38. -#define MIPS_R_S2 18
  39. -#define MIPS_R_S3 19
  40. -#define MIPS_R_S4 20
  41. -#define MIPS_R_S5 21
  42. -#define MIPS_R_S6 22
  43. -#define MIPS_R_S7 23
  44. -#define MIPS_R_SP 29
  45. -#define MIPS_R_RA 31
  46. -
  47. -/* Conditional codes */
  48. -#define MIPS_COND_EQ 0x1
  49. -#define MIPS_COND_GE (0x1 << 1)
  50. -#define MIPS_COND_GT (0x1 << 2)
  51. -#define MIPS_COND_NE (0x1 << 3)
  52. -#define MIPS_COND_ALL (0x1 << 4)
  53. -/* Conditionals on X register or K immediate */
  54. -#define MIPS_COND_X (0x1 << 5)
  55. -#define MIPS_COND_K (0x1 << 6)
  56. -
  57. -#define r_ret MIPS_R_V0
  58. -
  59. -/*
  60. - * Use 2 scratch registers to avoid pipeline interlocks.
  61. - * There is no overhead during epilogue and prologue since
  62. - * any of the $s0-$s6 registers will only be preserved if
  63. - * they are going to actually be used.
  64. - */
  65. -#define r_skb_hl MIPS_R_S0 /* skb header length */
  66. -#define r_skb_data MIPS_R_S1 /* skb actual data */
  67. -#define r_off MIPS_R_S2
  68. -#define r_A MIPS_R_S3
  69. -#define r_X MIPS_R_S4
  70. -#define r_skb MIPS_R_S5
  71. -#define r_M MIPS_R_S6
  72. -#define r_skb_len MIPS_R_S7
  73. -#define r_s0 MIPS_R_T4 /* scratch reg 1 */
  74. -#define r_s1 MIPS_R_T5 /* scratch reg 2 */
  75. -#define r_tmp_imm MIPS_R_T6 /* No need to preserve this */
  76. -#define r_tmp MIPS_R_T7 /* No need to preserve this */
  77. -#define r_zero MIPS_R_ZERO
  78. -#define r_sp MIPS_R_SP
  79. -#define r_ra MIPS_R_RA
  80. -
  81. -#ifndef __ASSEMBLY__
  82. -
  83. -/* Declare ASM helpers */
  84. -
  85. -#define DECLARE_LOAD_FUNC(func) \
  86. - extern u8 func(unsigned long *skb, int offset); \
  87. - extern u8 func##_negative(unsigned long *skb, int offset); \
  88. - extern u8 func##_positive(unsigned long *skb, int offset)
  89. -
  90. -DECLARE_LOAD_FUNC(sk_load_word);
  91. -DECLARE_LOAD_FUNC(sk_load_half);
  92. -DECLARE_LOAD_FUNC(sk_load_byte);
  93. -
  94. -#endif
  95. -
  96. -#endif /* BPF_JIT_MIPS_OP_H */
  97. --- a/arch/mips/net/bpf_jit_asm.S
  98. +++ /dev/null
  99. @@ -1,285 +0,0 @@
  100. -/*
  101. - * bpf_jib_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
  102. - * compiler.
  103. - *
  104. - * Copyright (C) 2015 Imagination Technologies Ltd.
  105. - * Author: Markos Chandras <[email protected]>
  106. - *
  107. - * This program is free software; you can redistribute it and/or modify it
  108. - * under the terms of the GNU General Public License as published by the
  109. - * Free Software Foundation; version 2 of the License.
  110. - */
  111. -
  112. -#include <asm/asm.h>
  113. -#include <asm/isa-rev.h>
  114. -#include <asm/regdef.h>
  115. -#include "bpf_jit.h"
  116. -
  117. -/* ABI
  118. - *
  119. - * r_skb_hl skb header length
  120. - * r_skb_data skb data
  121. - * r_off(a1) offset register
  122. - * r_A BPF register A
  123. - * r_X PF register X
  124. - * r_skb(a0) *skb
  125. - * r_M *scratch memory
  126. - * r_skb_le skb length
  127. - * r_s0 Scratch register 0
  128. - * r_s1 Scratch register 1
  129. - *
  130. - * On entry:
  131. - * a0: *skb
  132. - * a1: offset (imm or imm + X)
  133. - *
  134. - * All non-BPF-ABI registers are free for use. On return, we only
  135. - * care about r_ret. The BPF-ABI registers are assumed to remain
  136. - * unmodified during the entire filter operation.
  137. - */
  138. -
  139. -#define skb a0
  140. -#define offset a1
  141. -#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
  142. -
  143. - /* We know better :) so prevent assembler reordering etc */
  144. - .set noreorder
  145. -
  146. -#define is_offset_negative(TYPE) \
  147. - /* If offset is negative we have more work to do */ \
  148. - slti t0, offset, 0; \
  149. - bgtz t0, bpf_slow_path_##TYPE##_neg; \
  150. - /* Be careful what follows in DS. */
  151. -
  152. -#define is_offset_in_header(SIZE, TYPE) \
  153. - /* Reading from header? */ \
  154. - addiu $r_s0, $r_skb_hl, -SIZE; \
  155. - slt t0, $r_s0, offset; \
  156. - bgtz t0, bpf_slow_path_##TYPE; \
  157. -
  158. -LEAF(sk_load_word)
  159. - is_offset_negative(word)
  160. -FEXPORT(sk_load_word_positive)
  161. - is_offset_in_header(4, word)
  162. - /* Offset within header boundaries */
  163. - PTR_ADDU t1, $r_skb_data, offset
  164. - .set reorder
  165. - lw $r_A, 0(t1)
  166. - .set noreorder
  167. -#ifdef CONFIG_CPU_LITTLE_ENDIAN
  168. -# if MIPS_ISA_REV >= 2
  169. - wsbh t0, $r_A
  170. - rotr $r_A, t0, 16
  171. -# else
  172. - sll t0, $r_A, 24
  173. - srl t1, $r_A, 24
  174. - srl t2, $r_A, 8
  175. - or t0, t0, t1
  176. - andi t2, t2, 0xff00
  177. - andi t1, $r_A, 0xff00
  178. - or t0, t0, t2
  179. - sll t1, t1, 8
  180. - or $r_A, t0, t1
  181. -# endif
  182. -#endif
  183. - jr $r_ra
  184. - move $r_ret, zero
  185. - END(sk_load_word)
  186. -
  187. -LEAF(sk_load_half)
  188. - is_offset_negative(half)
  189. -FEXPORT(sk_load_half_positive)
  190. - is_offset_in_header(2, half)
  191. - /* Offset within header boundaries */
  192. - PTR_ADDU t1, $r_skb_data, offset
  193. - lhu $r_A, 0(t1)
  194. -#ifdef CONFIG_CPU_LITTLE_ENDIAN
  195. -# if MIPS_ISA_REV >= 2
  196. - wsbh $r_A, $r_A
  197. -# else
  198. - sll t0, $r_A, 8
  199. - srl t1, $r_A, 8
  200. - andi t0, t0, 0xff00
  201. - or $r_A, t0, t1
  202. -# endif
  203. -#endif
  204. - jr $r_ra
  205. - move $r_ret, zero
  206. - END(sk_load_half)
  207. -
  208. -LEAF(sk_load_byte)
  209. - is_offset_negative(byte)
  210. -FEXPORT(sk_load_byte_positive)
  211. - is_offset_in_header(1, byte)
  212. - /* Offset within header boundaries */
  213. - PTR_ADDU t1, $r_skb_data, offset
  214. - lbu $r_A, 0(t1)
  215. - jr $r_ra
  216. - move $r_ret, zero
  217. - END(sk_load_byte)
  218. -
  219. -/*
  220. - * call skb_copy_bits:
  221. - * (prototype in linux/skbuff.h)
  222. - *
  223. - * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
  224. - *
  225. - * o32 mandates we leave 4 spaces for argument registers in case
  226. - * the callee needs to use them. Even though we don't care about
  227. - * the argument registers ourselves, we need to allocate that space
  228. - * to remain ABI compliant since the callee may want to use that space.
  229. - * We also allocate 2 more spaces for $r_ra and our return register (*to).
  230. - *
  231. - * n64 is a bit different. The *caller* will allocate the space to preserve
  232. - * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
  233. - * good reason but it does not matter that much really.
  234. - *
  235. - * (void *to) is returned in r_s0
  236. - *
  237. - */
  238. -#ifdef CONFIG_CPU_LITTLE_ENDIAN
  239. -#define DS_OFFSET(SIZE) (4 * SZREG)
  240. -#else
  241. -#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
  242. -#endif
  243. -#define bpf_slow_path_common(SIZE) \
  244. - /* Quick check. Are we within reasonable boundaries? */ \
  245. - LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
  246. - sltu $r_s0, offset, $r_s1; \
  247. - beqz $r_s0, fault; \
  248. - /* Load 4th argument in DS */ \
  249. - LONG_ADDIU a3, zero, SIZE; \
  250. - PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
  251. - PTR_LA t0, skb_copy_bits; \
  252. - PTR_S $r_ra, (5 * SZREG)($r_sp); \
  253. - /* Assign low slot to a2 */ \
  254. - PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
  255. - jalr t0; \
  256. - /* Reset our destination slot (DS but it's ok) */ \
  257. - INT_S zero, (4 * SZREG)($r_sp); \
  258. - /* \
  259. - * skb_copy_bits returns 0 on success and -EFAULT \
  260. - * on error. Our data live in a2. Do not bother with \
  261. - * our data if an error has been returned. \
  262. - */ \
  263. - /* Restore our frame */ \
  264. - PTR_L $r_ra, (5 * SZREG)($r_sp); \
  265. - INT_L $r_s0, (4 * SZREG)($r_sp); \
  266. - bltz v0, fault; \
  267. - PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
  268. - move $r_ret, zero; \
  269. -
  270. -NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
  271. - bpf_slow_path_common(4)
  272. -#ifdef CONFIG_CPU_LITTLE_ENDIAN
  273. -# if MIPS_ISA_REV >= 2
  274. - wsbh t0, $r_s0
  275. - jr $r_ra
  276. - rotr $r_A, t0, 16
  277. -# else
  278. - sll t0, $r_s0, 24
  279. - srl t1, $r_s0, 24
  280. - srl t2, $r_s0, 8
  281. - or t0, t0, t1
  282. - andi t2, t2, 0xff00
  283. - andi t1, $r_s0, 0xff00
  284. - or t0, t0, t2
  285. - sll t1, t1, 8
  286. - jr $r_ra
  287. - or $r_A, t0, t1
  288. -# endif
  289. -#else
  290. - jr $r_ra
  291. - move $r_A, $r_s0
  292. -#endif
  293. -
  294. - END(bpf_slow_path_word)
  295. -
  296. -NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
  297. - bpf_slow_path_common(2)
  298. -#ifdef CONFIG_CPU_LITTLE_ENDIAN
  299. -# if MIPS_ISA_REV >= 2
  300. - jr $r_ra
  301. - wsbh $r_A, $r_s0
  302. -# else
  303. - sll t0, $r_s0, 8
  304. - andi t1, $r_s0, 0xff00
  305. - andi t0, t0, 0xff00
  306. - srl t1, t1, 8
  307. - jr $r_ra
  308. - or $r_A, t0, t1
  309. -# endif
  310. -#else
  311. - jr $r_ra
  312. - move $r_A, $r_s0
  313. -#endif
  314. -
  315. - END(bpf_slow_path_half)
  316. -
  317. -NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
  318. - bpf_slow_path_common(1)
  319. - jr $r_ra
  320. - move $r_A, $r_s0
  321. -
  322. - END(bpf_slow_path_byte)
  323. -
  324. -/*
  325. - * Negative entry points
  326. - */
  327. - .macro bpf_is_end_of_data
  328. - li t0, SKF_LL_OFF
  329. - /* Reading link layer data? */
  330. - slt t1, offset, t0
  331. - bgtz t1, fault
  332. - /* Be careful what follows in DS. */
  333. - .endm
  334. -/*
  335. - * call skb_copy_bits:
  336. - * (prototype in linux/filter.h)
  337. - *
  338. - * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
  339. - * int k, unsigned int size)
  340. - *
  341. - * see above (bpf_slow_path_common) for ABI restrictions
  342. - */
  343. -#define bpf_negative_common(SIZE) \
  344. - PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
  345. - PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
  346. - PTR_S $r_ra, (5 * SZREG)($r_sp); \
  347. - jalr t0; \
  348. - li a2, SIZE; \
  349. - PTR_L $r_ra, (5 * SZREG)($r_sp); \
  350. - /* Check return pointer */ \
  351. - beqz v0, fault; \
  352. - PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
  353. - /* Preserve our pointer */ \
  354. - move $r_s0, v0; \
  355. - /* Set return value */ \
  356. - move $r_ret, zero; \
  357. -
  358. -bpf_slow_path_word_neg:
  359. - bpf_is_end_of_data
  360. -NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
  361. - bpf_negative_common(4)
  362. - jr $r_ra
  363. - lw $r_A, 0($r_s0)
  364. - END(sk_load_word_negative)
  365. -
  366. -bpf_slow_path_half_neg:
  367. - bpf_is_end_of_data
  368. -NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
  369. - bpf_negative_common(2)
  370. - jr $r_ra
  371. - lhu $r_A, 0($r_s0)
  372. - END(sk_load_half_negative)
  373. -
  374. -bpf_slow_path_byte_neg:
  375. - bpf_is_end_of_data
  376. -NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
  377. - bpf_negative_common(1)
  378. - jr $r_ra
  379. - lbu $r_A, 0($r_s0)
  380. - END(sk_load_byte_negative)
  381. -
  382. -fault:
  383. - jr $r_ra
  384. - addiu $r_ret, zero, 1