@@ -1,97 +0,0 @@
-From: KaFai Wan <[email protected]>
-Date: Tue, 9 Sep 2025 22:46:14 +0800
-Subject: [PATCH] bpf: Allow fall back to interpreter for programs with stack
- size <= 512
-
-OpenWrt users reported a regression on ARMv6 devices after updating to the
-latest HEAD, where the tcpdump filter:
-
-tcpdump "not ether host 3c37121a2b3c and not ether host 184ecbca2a3a \
-and not ether host 14130b4d3f47 and not ether host f0f61cf440b7 \
-and not ether host a84b4dedf471 and not ether host d022be17e1d7 \
-and not ether host 5c497967208b and not ether host 706655784d5b"
-
-fails with the warning: "Kernel filter failed: No error information"
-when using the config:
- # CONFIG_BPF_JIT_ALWAYS_ON is not set
- CONFIG_BPF_JIT_DEFAULT_ON=y
-
-The issue arises because of two commits:
-1. "bpf: Fix array bounds error with may_goto" changed the default runtime to
-   __bpf_prog_ret0_warn when jit_requested = 1
-2. "bpf: Avoid __bpf_prog_ret0_warn when jit fails" returns an error when
-   jit_requested = 1 but the JIT fails
-
-This change restores the interpreter fallback capability for BPF programs
-with a stack size <= 512 bytes when the JIT fails.
-
-Reported-by: Felix Fietkau <[email protected]>
-Closes: https://lore.kernel.org/bpf/[email protected]/
-Fixes: 6ebc5030e0c5 ("bpf: Fix array bounds error with may_goto")
-Signed-off-by: KaFai Wan <[email protected]>
-Acked-by: Eduard Zingerman <[email protected]>
-Link: https://lore.kernel.org/r/[email protected]
-Signed-off-by: Alexei Starovoitov <[email protected]>
----
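Not part of the upstream patch: a minimal user-space sketch of the fallback
decision this change restores. It mirrors the (round_up(stack_depth, 32) / 32) - 1
indexing used by bpf_prog_select_interpreter() in the diff below; the names
round_up_32, INTERP_SLOTS and BPF_MAX_STACK, and the demo main(), are
illustrative only, with the 16-slot table assuming the usual 512-byte BPF
stack limit.

#include <stdio.h>

#define BPF_MAX_STACK	512			/* assumed BPF stack limit */
#define INTERP_SLOTS	(BPF_MAX_STACK / 32)	/* one interpreter per 32 bytes */

/* round a stack depth up to the next multiple of 32 */
static unsigned int round_up_32(unsigned int v)
{
	return (v + 31) & ~31u;
}

int main(void)
{
	unsigned int depths[] = { 0, 1, 33, 512, 544 };

	for (unsigned int i = 0; i < sizeof(depths) / sizeof(depths[0]); i++) {
		/* the kernel clamps the depth to at least 1 before indexing */
		unsigned int stack_depth = depths[i] ? depths[i] : 1;
		unsigned int idx = round_up_32(stack_depth) / 32 - 1;

		if (idx < INTERP_SLOTS)
			printf("stack depth %3u -> interpreter slot %u\n", depths[i], idx);
		else
			printf("stack depth %3u -> no interpreter, JIT required\n", depths[i]);
	}
	return 0;
}

With a 512-byte stack the index never exceeds 15, so every program within the
"<= 512" bound from the subject line maps to a valid interpreter slot.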
-
---- a/kernel/bpf/core.c
-+++ b/kernel/bpf/core.c
-@@ -2299,8 +2299,7 @@ static unsigned int __bpf_prog_ret0_warn
- 					  const struct bpf_insn *insn)
- {
- 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
--	 * is not working properly, or interpreter is being used when
--	 * prog->jit_requested is not 0, so warn about it!
-+	 * is not working properly, so warn about it!
- 	 */
- 	WARN_ON_ONCE(1);
- 	return 0;
-@@ -2401,8 +2400,9 @@ out:
- 	return ret;
- }
-
--static void bpf_prog_select_func(struct bpf_prog *fp)
-+static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
- {
-+	bool select_interpreter = false;
- #ifndef CONFIG_BPF_JIT_ALWAYS_ON
- 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
- 	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
-@@ -2411,15 +2411,16 @@ static void bpf_prog_select_func(struct
- 	 * But for non-JITed programs, we don't need bpf_func, so no bounds
- 	 * check needed.
- 	 */
--	if (!fp->jit_requested &&
--	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
-+	if (idx < ARRAY_SIZE(interpreters)) {
- 		fp->bpf_func = interpreters[idx];
-+		select_interpreter = true;
- 	} else {
- 		fp->bpf_func = __bpf_prog_ret0_warn;
- 	}
- #else
- 	fp->bpf_func = __bpf_prog_ret0_warn;
- #endif
-+	return select_interpreter;
- }
-
- /**
-@@ -2438,7 +2439,7 @@ struct bpf_prog *bpf_prog_select_runtime
- 	/* In case of BPF to BPF calls, verifier did all the prep
- 	 * work with regards to JITing, etc.
- 	 */
--	bool jit_needed = fp->jit_requested;
-+	bool jit_needed = false;
-
- 	if (fp->bpf_func)
- 		goto finalize;
-@@ -2447,7 +2448,8 @@ struct bpf_prog *bpf_prog_select_runtime
- 	    bpf_prog_has_kfunc_call(fp))
- 		jit_needed = true;
-
--	bpf_prog_select_func(fp);
-+	if (!bpf_prog_select_interpreter(fp))
-+		jit_needed = true;
-
- 	/* eBPF JITs can rewrite the program in case constant
- 	 * blinding is active. However, in case of error during
|