From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Elena Reshetova <[email protected]>
Date: Mon, 4 Sep 2017 13:11:45 +0300
Subject: [PATCH] x86, bpf, jit: prevent speculative execution when JIT is
 enabled
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5753
CVE-2017-5715

When constant blinding is enabled (bpf_jit_harden = 1), this adds
a generic memory barrier (LFENCE for Intel, MFENCE for AMD) before
emitting x86 jitted code for the BPF_ALU(64)_OR_X and BPF_ALU64_LSH_X
(for the BPF_REG_AX register) eBPF instructions. This is needed in
order to prevent speculative execution on out-of-bounds BPF_MAP array
indexes when JIT is enabled. This way arbitrary kernel memory is not
exposed through side-channel attacks.

For more details, please see this Google Project Zero report: tbd

Signed-off-by: Elena Reshetova <[email protected]>
Signed-off-by: Tim Chen <[email protected]>
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit cf9676859a05d0d784067072e8121e63888bacc7)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/net/bpf_jit_comp.c | 33 ++++++++++++++++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4d50ced94686..879dbfefb66d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -107,6 +107,27 @@ static void bpf_flush_icache(void *start, void *end)
 	set_fs(old_fs);
 }
 
+static void emit_memory_barrier(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (bpf_jit_blinding_enabled()) {
+		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+			/* x86 LFENCE opcode 0F AE E8 */
+			EMIT3(0x0f, 0xae, 0xe8);
+		else if (boot_cpu_has(X86_FEATURE_MFENCE_RDTSC))
+			/* AMD MFENCE opcode 0F AE F0 */
+			EMIT3(0x0f, 0xae, 0xf0);
+		else
+			/* we should never end up here,
+			 * but if we do, better not to emit anything */
+			return;
+	}
+	*pprog = prog;
+	return;
+}
+
 #define CHOOSE_LOAD_FUNC(K, func) \
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
@@ -399,7 +420,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			case BPF_ADD: b2 = 0x01; break;
 			case BPF_SUB: b2 = 0x29; break;
 			case BPF_AND: b2 = 0x21; break;
-			case BPF_OR: b2 = 0x09; break;
+			case BPF_OR: b2 = 0x09; emit_memory_barrier(&prog); break;
 			case BPF_XOR: b2 = 0x31; break;
 			}
 			if (BPF_CLASS(insn->code) == BPF_ALU64)
@@ -646,6 +667,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_ALU64 | BPF_RSH | BPF_X:
 		case BPF_ALU64 | BPF_ARSH | BPF_X:
 
+			/* If blinding is enabled, each
+			 * BPF_LD | BPF_IMM | BPF_DW instruction
+			 * is converted to 4 eBPF instructions with
+			 * BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32)
+			 * always present (number 3). Detect such cases
+			 * and insert memory barriers. */
+			if ((BPF_CLASS(insn->code) == BPF_ALU64)
+			    && (BPF_OP(insn->code) == BPF_LSH)
+			    && (src_reg == BPF_REG_AX))
+				emit_memory_barrier(&prog);
 			/* check for bad case when dst_reg == rcx */
 			if (dst_reg == BPF_REG_4) {
 				/* mov r11, dst_reg */
--
2.14.2
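
Below is a minimal user-space sketch of the fence-emission decision the
patch adds, for readers who want to see the emitted bytes outside the
kernel. This is illustrative only, not kernel code: EMIT3() here, plus
the blinding_enabled and cpu_has_* flags, are simplified stand-ins for
the kernel's EMIT3 macro, bpf_jit_blinding_enabled() and the
boot_cpu_has() feature tests; only the opcode bytes (0F AE E8 for
LFENCE, 0F AE F0 for MFENCE) are taken directly from the patch above.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for the kernel's EMIT3(): append three opcode
 * bytes to the JIT image and advance the length counter. */
#define EMIT3(buf, len, b1, b2, b3)		\
	do {					\
		(buf)[(len)++] = (b1);		\
		(buf)[(len)++] = (b2);		\
		(buf)[(len)++] = (b3);		\
	} while (0)

/* Stand-ins for bpf_jit_blinding_enabled() and boot_cpu_has(). */
static bool blinding_enabled = true;
static bool cpu_has_lfence = true;	/* X86_FEATURE_LFENCE_RDTSC */
static bool cpu_has_mfence = false;	/* X86_FEATURE_MFENCE_RDTSC */

/* Mirrors emit_memory_barrier() above: prefer LFENCE, fall back to
 * MFENCE, and emit nothing if neither is available. */
static void emit_memory_barrier(unsigned char *image, int *len)
{
	if (!blinding_enabled)
		return;
	if (cpu_has_lfence)
		EMIT3(image, *len, 0x0f, 0xae, 0xe8);	/* LFENCE */
	else if (cpu_has_mfence)
		EMIT3(image, *len, 0x0f, 0xae, 0xf0);	/* MFENCE */
}

int main(void)
{
	unsigned char image[16];
	int len = 0;

	emit_memory_barrier(image, &len);
	for (int i = 0; i < len; i++)
		printf("%02x ", image[i]);
	printf("\n");	/* prints "0f ae e8" with the defaults above */
	return 0;
}

Note that the barrier is only ever emitted while constant blinding is
active; on a running system that corresponds to setting the
net.core.bpf_jit_harden sysctl to a non-zero value.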