0262-x86-bpf-jit-prevent-speculative-execution-when-JIT-i.patch 3.3 KB

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Elena Reshetova <[email protected]>
Date: Mon, 4 Sep 2017 13:11:45 +0300
Subject: [PATCH] x86, bpf, jit: prevent speculative execution when JIT is
 enabled
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5753
CVE-2017-5715

When constant blinding is enabled (bpf_jit_harden = 1), this adds
a generic memory barrier (lfence for Intel, mfence for AMD) before
emitting the x86 jitted code for the BPF_ALU(64)_OR_X and
BPF_ALU_LSH_X (for the BPF_REG_AX register) eBPF instructions. This
is needed in order to prevent speculative execution on out-of-bounds
BPF_MAP array indexes when JIT is enabled. This way arbitrary kernel
memory is not exposed through side-channel attacks.

For more details, please see this Google Project Zero report: tbd

Signed-off-by: Elena Reshetova <[email protected]>
Signed-off-by: Tim Chen <[email protected]>
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit cf9676859a05d0d784067072e8121e63888bacc7)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
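
Note: the "out-of-bounds BPF_MAP array indexes" mentioned above are the
classic bounds-check-bypass pattern. As a rough sketch only, the array map
lookup of that era looked approximately like this (paraphrased from
kernel/bpf/arraymap.c; exact code differs by kernel version):

	static void *array_map_lookup_elem(struct bpf_map *map, void *key)
	{
		struct bpf_array *array = container_of(map, struct bpf_array, map);
		u32 index = *(u32 *)key;

		/* architectural bounds check */
		if (unlikely(index >= array->map.max_entries))
			return NULL;

		/* the CPU may still compute and dereference this address
		 * speculatively with an out-of-bounds index; per the commit
		 * message, the emitted fence is intended to cut off that
		 * speculation in JITed programs */
		return array->value + array->elem_size * index;
	}
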
 arch/x86/net/bpf_jit_comp.c | 33 ++++++++++++++++++++++++++++++++-
 1 file changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4d50ced94686..879dbfefb66d 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -107,6 +107,27 @@ static void bpf_flush_icache(void *start, void *end)
 	set_fs(old_fs);
 }
 
+static void emit_memory_barrier(u8 **pprog)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	if (bpf_jit_blinding_enabled()) {
+		if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
+			/* x86 LFENCE opcode 0F AE E8 */
+			EMIT3(0x0f, 0xae, 0xe8);
+		else if (boot_cpu_has(X86_FEATURE_MFENCE_RDTSC))
+			/* AMD MFENCE opcode 0F AE F0 */
+			EMIT3(0x0f, 0xae, 0xf0);
+		else
+			/* we should never end up here,
+			 * but if we do, better not to emit anything */
+			return;
+	}
+	*pprog = prog;
+	return;
+}
+
 #define CHOOSE_LOAD_FUNC(K, func) \
 	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
@@ -399,7 +420,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 			case BPF_ADD: b2 = 0x01; break;
 			case BPF_SUB: b2 = 0x29; break;
 			case BPF_AND: b2 = 0x21; break;
-			case BPF_OR: b2 = 0x09; break;
+			case BPF_OR: b2 = 0x09; emit_memory_barrier(&prog); break;
 			case BPF_XOR: b2 = 0x31; break;
 			}
 			if (BPF_CLASS(insn->code) == BPF_ALU64)
@@ -646,6 +667,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 		case BPF_ALU64 | BPF_RSH | BPF_X:
 		case BPF_ALU64 | BPF_ARSH | BPF_X:
+			/* If blinding is enabled, each
+			 * BPF_LD | BPF_IMM | BPF_DW instruction
+			 * is converted to 4 eBPF instructions with
+			 * BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32)
+			 * always present (number 3). Detect such cases
+			 * and insert memory barriers. */
+			if ((BPF_CLASS(insn->code) == BPF_ALU64)
+			    && (BPF_OP(insn->code) == BPF_LSH)
+			    && (src_reg == BPF_REG_AX))
+				emit_memory_barrier(&prog);
 			/* check for bad case when dst_reg == rcx */
 			if (dst_reg == BPF_REG_4) {
 				/* mov r11, dst_reg */
-- 
2.14.2
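
Note: the constant-blinding rewrite that both hunks key on turns each
BPF_LD | BPF_IMM | BPF_DW (and its zero-coded second half) into a sequence
built through BPF_REG_AX. A rough sketch only, assuming <linux/filter.h>;
the function name and the parameters dst_reg, rnd, lower and upper are
placeholders for the destination register, the random blind and the two
32-bit halves of the original immediate (the real rewrite lives in
bpf_jit_blind_insn() and varies by kernel version):

	static void sketch_blinded_ld_imm64(struct bpf_insn *to, u8 dst_reg,
					    u32 rnd, u32 lower, u32 upper)
	{
		/* first half of BPF_LD | BPF_IMM | BPF_DW */
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ upper);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd);
		*to++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);	/* "number 3" */
		*to++ = BPF_ALU64_REG(BPF_MOV, dst_reg, BPF_REG_AX);
		/* second half: the low word is merged back with an ALU64 OR
		 * through BPF_REG_AX, the BPF_ALU64 | BPF_OR | BPF_X case
		 * hooked in the first hunk */
		*to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ lower);
		*to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd);
		*to++ = BPF_ALU64_REG(BPF_OR, dst_reg, BPF_REG_AX);
	}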