0274-x86-enter-Use-IBRS-on-syscall-and-interrupts.patch 4.8 KB

  1. From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
  2. From: Tim Chen <[email protected]>
  3. Date: Fri, 13 Oct 2017 14:25:00 -0700
  4. Subject: [PATCH] x86/enter: Use IBRS on syscall and interrupts
  5. MIME-Version: 1.0
  6. Content-Type: text/plain; charset=UTF-8
  7. Content-Transfer-Encoding: 8bit
  8. CVE-2017-5753
  9. CVE-2017-5715
  10. Set IBRS upon kernel entrance via syscall and interrupts. Clear it upon exit.
  11. Signed-off-by: Tim Chen <[email protected]>
  12. Signed-off-by: Andy Whitcroft <[email protected]>
  13. Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
  14. (cherry picked from commit d7eb5f9ed26dbdc39df793491bdcc9f80d41325e)
  15. Signed-off-by: Fabian Grünbichler <[email protected]>
  16. ---
  17. arch/x86/entry/entry_64.S | 18 +++++++++++++++++-
  18. arch/x86/entry/entry_64_compat.S | 7 +++++++
  19. 2 files changed, 24 insertions(+), 1 deletion(-)
  20. diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
  21. index b48f2c78a9bf..5f898c3c1dad 100644
  22. --- a/arch/x86/entry/entry_64.S
  23. +++ b/arch/x86/entry/entry_64.S
  24. @@ -36,6 +36,7 @@
  25. #include <asm/pgtable_types.h>
  26. #include <asm/export.h>
  27. #include <asm/frame.h>
  28. +#include <asm/spec_ctrl.h>
  29. #include <linux/err.h>
  30. #include "calling.h"
  31. @@ -235,6 +236,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
  32. sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
  33. UNWIND_HINT_REGS extra=0
  34. + ENABLE_IBRS
  35. +
  36. /*
  37. * If we need to do entry work or if we guess we'll need to do
  38. * exit work, go straight to the slow path.
  39. @@ -286,6 +289,7 @@ entry_SYSCALL_64_fastpath:
  40. TRACE_IRQS_ON /* user mode is traced as IRQs on */
  41. movq RIP(%rsp), %rcx
  42. movq EFLAGS(%rsp), %r11
  43. + DISABLE_IBRS
  44. addq $6*8, %rsp /* skip extra regs -- they were preserved */
  45. UNWIND_HINT_EMPTY
  46. jmp .Lpop_c_regs_except_rcx_r11_and_sysret
  47. @@ -379,6 +383,8 @@ return_from_SYSCALL_64:
  48. * perf profiles. Nothing jumps here.
  49. */
  50. syscall_return_via_sysret:
  51. + DISABLE_IBRS
  52. +
  53. /* rcx and r11 are already restored (see code above) */
  54. UNWIND_HINT_EMPTY
  55. POP_EXTRA_REGS
  56. @@ -660,6 +666,10 @@ END(irq_entries_start)
  57. /*
  58. * IRQ from user mode.
  59. *
  60. + */
  61. + ENABLE_IBRS
  62. +
  63. + /*
  64. * We need to tell lockdep that IRQs are off. We can't do this until
  65. * we fix gsbase, and we should do it before enter_from_user_mode
  66. * (which can take locks). Since TRACE_IRQS_OFF idempotent,
  67. @@ -743,7 +753,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
  68. * We are on the trampoline stack. All regs except RDI are live.
  69. * We can do future final exit work right here.
  70. */
  71. -
  72. + DISABLE_IBRS
  73. SWITCH_TO_USER_CR3_STACK scratch_reg=%rdi
  74. /* Restore RDI. */
  75. @@ -1277,6 +1287,7 @@ ENTRY(paranoid_entry)
  76. 1:
  77. SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
  78. + ENABLE_IBRS_CLOBBER
  79. ret
  80. END(paranoid_entry)
  81. @@ -1331,6 +1342,8 @@ ENTRY(error_entry)
  82. /* We have user CR3. Change to kernel CR3. */
  83. SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
  84. + ENABLE_IBRS
  85. +
  86. .Lerror_entry_from_usermode_after_swapgs:
  87. /* Put us onto the real thread stack. */
  88. popq %r12 /* save return addr in %12 */
  89. @@ -1377,6 +1390,7 @@ ENTRY(error_entry)
  90. */
  91. SWAPGS
  92. SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
  93. + ENABLE_IBRS_CLOBBER
  94. jmp .Lerror_entry_done
  95. .Lbstep_iret:
  96. @@ -1391,6 +1405,7 @@ ENTRY(error_entry)
  97. */
  98. SWAPGS
  99. SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
  100. + ENABLE_IBRS_CLOBBER
  101. /*
  102. * Pretend that the exception came from user mode: set up pt_regs
  103. @@ -1518,6 +1533,7 @@ ENTRY(nmi)
  104. UNWIND_HINT_REGS
  105. ENCODE_FRAME_POINTER
  106. + ENABLE_IBRS
  107. /*
  108. * At this point we no longer need to worry about stack damage
  109. * due to nesting -- we're on the normal thread stack and we're
  110. diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
  111. index 2b5e7685823c..ee4f3edb3c50 100644
  112. --- a/arch/x86/entry/entry_64_compat.S
  113. +++ b/arch/x86/entry/entry_64_compat.S
  114. @@ -13,6 +13,7 @@
  115. #include <asm/irqflags.h>
  116. #include <asm/asm.h>
  117. #include <asm/smap.h>
  118. +#include <asm/spec_ctrl.h>
  119. #include <linux/linkage.h>
  120. #include <linux/err.h>
  121. @@ -95,6 +96,8 @@ ENTRY(entry_SYSENTER_compat)
  122. pushq $0 /* pt_regs->r15 = 0 */
  123. cld
  124. + ENABLE_IBRS
  125. +
  126. /*
  127. * SYSENTER doesn't filter flags, so we need to clear NT and AC
  128. * ourselves. To save a few cycles, we can check whether
  129. @@ -194,6 +197,7 @@ ENTRY(entry_SYSCALL_compat)
  130. /* Use %rsp as scratch reg. User ESP is stashed in r8 */
  131. SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
  132. + ENABLE_IBRS
  133. /* Switch to the kernel stack */
  134. movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
  135. @@ -249,6 +253,7 @@ sysret32_from_system_call:
  136. popq %rsi /* pt_regs->si */
  137. popq %rdi /* pt_regs->di */
  138. + DISABLE_IBRS
  139. /*
  140. * USERGS_SYSRET32 does:
  141. * GSBASE = user's GS base
  142. @@ -348,6 +353,8 @@ ENTRY(entry_INT80_compat)
  143. pushq %r15 /* pt_regs->r15 */
  144. cld
  145. + ENABLE_IBRS
  146. +
  147. /*
  148. * User mode is traced as though IRQs are on, and the interrupt
  149. * gate turned them off.
  150. --
  151. 2.14.2