0189-x86-mm-pti-Prepare-the-x86-entry-assembly-code-for-e.patch 11 KB

  1. From e3d1463a9c719eda9d9c566dd55b287018b320c0 Mon Sep 17 00:00:00 2001
  2. From: Dave Hansen <[email protected]>
  3. Date: Mon, 4 Dec 2017 15:07:35 +0100
  4. Subject: [PATCH 189/242] x86/mm/pti: Prepare the x86/entry assembly code for
  5. entry/exit CR3 switching
  6. MIME-Version: 1.0
  7. Content-Type: text/plain; charset=UTF-8
  8. Content-Transfer-Encoding: 8bit
  9. CVE-2017-5754
  10. PAGE_TABLE_ISOLATION needs to switch to a different CR3 value when it
  11. enters the kernel and switch back when it exits. This essentially needs to
  12. be done before leaving assembly code.
  13. This is extra challenging because the switching context is tricky: the
  14. registers that can be clobbered can vary. It is also hard to store things
  15. on the stack because there is an established ABI (ptregs) or the stack is
  16. entirely unsafe to use.
  17. Establish a set of macros that allow changing to the user and kernel CR3
  18. values.
  19. Interactions with SWAPGS:
  20. Previous versions of the PAGE_TABLE_ISOLATION code relied on having
  21. per-CPU scratch space to save/restore a register that can be used for the
  22. CR3 MOV. The %GS register is used to index into our per-CPU space, so
  23. SWAPGS *had* to be done before the CR3 switch. That scratch space is gone
  24. now, but the semantic that SWAPGS must be done before the CR3 MOV is
  25. retained. This is good to keep because it is not that hard to do and it
  26. allows to do things like add per-CPU debugging information.
  27. What this does in the NMI code is worth pointing out. NMIs can interrupt
  28. *any* context and they can also be nested with NMIs interrupting other
  29. NMIs. The comments below ".Lnmi_from_kernel" explain the format of the
  30. stack during this situation. Changing the format of this stack is hard.
  31. Instead of storing the old CR3 value on the stack, this depends on the
  32. *regular* register save/restore mechanism and then uses %r14 to keep CR3
  33. during the NMI. It is callee-saved and will not be clobbered by the C NMI
  34. handlers that get called.
  35. [ PeterZ: ESPFIX optimization ]
  36. Based-on-code-from: Andy Lutomirski <[email protected]>
  37. Signed-off-by: Dave Hansen <[email protected]>
  38. Signed-off-by: Thomas Gleixner <[email protected]>
  39. Reviewed-by: Borislav Petkov <[email protected]>
  40. Reviewed-by: Thomas Gleixner <[email protected]>
  41. Cc: Andy Lutomirski <[email protected]>
  42. Cc: Boris Ostrovsky <[email protected]>
  43. Cc: Borislav Petkov <[email protected]>
  44. Cc: Brian Gerst <[email protected]>
  45. Cc: David Laight <[email protected]>
  46. Cc: Denys Vlasenko <[email protected]>
  47. Cc: Eduardo Valentin <[email protected]>
  48. Cc: Greg KH <[email protected]>
  49. Cc: H. Peter Anvin <[email protected]>
  50. Cc: Josh Poimboeuf <[email protected]>
  51. Cc: Juergen Gross <[email protected]>
  52. Cc: Linus Torvalds <[email protected]>
  53. Cc: Peter Zijlstra <[email protected]>
  54. Cc: Will Deacon <[email protected]>
  55. Cc: [email protected]
  56. Cc: [email protected]
  57. Cc: [email protected]
  58. Cc: [email protected]
  59. Cc: [email protected]
  60. Signed-off-by: Ingo Molnar <[email protected]>
  61. (cherry picked from commit 8a09317b895f073977346779df52f67c1056d81d)
  62. Signed-off-by: Andy Whitcroft <[email protected]>
  63. Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
  64. (cherry picked from commit 313dfb599cf7f8e53fc6f710d15bed60972dcd6f)
  65. Signed-off-by: Fabian Grünbichler <[email protected]>
  66. ---
  67. arch/x86/entry/calling.h | 66 ++++++++++++++++++++++++++++++++++++++++
  68. arch/x86/entry/entry_64.S | 45 +++++++++++++++++++++++----
  69. arch/x86/entry/entry_64_compat.S | 24 ++++++++++++++-
  70. 3 files changed, 128 insertions(+), 7 deletions(-)
  71. diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
  72. index 1895a685d3dd..dde6262be0a3 100644
  73. --- a/arch/x86/entry/calling.h
  74. +++ b/arch/x86/entry/calling.h
  75. @@ -1,5 +1,7 @@
  76. #include <linux/jump_label.h>
  77. #include <asm/unwind_hints.h>
  78. +#include <asm/cpufeatures.h>
  79. +#include <asm/page_types.h>
  80. /*
  81. @@ -186,6 +188,70 @@ For 32-bit we have the following conventions - kernel is built with
  82. #endif
  83. .endm
  84. +#ifdef CONFIG_PAGE_TABLE_ISOLATION
  85. +
  86. +/* PAGE_TABLE_ISOLATION PGDs are 8k. Flip bit 12 to switch between the two halves: */
  87. +#define PTI_SWITCH_MASK (1<<PAGE_SHIFT)
  88. +
  89. +.macro ADJUST_KERNEL_CR3 reg:req
  90. + /* Clear "PAGE_TABLE_ISOLATION bit", point CR3 at kernel pagetables: */
  91. + andq $(~PTI_SWITCH_MASK), \reg
  92. +.endm
  93. +
  94. +.macro ADJUST_USER_CR3 reg:req
  95. + /* Move CR3 up a page to the user page tables: */
  96. + orq $(PTI_SWITCH_MASK), \reg
  97. +.endm
  98. +
  99. +.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
  100. + mov %cr3, \scratch_reg
  101. + ADJUST_KERNEL_CR3 \scratch_reg
  102. + mov \scratch_reg, %cr3
  103. +.endm
  104. +
  105. +.macro SWITCH_TO_USER_CR3 scratch_reg:req
  106. + mov %cr3, \scratch_reg
  107. + ADJUST_USER_CR3 \scratch_reg
  108. + mov \scratch_reg, %cr3
  109. +.endm
  110. +
  111. +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
  112. + movq %cr3, \scratch_reg
  113. + movq \scratch_reg, \save_reg
  114. + /*
  115. + * Is the switch bit zero? This means the address is
  116. + * up in real PAGE_TABLE_ISOLATION patches in a moment.
  117. + */
  118. + testq $(PTI_SWITCH_MASK), \scratch_reg
  119. + jz .Ldone_\@
  120. +
  121. + ADJUST_KERNEL_CR3 \scratch_reg
  122. + movq \scratch_reg, %cr3
  123. +
  124. +.Ldone_\@:
  125. +.endm
  126. +
  127. +.macro RESTORE_CR3 save_reg:req
  128. + /*
  129. + * The CR3 write could be avoided when not changing its value,
  130. + * but would require a CR3 read *and* a scratch register.
  131. + */
  132. + movq \save_reg, %cr3
  133. +.endm
  134. +
  135. +#else /* CONFIG_PAGE_TABLE_ISOLATION=n: */
  136. +
  137. +.macro SWITCH_TO_KERNEL_CR3 scratch_reg:req
  138. +.endm
  139. +.macro SWITCH_TO_USER_CR3 scratch_reg:req
  140. +.endm
  141. +.macro SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg:req save_reg:req
  142. +.endm
  143. +.macro RESTORE_CR3 save_reg:req
  144. +.endm
  145. +
  146. +#endif
  147. +
  148. #endif /* CONFIG_X86_64 */
  149. /*
  150. diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
  151. index 03e052f02176..292ccc6ec48d 100644
  152. --- a/arch/x86/entry/entry_64.S
  153. +++ b/arch/x86/entry/entry_64.S
  154. @@ -163,6 +163,9 @@ ENTRY(entry_SYSCALL_64_trampoline)
  155. /* Stash the user RSP. */
  156. movq %rsp, RSP_SCRATCH
  157. + /* Note: using %rsp as a scratch reg. */
  158. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
  159. +
  160. /* Load the top of the task stack into RSP */
  161. movq CPU_ENTRY_AREA_tss + TSS_sp1 + CPU_ENTRY_AREA, %rsp
  162. @@ -202,6 +205,10 @@ ENTRY(entry_SYSCALL_64)
  163. */
  164. swapgs
  165. + /*
  166. + * This path is not taken when PAGE_TABLE_ISOLATION is disabled so it
  167. + * is not required to switch CR3.
  168. + */
  169. movq %rsp, PER_CPU_VAR(rsp_scratch)
  170. movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
  171. @@ -398,6 +405,7 @@ syscall_return_via_sysret:
  172. * We are on the trampoline stack. All regs except RDI are live.
  173. * We can do future final exit work right here.
  174. */
  175. + SWITCH_TO_USER_CR3 scratch_reg=%rdi
  176. popq %rdi
  177. popq %rsp
  178. @@ -735,6 +743,8 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
  179. * We can do future final exit work right here.
  180. */
  181. + SWITCH_TO_USER_CR3 scratch_reg=%rdi
  182. +
  183. /* Restore RDI. */
  184. popq %rdi
  185. SWAPGS
  186. @@ -817,7 +827,9 @@ native_irq_return_ldt:
  187. */
  188. pushq %rdi /* Stash user RDI */
  189. - SWAPGS
  190. + SWAPGS /* to kernel GS */
  191. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi /* to kernel CR3 */
  192. +
  193. movq PER_CPU_VAR(espfix_waddr), %rdi
  194. movq %rax, (0*8)(%rdi) /* user RAX */
  195. movq (1*8)(%rsp), %rax /* user RIP */
  196. @@ -833,7 +845,6 @@ native_irq_return_ldt:
  197. /* Now RAX == RSP. */
  198. andl $0xffff0000, %eax /* RAX = (RSP & 0xffff0000) */
  199. - popq %rdi /* Restore user RDI */
  200. /*
  201. * espfix_stack[31:16] == 0. The page tables are set up such that
  202. @@ -844,7 +855,11 @@ native_irq_return_ldt:
  203. * still points to an RO alias of the ESPFIX stack.
  204. */
  205. orq PER_CPU_VAR(espfix_stack), %rax
  206. - SWAPGS
  207. +
  208. + SWITCH_TO_USER_CR3 scratch_reg=%rdi /* to user CR3 */
  209. + SWAPGS /* to user GS */
  210. + popq %rdi /* Restore user RDI */
  211. +
  212. movq %rax, %rsp
  213. UNWIND_HINT_IRET_REGS offset=8
  214. @@ -957,6 +972,8 @@ ENTRY(switch_to_thread_stack)
  215. UNWIND_HINT_FUNC
  216. pushq %rdi
  217. + /* Need to switch before accessing the thread stack. */
  218. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
  219. movq %rsp, %rdi
  220. movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
  221. UNWIND_HINT sp_offset=16 sp_reg=ORC_REG_DI
  222. @@ -1256,7 +1273,11 @@ ENTRY(paranoid_entry)
  223. js 1f /* negative -> in kernel */
  224. SWAPGS
  225. xorl %ebx, %ebx
  226. -1: ret
  227. +
  228. +1:
  229. + SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
  230. +
  231. + ret
  232. END(paranoid_entry)
  233. /*
  234. @@ -1278,6 +1299,7 @@ ENTRY(paranoid_exit)
  235. testl %ebx, %ebx /* swapgs needed? */
  236. jnz .Lparanoid_exit_no_swapgs
  237. TRACE_IRQS_IRETQ
  238. + RESTORE_CR3 save_reg=%r14
  239. SWAPGS_UNSAFE_STACK
  240. jmp .Lparanoid_exit_restore
  241. .Lparanoid_exit_no_swapgs:
  242. @@ -1305,6 +1327,8 @@ ENTRY(error_entry)
  243. * from user mode due to an IRET fault.
  244. */
  245. SWAPGS
  246. + /* We have user CR3. Change to kernel CR3. */
  247. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
  248. .Lerror_entry_from_usermode_after_swapgs:
  249. /* Put us onto the real thread stack. */
  250. @@ -1351,6 +1375,7 @@ ENTRY(error_entry)
  251. * .Lgs_change's error handler with kernel gsbase.
  252. */
  253. SWAPGS
  254. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
  255. jmp .Lerror_entry_done
  256. .Lbstep_iret:
  257. @@ -1360,10 +1385,11 @@ ENTRY(error_entry)
  258. .Lerror_bad_iret:
  259. /*
  260. - * We came from an IRET to user mode, so we have user gsbase.
  261. - * Switch to kernel gsbase:
  262. + * We came from an IRET to user mode, so we have user
  263. + * gsbase and CR3. Switch to kernel gsbase and CR3:
  264. */
  265. SWAPGS
  266. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
  267. /*
  268. * Pretend that the exception came from user mode: set up pt_regs
  269. @@ -1395,6 +1421,10 @@ END(error_exit)
  270. /*
  271. * Runs on exception stack. Xen PV does not go through this path at all,
  272. * so we can use real assembly here.
  273. + *
  274. + * Registers:
  275. + * %r14: Used to save/restore the CR3 of the interrupted context
  276. + * when PAGE_TABLE_ISOLATION is in use. Do not clobber.
  277. */
  278. ENTRY(nmi)
  279. UNWIND_HINT_IRET_REGS
  280. @@ -1458,6 +1488,7 @@ ENTRY(nmi)
  281. swapgs
  282. cld
  283. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
  284. movq %rsp, %rdx
  285. movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
  286. UNWIND_HINT_IRET_REGS base=%rdx offset=8
  287. @@ -1710,6 +1741,8 @@ end_repeat_nmi:
  288. movq $-1, %rsi
  289. call do_nmi
  290. + RESTORE_CR3 save_reg=%r14
  291. +
  292. testl %ebx, %ebx /* swapgs needed? */
  293. jnz nmi_restore
  294. nmi_swapgs:
  295. diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
  296. index 2270601b6218..43f856aeee67 100644
  297. --- a/arch/x86/entry/entry_64_compat.S
  298. +++ b/arch/x86/entry/entry_64_compat.S
  299. @@ -48,6 +48,10 @@
  300. ENTRY(entry_SYSENTER_compat)
  301. /* Interrupts are off on entry. */
  302. SWAPGS
  303. +
  304. + /* We are about to clobber %rsp anyway, clobbering here is OK */
  305. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
  306. +
  307. movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
  308. /*
  309. @@ -214,6 +218,12 @@ GLOBAL(entry_SYSCALL_compat_after_hwframe)
  310. pushq $0 /* pt_regs->r14 = 0 */
  311. pushq $0 /* pt_regs->r15 = 0 */
  312. + /*
  313. + * We just saved %rdi so it is safe to clobber. It is not
  314. + * preserved during the C calls inside TRACE_IRQS_OFF anyway.
  315. + */
  316. + SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
  317. +
  318. /*
  319. * User mode is traced as though IRQs are on, and SYSENTER
  320. * turned them off.
  321. @@ -255,10 +265,22 @@ sysret32_from_system_call:
  322. * when the system call started, which is already known to user
  323. * code. We zero R8-R10 to avoid info leaks.
  324. */
  325. + movq RSP-ORIG_RAX(%rsp), %rsp
  326. +
  327. + /*
  328. + * The original userspace %rsp (RSP-ORIG_RAX(%rsp)) is stored
  329. + * on the process stack which is not mapped to userspace and
  330. + * not readable after we SWITCH_TO_USER_CR3. Delay the CR3
  331. + * switch until after after the last reference to the process
  332. + * stack.
  333. + *
  334. + * %r8 is zeroed before the sysret, thus safe to clobber.
  335. + */
  336. + SWITCH_TO_USER_CR3 scratch_reg=%r8
  337. +
  338. xorq %r8, %r8
  339. xorq %r9, %r9
  340. xorq %r10, %r10
  341. - movq RSP-ORIG_RAX(%rsp), %rsp
  342. swapgs
  343. sysretl
  344. END(entry_SYSCALL_compat)
  345. --
  346. 2.14.2