0020-x86-entry-64-Add-unwind-hint-annotations.patch

From 884fcb9e8befe21a962d95664b1e60377284636a Mon Sep 17 00:00:00 2001
From: Josh Poimboeuf <[email protected]>
Date: Tue, 11 Jul 2017 10:33:44 -0500
Subject: [PATCH 020/231] x86/entry/64: Add unwind hint annotations
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Add unwind hint annotations to entry_64.S. This will enable the ORC
unwinder to unwind through any location in the entry code including
syscalls, interrupts, and exceptions.

Signed-off-by: Josh Poimboeuf <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Jiri Slaby <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/b9f6d478aadf68ba57c739dcfac34ec0dc021c4c.1499786555.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 8c1f75587a18ca032da8f6376d1ed882d7095289)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit a8448e6971c1e71b22c651131d14f8be76e6d399)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/entry/Makefile   |  1 -
 arch/x86/entry/calling.h  |  5 ++++
 arch/x86/entry/entry_64.S | 71 ++++++++++++++++++++++++++++++++++++++++-------
 3 files changed, 66 insertions(+), 11 deletions(-)

diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
index 9976fcecd17e..af28a8a24366 100644
--- a/arch/x86/entry/Makefile
+++ b/arch/x86/entry/Makefile
@@ -2,7 +2,6 @@
# Makefile for the x86 low level entry code
#

-OBJECT_FILES_NON_STANDARD_entry_$(BITS).o := y
OBJECT_FILES_NON_STANDARD_entry_64_compat.o := y

CFLAGS_syscall_64.o += $(call cc-option,-Wno-override-init,)
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 05ed3d393da7..640aafebdc00 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -1,4 +1,5 @@
#include <linux/jump_label.h>
+#include <asm/unwind_hints.h>

/*

@@ -112,6 +113,7 @@ For 32-bit we have the following conventions - kernel is built with
movq %rdx, 12*8+\offset(%rsp)
movq %rsi, 13*8+\offset(%rsp)
movq %rdi, 14*8+\offset(%rsp)
+ UNWIND_HINT_REGS offset=\offset extra=0
.endm
.macro SAVE_C_REGS offset=0
SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
@@ -136,6 +138,7 @@ For 32-bit we have the following conventions - kernel is built with
movq %r12, 3*8+\offset(%rsp)
movq %rbp, 4*8+\offset(%rsp)
movq %rbx, 5*8+\offset(%rsp)
+ UNWIND_HINT_REGS offset=\offset
.endm

.macro RESTORE_EXTRA_REGS offset=0
@@ -145,6 +148,7 @@ For 32-bit we have the following conventions - kernel is built with
movq 3*8+\offset(%rsp), %r12
movq 4*8+\offset(%rsp), %rbp
movq 5*8+\offset(%rsp), %rbx
+ UNWIND_HINT_REGS offset=\offset extra=0
.endm

.macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
@@ -167,6 +171,7 @@ For 32-bit we have the following conventions - kernel is built with
.endif
movq 13*8(%rsp), %rsi
movq 14*8(%rsp), %rdi
+ UNWIND_HINT_IRET_REGS offset=16*8
.endm
.macro RESTORE_C_REGS
RESTORE_C_REGS_HELPER 1,1,1,1,1
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 184b70712545..64b233ab7cad 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
+#include <asm/frame.h>
#include <linux/err.h>

.code64
@@ -43,9 +44,10 @@

#ifdef CONFIG_PARAVIRT
ENTRY(native_usergs_sysret64)
+ UNWIND_HINT_EMPTY
swapgs
sysretq
-ENDPROC(native_usergs_sysret64)
+END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */

.macro TRACE_IRQS_IRETQ
@@ -134,6 +136,7 @@ ENDPROC(native_usergs_sysret64)
*/

ENTRY(entry_SYSCALL_64)
+ UNWIND_HINT_EMPTY
/*
* Interrupts are off on entry.
* We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
@@ -169,6 +172,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
pushq %r10 /* pt_regs->r10 */
pushq %r11 /* pt_regs->r11 */
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
+ UNWIND_HINT_REGS extra=0

/*
* If we need to do entry work or if we guess we'll need to do
@@ -223,6 +227,7 @@ entry_SYSCALL_64_fastpath:
movq EFLAGS(%rsp), %r11
RESTORE_C_REGS_EXCEPT_RCX_R11
movq RSP(%rsp), %rsp
+ UNWIND_HINT_EMPTY
USERGS_SYSRET64

1:
@@ -316,6 +321,7 @@ syscall_return_via_sysret:
/* rcx and r11 are already restored (see code above) */
RESTORE_C_REGS_EXCEPT_RCX_R11
movq RSP(%rsp), %rsp
+ UNWIND_HINT_EMPTY
USERGS_SYSRET64

opportunistic_sysret_failed:
@@ -343,6 +349,7 @@ ENTRY(stub_ptregs_64)
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
popq %rax
+ UNWIND_HINT_REGS extra=0
jmp entry_SYSCALL64_slow_path

1:
@@ -351,6 +358,7 @@ END(stub_ptregs_64)

.macro ptregs_stub func
ENTRY(ptregs_\func)
+ UNWIND_HINT_FUNC
leaq \func(%rip), %rax
jmp stub_ptregs_64
END(ptregs_\func)
@@ -367,6 +375,7 @@ END(ptregs_\func)
* %rsi: next task
*/
ENTRY(__switch_to_asm)
+ UNWIND_HINT_FUNC
/*
* Save callee-saved registers
* This must match the order in inactive_task_frame
@@ -406,6 +415,7 @@ END(__switch_to_asm)
* r12: kernel thread arg
*/
ENTRY(ret_from_fork)
+ UNWIND_HINT_EMPTY
movq %rax, %rdi
call schedule_tail /* rdi: 'prev' task parameter */

@@ -413,6 +423,7 @@ ENTRY(ret_from_fork)
jnz 1f /* kernel threads are uncommon */

2:
+ UNWIND_HINT_REGS
movq %rsp, %rdi
call syscall_return_slowpath /* returns with IRQs disabled */
TRACE_IRQS_ON /* user mode is traced as IRQS on */
@@ -440,10 +451,11 @@ END(ret_from_fork)
ENTRY(irq_entries_start)
vector=FIRST_EXTERNAL_VECTOR
.rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+ UNWIND_HINT_IRET_REGS
pushq $(~vector+0x80) /* Note: always in signed byte range */
- vector=vector+1
jmp common_interrupt
.align 8
+ vector=vector+1
.endr
END(irq_entries_start)

@@ -465,9 +477,14 @@ END(irq_entries_start)
*
* The invariant is that, if irq_count != -1, then the IRQ stack is in use.
*/
-.macro ENTER_IRQ_STACK old_rsp
+.macro ENTER_IRQ_STACK regs=1 old_rsp
DEBUG_ENTRY_ASSERT_IRQS_OFF
movq %rsp, \old_rsp
+
+ .if \regs
+ UNWIND_HINT_REGS base=\old_rsp
+ .endif
+
incl PER_CPU_VAR(irq_count)
jnz .Lirq_stack_push_old_rsp_\@

@@ -504,16 +521,24 @@ END(irq_entries_start)

.Lirq_stack_push_old_rsp_\@:
pushq \old_rsp
+
+ .if \regs
+ UNWIND_HINT_REGS indirect=1
+ .endif
.endm

/*
* Undoes ENTER_IRQ_STACK.
*/
-.macro LEAVE_IRQ_STACK
+.macro LEAVE_IRQ_STACK regs=1
DEBUG_ENTRY_ASSERT_IRQS_OFF
/* We need to be off the IRQ stack before decrementing irq_count. */
popq %rsp

+ .if \regs
+ UNWIND_HINT_REGS
+ .endif
+
/*
* As in ENTER_IRQ_STACK, irq_count == 0, we are still claiming
* the irq stack but we're not on it.
@@ -624,6 +649,7 @@ restore_c_regs_and_iret:
INTERRUPT_RETURN

ENTRY(native_iret)
+ UNWIND_HINT_IRET_REGS
/*
* Are we returning to a stack segment from the LDT? Note: in
* 64-bit mode SS:RSP on the exception stack is always valid.
@@ -696,6 +722,7 @@ native_irq_return_ldt:
orq PER_CPU_VAR(espfix_stack), %rax
SWAPGS
movq %rax, %rsp
+ UNWIND_HINT_IRET_REGS offset=8

/*
* At this point, we cannot write to the stack any more, but we can
@@ -717,6 +744,7 @@ END(common_interrupt)
*/
.macro apicinterrupt3 num sym do_sym
ENTRY(\sym)
+ UNWIND_HINT_IRET_REGS
ASM_CLAC
pushq $~(\num)
.Lcommon_\sym:
@@ -803,6 +831,8 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt

.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
ENTRY(\sym)
+ UNWIND_HINT_IRET_REGS offset=8
+
/* Sanity check */
.if \shift_ist != -1 && \paranoid == 0
.error "using shift_ist requires paranoid=1"
@@ -826,6 +856,7 @@ ENTRY(\sym)
.else
call error_entry
.endif
+ UNWIND_HINT_REGS
/* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */

.if \paranoid
@@ -923,6 +954,7 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
* edi: new selector
*/
ENTRY(native_load_gs_index)
+ FRAME_BEGIN
pushfq
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
SWAPGS
@@ -931,8 +963,9 @@ ENTRY(native_load_gs_index)
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
SWAPGS
popfq
+ FRAME_END
ret
-END(native_load_gs_index)
+ENDPROC(native_load_gs_index)
EXPORT_SYMBOL(native_load_gs_index)

_ASM_EXTABLE(.Lgs_change, bad_gs)
@@ -955,12 +988,12 @@ bad_gs:
ENTRY(do_softirq_own_stack)
pushq %rbp
mov %rsp, %rbp
- ENTER_IRQ_STACK old_rsp=%r11
+ ENTER_IRQ_STACK regs=0 old_rsp=%r11
call __do_softirq
- LEAVE_IRQ_STACK
+ LEAVE_IRQ_STACK regs=0
leaveq
ret
-END(do_softirq_own_stack)
+ENDPROC(do_softirq_own_stack)

#ifdef CONFIG_XEN
idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@@ -984,7 +1017,9 @@ ENTRY(xen_do_hypervisor_callback) /* do_hypervisor_callback(struct *pt_regs) */
* Since we don't modify %rdi, evtchn_do_upall(struct *pt_regs) will
* see the correct pointer to the pt_regs
*/
+ UNWIND_HINT_FUNC
movq %rdi, %rsp /* we don't return, adjust the stack frame */
+ UNWIND_HINT_REGS

ENTER_IRQ_STACK old_rsp=%r10
call xen_evtchn_do_upcall
@@ -1010,6 +1045,7 @@ END(xen_do_hypervisor_callback)
* with its current contents: any discrepancy means we in category 1.
*/
ENTRY(xen_failsafe_callback)
+ UNWIND_HINT_EMPTY
movl %ds, %ecx
cmpw %cx, 0x10(%rsp)
jne 1f
@@ -1029,11 +1065,13 @@ ENTRY(xen_failsafe_callback)
pushq $0 /* RIP */
pushq %r11
pushq %rcx
+ UNWIND_HINT_IRET_REGS offset=8
jmp general_protection
1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
movq (%rsp), %rcx
movq 8(%rsp), %r11
addq $0x30, %rsp
+ UNWIND_HINT_IRET_REGS
pushq $-1 /* orig_ax = -1 => not a system call */
ALLOC_PT_GPREGS_ON_STACK
SAVE_C_REGS
@@ -1079,6 +1117,7 @@ idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vec
* Return: ebx=0: need swapgs on exit, ebx=1: otherwise
*/
ENTRY(paranoid_entry)
+ UNWIND_HINT_FUNC
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
@@ -1106,6 +1145,7 @@ END(paranoid_entry)
* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
*/
ENTRY(paranoid_exit)
+ UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF_DEBUG
testl %ebx, %ebx /* swapgs needed? */
@@ -1127,6 +1167,7 @@ END(paranoid_exit)
* Return: EBX=0: came from user mode; EBX=1: otherwise
*/
ENTRY(error_entry)
+ UNWIND_HINT_FUNC
cld
SAVE_C_REGS 8
SAVE_EXTRA_REGS 8
@@ -1211,6 +1252,7 @@ END(error_entry)
* 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
*/
ENTRY(error_exit)
+ UNWIND_HINT_REGS
DISABLE_INTERRUPTS(CLBR_ANY)
TRACE_IRQS_OFF
testl %ebx, %ebx
@@ -1220,6 +1262,7 @@ END(error_exit)

/* Runs on exception stack */
ENTRY(nmi)
+ UNWIND_HINT_IRET_REGS
/*
* Fix up the exception frame if we're on Xen.
* PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
@@ -1293,11 +1336,13 @@ ENTRY(nmi)
cld
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+ UNWIND_HINT_IRET_REGS base=%rdx offset=8
pushq 5*8(%rdx) /* pt_regs->ss */
pushq 4*8(%rdx) /* pt_regs->rsp */
pushq 3*8(%rdx) /* pt_regs->flags */
pushq 2*8(%rdx) /* pt_regs->cs */
pushq 1*8(%rdx) /* pt_regs->rip */
+ UNWIND_HINT_IRET_REGS
pushq $-1 /* pt_regs->orig_ax */
pushq %rdi /* pt_regs->di */
pushq %rsi /* pt_regs->si */
@@ -1314,6 +1359,7 @@ ENTRY(nmi)
pushq %r13 /* pt_regs->r13 */
pushq %r14 /* pt_regs->r14 */
pushq %r15 /* pt_regs->r15 */
+ UNWIND_HINT_REGS
ENCODE_FRAME_POINTER

/*
@@ -1468,6 +1514,7 @@ first_nmi:
.rept 5
pushq 11*8(%rsp)
.endr
+ UNWIND_HINT_IRET_REGS

/* Everything up to here is safe from nested NMIs */

@@ -1483,6 +1530,7 @@ first_nmi:
pushq $__KERNEL_CS /* CS */
pushq $1f /* RIP */
INTERRUPT_RETURN /* continues at repeat_nmi below */
+ UNWIND_HINT_IRET_REGS
1:
#endif

@@ -1532,6 +1580,7 @@ end_repeat_nmi:
* exceptions might do.
*/
call paranoid_entry
+ UNWIND_HINT_REGS

/* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
movq %rsp, %rdi
@@ -1569,17 +1618,19 @@ nmi_restore:
END(nmi)

ENTRY(ignore_sysret)
+ UNWIND_HINT_EMPTY
mov $-ENOSYS, %eax
sysret
END(ignore_sysret)

ENTRY(rewind_stack_do_exit)
+ UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp

movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
- leaq -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%rax), %rsp
+ leaq -PTREGS_SIZE(%rax), %rsp
+ UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE

call do_exit
-1: jmp 1b
END(rewind_stack_do_exit)
--
2.14.2
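
Note: the UNWIND_HINT_EMPTY / UNWIND_HINT_REGS / UNWIND_HINT_IRET_REGS / UNWIND_HINT_FUNC annotations added throughout this patch come from asm/unwind_hints.h (pulled into calling.h above) and assemble to no instructions. Each one drops a small record into a discarded ELF section that objtool reads at build time, telling it what the stack looks like at that point so it can generate ORC unwind data for this hand-written entry code. The sketch below shows roughly what such a macro expands to; it is illustrative only, with a made-up macro name and an approximated field layout, not a copy of the kernel header:

/* Illustrative sketch only -- approximates asm/unwind_hints.h, not meant to be applied. */
.macro UNWIND_HINT_SKETCH sp_reg=0 sp_offset=0 type=0
.Lhint_\@:                                      /* label the code address being annotated */
	.pushsection .discard.unwind_hints      /* side-band data, dropped from the final image */
	.long	.Lhint_\@ - .                   /* ip: location this hint applies to */
	.short	\sp_offset                      /* offset from \sp_reg to the hinted frame */
	.byte	\sp_reg                         /* register the frame is based on (SP, BP, ...) */
	.byte	\type                           /* frame layout: call frame, pt_regs, iret frame */
	.balign	4
	.popsection
.endm

In those terms, an annotation such as "UNWIND_HINT_REGS extra=0" after the register pushes in entry_SYSCALL_64 records that a partial pt_regs frame sits at the current %rsp, which is what lets the ORC unwinder walk through the syscall, interrupt, and exception paths mentioned in the commit message.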