0022-x86-xen-64-Rearrange-the-SYSCALL-entries.patch 4.9 KB

(line-number gutter, lines 1–152, garbled by extraction)
  1. From c63a9850ba744d9871b4ca2dad11588db5d670a2 Mon Sep 17 00:00:00 2001
  2. From: Andy Lutomirski <[email protected]>
  3. Date: Mon, 7 Aug 2017 20:59:21 -0700
  4. Subject: [PATCH 022/231] x86/xen/64: Rearrange the SYSCALL entries
  5. MIME-Version: 1.0
  6. Content-Type: text/plain; charset=UTF-8
  7. Content-Transfer-Encoding: 8bit
  8. CVE-2017-5754
  9. Xen's raw SYSCALL entries are much less weird than native. Rather
  10. than fudging them to look like native entries, use the Xen-provided
  11. stack frame directly.
  12. This lets us eliminate entry_SYSCALL_64_after_swapgs and two uses of
  13. the SWAPGS_UNSAFE_STACK paravirt hook. The SYSENTER code would
  14. benefit from similar treatment.
  15. This makes one change to the native code path: the compat
  16. instruction that clears the high 32 bits of %rax is moved slightly
  17. later. I'd be surprised if this affects performance at all.
  18. Tested-by: Juergen Gross <[email protected]>
  19. Signed-off-by: Andy Lutomirski <[email protected]>
  20. Reviewed-by: Juergen Gross <[email protected]>
  21. Cc: Boris Ostrovsky <[email protected]>
  22. Cc: Borislav Petkov <[email protected]>
  23. Cc: Linus Torvalds <[email protected]>
  24. Cc: Peter Zijlstra <[email protected]>
  25. Cc: Thomas Gleixner <[email protected]>
  26. Cc: [email protected]
  27. Link: http://lkml.kernel.org/r/7c88ed36805d36841ab03ec3b48b4122c4418d71.1502164668.git.luto@kernel.org
  28. Signed-off-by: Ingo Molnar <[email protected]>
  29. (cherry picked from commit 8a9949bc71a71b3dd633255ebe8f8869b1f73474)
  30. Signed-off-by: Andy Whitcroft <[email protected]>
  31. Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
  32. (cherry picked from commit b8cec41ee5f30df5032cfe8c86103f7d92a89590)
  33. Signed-off-by: Fabian Grünbichler <[email protected]>
  34. ---
  35. arch/x86/entry/entry_64.S | 9 ++-------
  36. arch/x86/entry/entry_64_compat.S | 7 +++----
  37. arch/x86/xen/xen-asm_64.S | 23 +++++++++--------------
  38. 3 files changed, 14 insertions(+), 25 deletions(-)
  39. diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
  40. index 64b233ab7cad..4dbb336a1fdd 100644
  41. --- a/arch/x86/entry/entry_64.S
  42. +++ b/arch/x86/entry/entry_64.S
  43. @@ -142,14 +142,8 @@ ENTRY(entry_SYSCALL_64)
  44. * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
  45. * it is too small to ever cause noticeable irq latency.
  46. */
  47. - SWAPGS_UNSAFE_STACK
  48. - /*
  49. - * A hypervisor implementation might want to use a label
  50. - * after the swapgs, so that it can do the swapgs
  51. - * for the guest and jump here on syscall.
  52. - */
  53. -GLOBAL(entry_SYSCALL_64_after_swapgs)
  54. + swapgs
  55. movq %rsp, PER_CPU_VAR(rsp_scratch)
  56. movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
  57. @@ -161,6 +155,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
  58. pushq %r11 /* pt_regs->flags */
  59. pushq $__USER_CS /* pt_regs->cs */
  60. pushq %rcx /* pt_regs->ip */
  61. +GLOBAL(entry_SYSCALL_64_after_hwframe)
  62. pushq %rax /* pt_regs->orig_ax */
  63. pushq %rdi /* pt_regs->di */
  64. pushq %rsi /* pt_regs->si */
  65. diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
  66. index e1721dafbcb1..5314d7b8e5ad 100644
  67. --- a/arch/x86/entry/entry_64_compat.S
  68. +++ b/arch/x86/entry/entry_64_compat.S
  69. @@ -183,21 +183,20 @@ ENDPROC(entry_SYSENTER_compat)
  70. */
  71. ENTRY(entry_SYSCALL_compat)
  72. /* Interrupts are off on entry. */
  73. - SWAPGS_UNSAFE_STACK
  74. + swapgs
  75. /* Stash user ESP and switch to the kernel stack. */
  76. movl %esp, %r8d
  77. movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
  78. - /* Zero-extending 32-bit regs, do not remove */
  79. - movl %eax, %eax
  80. -
  81. /* Construct struct pt_regs on stack */
  82. pushq $__USER32_DS /* pt_regs->ss */
  83. pushq %r8 /* pt_regs->sp */
  84. pushq %r11 /* pt_regs->flags */
  85. pushq $__USER32_CS /* pt_regs->cs */
  86. pushq %rcx /* pt_regs->ip */
  87. +GLOBAL(entry_SYSCALL_compat_after_hwframe)
  88. + movl %eax, %eax /* discard orig_ax high bits */
  89. pushq %rax /* pt_regs->orig_ax */
  90. pushq %rdi /* pt_regs->di */
  91. pushq %rsi /* pt_regs->si */
  92. diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
  93. index c3df43141e70..a8a4f4c460a6 100644
  94. --- a/arch/x86/xen/xen-asm_64.S
  95. +++ b/arch/x86/xen/xen-asm_64.S
  96. @@ -82,34 +82,29 @@ RELOC(xen_sysret64, 1b+1)
  97. * rip
  98. * r11
  99. * rsp->rcx
  100. - *
  101. - * In all the entrypoints, we undo all that to make it look like a
  102. - * CPU-generated syscall/sysenter and jump to the normal entrypoint.
  103. */
  104. -.macro undo_xen_syscall
  105. - mov 0*8(%rsp), %rcx
  106. - mov 1*8(%rsp), %r11
  107. - mov 5*8(%rsp), %rsp
  108. -.endm
  109. -
  110. /* Normal 64-bit system call target */
  111. ENTRY(xen_syscall_target)
  112. - undo_xen_syscall
  113. - jmp entry_SYSCALL_64_after_swapgs
  114. + popq %rcx
  115. + popq %r11
  116. + jmp entry_SYSCALL_64_after_hwframe
  117. ENDPROC(xen_syscall_target)
  118. #ifdef CONFIG_IA32_EMULATION
  119. /* 32-bit compat syscall target */
  120. ENTRY(xen_syscall32_target)
  121. - undo_xen_syscall
  122. - jmp entry_SYSCALL_compat
  123. + popq %rcx
  124. + popq %r11
  125. + jmp entry_SYSCALL_compat_after_hwframe
  126. ENDPROC(xen_syscall32_target)
  127. /* 32-bit compat sysenter target */
  128. ENTRY(xen_sysenter_target)
  129. - undo_xen_syscall
  130. + mov 0*8(%rsp), %rcx
  131. + mov 1*8(%rsp), %r11
  132. + mov 5*8(%rsp), %rsp
  133. jmp entry_SYSENTER_compat
  134. ENDPROC(xen_sysenter_target)
  135. --
  136. 2.14.2