From 711a55c9d58955a2bfca89cd25935ca607e49bc0 Mon Sep 17 00:00:00 2001
From: Jim Mattson <[email protected]>
Date: Wed, 3 Jan 2018 14:31:38 -0800
Subject: [PATCH 232/241] kvm: vmx: Scrub hardware GPRs at VM-exit
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Guest GPR values are live in the hardware GPRs at VM-exit. Do not
leave any guest values in hardware GPRs after the guest GPR values are
saved to the vcpu_vmx structure.

This is a partial mitigation for CVE 2017-5715 and CVE 2017-5753.
Specifically, it defeats the Project Zero PoC for CVE 2017-5715.

Suggested-by: Eric Northup <[email protected]>
Signed-off-by: Jim Mattson <[email protected]>
Reviewed-by: Eric Northup <[email protected]>
Reviewed-by: Benjamin Serebrin <[email protected]>
Reviewed-by: Andrew Honig <[email protected]>
[Paolo: Add AMD bits, Signed-off-by: Tom Lendacky <[email protected]>]
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/kvm/svm.c | 19 +++++++++++++++++++
 arch/x86/kvm/vmx.c | 14 +++++++++++++-
 2 files changed, 32 insertions(+), 1 deletion(-)

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index af09baa3d736..92cd94d51e1f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4924,6 +4924,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r13, %c[r13](%[svm]) \n\t"
 		"mov %%r14, %c[r14](%[svm]) \n\t"
 		"mov %%r15, %c[r15](%[svm]) \n\t"
+#endif
+		/*
+		 * Clear host registers marked as clobbered to prevent
+		 * speculative use.
+		 */
+		"xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
+		"xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
+		"xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
+		"xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
+		"xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
+#ifdef CONFIG_X86_64
+		"xor %%r8, %%r8 \n\t"
+		"xor %%r9, %%r9 \n\t"
+		"xor %%r10, %%r10 \n\t"
+		"xor %%r11, %%r11 \n\t"
+		"xor %%r12, %%r12 \n\t"
+		"xor %%r13, %%r13 \n\t"
+		"xor %%r14, %%r14 \n\t"
+		"xor %%r15, %%r15 \n\t"
 #endif
 		"pop %%" _ASM_BP
 		:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d61986a36575..9b4256fd589a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -9140,6 +9140,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
+		"setbe %c[fail](%0)\n\t"
 		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
 		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
 		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9156,12 +9157,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r13, %c[r13](%0) \n\t"
 		"mov %%r14, %c[r14](%0) \n\t"
 		"mov %%r15, %c[r15](%0) \n\t"
+		"xor %%r8d, %%r8d \n\t"
+		"xor %%r9d, %%r9d \n\t"
+		"xor %%r10d, %%r10d \n\t"
+		"xor %%r11d, %%r11d \n\t"
+		"xor %%r12d, %%r12d \n\t"
+		"xor %%r13d, %%r13d \n\t"
+		"xor %%r14d, %%r14d \n\t"
+		"xor %%r15d, %%r15d \n\t"
 #endif
 		"mov %%cr2, %%" _ASM_AX " \n\t"
 		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
+		"xor %%eax, %%eax \n\t"
+		"xor %%ebx, %%ebx \n\t"
+		"xor %%esi, %%esi \n\t"
+		"xor %%edi, %%edi \n\t"
 		"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
-		"setbe %c[fail](%0) \n\t"
 		".pushsection .rodata \n\t"
 		".global vmx_return \n\t"
 		"vmx_return: " _ASM_PTR " 2b \n\t"
-- 
2.14.2