0030-KVM-x86-smm-preserve-interrupt-shadow-in-SMRAM.patch

From 810253988e9e317d6e576ebe608a5454f274b8fc Mon Sep 17 00:00:00 2001
From: Maxim Levitsky <[email protected]>
Date: Tue, 25 Oct 2022 15:47:41 +0300
Subject: [PATCH] KVM: x86: smm: preserve interrupt shadow in SMRAM

When #SMI is asserted, the CPU can be in an interrupt shadow due to sti or
mov ss.

It is not mandatory in the Intel/AMD PRM to have the #SMI blocked during the
shadow, and on top of that, since neither SVM nor VMX has true support for
an SMI window, waiting for one instruction would mean single stepping the
guest.

Instead, allow #SMI in this case, but both reset the interrupt shadow and
stash its value in SMRAM to restore it on exit from SMM.

This fixes rare failures seen mostly with Windows guests on VMX, when #SMI
falls on the sti instruction, which manifests as a VM entry failure because
EFLAGS.IF is not set while the STI interrupt shadow is still set in the
VMCS.

Signed-off-by: Maxim Levitsky <[email protected]>
---
 arch/x86/kvm/smm.c | 24 +++++++++++++++++++++---
 arch/x86/kvm/smm.h |  5 +++--
 2 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 82761384a866..46d2656937a7 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -21,6 +21,7 @@ static void check_smram_offsets(void)
 	CHECK_SMRAM32_OFFSET(smbase, 0xFEF8);
 	CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
 	CHECK_SMRAM32_OFFSET(reserved2, 0xFF00);
+	CHECK_SMRAM32_OFFSET(int_shadow, 0xFF10);
 	CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
 	CHECK_SMRAM32_OFFSET(reserved3, 0xFF18);
 	CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
@@ -65,7 +66,7 @@ static void check_smram_offsets(void)
 	CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0);
 	CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8);
 	CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0);
-	CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
+	CHECK_SMRAM64_OFFSET(int_shadow, 0xFEC4);
 	CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
 	CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
 	CHECK_SMRAM64_OFFSET(reserved2, 0xFECA);
@@ -212,6 +213,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
 	smram->cr4 = kvm_read_cr4(vcpu);
 	smram->smm_revision = 0x00020000;
 	smram->smbase = vcpu->arch.smbase;
+
+	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
 }
 
 #ifdef CONFIG_X86_64
@@ -261,6 +264,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
 	enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
 	enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
 	enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
+
+	smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
 }
 #endif
 
@@ -306,6 +311,8 @@ void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
 	kvm_rip_write(vcpu, 0x8000);
 
+	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+
 	cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
 	static_call(kvm_x86_set_cr0)(vcpu, cr0);
 	vcpu->arch.cr0 = cr0;
@@ -453,7 +460,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 {
 	struct kvm_vcpu *vcpu = ctxt->vcpu;
 	struct desc_ptr dt;
-	int i;
+	int i, r;
 
 	ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
 	ctxt->_eip = smstate->eip;
@@ -487,8 +494,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 
 	vcpu->arch.smbase = smstate->smbase;
 
-	return rsm_enter_protected_mode(vcpu, smstate->cr0,
+	r = rsm_enter_protected_mode(vcpu, smstate->cr0,
 					smstate->cr3, smstate->cr4);
+
+	if (r != X86EMUL_CONTINUE)
+		return r;
+
+	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+	ctxt->interruptibility = (u8)smstate->int_shadow;
+
+	return r;
 }
 
 #ifdef CONFIG_X86_64
@@ -539,6 +554,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
 	rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS);
 	rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS);
 
+	static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
+	ctxt->interruptibility = (u8)smstate->int_shadow;
+
 	return X86EMUL_CONTINUE;
 }
 #endif
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
index 8d96bff3f4d5..2eaec53bcc95 100644
--- a/arch/x86/kvm/smm.h
+++ b/arch/x86/kvm/smm.h
@@ -19,7 +19,8 @@ struct kvm_smram_state_32 {
 	u32 reserved1[62];
 	u32 smbase;
 	u32 smm_revision;
-	u32 reserved2[5];
+	u32 reserved2[4];
+	u32 int_shadow; /* KVM extension */
 	u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */
 	u32 reserved3[5];
 
@@ -86,7 +87,7 @@ struct kvm_smram_state_64 {
 	u64 io_restart_rsi;
 	u64 io_restart_rdi;
 	u32 io_restart_dword;
-	u32 reserved1;
+	u32 int_shadow;
 	u8 io_inst_restart;
 	u8 auto_hlt_restart;
 	u8 reserved2[6];
-- 
2.38.1
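
For readers less familiar with the flow this patch implements, the stand-alone
sketch below models the idea in plain user-space C: on SMM entry the current
interrupt shadow is stashed in the saved-state image and then cleared, and on
RSM it is restored. It is only an illustration, not KVM code; the names
toy_vcpu, toy_smram and the TOY_SHADOW_* bits are invented for this sketch.

/*
 * Toy model of the save/clear/restore flow described above, NOT kernel code.
 * All names (toy_vcpu, toy_smram, TOY_SHADOW_*) are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_SHADOW_STI    (1u << 0)	/* blocking after STI */
#define TOY_SHADOW_MOV_SS (1u << 1)	/* blocking after MOV/POP SS */

struct toy_smram {
	uint32_t int_shadow;	/* plays the role of smram->int_shadow */
};

struct toy_vcpu {
	uint32_t int_shadow;	/* live interrupt-shadow state */
};

/* SMM entry: stash the shadow in the saved-state image, then clear it so the
 * SMI handler starts without any instruction-level interrupt blocking. */
static void toy_enter_smm(struct toy_vcpu *vcpu, struct toy_smram *smram)
{
	smram->int_shadow = vcpu->int_shadow;
	vcpu->int_shadow = 0;
}

/* RSM: put back whatever shadow was active when the #SMI arrived. */
static void toy_rsm(struct toy_vcpu *vcpu, const struct toy_smram *smram)
{
	vcpu->int_shadow = smram->int_shadow;
}

int main(void)
{
	struct toy_vcpu vcpu = { .int_shadow = TOY_SHADOW_STI };
	struct toy_smram smram = { 0 };

	toy_enter_smm(&vcpu, &smram);
	printf("in SMM:    int_shadow=%u (cleared)\n", vcpu.int_shadow);

	toy_rsm(&vcpu, &smram);
	printf("after RSM: int_shadow=%u (restored)\n", vcpu.int_shadow);
	return 0;
}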