0021-KVM-x86-emulator-smm-use-smram-structs-in-the-common.patch 7.2 KB

  1. From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
  2. From: Maxim Levitsky <[email protected]>
  3. Date: Wed, 3 Aug 2022 18:50:06 +0300
  4. Subject: [PATCH] KVM: x86: emulator/smm: use smram structs in the common code
  5. Switch from using a raw array to 'union kvm_smram'.
  6. Signed-off-by: Maxim Levitsky <[email protected]>
  7. Signed-off-by: Thomas Lamprecht <[email protected]>
  8. ---
  9. arch/x86/include/asm/kvm_host.h | 5 +++--
  10. arch/x86/kvm/emulate.c | 12 +++++++-----
  11. arch/x86/kvm/kvm_emulate.h | 3 ++-
  12. arch/x86/kvm/svm/svm.c | 8 ++++++--
  13. arch/x86/kvm/vmx/vmx.c | 4 ++--
  14. arch/x86/kvm/x86.c | 16 ++++++++--------
  15. 6 files changed, 28 insertions(+), 20 deletions(-)
  16. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
  17. index 867febee8fc3..fb48dd8773e1 100644
  18. --- a/arch/x86/include/asm/kvm_host.h
  19. +++ b/arch/x86/include/asm/kvm_host.h
  20. @@ -200,6 +200,7 @@ typedef enum exit_fastpath_completion fastpath_t;
  21. struct x86_emulate_ctxt;
  22. struct x86_exception;
  23. +union kvm_smram;
  24. enum x86_intercept;
  25. enum x86_intercept_stage;
  26. @@ -1463,8 +1464,8 @@ struct kvm_x86_ops {
  27. void (*setup_mce)(struct kvm_vcpu *vcpu);
  28. int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
  29. - int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
  30. - int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
  31. + int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
  32. + int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
  33. void (*enable_smi_window)(struct kvm_vcpu *vcpu);
  34. int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
  35. diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
  36. index bfaf5d24bf1e..730c3e2662d6 100644
  37. --- a/arch/x86/kvm/emulate.c
  38. +++ b/arch/x86/kvm/emulate.c
  39. @@ -2567,16 +2567,18 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
  40. static int em_rsm(struct x86_emulate_ctxt *ctxt)
  41. {
  42. unsigned long cr0, cr4, efer;
  43. - char buf[512];
  44. + union kvm_smram smram;
  45. u64 smbase;
  46. int ret;
  47. + BUILD_BUG_ON(sizeof(smram) != 512);
  48. +
  49. if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
  50. return emulate_ud(ctxt);
  51. smbase = ctxt->ops->get_smbase(ctxt);
  52. - ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
  53. + ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, (void *)&smram, sizeof(smram));
  54. if (ret != X86EMUL_CONTINUE)
  55. return X86EMUL_UNHANDLEABLE;
  56. @@ -2626,15 +2628,15 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
  57. * state (e.g. enter guest mode) before loading state from the SMM
  58. * state-save area.
  59. */
  60. - if (ctxt->ops->leave_smm(ctxt, buf))
  61. + if (ctxt->ops->leave_smm(ctxt, &smram))
  62. goto emulate_shutdown;
  63. #ifdef CONFIG_X86_64
  64. if (emulator_has_longmode(ctxt))
  65. - ret = rsm_load_state_64(ctxt, buf);
  66. + ret = rsm_load_state_64(ctxt, (const char *)&smram);
  67. else
  68. #endif
  69. - ret = rsm_load_state_32(ctxt, buf);
  70. + ret = rsm_load_state_32(ctxt, (const char *)&smram);
  71. if (ret != X86EMUL_CONTINUE)
  72. goto emulate_shutdown;
  73. diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
  74. index 0b2bbcce321a..3b37b3e17379 100644
  75. --- a/arch/x86/kvm/kvm_emulate.h
  76. +++ b/arch/x86/kvm/kvm_emulate.h
  77. @@ -19,6 +19,7 @@
  78. struct x86_emulate_ctxt;
  79. enum x86_intercept;
  80. enum x86_intercept_stage;
  81. +union kvm_smram;
  82. struct x86_exception {
  83. u8 vector;
  84. @@ -233,7 +234,7 @@ struct x86_emulate_ops {
  85. unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
  86. void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
  87. - int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
  88. + int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const union kvm_smram *smram);
  89. void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
  90. int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
  91. };
  92. diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
  93. index 21f747eacc9a..d903120811b9 100644
  94. --- a/arch/x86/kvm/svm/svm.c
  95. +++ b/arch/x86/kvm/svm/svm.c
  96. @@ -4302,12 +4302,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
  97. return !svm_smi_blocked(vcpu);
  98. }
  99. -static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
  100. +static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
  101. {
  102. struct vcpu_svm *svm = to_svm(vcpu);
  103. struct kvm_host_map map_save;
  104. int ret;
  105. + char *smstate = (char *)smram;
  106. +
  107. if (!is_guest_mode(vcpu))
  108. return 0;
  109. @@ -4349,7 +4351,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
  110. return 0;
  111. }
  112. -static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
  113. +static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
  114. {
  115. struct vcpu_svm *svm = to_svm(vcpu);
  116. struct kvm_host_map map, map_save;
  117. @@ -4357,6 +4359,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
  118. struct vmcb *vmcb12;
  119. int ret;
  120. + const char *smstate = (const char *)smram;
  121. +
  122. if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
  123. return 0;
  124. diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
  125. index 417176817d80..a45a43bcc844 100644
  126. --- a/arch/x86/kvm/vmx/vmx.c
  127. +++ b/arch/x86/kvm/vmx/vmx.c
  128. @@ -7594,7 +7594,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
  129. return !is_smm(vcpu);
  130. }
  131. -static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
  132. +static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
  133. {
  134. struct vcpu_vmx *vmx = to_vmx(vcpu);
  135. @@ -7608,7 +7608,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
  136. return 0;
  137. }
  138. -static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
  139. +static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
  140. {
  141. struct vcpu_vmx *vmx = to_vmx(vcpu);
  142. int ret;
  143. diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
  144. index 07575e5eb254..2ebbb441880c 100644
  145. --- a/arch/x86/kvm/x86.c
  146. +++ b/arch/x86/kvm/x86.c
  147. @@ -7312,9 +7312,9 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
  148. }
  149. static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
  150. - const char *smstate)
  151. + const union kvm_smram *smram)
  152. {
  153. - return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
  154. + return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smram);
  155. }
  156. static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
  157. @@ -9164,25 +9164,25 @@ static void enter_smm(struct kvm_vcpu *vcpu)
  158. struct kvm_segment cs, ds;
  159. struct desc_ptr dt;
  160. unsigned long cr0;
  161. - char buf[512];
  162. + union kvm_smram smram;
  163. - memset(buf, 0, 512);
  164. + memset(smram.bytes, 0, sizeof(smram.bytes));
  165. #ifdef CONFIG_X86_64
  166. if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
  167. - enter_smm_save_state_64(vcpu, buf);
  168. + enter_smm_save_state_64(vcpu, (char *)&smram);
  169. else
  170. #endif
  171. - enter_smm_save_state_32(vcpu, buf);
  172. + enter_smm_save_state_32(vcpu, (char *)&smram);
  173. /*
  174. * Give enter_smm() a chance to make ISA-specific changes to the vCPU
  175. * state (e.g. leave guest mode) after we've saved the state into the
  176. * SMM state-save area.
  177. */
  178. - static_call(kvm_x86_enter_smm)(vcpu, buf);
  179. + static_call(kvm_x86_enter_smm)(vcpu, &smram);
  180. kvm_smm_changed(vcpu, true);
  181. - kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
  182. + kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram));
  183. if (static_call(kvm_x86_get_nmi_mask)(vcpu))
  184. vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;