0024-KVM-x86-smm-use-smram-structs-in-the-common-code.patch 6.1 KB

  1. From e13349f01bc9b4b94dd995d60fad196d3074a868 Mon Sep 17 00:00:00 2001
  2. From: Maxim Levitsky <[email protected]>
  3. Date: Tue, 25 Oct 2022 15:47:35 +0300
  4. Subject: [PATCH] KVM: x86: smm: use smram structs in the common code
  5. Use kvm_smram union instead of raw arrays in the common smm code.
  6. Signed-off-by: Maxim Levitsky <[email protected]>
  7. ---
  8. arch/x86/include/asm/kvm_host.h | 5 +++--
  9. arch/x86/kvm/smm.c | 27 ++++++++++++++-------------
  10. arch/x86/kvm/svm/svm.c | 8 ++++++--
  11. arch/x86/kvm/vmx/vmx.c | 4 ++--
  12. 4 files changed, 25 insertions(+), 19 deletions(-)
  13. diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
  14. index 87ee187b3f26..84c0b441a336 100644
  15. --- a/arch/x86/include/asm/kvm_host.h
  16. +++ b/arch/x86/include/asm/kvm_host.h
  17. @@ -206,6 +206,7 @@ typedef enum exit_fastpath_completion fastpath_t;
  18. struct x86_emulate_ctxt;
  19. struct x86_exception;
  20. +union kvm_smram;
  21. enum x86_intercept;
  22. enum x86_intercept_stage;
  23. @@ -1604,8 +1605,8 @@ struct kvm_x86_ops {
  24. #ifdef CONFIG_KVM_SMM
  25. int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
  26. - int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
  27. - int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
  28. + int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
  29. + int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
  30. void (*enable_smi_window)(struct kvm_vcpu *vcpu);
  31. #endif
  32. diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
  33. index 01dab9fc3ab4..e714d43b746c 100644
  34. --- a/arch/x86/kvm/smm.c
  35. +++ b/arch/x86/kvm/smm.c
  36. @@ -288,17 +288,18 @@ void enter_smm(struct kvm_vcpu *vcpu)
  37. struct kvm_segment cs, ds;
  38. struct desc_ptr dt;
  39. unsigned long cr0;
  40. - char buf[512];
  41. + union kvm_smram smram;
  42. check_smram_offsets();
  43. - memset(buf, 0, 512);
  44. + memset(smram.bytes, 0, sizeof(smram.bytes));
  45. +
  46. #ifdef CONFIG_X86_64
  47. if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
  48. - enter_smm_save_state_64(vcpu, buf);
  49. + enter_smm_save_state_64(vcpu, smram.bytes);
  50. else
  51. #endif
  52. - enter_smm_save_state_32(vcpu, buf);
  53. + enter_smm_save_state_32(vcpu, smram.bytes);
  54. /*
  55. * Give enter_smm() a chance to make ISA-specific changes to the vCPU
  56. @@ -308,12 +309,12 @@ void enter_smm(struct kvm_vcpu *vcpu)
  57. * Kill the VM in the unlikely case of failure, because the VM
  58. * can be in undefined state in this case.
  59. */
  60. - if (static_call(kvm_x86_enter_smm)(vcpu, buf))
  61. + if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
  62. goto error;
  63. kvm_smm_changed(vcpu, true);
  64. - if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)))
  65. + if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)))
  66. goto error;
  67. if (static_call(kvm_x86_get_nmi_mask)(vcpu))
  68. @@ -473,7 +474,7 @@ static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
  69. }
  70. static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
  71. - const char *smstate)
  72. + u8 *smstate)
  73. {
  74. struct kvm_vcpu *vcpu = ctxt->vcpu;
  75. struct kvm_segment desc;
  76. @@ -534,7 +535,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
  77. #ifdef CONFIG_X86_64
  78. static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
  79. - const char *smstate)
  80. + u8 *smstate)
  81. {
  82. struct kvm_vcpu *vcpu = ctxt->vcpu;
  83. struct kvm_segment desc;
  84. @@ -606,13 +607,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
  85. {
  86. struct kvm_vcpu *vcpu = ctxt->vcpu;
  87. unsigned long cr0, cr4, efer;
  88. - char buf[512];
  89. + union kvm_smram smram;
  90. u64 smbase;
  91. int ret;
  92. smbase = vcpu->arch.smbase;
  93. - ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, buf, sizeof(buf));
  94. + ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram));
  95. if (ret < 0)
  96. return X86EMUL_UNHANDLEABLE;
  97. @@ -666,13 +667,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
  98. * state (e.g. enter guest mode) before loading state from the SMM
  99. * state-save area.
  100. */
  101. - if (static_call(kvm_x86_leave_smm)(vcpu, buf))
  102. + if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
  103. return X86EMUL_UNHANDLEABLE;
  104. #ifdef CONFIG_X86_64
  105. if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
  106. - return rsm_load_state_64(ctxt, buf);
  107. + return rsm_load_state_64(ctxt, smram.bytes);
  108. else
  109. #endif
  110. - return rsm_load_state_32(ctxt, buf);
  111. + return rsm_load_state_32(ctxt, smram.bytes);
  112. }
  113. diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
  114. index e69390909d08..2a61b8c50ab4 100644
  115. --- a/arch/x86/kvm/svm/svm.c
  116. +++ b/arch/x86/kvm/svm/svm.c
  117. @@ -4437,12 +4437,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
  118. return 1;
  119. }
  120. -static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
  121. +static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
  122. {
  123. struct vcpu_svm *svm = to_svm(vcpu);
  124. struct kvm_host_map map_save;
  125. int ret;
  126. + char *smstate = (char *)smram;
  127. +
  128. if (!is_guest_mode(vcpu))
  129. return 0;
  130. @@ -4484,7 +4486,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
  131. return 0;
  132. }
  133. -static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
  134. +static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
  135. {
  136. struct vcpu_svm *svm = to_svm(vcpu);
  137. struct kvm_host_map map, map_save;
  138. @@ -4492,6 +4494,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
  139. struct vmcb *vmcb12;
  140. int ret;
  141. + const char *smstate = (const char *)smram;
  142. +
  143. if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
  144. return 0;
  145. diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
  146. index 8cfb40cfad10..480ff79071c6 100644
  147. --- a/arch/x86/kvm/vmx/vmx.c
  148. +++ b/arch/x86/kvm/vmx/vmx.c
  149. @@ -7922,7 +7922,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
  150. return !is_smm(vcpu);
  151. }
  152. -static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
  153. +static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
  154. {
  155. struct vcpu_vmx *vmx = to_vmx(vcpu);
  156. @@ -7943,7 +7943,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
  157. return 0;
  158. }
  159. -static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
  160. +static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
  161. {
  162. struct vcpu_vmx *vmx = to_vmx(vcpu);
  163. int ret;
  164. --
  165. 2.38.1