0008-KVM-x86-start-moving-SMM-related-functions-to-new-fi.patch

From c0c8e5258f6aa8e5fa65ce118333b7c1e484c607 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <[email protected]>
Date: Tue, 25 Oct 2022 15:47:19 +0300
Subject: [PATCH] KVM: x86: start moving SMM-related functions to new files

Create a new header and source with code related to system management
mode emulation.  Entry and exit will move there too; for now,
opportunistically rename put_smstate to PUT_SMSTATE while moving
it to smm.h, and adjust the SMM state saving code.

Signed-off-by: Paolo Bonzini <[email protected]>
---
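A note on the macros being renamed here: GET_SMSTATE()/PUT_SMSTATE() take the
architectural offset of a field in the SMRAM state-save area and subtract
0x7e00, because the buffer KVM passes around covers only the state-save
window itself rather than all of SMRAM. Below is a minimal standalone sketch
of that addressing scheme; it is illustrative only and not part of this
patch (the typedef, the 512-byte buffer, and the main() harness are assumed
for the example).

/*
 * Illustrative only: mimics how GET_SMSTATE/PUT_SMSTATE index the
 * state-save buffer.  Here buf stands in for the 512-byte window that
 * backs state-save offsets 0x7e00..0x7fff.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t u32;

#define GET_SMSTATE(type, buf, offset) \
	(*(type *)((buf) + (offset) - 0x7e00))

#define PUT_SMSTATE(type, buf, offset, val) \
	*(type *)((buf) + (offset) - 0x7e00) = val

int main(void)
{
	char buf[512];	/* hypothetical state-save window */

	memset(buf, 0, sizeof(buf));
	/* the 32-bit SMM revision id lives at state-save offset 0x7efc */
	PUT_SMSTATE(u32, buf, 0x7efc, 0x00020000);
	printf("revision id: %#x\n", (unsigned)GET_SMSTATE(u32, buf, 0x7efc));
	return 0;
}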
 arch/x86/include/asm/kvm_host.h |   6 --
 arch/x86/kvm/Makefile           |   1 +
 arch/x86/kvm/emulate.c          |   1 +
 arch/x86/kvm/kvm_cache_regs.h   |   5 --
 arch/x86/kvm/lapic.c            |   1 +
 arch/x86/kvm/mmu/mmu.c          |   1 +
 arch/x86/kvm/smm.c              |  37 ++++++++
 arch/x86/kvm/smm.h              |  25 ++++++
 arch/x86/kvm/svm/nested.c       |   1 +
 arch/x86/kvm/svm/svm.c          |   5 +-
 arch/x86/kvm/vmx/nested.c       |   1 +
 arch/x86/kvm/vmx/vmx.c          |   1 +
 arch/x86/kvm/x86.c              | 148 ++++++++++++--------------------
 arch/x86/kvm/x86.h              |   1 +
 14 files changed, 128 insertions(+), 106 deletions(-)
 create mode 100644 arch/x86/kvm/smm.c
 create mode 100644 arch/x86/kvm/smm.h
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index aa381ab69a19..eed72a164a5c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2077,12 +2077,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #endif
 }
 
-#define put_smstate(type, buf, offset, val) \
-	*(type *)((buf) + (offset) - 0x7e00) = val
-
-#define GET_SMSTATE(type, buf, offset) \
-	(*(type *)((buf) + (offset) - 0x7e00))
-
 int kvm_cpu_dirty_log_size(void);
 
 int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 30f244b64523..ec6f7656254b 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -20,6 +20,7 @@ endif
 
 kvm-$(CONFIG_X86_64) += mmu/tdp_iter.o mmu/tdp_mmu.o
 kvm-$(CONFIG_KVM_XEN) += xen.o
+kvm-y += smm.o
 
 kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
 	       vmx/evmcs.o vmx/nested.o vmx/posted_intr.o
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index aacb28c83e43..3c3bf6f66a7e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -30,6 +30,7 @@
 #include "tss.h"
 #include "mmu.h"
 #include "pmu.h"
+#include "smm.h"
 
 /*
  * Operand types
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 3febc342360c..c09174f73a34 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -200,9 +200,4 @@ static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
 	return vcpu->arch.hflags & HF_GUEST_MASK;
 }
 
-static inline bool is_smm(struct kvm_vcpu *vcpu)
-{
-	return vcpu->arch.hflags & HF_SMM_MASK;
-}
-
 #endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9dda989a1cf0..7460d9566119 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -42,6 +42,7 @@
 #include "x86.h"
 #include "cpuid.h"
 #include "hyperv.h"
+#include "smm.h"
 
 #ifndef CONFIG_X86_64
 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3552e6af3684..60a2c5c75095 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -22,6 +22,7 @@
 #include "tdp_mmu.h"
 #include "x86.h"
 #include "kvm_cache_regs.h"
+#include "smm.h"
 #include "kvm_emulate.h"
 #include "cpuid.h"
 #include "spte.h"
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
new file mode 100644
index 000000000000..b91c48d91f6e
--- /dev/null
+++ b/arch/x86/kvm/smm.c
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/kvm_host.h>
+#include "x86.h"
+#include "kvm_cache_regs.h"
+#include "kvm_emulate.h"
+#include "smm.h"
+#include "trace.h"
+
+void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
+{
+	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
+
+	if (entering_smm) {
+		vcpu->arch.hflags |= HF_SMM_MASK;
+	} else {
+		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
+
+		/* Process a latched INIT or SMI, if any.  */
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+		/*
+		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
+		 * on SMM exit we still need to reload them from
+		 * guest memory
+		 */
+		vcpu->arch.pdptrs_from_userspace = false;
+	}
+
+	kvm_mmu_reset_context(vcpu);
+}
+
+void process_smi(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.smi_pending = true;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+}
diff --git a/arch/x86/kvm/smm.h b/arch/x86/kvm/smm.h
new file mode 100644
index 000000000000..d85d4ccd32dd
--- /dev/null
+++ b/arch/x86/kvm/smm.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ASM_KVM_SMM_H
+#define ASM_KVM_SMM_H
+
+#define GET_SMSTATE(type, buf, offset) \
+	(*(type *)((buf) + (offset) - 0x7e00))
+
+#define PUT_SMSTATE(type, buf, offset, val) \
+	*(type *)((buf) + (offset) - 0x7e00) = val
+
+static inline int kvm_inject_smi(struct kvm_vcpu *vcpu)
+{
+	kvm_make_request(KVM_REQ_SMI, vcpu);
+	return 0;
+}
+
+static inline bool is_smm(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.hflags & HF_SMM_MASK;
+}
+
+void kvm_smm_changed(struct kvm_vcpu *vcpu, bool in_smm);
+void process_smi(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 76dcc8a3e849..d6cc9963b04a 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -25,6 +25,7 @@
 #include "trace.h"
 #include "mmu.h"
 #include "x86.h"
+#include "smm.h"
 #include "cpuid.h"
 #include "lapic.h"
 #include "svm.h"
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f3813dbacb9f..f4ed4a02b109 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -6,6 +6,7 @@
 #include "mmu.h"
 #include "kvm_cache_regs.h"
 #include "x86.h"
+#include "smm.h"
 #include "cpuid.h"
 #include "pmu.h"
 
@@ -4443,9 +4444,9 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 		return 0;
 
 	/* FED8h - SVM Guest */
-	put_smstate(u64, smstate, 0x7ed8, 1);
+	PUT_SMSTATE(u64, smstate, 0x7ed8, 1);
 	/* FEE0h - SVM Guest VMCB Physical Address */
-	put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+	PUT_SMSTATE(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ddd4367d4826..e8197915b8b0 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -16,6 +16,7 @@
 #include "trace.h"
 #include "vmx.h"
 #include "x86.h"
+#include "smm.h"
 
 static bool __read_mostly enable_shadow_vmcs = 1;
 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index c9b49a09e6b5..dc75de78ceb6 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -66,6 +66,7 @@
 #include "vmcs12.h"
 #include "vmx.h"
 #include "x86.h"
+#include "smm.h"
 
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b0c47b41c264..7e60b4c12b91 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -30,6 +30,7 @@
 #include "hyperv.h"
 #include "lapic.h"
 #include "xen.h"
+#include "smm.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -119,7 +120,6 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
-static void process_smi(struct kvm_vcpu *vcpu);
 static void enter_smm(struct kvm_vcpu *vcpu);
 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 static void store_regs(struct kvm_vcpu *vcpu);
@@ -4878,13 +4878,6 @@ static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu)
-{
-	kvm_make_request(KVM_REQ_SMI, vcpu);
-
-	return 0;
-}
-
 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
 					   struct kvm_tpr_access_ctl *tac)
 {
@@ -5095,8 +5088,6 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm);
-
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 					      struct kvm_vcpu_events *events)
 {
@@ -5536,7 +5527,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		break;
 	}
 	case KVM_SMI: {
-		r = kvm_vcpu_ioctl_smi(vcpu);
+		r = kvm_inject_smi(vcpu);
 		break;
 	}
 	case KVM_SET_CPUID: {
@@ -8470,29 +8461,6 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
 
-static void kvm_smm_changed(struct kvm_vcpu *vcpu, bool entering_smm)
-{
-	trace_kvm_smm_transition(vcpu->vcpu_id, vcpu->arch.smbase, entering_smm);
-
-	if (entering_smm) {
-		vcpu->arch.hflags |= HF_SMM_MASK;
-	} else {
-		vcpu->arch.hflags &= ~(HF_SMM_MASK | HF_SMM_INSIDE_NMI_MASK);
-
-		/* Process a latched INIT or SMI, if any.  */
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
-
-		/*
-		 * Even if KVM_SET_SREGS2 loaded PDPTRs out of band,
-		 * on SMM exit we still need to reload them from
-		 * guest memory
-		 */
-		vcpu->arch.pdptrs_from_userspace = false;
-	}
-
-	kvm_mmu_reset_context(vcpu);
-}
-
 static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 				unsigned long *db)
 {
@@ -9853,16 +9821,16 @@ static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
 	int offset;
 
 	kvm_get_segment(vcpu, &seg, n);
-	put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
+	PUT_SMSTATE(u32, buf, 0x7fa8 + n * 4, seg.selector);
 
 	if (n < 3)
 		offset = 0x7f84 + n * 12;
 	else
 		offset = 0x7f2c + (n - 3) * 12;
 
-	put_smstate(u32, buf, offset + 8, seg.base);
-	put_smstate(u32, buf, offset + 4, seg.limit);
-	put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
+	PUT_SMSTATE(u32, buf, offset + 8, seg.base);
+	PUT_SMSTATE(u32, buf, offset + 4, seg.limit);
+	PUT_SMSTATE(u32, buf, offset, enter_smm_get_segment_flags(&seg));
 }
 
 #ifdef CONFIG_X86_64
@@ -9876,10 +9844,10 @@ static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
 	offset = 0x7e00 + n * 16;
 
 	flags = enter_smm_get_segment_flags(&seg) >> 8;
-	put_smstate(u16, buf, offset, seg.selector);
-	put_smstate(u16, buf, offset + 2, flags);
-	put_smstate(u32, buf, offset + 4, seg.limit);
-	put_smstate(u64, buf, offset + 8, seg.base);
+	PUT_SMSTATE(u16, buf, offset, seg.selector);
+	PUT_SMSTATE(u16, buf, offset + 2, flags);
+	PUT_SMSTATE(u32, buf, offset + 4, seg.limit);
+	PUT_SMSTATE(u64, buf, offset + 8, seg.base);
 }
 #endif
 
@@ -9890,47 +9858,47 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 	unsigned long val;
 	int i;
 
-	put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
-	put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
-	put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
-	put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
 
 	for (i = 0; i < 8; i++)
-		put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
+		PUT_SMSTATE(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
 
 	kvm_get_dr(vcpu, 6, &val);
-	put_smstate(u32, buf, 0x7fcc, (u32)val);
+	PUT_SMSTATE(u32, buf, 0x7fcc, (u32)val);
 	kvm_get_dr(vcpu, 7, &val);
-	put_smstate(u32, buf, 0x7fc8, (u32)val);
+	PUT_SMSTATE(u32, buf, 0x7fc8, (u32)val);
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-	put_smstate(u32, buf, 0x7fc4, seg.selector);
-	put_smstate(u32, buf, 0x7f64, seg.base);
-	put_smstate(u32, buf, 0x7f60, seg.limit);
-	put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
+	PUT_SMSTATE(u32, buf, 0x7fc4, seg.selector);
+	PUT_SMSTATE(u32, buf, 0x7f64, seg.base);
+	PUT_SMSTATE(u32, buf, 0x7f60, seg.limit);
+	PUT_SMSTATE(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-	put_smstate(u32, buf, 0x7fc0, seg.selector);
-	put_smstate(u32, buf, 0x7f80, seg.base);
-	put_smstate(u32, buf, 0x7f7c, seg.limit);
-	put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
+	PUT_SMSTATE(u32, buf, 0x7fc0, seg.selector);
+	PUT_SMSTATE(u32, buf, 0x7f80, seg.base);
+	PUT_SMSTATE(u32, buf, 0x7f7c, seg.limit);
+	PUT_SMSTATE(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
 
 	static_call(kvm_x86_get_gdt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7f74, dt.address);
-	put_smstate(u32, buf, 0x7f70, dt.size);
+	PUT_SMSTATE(u32, buf, 0x7f74, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7f70, dt.size);
 
 	static_call(kvm_x86_get_idt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7f58, dt.address);
-	put_smstate(u32, buf, 0x7f54, dt.size);
+	PUT_SMSTATE(u32, buf, 0x7f58, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7f54, dt.size);
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_32(vcpu, buf, i);
 
-	put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
 
 	/* revision id */
-	put_smstate(u32, buf, 0x7efc, 0x00020000);
-	put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
+	PUT_SMSTATE(u32, buf, 0x7efc, 0x00020000);
+	PUT_SMSTATE(u32, buf, 0x7ef8, vcpu->arch.smbase);
 }
 
 #ifdef CONFIG_X86_64
@@ -9942,46 +9910,46 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
 	int i;
 
 	for (i = 0; i < 16; i++)
-		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
+		PUT_SMSTATE(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
 
-	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
-	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f78, kvm_rip_read(vcpu));
+	PUT_SMSTATE(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
 
 	kvm_get_dr(vcpu, 6, &val);
-	put_smstate(u64, buf, 0x7f68, val);
+	PUT_SMSTATE(u64, buf, 0x7f68, val);
 	kvm_get_dr(vcpu, 7, &val);
-	put_smstate(u64, buf, 0x7f60, val);
+	PUT_SMSTATE(u64, buf, 0x7f60, val);
 
-	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
-	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
-	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
+	PUT_SMSTATE(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
 
-	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
+	PUT_SMSTATE(u32, buf, 0x7f00, vcpu->arch.smbase);
 
 	/* revision id */
-	put_smstate(u32, buf, 0x7efc, 0x00020064);
+	PUT_SMSTATE(u32, buf, 0x7efc, 0x00020064);
 
-	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
+	PUT_SMSTATE(u64, buf, 0x7ed0, vcpu->arch.efer);
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-	put_smstate(u16, buf, 0x7e90, seg.selector);
-	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
-	put_smstate(u32, buf, 0x7e94, seg.limit);
-	put_smstate(u64, buf, 0x7e98, seg.base);
+	PUT_SMSTATE(u16, buf, 0x7e90, seg.selector);
+	PUT_SMSTATE(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
+	PUT_SMSTATE(u32, buf, 0x7e94, seg.limit);
+	PUT_SMSTATE(u64, buf, 0x7e98, seg.base);
 
 	static_call(kvm_x86_get_idt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7e84, dt.size);
-	put_smstate(u64, buf, 0x7e88, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7e84, dt.size);
+	PUT_SMSTATE(u64, buf, 0x7e88, dt.address);
 
 	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-	put_smstate(u16, buf, 0x7e70, seg.selector);
-	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
-	put_smstate(u32, buf, 0x7e74, seg.limit);
-	put_smstate(u64, buf, 0x7e78, seg.base);
+	PUT_SMSTATE(u16, buf, 0x7e70, seg.selector);
+	PUT_SMSTATE(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
+	PUT_SMSTATE(u32, buf, 0x7e74, seg.limit);
+	PUT_SMSTATE(u64, buf, 0x7e78, seg.base);
 
 	static_call(kvm_x86_get_gdt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7e64, dt.size);
-	put_smstate(u64, buf, 0x7e68, dt.address);
+	PUT_SMSTATE(u32, buf, 0x7e64, dt.size);
+	PUT_SMSTATE(u64, buf, 0x7e68, dt.address);
 
 	for (i = 0; i < 6; i++)
 		enter_smm_save_seg_64(vcpu, buf, i);
@@ -10067,12 +10035,6 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_mmu_reset_context(vcpu);
 }
 
-static void process_smi(struct kvm_vcpu *vcpu)
-{
-	vcpu->arch.smi_pending = true;
-	kvm_make_request(KVM_REQ_EVENT, vcpu);
-}
-
 void kvm_make_scan_ioapic_request_mask(struct kvm *kvm,
 				       unsigned long *vcpu_bitmap)
 {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 1926d2cb8e79..cb64fa05405f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -7,6 +7,7 @@
 #include <asm/pvclock.h>
 #include "kvm_cache_regs.h"
 #include "kvm_emulate.h"
+#include "smm.h"
 
 struct kvm_caps {
 	/* control of guest tsc rate supported? */
-- 
2.38.1
  477. 2.38.1