0011-KVM-x86-do-not-go-through-ctxt-ops-when-emulating-rs.patch

From fa637dcaf7f19a746fe507349db8b56f49ab03b0 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <[email protected]>
Date: Wed, 26 Oct 2022 14:47:45 +0200
Subject: [PATCH] KVM: x86: do not go through ctxt->ops when emulating rsm

Now that RSM is implemented in a single emulator callback, there is no
point in going through other callbacks for the sake of modifying
processor state. Just invoke KVM's own internal functions directly,
and remove the callbacks that were only used by em_rsm; the only
substantial difference is in the handling of the segment registers
and descriptor cache, which have to be parsed into a struct kvm_segment
instead of a struct desc_struct.
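
[Editorial note, not part of the patch: below is a minimal, self-contained
sketch of the flag parsing that rsm_set_desc_flags() performs in the hunks
that follow. The struct here is a pared-down stand-in for KVM's struct
kvm_segment, not the real kernel definition; only the bit positions and
field names mirror the patch.]

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the attribute fields of struct kvm_segment. */
    struct seg_attrs {
            uint8_t g, db, l, avl, present, dpl, s, type;
    };

    /* Unpack an SMRAM segment-flags word into discrete attributes. */
    static void parse_flags(struct seg_attrs *seg, uint32_t flags)
    {
            seg->g       = (flags >> 23) & 1;  /* granularity */
            seg->db      = (flags >> 22) & 1;  /* default operand size */
            seg->l       = (flags >> 21) & 1;  /* 64-bit code segment */
            seg->avl     = (flags >> 20) & 1;  /* available to software */
            seg->present = (flags >> 15) & 1;
            seg->dpl     = (flags >> 13) & 3;  /* privilege level */
            seg->s       = (flags >> 12) & 1;  /* code/data vs. system */
            seg->type    = (flags >> 8) & 15;
    }

    int main(void)
    {
            struct seg_attrs seg;

            /* Flags of a present, ring-0, 32-bit code segment (type 0xb). */
            parse_flags(&seg, (1u << 23) | (1u << 22) | (1u << 15) |
                              (1u << 12) | (0xbu << 8));
            printf("g=%u db=%u present=%u dpl=%u type=%#x\n",
                   seg.g, seg.db, seg.present, seg.dpl, seg.type);
            return 0;
    }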

This also fixes a bug where emulator_set_segment was shifting the
limit left by 12 if the G bit is set, but the limit had not been
shifted right upon entry to SMM.
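
[Editorial note, not part of the patch: to see the bug concretely, here is
a hypothetical helper, not KVM code, mirroring the scaling that
emulator_set_segment applies when G=1, where a 20-bit page-granular limit
is expanded to bytes. The 32-bit limit saved in SMRAM is already
byte-granular, so rescaling it inflates the value.]

    #include <stdint.h>
    #include <stdio.h>

    /* Expand a page-granular limit to bytes, as done for G=1 descriptors. */
    static uint32_t scale_if_g(uint32_t limit, int g)
    {
            return g ? (limit << 12) | 0xfff : limit;
    }

    int main(void)
    {
            uint32_t smram_limit = 0x000fffff;  /* 1 MiB, already in bytes */

            /* Correct: use the SMRAM value as-is.        -> 0xfffff    */
            printf("correct: %#x\n", smram_limit);
            /* Buggy: rescale a byte-granular limit.      -> 0xffffffff */
            printf("buggy:   %#x\n", scale_if_g(smram_limit, 1));
            return 0;
    }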

The emulator context is still used to restore EIP and the general
purpose registers.

Signed-off-by: Paolo Bonzini <[email protected]>
---
 arch/x86/kvm/kvm_emulate.h |  13 ---
 arch/x86/kvm/smm.c         | 177 +++++++++++++++++--------------------
 arch/x86/kvm/x86.c         |  33 -------
 3 files changed, 81 insertions(+), 142 deletions(-)
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index d7afbc448dd2..84b1f2661463 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -116,16 +116,6 @@ struct x86_emulate_ops {
                          unsigned int bytes,
                          struct x86_exception *fault, bool system);
 
-        /*
-         * read_phys: Read bytes of standard (non-emulated/special) memory.
-         *            Used for descriptor reading.
-         *  @addr:  [IN ] Physical address from which to read.
-         *  @val:   [OUT] Value read from memory.
-         *  @bytes: [IN ] Number of bytes to read from memory.
-         */
-        int (*read_phys)(struct x86_emulate_ctxt *ctxt, unsigned long addr,
-                        void *val, unsigned int bytes);
-
         /*
          * write_std: Write bytes of standard (non-emulated/special) memory.
          *            Used for descriptor writing.
@@ -209,11 +199,8 @@ struct x86_emulate_ops {
         int (*cpl)(struct x86_emulate_ctxt *ctxt);
         void (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
         int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
-        u64 (*get_smbase)(struct x86_emulate_ctxt *ctxt);
-        void (*set_smbase)(struct x86_emulate_ctxt *ctxt, u64 smbase);
         int (*set_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
         int (*get_msr_with_filter)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
-        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
         int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
         int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
         int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 773e07b6397d..41ca128478fc 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -271,71 +271,59 @@ void enter_smm(struct kvm_vcpu *vcpu)
         kvm_mmu_reset_context(vcpu);
 }
 
-static int emulator_has_longmode(struct x86_emulate_ctxt *ctxt)
-{
-#ifdef CONFIG_X86_64
-        return ctxt->ops->guest_has_long_mode(ctxt);
-#else
-        return false;
-#endif
-}
-
-static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
+static void rsm_set_desc_flags(struct kvm_segment *desc, u32 flags)
 {
         desc->g = (flags >> 23) & 1;
-        desc->d = (flags >> 22) & 1;
+        desc->db = (flags >> 22) & 1;
         desc->l = (flags >> 21) & 1;
         desc->avl = (flags >> 20) & 1;
-        desc->p = (flags >> 15) & 1;
+        desc->present = (flags >> 15) & 1;
         desc->dpl = (flags >> 13) & 3;
         desc->s = (flags >> 12) & 1;
         desc->type = (flags >> 8) & 15;
+
+        desc->unusable = !desc->present;
+        desc->padding = 0;
 }
 
-static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
+static int rsm_load_seg_32(struct kvm_vcpu *vcpu, const char *smstate,
                            int n)
 {
-        struct desc_struct desc;
+        struct kvm_segment desc;
         int offset;
-        u16 selector;
-
-        selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
 
         if (n < 3)
                 offset = 0x7f84 + n * 12;
         else
                 offset = 0x7f2c + (n - 3) * 12;
 
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
+        desc.selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
+        desc.base = GET_SMSTATE(u32, smstate, offset + 8);
+        desc.limit = GET_SMSTATE(u32, smstate, offset + 4);
         rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
-        ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
+        kvm_set_segment(vcpu, &desc, n);
         return X86EMUL_CONTINUE;
 }
 
 #ifdef CONFIG_X86_64
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
+static int rsm_load_seg_64(struct kvm_vcpu *vcpu, const char *smstate,
                            int n)
 {
-        struct desc_struct desc;
+        struct kvm_segment desc;
         int offset;
-        u16 selector;
-        u32 base3;
 
         offset = 0x7e00 + n * 16;
 
-        selector = GET_SMSTATE(u16, smstate, offset);
+        desc.selector = GET_SMSTATE(u16, smstate, offset);
         rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
-        base3 = GET_SMSTATE(u32, smstate, offset + 12);
-
-        ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
+        desc.limit = GET_SMSTATE(u32, smstate, offset + 4);
+        desc.base = GET_SMSTATE(u64, smstate, offset + 8);
+        kvm_set_segment(vcpu, &desc, n);
         return X86EMUL_CONTINUE;
 }
 #endif
 
-static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
+static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
                                     u64 cr0, u64 cr3, u64 cr4)
 {
         int bad;
@@ -348,7 +336,7 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
                 cr3 &= ~0xfff;
         }
 
-        bad = ctxt->ops->set_cr(ctxt, 3, cr3);
+        bad = kvm_set_cr3(vcpu, cr3);
         if (bad)
                 return X86EMUL_UNHANDLEABLE;
 
@@ -357,20 +345,20 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
          * Then enable protected mode. However, PCID cannot be enabled
          * if EFER.LMA=0, so set it separately.
          */
-        bad = ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
+        bad = kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
         if (bad)
                 return X86EMUL_UNHANDLEABLE;
 
-        bad = ctxt->ops->set_cr(ctxt, 0, cr0);
+        bad = kvm_set_cr0(vcpu, cr0);
         if (bad)
                 return X86EMUL_UNHANDLEABLE;
 
         if (cr4 & X86_CR4_PCIDE) {
-                bad = ctxt->ops->set_cr(ctxt, 4, cr4);
+                bad = kvm_set_cr4(vcpu, cr4);
                 if (bad)
                         return X86EMUL_UNHANDLEABLE;
                 if (pcid) {
-                        bad = ctxt->ops->set_cr(ctxt, 3, cr3 | pcid);
+                        bad = kvm_set_cr3(vcpu, cr3 | pcid);
                         if (bad)
                                 return X86EMUL_UNHANDLEABLE;
                 }
@@ -383,9 +371,9 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
 static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
                              const char *smstate)
 {
-        struct desc_struct desc;
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+        struct kvm_segment desc;
         struct desc_ptr dt;
-        u16 selector;
         u32 val, cr0, cr3, cr4;
         int i;
 
@@ -399,56 +387,55 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
         val = GET_SMSTATE(u32, smstate, 0x7fcc);
 
-        if (ctxt->ops->set_dr(ctxt, 6, val))
+        if (kvm_set_dr(vcpu, 6, val))
                 return X86EMUL_UNHANDLEABLE;
 
         val = GET_SMSTATE(u32, smstate, 0x7fc8);
 
-        if (ctxt->ops->set_dr(ctxt, 7, val))
+        if (kvm_set_dr(vcpu, 7, val))
                 return X86EMUL_UNHANDLEABLE;
 
-        selector = GET_SMSTATE(u32, smstate, 0x7fc4);
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
+        desc.selector = GET_SMSTATE(u32, smstate, 0x7fc4);
+        desc.base = GET_SMSTATE(u32, smstate, 0x7f64);
+        desc.limit = GET_SMSTATE(u32, smstate, 0x7f60);
         rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
-        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
+        kvm_set_segment(vcpu, &desc, VCPU_SREG_TR);
 
-        selector = GET_SMSTATE(u32, smstate, 0x7fc0);
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
+        desc.selector = GET_SMSTATE(u32, smstate, 0x7fc0);
+        desc.base = GET_SMSTATE(u32, smstate, 0x7f80);
+        desc.limit = GET_SMSTATE(u32, smstate, 0x7f7c);
         rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
-        ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
+        kvm_set_segment(vcpu, &desc, VCPU_SREG_LDTR);
 
         dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
         dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
-        ctxt->ops->set_gdt(ctxt, &dt);
+        static_call(kvm_x86_set_gdt)(vcpu, &dt);
 
         dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
         dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
-        ctxt->ops->set_idt(ctxt, &dt);
+        static_call(kvm_x86_set_idt)(vcpu, &dt);
 
         for (i = 0; i < 6; i++) {
-                int r = rsm_load_seg_32(ctxt, smstate, i);
+                int r = rsm_load_seg_32(vcpu, smstate, i);
                 if (r != X86EMUL_CONTINUE)
                         return r;
         }
 
         cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
 
-        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
+        vcpu->arch.smbase = GET_SMSTATE(u32, smstate, 0x7ef8);
 
-        return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
+        return rsm_enter_protected_mode(vcpu, cr0, cr3, cr4);
 }
 
 #ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
                              const char *smstate)
 {
-        struct desc_struct desc;
+        struct kvm_vcpu *vcpu = ctxt->vcpu;
+        struct kvm_segment desc;
         struct desc_ptr dt;
         u64 val, cr0, cr3, cr4;
-        u32 base3;
-        u16 selector;
         int i, r;
 
         for (i = 0; i < NR_EMULATOR_GPRS; i++)
@@ -459,51 +446,49 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
         val = GET_SMSTATE(u64, smstate, 0x7f68);
 
-        if (ctxt->ops->set_dr(ctxt, 6, val))
+        if (kvm_set_dr(vcpu, 6, val))
                 return X86EMUL_UNHANDLEABLE;
 
         val = GET_SMSTATE(u64, smstate, 0x7f60);
 
-        if (ctxt->ops->set_dr(ctxt, 7, val))
+        if (kvm_set_dr(vcpu, 7, val))
                 return X86EMUL_UNHANDLEABLE;
 
         cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
         cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
         cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
-        ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
+        vcpu->arch.smbase = GET_SMSTATE(u32, smstate, 0x7f00);
         val = GET_SMSTATE(u64, smstate, 0x7ed0);
 
-        if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
+        if (kvm_set_msr(vcpu, MSR_EFER, val & ~EFER_LMA))
                 return X86EMUL_UNHANDLEABLE;
 
-        selector = GET_SMSTATE(u32, smstate, 0x7e90);
+        desc.selector = GET_SMSTATE(u32, smstate, 0x7e90);
         rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
-        base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
-        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
+        desc.limit = GET_SMSTATE(u32, smstate, 0x7e94);
+        desc.base = GET_SMSTATE(u64, smstate, 0x7e98);
+        kvm_set_segment(vcpu, &desc, VCPU_SREG_TR);
 
         dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
         dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
-        ctxt->ops->set_idt(ctxt, &dt);
+        static_call(kvm_x86_set_idt)(vcpu, &dt);
 
-        selector = GET_SMSTATE(u32, smstate, 0x7e70);
+        desc.selector = GET_SMSTATE(u32, smstate, 0x7e70);
         rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
-        set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
-        set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
-        base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
-        ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
+        desc.limit = GET_SMSTATE(u32, smstate, 0x7e74);
+        desc.base = GET_SMSTATE(u64, smstate, 0x7e78);
+        kvm_set_segment(vcpu, &desc, VCPU_SREG_LDTR);
 
         dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
         dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
-        ctxt->ops->set_gdt(ctxt, &dt);
+        static_call(kvm_x86_set_gdt)(vcpu, &dt);
 
-        r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
+        r = rsm_enter_protected_mode(vcpu, cr0, cr3, cr4);
         if (r != X86EMUL_CONTINUE)
                 return r;
 
         for (i = 0; i < 6; i++) {
-                r = rsm_load_seg_64(ctxt, smstate, i);
+                r = rsm_load_seg_64(vcpu, smstate, i);
                 if (r != X86EMUL_CONTINUE)
                         return r;
         }
@@ -520,14 +505,14 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
         u64 smbase;
         int ret;
 
-        smbase = ctxt->ops->get_smbase(ctxt);
+        smbase = vcpu->arch.smbase;
 
-        ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
-        if (ret != X86EMUL_CONTINUE)
+        ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, buf, sizeof(buf));
+        if (ret < 0)
                 return X86EMUL_UNHANDLEABLE;
 
-        if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-                ctxt->ops->set_nmi_mask(ctxt, false);
+        if ((vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK) == 0)
+                static_call(kvm_x86_set_nmi_mask)(vcpu, false);
 
         kvm_smm_changed(vcpu, false);
 
@@ -535,41 +520,41 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
          * Get back to real mode, to prepare a safe state in which to load
          * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
          * supports long mode.
-         *
-         * The ctxt->ops callbacks will handle all side effects when writing
-         * writing MSRs and CRs, e.g. MMU context resets, CPUID
-         * runtime updates, etc.
          */
-        if (emulator_has_longmode(ctxt)) {
-                struct desc_struct cs_desc;
+#ifdef CONFIG_X86_64
+        if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
+                struct kvm_segment cs_desc;
 
                 /* Zero CR4.PCIDE before CR0.PG. */
-                cr4 = ctxt->ops->get_cr(ctxt, 4);
+                cr4 = kvm_read_cr4(vcpu);
                 if (cr4 & X86_CR4_PCIDE)
-                        ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PCIDE);
+                        kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PCIDE);
 
                 /* A 32-bit code segment is required to clear EFER.LMA. */
                 memset(&cs_desc, 0, sizeof(cs_desc));
                 cs_desc.type = 0xb;
-                cs_desc.s = cs_desc.g = cs_desc.p = 1;
-                ctxt->ops->set_segment(ctxt, 0, &cs_desc, 0, VCPU_SREG_CS);
+                cs_desc.s = cs_desc.g = cs_desc.present = 1;
+                kvm_set_segment(vcpu, &cs_desc, VCPU_SREG_CS);
         }
+#endif
 
         /* For the 64-bit case, this will clear EFER.LMA. */
-        cr0 = ctxt->ops->get_cr(ctxt, 0);
+        cr0 = kvm_read_cr0(vcpu);
         if (cr0 & X86_CR0_PE)
-                ctxt->ops->set_cr(ctxt, 0, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
+                kvm_set_cr0(vcpu, cr0 & ~(X86_CR0_PG | X86_CR0_PE));
 
-        if (emulator_has_longmode(ctxt)) {
+#ifdef CONFIG_X86_64
+        if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
                 /* Clear CR4.PAE before clearing EFER.LME. */
-                cr4 = ctxt->ops->get_cr(ctxt, 4);
+                cr4 = kvm_read_cr4(vcpu);
                 if (cr4 & X86_CR4_PAE)
-                        ctxt->ops->set_cr(ctxt, 4, cr4 & ~X86_CR4_PAE);
+                        kvm_set_cr4(vcpu, cr4 & ~X86_CR4_PAE);
 
                 /* And finally go back to 32-bit mode. */
                 efer = 0;
-                ctxt->ops->set_msr(ctxt, MSR_EFER, efer);
+                kvm_set_msr(vcpu, MSR_EFER, efer);
         }
+#endif
 
         /*
          * Give leave_smm() a chance to make ISA-specific changes to the vCPU
@@ -580,7 +565,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
                 return X86EMUL_UNHANDLEABLE;
 
 #ifdef CONFIG_X86_64
-        if (emulator_has_longmode(ctxt))
+        if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
                 return rsm_load_state_64(ctxt, buf);
         else
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dae68ef0c3c8..77e0ca43ee27 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7171,15 +7171,6 @@ static int emulator_read_std(struct x86_emulate_ctxt *ctxt,
         return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception);
 }
 
-static int kvm_read_guest_phys_system(struct x86_emulate_ctxt *ctxt,
-                unsigned long addr, void *val, unsigned int bytes)
-{
-        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-        int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes);
-
-        return r < 0 ? X86EMUL_IO_NEEDED : X86EMUL_CONTINUE;
-}
-
 static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
                                        struct kvm_vcpu *vcpu, u64 access,
                                        struct x86_exception *exception)
@@ -7956,26 +7947,6 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
         return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
 }
 
-static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
-                            u32 msr_index, u64 data)
-{
-        return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
-}
-
-static u64 emulator_get_smbase(struct x86_emulate_ctxt *ctxt)
-{
-        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-
-        return vcpu->arch.smbase;
-}
-
-static void emulator_set_smbase(struct x86_emulate_ctxt *ctxt, u64 smbase)
-{
-        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
-
-        vcpu->arch.smbase = smbase;
-}
-
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
                               u32 pmc)
 {
@@ -8074,7 +8045,6 @@ static const struct x86_emulate_ops emulate_ops = {
         .write_gpr           = emulator_write_gpr,
         .read_std            = emulator_read_std,
         .write_std           = emulator_write_std,
-        .read_phys           = kvm_read_guest_phys_system,
         .fetch               = kvm_fetch_guest_virt,
         .read_emulated       = emulator_read_emulated,
         .write_emulated      = emulator_write_emulated,
@@ -8094,11 +8064,8 @@ static const struct x86_emulate_ops emulate_ops = {
         .cpl                 = emulator_get_cpl,
         .get_dr              = emulator_get_dr,
         .set_dr              = emulator_set_dr,
-        .get_smbase          = emulator_get_smbase,
-        .set_smbase          = emulator_set_smbase,
         .set_msr_with_filter = emulator_set_msr_with_filter,
         .get_msr_with_filter = emulator_get_msr_with_filter,
-        .set_msr             = emulator_set_msr,
         .get_msr             = emulator_get_msr,
         .check_pmc           = emulator_check_pmc,
         .read_pmc            = emulator_read_pmc,
-- 
2.38.1