0013-KVM-x86-emulator-smm-use-smram-struct-for-64-bit-smr.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Maxim Levitsky <[email protected]>
Date: Wed, 3 Aug 2022 18:50:08 +0300
Subject: [PATCH] KVM: x86: emulator/smm: use smram struct for 64 bit smram
 load/restore

Use kvm_smram_state_64 struct to save/restore the 64 bit SMM state
(used when X86_FEATURE_LM is present in the guest CPUID,
regardless of 32-bitness of the guest).

Signed-off-by: Maxim Levitsky <[email protected]>
Signed-off-by: Thomas Lamprecht <[email protected]>
---
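Note (context for review, not part of the change): struct kvm_smm_seg_state_64
and struct kvm_smram_state_64 are introduced by an earlier patch in this
series. A rough sketch of what this patch relies on is given below; it lists
only the fields touched here, the member order is not meant to match the real
SMRAM offsets, and the real definitions are packed and carry the reserved and
padding fields needed to mirror the 64 bit SMRAM state save area exactly.

	/* Sketch only -- see the earlier series patch for the real, packed layout. */
	struct kvm_smm_seg_state_64 {
		u16 selector;
		u16 attributes;
		u32 limit;
		u64 base;
	};

	struct kvm_smram_state_64 {
		struct kvm_smm_seg_state_64 es, cs, ss, ds, fs, gs;
		struct kvm_smm_seg_state_64 gdtr, idtr;	/* only .limit and .base used here */
		struct kvm_smm_seg_state_64 ldtr, tr;
		u64 gprs[16];		/* gprs[15] = RAX ... gprs[0] = R15 */
		u64 rip, rflags;
		u64 dr6, dr7;
		u64 cr0, cr3, cr4;
		u32 smbase;
		u32 smm_revison;	/* field name spelled as used below */
		u64 efer;
	};

Compared to GET_SMSTATE()/put_smstate() with raw 0x7exx/0x7fxx offsets, going
through these structs keeps the offset arithmetic in one place and lets the
compiler type-check every access.
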
 arch/x86/kvm/emulate.c | 88 ++++++++++++++----------------------------
 arch/x86/kvm/x86.c     | 75 ++++++++++++++++-------------------
 2 files changed, 62 insertions(+), 101 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 65d82292ccec..03f9e5aa036e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2373,24 +2373,16 @@ static void rsm_load_seg_32(struct x86_emulate_ctxt *ctxt,
 }
 
 #ifdef CONFIG_X86_64
-static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
-			   int n)
+static void rsm_load_seg_64(struct x86_emulate_ctxt *ctxt,
+			    const struct kvm_smm_seg_state_64 *state,
+			    int n)
 {
 	struct desc_struct desc;
-	int offset;
-	u16 selector;
-	u32 base3;
-
-	offset = 0x7e00 + n * 16;
-
-	selector = GET_SMSTATE(u16, smstate, offset);
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
-	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
-	set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
-	base3 = GET_SMSTATE(u32, smstate, offset + 12);
-	ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
-	return X86EMUL_CONTINUE;
+	rsm_set_desc_flags(&desc, state->attributes << 8);
+	set_desc_limit(&desc, state->limit);
+	set_desc_base(&desc, (u32)state->base);
+	ctxt->ops->set_segment(ctxt, state->selector, &desc, state->base >> 32, n);
 }
 #endif
 
@@ -2484,71 +2476,49 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 
 #ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
-			     const char *smstate)
+			     const struct kvm_smram_state_64 *smstate)
 {
-	struct desc_struct desc;
 	struct desc_ptr dt;
-	u64 val, cr0, cr3, cr4;
-	u32 base3;
-	u16 selector;
 	int i, r;
 
 	for (i = 0; i < 16; i++)
-		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
+		*reg_write(ctxt, i) = smstate->gprs[15 - i];
 
-	ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
-	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
+	ctxt->_eip = smstate->rip;
+	ctxt->eflags = smstate->rflags | X86_EFLAGS_FIXED;
 
-	val = GET_SMSTATE(u64, smstate, 0x7f68);
-
-	if (ctxt->ops->set_dr(ctxt, 6, val))
+	if (ctxt->ops->set_dr(ctxt, 6, smstate->dr6))
 		return X86EMUL_UNHANDLEABLE;
-
-	val = GET_SMSTATE(u64, smstate, 0x7f60);
-
-	if (ctxt->ops->set_dr(ctxt, 7, val))
+	if (ctxt->ops->set_dr(ctxt, 7, smstate->dr7))
 		return X86EMUL_UNHANDLEABLE;
 
-	cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
-	cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
-	cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
-	ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
-	val = GET_SMSTATE(u64, smstate, 0x7ed0);
+	ctxt->ops->set_smbase(ctxt, smstate->smbase);
 
-	if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
+	if (ctxt->ops->set_msr(ctxt, MSR_EFER, smstate->efer & ~EFER_LMA))
 		return X86EMUL_UNHANDLEABLE;
 
-	selector = GET_SMSTATE(u32, smstate, 0x7e90);
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
-	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
-	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
-	base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
-	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
+	rsm_load_seg_64(ctxt, &smstate->tr, VCPU_SREG_TR);
 
-	dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
-	dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
+	dt.size = smstate->idtr.limit;
+	dt.address = smstate->idtr.base;
 	ctxt->ops->set_idt(ctxt, &dt);
 
-	selector = GET_SMSTATE(u32, smstate, 0x7e70);
-	rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
-	set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
-	set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
-	base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
-	ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
+	rsm_load_seg_64(ctxt, &smstate->ldtr, VCPU_SREG_LDTR);
 
-	dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
-	dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
+	dt.size = smstate->gdtr.limit;
+	dt.address = smstate->gdtr.base;
 	ctxt->ops->set_gdt(ctxt, &dt);
 
-	r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
+	r = rsm_enter_protected_mode(ctxt, smstate->cr0, smstate->cr3, smstate->cr4);
 	if (r != X86EMUL_CONTINUE)
 		return r;
 
-	for (i = 0; i < 6; i++) {
-		r = rsm_load_seg_64(ctxt, smstate, i);
-		if (r != X86EMUL_CONTINUE)
-			return r;
-	}
+	rsm_load_seg_64(ctxt, &smstate->es, VCPU_SREG_ES);
+	rsm_load_seg_64(ctxt, &smstate->cs, VCPU_SREG_CS);
+	rsm_load_seg_64(ctxt, &smstate->ss, VCPU_SREG_SS);
+	rsm_load_seg_64(ctxt, &smstate->ds, VCPU_SREG_DS);
+	rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS);
+	rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS);
 
 	return X86EMUL_CONTINUE;
 }
@@ -2623,7 +2593,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 #ifdef CONFIG_X86_64
 	if (emulator_has_longmode(ctxt))
-		ret = rsm_load_state_64(ctxt, (const char *)&smram);
+		ret = rsm_load_state_64(ctxt, &smram.smram64);
 	else
 #endif
 		ret = rsm_load_state_32(ctxt, &smram.smram32);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 579a1cb6a7c8..7a4d86f9bdcd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10115,20 +10115,17 @@ static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
 }
 
 #ifdef CONFIG_X86_64
-static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
+static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu,
+				  struct kvm_smm_seg_state_64 *state,
+				  int n)
 {
 	struct kvm_segment seg;
-	int offset;
-	u16 flags;
 
 	kvm_get_segment(vcpu, &seg, n);
-	offset = 0x7e00 + n * 16;
-
-	flags = enter_smm_get_segment_flags(&seg) >> 8;
-	put_smstate(u16, buf, offset, seg.selector);
-	put_smstate(u16, buf, offset + 2, flags);
-	put_smstate(u32, buf, offset + 4, seg.limit);
-	put_smstate(u64, buf, offset + 8, seg.base);
+	state->selector = seg.selector;
+	state->attributes = enter_smm_get_segment_flags(&seg) >> 8;
+	state->limit = seg.limit;
+	state->base = seg.base;
 }
 #endif
 
@@ -10176,57 +10173,51 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_stat
 }
 
 #ifdef CONFIG_X86_64
-static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
+static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_state_64 *smram)
 {
 	struct desc_ptr dt;
-	struct kvm_segment seg;
 	unsigned long val;
 	int i;
 
 	for (i = 0; i < 16; i++)
-		put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
+		smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i);
+
+	smram->rip = kvm_rip_read(vcpu);
+	smram->rflags = kvm_get_rflags(vcpu);
 
-	put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
-	put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
 	kvm_get_dr(vcpu, 6, &val);
-	put_smstate(u64, buf, 0x7f68, val);
+	smram->dr6 = val;
 	kvm_get_dr(vcpu, 7, &val);
-	put_smstate(u64, buf, 0x7f60, val);
-
-	put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
-	put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
-	put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
+	smram->dr7 = val;
 
-	put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
+	smram->cr0 = kvm_read_cr0(vcpu);
+	smram->cr3 = kvm_read_cr3(vcpu);
+	smram->cr4 = kvm_read_cr4(vcpu);
 
-	/* revision id */
-	put_smstate(u32, buf, 0x7efc, 0x00020064);
+	smram->smbase = vcpu->arch.smbase;
+	smram->smm_revison = 0x00020064;
 
-	put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
+	smram->efer = vcpu->arch.efer;
 
-	kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-	put_smstate(u16, buf, 0x7e90, seg.selector);
-	put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
-	put_smstate(u32, buf, 0x7e94, seg.limit);
-	put_smstate(u64, buf, 0x7e98, seg.base);
+	enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR);
 
 	static_call(kvm_x86_get_idt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7e84, dt.size);
-	put_smstate(u64, buf, 0x7e88, dt.address);
+	smram->idtr.limit = dt.size;
+	smram->idtr.base = dt.address;
 
-	kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-	put_smstate(u16, buf, 0x7e70, seg.selector);
-	put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
-	put_smstate(u32, buf, 0x7e74, seg.limit);
-	put_smstate(u64, buf, 0x7e78, seg.base);
+	enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR);
 
 	static_call(kvm_x86_get_gdt)(vcpu, &dt);
-	put_smstate(u32, buf, 0x7e64, dt.size);
-	put_smstate(u64, buf, 0x7e68, dt.address);
+	smram->gdtr.limit = dt.size;
+	smram->gdtr.base = dt.address;
 
-	for (i = 0; i < 6; i++)
-		enter_smm_save_seg_64(vcpu, buf, i);
+	enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES);
+	enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS);
+	enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS);
+	enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
+	enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
+	enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
 }
 #endif
 
@@ -10240,7 +10231,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	memset(smram.bytes, 0, sizeof(smram.bytes));
 #ifdef CONFIG_X86_64
 	if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-		enter_smm_save_state_64(vcpu, (char *)&smram);
+		enter_smm_save_state_64(vcpu, &smram.smram64);
 	else
 #endif
 		enter_smm_save_state_32(vcpu, &smram.smram32);