0026-KVM-x86-smm-use-smram-struct-for-64-bit-smram-load-r.patch

From b7913065928c913fb8569a8a71f6eec4a32779c7 Mon Sep 17 00:00:00 2001
From: Maxim Levitsky <[email protected]>
Date: Tue, 25 Oct 2022 15:47:37 +0300
Subject: [PATCH] KVM: x86: smm: use smram struct for 64 bit smram load/restore

Use kvm_smram_state_64 struct to save/restore the 64 bit SMM state
(used when X86_FEATURE_LM is present in the guest CPUID,
regardless of 32-bitness of the guest).

Signed-off-by: Maxim Levitsky <[email protected]>
---
 arch/x86/kvm/smm.c | 153 +++++++++++++++++++--------------------------
 1 file changed, 63 insertions(+), 90 deletions(-)
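
For reference, the layout structs used below come from arch/x86/kvm/smm.h, where
they were introduced earlier in this series. The sketch below is illustrative
only: it lists just the fields this patch touches, the field names are taken
from the diff, but the field grouping/order, the omitted reserved fields and
the 512-byte size of the bytes[] view are assumptions, not copied from smm.h.

#include <linux/types.h>

/* 64 bit segment state as kept in SMRAM (only the fields used by this patch). */
struct kvm_smm_seg_state_64 {
        u16 selector;
        u16 attributes;
        u32 limit;
        u64 base;
};

/* 64 bit SMRAM state save area; only the fields referenced below are shown. */
struct kvm_smram_state_64 {
        struct kvm_smm_seg_state_64 es, cs, ss, ds, fs, gs;
        struct kvm_smm_seg_state_64 gdtr, ldtr, idtr, tr;
        u64 gprs[16];           /* gprs[0] = R15 ... gprs[15] = RAX */
        u64 rip, rflags;
        u64 dr6, dr7;
        u64 cr0, cr3, cr4;
        u32 smbase;
        u32 smm_revison;        /* sic, matches the field name used below */
        u64 efer;
        /* remaining (reserved/unused here) fields omitted */
};

/* Union handed to enter_smm()/emulator_leave_smm(); the raw bytes[] view is
 * what the old code indexed with PUT_SMSTATE()/GET_SMSTATE(). */
union kvm_smram {
        u8 bytes[512];          /* assumed size of the SMM state save area */
        struct kvm_smram_state_64 smram64;
        /* struct kvm_smram_state_32 smram32;  (32 bit layout, not shown) */
};
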
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 2635f6b1d81a..82761384a866 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -156,20 +156,17 @@ static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
 }
 
 #ifdef CONFIG_X86_64
-static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
+static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu,
+                                  struct kvm_smm_seg_state_64 *state,
+                                  int n)
 {
         struct kvm_segment seg;
-        int offset;
-        u16 flags;
 
         kvm_get_segment(vcpu, &seg, n);
-        offset = 0x7e00 + n * 16;
-
-        flags = enter_smm_get_segment_flags(&seg) >> 8;
-        PUT_SMSTATE(u16, buf, offset, seg.selector);
-        PUT_SMSTATE(u16, buf, offset + 2, flags);
-        PUT_SMSTATE(u32, buf, offset + 4, seg.limit);
-        PUT_SMSTATE(u64, buf, offset + 8, seg.base);
+        state->selector = seg.selector;
+        state->attributes = enter_smm_get_segment_flags(&seg) >> 8;
+        state->limit = seg.limit;
+        state->base = seg.base;
 }
 #endif
@@ -218,57 +215,52 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu,
 }
 
 #ifdef CONFIG_X86_64
-static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
+static void enter_smm_save_state_64(struct kvm_vcpu *vcpu,
+                                    struct kvm_smram_state_64 *smram)
 {
         struct desc_ptr dt;
-        struct kvm_segment seg;
         unsigned long val;
         int i;
 
         for (i = 0; i < 16; i++)
-                PUT_SMSTATE(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
+                smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i);
+
+        smram->rip = kvm_rip_read(vcpu);
+        smram->rflags = kvm_get_rflags(vcpu);
 
-        PUT_SMSTATE(u64, buf, 0x7f78, kvm_rip_read(vcpu));
-        PUT_SMSTATE(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
 
         kvm_get_dr(vcpu, 6, &val);
-        PUT_SMSTATE(u64, buf, 0x7f68, val);
+        smram->dr6 = val;
         kvm_get_dr(vcpu, 7, &val);
-        PUT_SMSTATE(u64, buf, 0x7f60, val);
-
-        PUT_SMSTATE(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
-        PUT_SMSTATE(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
-        PUT_SMSTATE(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
+        smram->dr7 = val;
 
-        PUT_SMSTATE(u32, buf, 0x7f00, vcpu->arch.smbase);
+        smram->cr0 = kvm_read_cr0(vcpu);
+        smram->cr3 = kvm_read_cr3(vcpu);
+        smram->cr4 = kvm_read_cr4(vcpu);
 
-        /* revision id */
-        PUT_SMSTATE(u32, buf, 0x7efc, 0x00020064);
+        smram->smbase = vcpu->arch.smbase;
+        smram->smm_revison = 0x00020064;
 
-        PUT_SMSTATE(u64, buf, 0x7ed0, vcpu->arch.efer);
+        smram->efer = vcpu->arch.efer;
 
-        kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-        PUT_SMSTATE(u16, buf, 0x7e90, seg.selector);
-        PUT_SMSTATE(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
-        PUT_SMSTATE(u32, buf, 0x7e94, seg.limit);
-        PUT_SMSTATE(u64, buf, 0x7e98, seg.base);
+        enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR);
 
         static_call(kvm_x86_get_idt)(vcpu, &dt);
-        PUT_SMSTATE(u32, buf, 0x7e84, dt.size);
-        PUT_SMSTATE(u64, buf, 0x7e88, dt.address);
+        smram->idtr.limit = dt.size;
+        smram->idtr.base = dt.address;
 
-        kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-        PUT_SMSTATE(u16, buf, 0x7e70, seg.selector);
-        PUT_SMSTATE(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
-        PUT_SMSTATE(u32, buf, 0x7e74, seg.limit);
-        PUT_SMSTATE(u64, buf, 0x7e78, seg.base);
+        enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR);
 
         static_call(kvm_x86_get_gdt)(vcpu, &dt);
-        PUT_SMSTATE(u32, buf, 0x7e64, dt.size);
-        PUT_SMSTATE(u64, buf, 0x7e68, dt.address);
+        smram->gdtr.limit = dt.size;
+        smram->gdtr.base = dt.address;
 
-        for (i = 0; i < 6; i++)
-                enter_smm_save_seg_64(vcpu, buf, i);
+        enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES);
+        enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS);
+        enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS);
+        enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
+        enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
+        enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
 }
 #endif
@@ -285,7 +277,7 @@ void enter_smm(struct kvm_vcpu *vcpu)
 
 #ifdef CONFIG_X86_64
         if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-                enter_smm_save_state_64(vcpu, smram.bytes);
+                enter_smm_save_state_64(vcpu, &smram.smram64);
         else
 #endif
                 enter_smm_save_state_32(vcpu, &smram.smram32);
@@ -395,18 +387,17 @@ static int rsm_load_seg_32(struct kvm_vcpu *vcpu,
 }
 
 #ifdef CONFIG_X86_64
-static int rsm_load_seg_64(struct kvm_vcpu *vcpu, const char *smstate,
+
+static int rsm_load_seg_64(struct kvm_vcpu *vcpu,
+                           const struct kvm_smm_seg_state_64 *state,
                            int n)
 {
         struct kvm_segment desc;
-        int offset;
-
-        offset = 0x7e00 + n * 16;
-        desc.selector = GET_SMSTATE(u16, smstate, offset);
-        rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
-        desc.limit = GET_SMSTATE(u32, smstate, offset + 4);
-        desc.base = GET_SMSTATE(u64, smstate, offset + 8);
+        desc.selector = state->selector;
+        rsm_set_desc_flags(&desc, state->attributes << 8);
+        desc.limit = state->limit;
+        desc.base = state->base;
         kvm_set_segment(vcpu, &desc, n);
         return X86EMUL_CONTINUE;
 }
@@ -502,69 +493,51 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
 
 #ifdef CONFIG_X86_64
 static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
-                             u8 *smstate)
+                             const struct kvm_smram_state_64 *smstate)
 {
         struct kvm_vcpu *vcpu = ctxt->vcpu;
-        struct kvm_segment desc;
         struct desc_ptr dt;
-        u64 val, cr0, cr3, cr4;
         int i, r;
 
         for (i = 0; i < 16; i++)
-                *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
-
-        ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
-        ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
+                *reg_write(ctxt, i) = smstate->gprs[15 - i];
 
-        val = GET_SMSTATE(u64, smstate, 0x7f68);
+        ctxt->_eip = smstate->rip;
+        ctxt->eflags = smstate->rflags | X86_EFLAGS_FIXED;
 
-        if (kvm_set_dr(vcpu, 6, val))
+        if (kvm_set_dr(vcpu, 6, smstate->dr6))
                 return X86EMUL_UNHANDLEABLE;
-
-        val = GET_SMSTATE(u64, smstate, 0x7f60);
-
-        if (kvm_set_dr(vcpu, 7, val))
+        if (kvm_set_dr(vcpu, 7, smstate->dr7))
                 return X86EMUL_UNHANDLEABLE;
 
-        cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
-        cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
-        cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
-        vcpu->arch.smbase = GET_SMSTATE(u32, smstate, 0x7f00);
-        val = GET_SMSTATE(u64, smstate, 0x7ed0);
+        vcpu->arch.smbase = smstate->smbase;
 
-        if (kvm_set_msr(vcpu, MSR_EFER, val & ~EFER_LMA))
+        if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))
                 return X86EMUL_UNHANDLEABLE;
 
-        desc.selector = GET_SMSTATE(u32, smstate, 0x7e90);
-        rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
-        desc.limit = GET_SMSTATE(u32, smstate, 0x7e94);
-        desc.base = GET_SMSTATE(u64, smstate, 0x7e98);
-        kvm_set_segment(vcpu, &desc, VCPU_SREG_TR);
+        rsm_load_seg_64(vcpu, &smstate->tr, VCPU_SREG_TR);
 
-        dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
-        dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
+        dt.size = smstate->idtr.limit;
+        dt.address = smstate->idtr.base;
         static_call(kvm_x86_set_idt)(vcpu, &dt);
 
-        desc.selector = GET_SMSTATE(u32, smstate, 0x7e70);
-        rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
-        desc.limit = GET_SMSTATE(u32, smstate, 0x7e74);
-        desc.base = GET_SMSTATE(u64, smstate, 0x7e78);
-        kvm_set_segment(vcpu, &desc, VCPU_SREG_LDTR);
+        rsm_load_seg_64(vcpu, &smstate->ldtr, VCPU_SREG_LDTR);
 
-        dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
-        dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
+        dt.size = smstate->gdtr.limit;
+        dt.address = smstate->gdtr.base;
         static_call(kvm_x86_set_gdt)(vcpu, &dt);
 
-        r = rsm_enter_protected_mode(vcpu, cr0, cr3, cr4);
+        r = rsm_enter_protected_mode(vcpu, smstate->cr0, smstate->cr3, smstate->cr4);
         if (r != X86EMUL_CONTINUE)
                 return r;
 
-        for (i = 0; i < 6; i++) {
-                r = rsm_load_seg_64(vcpu, smstate, i);
-                if (r != X86EMUL_CONTINUE)
-                        return r;
-        }
+        rsm_load_seg_64(vcpu, &smstate->es, VCPU_SREG_ES);
+        rsm_load_seg_64(vcpu, &smstate->cs, VCPU_SREG_CS);
+        rsm_load_seg_64(vcpu, &smstate->ss, VCPU_SREG_SS);
+        rsm_load_seg_64(vcpu, &smstate->ds, VCPU_SREG_DS);
+        rsm_load_seg_64(vcpu, &smstate->fs, VCPU_SREG_FS);
+        rsm_load_seg_64(vcpu, &smstate->gs, VCPU_SREG_GS);
 
         return X86EMUL_CONTINUE;
 }
@@ -639,7 +612,7 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
 
 #ifdef CONFIG_X86_64
         if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-                return rsm_load_state_64(ctxt, smram.bytes);
+                return rsm_load_state_64(ctxt, &smram.smram64);
         else
 #endif
                 return rsm_load_state_32(ctxt, &smram.smram32);
-- 
2.38.1