0013-kvm-vmx-Reinstate-support-for-CPUs-without-virtual-N.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <[email protected]>
Date: Mon, 6 Nov 2017 13:31:12 +0100
Subject: [PATCH] kvm: vmx: Reinstate support for CPUs without virtual NMI
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 8a1b43922d0d1279e7936ba85c4c2a870403c95f upstream.

This is more or less a revert of commit 2c82878b0cb3 ("KVM: VMX: require
virtual NMI support", 2017-03-27); it turns out that Core 2 Duo machines
only had virtual NMIs in some SKUs.

The revert is not trivial because in the meanwhile there have been several
fixes to nested NMI injection. Therefore, the entire vNMI state is moved
to struct loaded_vmcs.
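
For reference, the vNMI tracking state that ends up in struct loaded_vmcs
(quoted from the first hunk below) is:

        /* Support for vnmi-less CPUs */
        int soft_vnmi_blocked;
        ktime_t entry_time;
        s64 vnmi_blocked_time;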

Another change compared to before the patch is a simplification here:

        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
            !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
                                        get_vmcs12(vcpu))))) {

The final condition here is always true (because nested_cpu_has_virtual_nmis
is always false) and is removed.
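
With nested_cpu_has_virtual_nmis() gone, the check reduces to the form that
appears in vmx_handle_exit() in the hunks below:

        if (unlikely(!cpu_has_virtual_nmis() &&
                     vmx->loaded_vmcs->soft_vnmi_blocked)) {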

Fixes: 2c82878b0cb38fd516fd612c67852a6bbf282003
Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=1490803
Signed-off-by: Paolo Bonzini <[email protected]>
Signed-off-by: Radim Krčmář <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/kvm/vmx.c | 150 +++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 106 insertions(+), 44 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 118709e7597d..a2c95522ac99 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -202,6 +202,10 @@ struct loaded_vmcs {
         bool nmi_known_unmasked;
         unsigned long vmcs_host_cr3;    /* May not match real cr3 */
         unsigned long vmcs_host_cr4;    /* May not match real cr4 */
+        /* Support for vnmi-less CPUs */
+        int soft_vnmi_blocked;
+        ktime_t entry_time;
+        s64 vnmi_blocked_time;
         struct list_head loaded_vmcss_on_cpu_link;
 };
@@ -1288,6 +1292,11 @@ static inline bool cpu_has_vmx_invpcid(void)
                 SECONDARY_EXEC_ENABLE_INVPCID;
 }
 
+static inline bool cpu_has_virtual_nmis(void)
+{
+        return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
+}
+
 static inline bool cpu_has_vmx_wbinvd_exit(void)
 {
         return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -1339,11 +1348,6 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
                 (vmcs12->secondary_vm_exec_control & bit);
 }
 
-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
-{
-        return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
-}
-
 static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
 {
         return vmcs12->pin_based_vm_exec_control &
@@ -3676,9 +3680,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                                 &_vmexit_control) < 0)
                 return -EIO;
 
-        min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
-                PIN_BASED_VIRTUAL_NMIS;
-        opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
+        min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+        opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
+                PIN_BASED_VMX_PREEMPTION_TIMER;
         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
                                 &_pin_based_exec_control) < 0)
                 return -EIO;
@@ -5538,7 +5542,8 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
-        if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+        if (!cpu_has_virtual_nmis() ||
+            vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
                 enable_irq_window(vcpu);
                 return;
         }
@@ -5578,6 +5583,19 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+        if (!cpu_has_virtual_nmis()) {
+                /*
+                 * Tracking the NMI-blocked state in software is built upon
+                 * finding the next open IRQ window. This, in turn, depends on
+                 * well-behaving guests: They have to keep IRQs disabled at
+                 * least as long as the NMI handler runs. Otherwise we may
+                 * cause NMI nesting, maybe breaking the guest. But as this is
+                 * highly unlikely, we can live with the residual risk.
+                 */
+                vmx->loaded_vmcs->soft_vnmi_blocked = 1;
+                vmx->loaded_vmcs->vnmi_blocked_time = 0;
+        }
+
         ++vcpu->stat.nmi_injections;
         vmx->loaded_vmcs->nmi_known_unmasked = false;
@@ -5596,6 +5614,8 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         bool masked;
 
+        if (!cpu_has_virtual_nmis())
+                return vmx->loaded_vmcs->soft_vnmi_blocked;
         if (vmx->loaded_vmcs->nmi_known_unmasked)
                 return false;
         masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
@@ -5607,13 +5627,20 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
         struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-        vmx->loaded_vmcs->nmi_known_unmasked = !masked;
-        if (masked)
-                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                              GUEST_INTR_STATE_NMI);
-        else
-                vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
-                                GUEST_INTR_STATE_NMI);
+        if (!cpu_has_virtual_nmis()) {
+                if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
+                        vmx->loaded_vmcs->soft_vnmi_blocked = masked;
+                        vmx->loaded_vmcs->vnmi_blocked_time = 0;
+                }
+        } else {
+                vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+                if (masked)
+                        vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                      GUEST_INTR_STATE_NMI);
+                else
+                        vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                        GUEST_INTR_STATE_NMI);
+        }
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -5621,6 +5648,10 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
         if (to_vmx(vcpu)->nested.nested_run_pending)
                 return 0;
 
+        if (!cpu_has_virtual_nmis() &&
+            to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+                return 0;
+
         return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
                   (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
                    | GUEST_INTR_STATE_NMI));
@@ -6348,6 +6379,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
          * AAK134, BY25.
          */
         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                        cpu_has_virtual_nmis() &&
                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
@@ -6820,7 +6852,7 @@ static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
         }
 
         /* Create a new VMCS */
-        item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
+        item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
         if (!item)
                 return NULL;
         item->vmcs02.vmcs = alloc_vmcs();
@@ -7837,6 +7869,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
          * "blocked by NMI" bit has to be set before next VM entry.
          */
         if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+                        cpu_has_virtual_nmis() &&
                         (exit_qualification & INTR_INFO_UNBLOCK_NMI))
                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
                                 GUEST_INTR_STATE_NMI);
@@ -8554,6 +8587,25 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
                 return 0;
         }
 
+        if (unlikely(!cpu_has_virtual_nmis() &&
+                     vmx->loaded_vmcs->soft_vnmi_blocked)) {
+                if (vmx_interrupt_allowed(vcpu)) {
+                        vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+                } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
+                           vcpu->arch.nmi_pending) {
+                        /*
+                         * This CPU don't support us in finding the end of an
+                         * NMI-blocked window if the guest runs with IRQs
+                         * disabled. So we pull the trigger after 1 s of
+                         * futile waiting, but inform the user about this.
+                         */
+                        printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
+                               "state on VCPU %d after 1 s timeout\n",
+                               __func__, vcpu->vcpu_id);
+                        vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+                }
+        }
+
         if (exit_reason < kvm_vmx_max_exit_handlers
             && kvm_vmx_exit_handlers[exit_reason])
                 return kvm_vmx_exit_handlers[exit_reason](vcpu);
@@ -8837,33 +8889,38 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
         idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-        if (vmx->loaded_vmcs->nmi_known_unmasked)
-                return;
-        /*
-         * Can't use vmx->exit_intr_info since we're not sure what
-         * the exit reason is.
-         */
-        exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
-        unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
-        vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
-        /*
-         * SDM 3: 27.7.1.2 (September 2008)
-         * Re-set bit "block by NMI" before VM entry if vmexit caused by
-         * a guest IRET fault.
-         * SDM 3: 23.2.2 (September 2008)
-         * Bit 12 is undefined in any of the following cases:
-         *  If the VM exit sets the valid bit in the IDT-vectoring
-         *   information field.
-         *  If the VM exit is due to a double fault.
-         */
-        if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
-            vector != DF_VECTOR && !idtv_info_valid)
-                vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
-                              GUEST_INTR_STATE_NMI);
-        else
-                vmx->loaded_vmcs->nmi_known_unmasked =
-                        !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
-                          & GUEST_INTR_STATE_NMI);
+        if (cpu_has_virtual_nmis()) {
+                if (vmx->loaded_vmcs->nmi_known_unmasked)
+                        return;
+                /*
+                 * Can't use vmx->exit_intr_info since we're not sure what
+                 * the exit reason is.
+                 */
+                exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+                unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+                vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+                /*
+                 * SDM 3: 27.7.1.2 (September 2008)
+                 * Re-set bit "block by NMI" before VM entry if vmexit caused by
+                 * a guest IRET fault.
+                 * SDM 3: 23.2.2 (September 2008)
+                 * Bit 12 is undefined in any of the following cases:
+                 *  If the VM exit sets the valid bit in the IDT-vectoring
+                 *   information field.
+                 *  If the VM exit is due to a double fault.
+                 */
+                if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+                    vector != DF_VECTOR && !idtv_info_valid)
+                        vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                      GUEST_INTR_STATE_NMI);
+                else
+                        vmx->loaded_vmcs->nmi_known_unmasked =
+                                !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+                                  & GUEST_INTR_STATE_NMI);
+        } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
+                vmx->loaded_vmcs->vnmi_blocked_time +=
+                        ktime_to_ns(ktime_sub(ktime_get(),
+                                              vmx->loaded_vmcs->entry_time));
 }
 
 static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
@@ -8980,6 +9037,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         struct vcpu_vmx *vmx = to_vmx(vcpu);
         unsigned long debugctlmsr, cr3, cr4;
 
+        /* Record the guest's net vcpu time for enforced NMI injections. */
+        if (unlikely(!cpu_has_virtual_nmis() &&
+                     vmx->loaded_vmcs->soft_vnmi_blocked))
+                vmx->loaded_vmcs->entry_time = ktime_get();
+
         /* Don't enter VMX if guest state is invalid, let the exit handler
            start emulation until we arrive back to a valid state */
         if (vmx->emulation_required)
-- 
2.14.2