0010-x86-KVM-Make-sure-KVM_VCPU_FLUSH_TLB-flag-is-not-mis.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Date: Fri, 31 Jan 2020 08:06:43 -0300
Subject: [PATCH] x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed

CVE-2019-3016

There is a potential race in record_steal_time() between setting
host-local vcpu->arch.st.steal.preempted to zero (i.e. clearing
KVM_VCPU_PREEMPTED) and propagating this value to the guest with
kvm_write_guest_cached(). Between those two events the guest may
still see KVM_VCPU_PREEMPTED in its copy of kvm_steal_time, set
KVM_VCPU_FLUSH_TLB and assume that the hypervisor will do the right
thing. Which it won't.
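
To make the window concrete, here is a minimal sketch of the
pre-patch interleaving (illustrative only; the two host calls are the
ones removed by the diff below):

	/* host: record_steal_time(), before this patch */
	xchg(&vcpu->arch.st.steal.preempted, 0);  /* clears only the host-local copy */

	/*
	 * Guest runs here: its kvm_steal_time still shows KVM_VCPU_PREEMPTED,
	 * so it sets KVM_VCPU_FLUSH_TLB in place and skips the flush IPI.
	 */

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
	/* Overwrites the guest copy with preempted == 0, discarding the
	 * guest's KVM_VCPU_FLUSH_TLB; the requested flush never happens. */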
Instead of copying, we should map kvm_steal_time and that will
guarantee atomicity of accesses to @preempted.

This is part of CVE-2019-3016.
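
For reference, the guest-side code that this race defeats looks
roughly like the following (a simplified sketch of the paravirtual
TLB-flush path in arch/x86/kernel/kvm.c; details vary across kernel
versions):

	/* guest: rather than IPI a preempted vCPU, ask the hypervisor
	 * to flush that vCPU's TLB when it is scheduled back in */
	u8 state = READ_ONCE(src->preempted);
	if (state & KVM_VCPU_PREEMPTED) {
		if (try_cmpxchg(&src->preempted, &state,
				state | KVM_VCPU_FLUSH_TLB))
			__cpumask_clear_cpu(cpu, flushmask);
	}

Once the guest has cleared the cpu from flushmask it sends no IPI, so
a host that loses KVM_VCPU_FLUSH_TLB leaves that vCPU's TLB stale.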
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 20 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cb18560b07bc..f63fa5846f08 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2488,43 +2488,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
 
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
-	if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
+	/* -EAGAIN is returned in atomic context so we can just return. */
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
+			&map, &vcpu->arch.st.cache, false))
 		return;
 
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
 	/*
 	 * Doing a TLB flush here, on the guest's behalf, can avoid
 	 * expensive IPIs.
 	 */
-	if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+	if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
 		kvm_vcpu_flush_tlb(vcpu, false);
 
-	if (vcpu->arch.st.steal.version & 1)
-		vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
+	vcpu->arch.st.steal.preempted = 0;
 
-	vcpu->arch.st.steal.version += 1;
+	if (st->version & 1)
+		st->version += 1;  /* first time write, random junk */
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	st->version += 1;
 
 	smp_wmb();
 
-	vcpu->arch.st.steal.steal += current->sched_info.run_delay -
+	st->steal += current->sched_info.run_delay -
 		vcpu->arch.st.last_steal;
 	vcpu->arch.st.last_steal = current->sched_info.run_delay;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-
 	smp_wmb();
 
-	vcpu->arch.st.steal.version += 1;
+	st->version += 1;
 
-	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-		&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
 }
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3396,18 +3398,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 {
+	struct kvm_host_map map;
+	struct kvm_steal_time *st;
+
 	if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
 		return;
 
 	if (vcpu->arch.st.steal.preempted)
 		return;
 
-	vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+	if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
+			&vcpu->arch.st.cache, true))
+		return;
+
+	st = map.hva +
+		offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
+	st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
 
-	kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
-			&vcpu->arch.st.steal.preempted,
-			offsetof(struct kvm_steal_time, preempted),
-			sizeof(vcpu->arch.st.steal.preempted));
+	kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)