|
|
@@ -0,0 +1,88 @@
|
|
|
+From ebe182a7c6221878cbb5d03e1eafa8002494f8cb Mon Sep 17 00:00:00 2001
|
|
|
+From: Ladi Prosek <[email protected]>
|
|
|
+Date: Tue, 10 Oct 2017 17:30:59 +0200
|
|
|
+Subject: [CVE-2017-12188 2/2] KVM: MMU: always terminate page walks at level 1
|
|
|
+MIME-Version: 1.0
|
|
|
+Content-Type: text/plain; charset=UTF-8
|
|
|
+Content-Transfer-Encoding: 8bit
|
|
|
+
|
|
|
+is_last_gpte() is not equivalent to the pseudo-code given in commit
|
|
|
+6bb69c9b69c31 ("KVM: MMU: simplify last_pte_bitmap") because an incorrect
|
|
|
+value of last_nonleaf_level may override the result even if level == 1.
|
|
|
+
|
|
|
+It is critical for is_last_gpte() to return true on level == 1 to
|
|
|
+terminate page walks. Otherwise memory corruption may occur as level
|
|
|
+is used as an index to various data structures throughout the page
|
|
|
+walking code. Even though the actual bug would be wherever the MMU is
|
|
|
+initialized (as in the previous patch), be defensive and ensure here
|
|
|
+that is_last_gpte() returns the correct value.
|
|
|
+
|
|
|
+This patch is also enough to fix CVE-2017-12188, and suggested for
|
|
|
+stable and distro kernels.
|
|
|
+
|
|
|
+Fixes: 6bb69c9b69c3 ("KVM: MMU: simplify last_pte_bitmap")
|
|
|
+Cc: [email protected]
|
|
|
+Cc: Andy Honig <[email protected]>
|
|
|
+Signed-off-by: Ladi Prosek <[email protected]>
|
|
|
+[Panic if walk_addr_generic gets an incorrect level; this is a serious
|
|
|
+ bug and it's not worth a WARN_ON where the recovery path might hide
|
|
|
+ further exploitable issues; suggested by Andrew Honig. - Paolo]
|
|
|
+Signed-off-by: Paolo Bonzini <[email protected]>
|
|
|
+Signed-off-by: Fabian Grünbichler <[email protected]>
|
|
|
+---
|
|
|
+ arch/x86/kvm/paging_tmpl.h | 3 ++-
|
|
|
+ arch/x86/kvm/mmu.c | 14 +++++++-------
|
|
|
+ 2 files changed, 9 insertions(+), 8 deletions(-)
|
|
|
+
|
|
|
+diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
|
|
|
+index b0454c7e4cff..da06dc8c4fc4 100644
|
|
|
+--- a/arch/x86/kvm/paging_tmpl.h
|
|
|
++++ b/arch/x86/kvm/paging_tmpl.h
|
|
|
+@@ -334,10 +334,11 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
|
|
|
+ --walker->level;
|
|
|
+
|
|
|
+ index = PT_INDEX(addr, walker->level);
|
|
|
+-
|
|
|
+ table_gfn = gpte_to_gfn(pte);
|
|
|
+ offset = index * sizeof(pt_element_t);
|
|
|
+ pte_gpa = gfn_to_gpa(table_gfn) + offset;
|
|
|
++
|
|
|
++ BUG_ON(walker->level < 1);
|
|
|
+ walker->table_gfn[walker->level - 1] = table_gfn;
|
|
|
+ walker->pte_gpa[walker->level - 1] = pte_gpa;
|
|
|
+
|
|
|
+diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
|
|
|
+index ca0112742343..2e4a6732aaa9 100644
|
|
|
+--- a/arch/x86/kvm/mmu.c
|
|
|
++++ b/arch/x86/kvm/mmu.c
|
|
|
+@@ -3934,13 +3934,6 @@ static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
|
|
|
+ static inline bool is_last_gpte(struct kvm_mmu *mmu,
|
|
|
+ unsigned level, unsigned gpte)
|
|
|
+ {
|
|
|
+- /*
|
|
|
+- * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
|
|
|
+- * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
|
|
|
+- * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
|
|
|
+- */
|
|
|
+- gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
|
|
|
+-
|
|
|
+ /*
|
|
|
+ * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
|
|
|
+ * If it is clear, there are no large pages at this level, so clear
|
|
|
+@@ -3948,6 +3941,13 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
|
|
|
+ */
|
|
|
+ gpte &= level - mmu->last_nonleaf_level;
|
|
|
+
|
|
|
++ /*
|
|
|
++ * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
|
|
|
++ * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
|
|
|
++ * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
|
|
|
++ */
|
|
|
++ gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
|
|
|
++
|
|
|
+ return gpte & PT_PAGE_SIZE_MASK;
|
|
|
+ }
|
|
|
+
|
|
|
+--
|
|
|
+2.14.1
|
|
|
+
|