From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Dave Hansen <[email protected]>
Date: Mon, 4 Dec 2017 15:07:57 +0100
Subject: [PATCH] x86/mm: Allow flushing for future ASID switches
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

If changing the page tables in such a way that an invalidation of all
contexts (aka. PCIDs / ASIDs) is required, they can be actively invalidated
by:

 1. INVPCID for each PCID (works for single pages too), as sketched below.

 2. Load CR3 with each PCID without the NOFLUSH bit set.

 3. Load CR3 with the NOFLUSH bit set for each and do INVLPG for each address.
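(Editorial sketch of option 1, not part of the patch: roughly what eagerly
invalidating every dynamic ASID with INVPCID type 1, single-context, would
look like. The helper name, the loop bound, and the "+ 1" ASID-to-PCID
encoding are illustrative stand-ins, not the kernel's real mapping; the
opcode bytes follow the kernel's __invpcid() helper.)

	static void eager_invalidate_all_asids(void)
	{
		u16 asid;

		for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
			/* 128-bit INVPCID descriptor: { pcid, linear address } */
			struct { u64 pcid; u64 addr; } desc = { asid + 1, 0 };

			/* invpcid (%rcx), %rax -- type 1 = single-context */
			asm volatile(".byte 0x66, 0x0f, 0x38, 0x82, 0x01"
				     : : "m" (desc), "a" (1UL), "c" (&desc)
				     : "memory");
		}
	}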
But, none of these are really feasible since there are ~6 ASIDs (12 with
PAGE_TABLE_ISOLATION) at the time that invalidation is required.
Instead of actively invalidating them, invalidate the *current* context and
also mark the cpu_tlbstate _quickly_ to indicate that a future invalidation
is required.

At the next context-switch, look for this indicator
('invalidate_other' being set) and invalidate all of the
cpu_tlbstate.ctxs[] entries.

This ensures that any future context switches will do a full flush
of the TLB, picking up the previous changes.
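(Editorial sketch, not part of the patch: a freestanding C model of the
deferred-invalidation scheme described above. The names mirror the patch
but are simplified; there is no percpu machinery, preemption control, or
eviction policy here. A zeroed ctx_id can never match a live mm because
real context IDs start at 1 -- see last_mm_ctx_id in the diff below.)

	#include <stdbool.h>
	#include <stdint.h>

	#define NR_DYN_ASIDS 6

	struct tlb_ctx { uint64_t ctx_id; };	/* 0 == never matches */

	struct tlb_state {
		struct tlb_ctx ctxs[NR_DYN_ASIDS];
		uint16_t loaded_asid;		/* ASID currently in CR3 */
		bool invalidate_other;		/* others need a lazy flush */
	} cpu_tlbstate;

	/* Cheap path: flag the other ASIDs instead of flushing them now. */
	static void invalidate_other_asid(void)
	{
		cpu_tlbstate.invalidate_other = true;
	}

	/*
	 * Deferred work, run at the next context switch: wipe every slot
	 * except the loaded one (it was kept up to date as it ran).
	 */
	static void clear_asid_other(void)
	{
		uint16_t asid;

		for (asid = 0; asid < NR_DYN_ASIDS; asid++) {
			if (asid == cpu_tlbstate.loaded_asid)
				continue;
			cpu_tlbstate.ctxs[asid].ctx_id = 0;
		}
		cpu_tlbstate.invalidate_other = false;
	}

	/*
	 * Context switch: a wiped slot cannot match next_ctx_id, so the
	 * lookup misses and the caller is told to do a full TLB flush.
	 * Returns true when a flush is needed.
	 */
	static bool choose_new_asid(uint64_t next_ctx_id, uint16_t *new_asid)
	{
		uint16_t asid;

		if (cpu_tlbstate.invalidate_other)
			clear_asid_other();

		for (asid = 0; asid < NR_DYN_ASIDS; asid++) {
			if (cpu_tlbstate.ctxs[asid].ctx_id != next_ctx_id)
				continue;
			*new_asid = asid;
			return false;	/* cached: no flush needed */
		}

		/* Miss: pick a victim slot (round-robin elided) and flush. */
		*new_asid = 0;
		cpu_tlbstate.ctxs[0].ctx_id = next_ctx_id;
		return true;
	}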
[ tglx: Folded more fixups from Peter ]

Signed-off-by: Dave Hansen <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: David Laight <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Eduardo Valentin <[email protected]>
Cc: Greg KH <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 2ea907c4fe7b78e5840c1dc07800eae93248cad1)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit fbb7e6e9e7e7cedecc164d660d08563f88103b56)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/tlbflush.h | 37 +++++++++++++++++++++++++++++--------
 arch/x86/mm/tlb.c               | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+), 8 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 503f87c30c15..3769ce182eac 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -124,6 +124,17 @@ struct tlb_state {
 	 */
 	bool is_lazy;
 
+	/*
+	 * If set we changed the page tables in such a way that we
+	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
+	 * This tells us to go invalidate all the non-loaded ctxs[]
+	 * on the next context switch.
+	 *
+	 * The current ctx was kept up-to-date as it ran and does not
+	 * need to be invalidated.
+	 */
+	bool invalidate_other;
+
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
 	 * disabling interrupts when modifying either one.
@@ -201,6 +212,14 @@ static inline unsigned long cr4_read_shadow(void)
 	return this_cpu_read(cpu_tlbstate.cr4);
 }
 
+/*
+ * Mark all other ASIDs as invalid, preserving the current one.
+ */
+static inline void invalidate_other_asid(void)
+{
+	this_cpu_write(cpu_tlbstate.invalidate_other, true);
+}
+
 /*
  * Save some of cr4 feature set we're using (e.g. Pentium 4MB
  * enable and PPro Global page enable), so that any CPU's that boot
@@ -287,14 +306,6 @@ static inline void __flush_tlb_all(void)
 		 */
 		__flush_tlb();
 	}
-
-	/*
-	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
-	 * we'd end up flushing kernel translations for the current ASID but
-	 * we might fail to flush kernel translations for other cached ASIDs.
-	 *
-	 * To avoid this issue, we force PCID off if PGE is off.
-	 */
 }
 
 /*
@@ -304,6 +315,16 @@ static inline void __flush_tlb_one(unsigned long addr)
 {
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	/*
+	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
+	 * but since kernel space is replicated across all, we must also
+	 * invalidate all others.
+	 */
+	invalidate_other_asid();
 }
 
 #define TLB_FLUSH_ALL	-1UL
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 87d4f961bcb4..ce87b69fb4e0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,38 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+/*
+ * We get here when we do something requiring a TLB invalidation
+ * but could not go invalidate all of the contexts.  We do the
+ * necessary invalidation by clearing out the 'ctx_id' which
+ * forces a TLB flush when the context is loaded.
+ */
+void clear_asid_other(void)
+{
+	u16 asid;
+
+	/*
+	 * This is only expected to be set if we have disabled
+	 * kernel _PAGE_GLOBAL pages.
+	 */
+	if (!static_cpu_has(X86_FEATURE_PTI)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
+		/* Do not need to flush the current asid */
+		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
+			continue;
+		/*
+		 * Make sure the next time we go to switch to
+		 * this asid, we do a flush:
+		 */
+		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
+	}
+	this_cpu_write(cpu_tlbstate.invalidate_other, false);
+}
+
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
 DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
@@ -43,6 +75,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 		return;
 	}
 
+	if (this_cpu_read(cpu_tlbstate.invalidate_other))
+		clear_asid_other();
+
 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
 		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
 		    next->context.ctx_id)
-- 
2.14.2
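(Editorial follow-up, not part of the patch: a tiny driver for the C model
sketched after the commit message above. It walks the intended sequence --
the current ASID would be flushed eagerly while the others are only
flagged, and the very next switch to a previously cached mm is forced into
a full flush.)

	int main(void)
	{
		uint16_t asid;
		bool flush;

		cpu_tlbstate.ctxs[0].ctx_id = 42;	/* running mm, kept current */
		cpu_tlbstate.ctxs[1].ctx_id = 43;	/* another cached mm */
		cpu_tlbstate.loaded_asid = 0;

		/*
		 * Kernel page tables changed: the current ASID's flush is
		 * elided here; the other slots are merely flagged.
		 */
		invalidate_other_asid();

		/*
		 * Switch to the cached mm 43: its slot was wiped by
		 * clear_asid_other(), so the lookup misses and a full
		 * TLB flush is demanded.
		 */
		flush = choose_new_asid(43, &asid);
		return flush ? 0 : 1;	/* exits 0: flush was requested */
	}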