
From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Dave Hansen <[email protected]>
Date: Mon, 4 Dec 2017 15:07:57 +0100
Subject: [PATCH] x86/mm: Allow flushing for future ASID switches
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

If changing the page tables in such a way that an invalidation of all
contexts (aka. PCIDs / ASIDs) is required, they can be actively invalidated
by:

 1. INVPCID for each PCID (works for single pages too).

 2. Load CR3 with each PCID without the NOFLUSH bit set.

 3. Load CR3 with the NOFLUSH bit set for each and do INVLPG for each
    address.

But none of these are really feasible since there are ~6 ASIDs (12 with
PAGE_TABLE_ISOLATION) at the time that invalidation is required.
Instead of actively invalidating them, invalidate the *current* context and
also mark the cpu_tlbstate _quickly_ to indicate that a future invalidation
is required.

At the next context switch, look for this indicator ('invalidate_other'
being set) and invalidate all of the cpu_tlbstate.ctxs[] entries.

This ensures that any future context switches will do a full flush
of the TLB, picking up the previous changes.
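
In short, the mechanism amounts to a deferred "flush the others later"
flag. The following is a simplified, standalone C sketch for illustration
only; toy_tlbstate, NR_ASIDS and the plain-pointer interface are stand-ins
assumed here, not the real per-CPU cpu_tlbstate / TLB_NR_DYN_ASIDS
machinery touched by the diff below:

	#include <stdbool.h>
	#include <stdint.h>

	#define NR_ASIDS 6	/* toy value: ~6 ASIDs, 12 with PAGE_TABLE_ISOLATION */

	struct toy_tlbstate {
		uint64_t ctx_id[NR_ASIDS];	/* 0 == flush before next use */
		int loaded_asid;		/* context currently loaded in CR3 */
		bool invalidate_other;		/* other contexts are stale */
	};

	/* Fast path: only note that every non-loaded context is now stale. */
	void invalidate_other_asid(struct toy_tlbstate *ts)
	{
		ts->invalidate_other = true;
	}

	/* Slow path, run at the next context switch before picking an ASID. */
	void clear_asid_other(struct toy_tlbstate *ts)
	{
		int asid;

		for (asid = 0; asid < NR_ASIDS; asid++) {
			if (asid == ts->loaded_asid)
				continue;	/* current context stayed up to date */
			ts->ctx_id[asid] = 0;	/* force a flush when it is reloaded */
		}
		ts->invalidate_other = false;
	}
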
[ tglx: Folded more fixups from Peter ]

Signed-off-by: Dave Hansen <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: David Laight <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Eduardo Valentin <[email protected]>
Cc: Greg KH <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 2ea907c4fe7b78e5840c1dc07800eae93248cad1)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit fbb7e6e9e7e7cedecc164d660d08563f88103b56)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/tlbflush.h | 37 +++++++++++++++++++++++++++++--------
 arch/x86/mm/tlb.c               | 35 +++++++++++++++++++++++++++++++++++
 2 files changed, 64 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 503f87c30c15..3769ce182eac 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -124,6 +124,17 @@ struct tlb_state {
 	 */
 	bool is_lazy;
 
+	/*
+	 * If set we changed the page tables in such a way that we
+	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
+	 * This tells us to go invalidate all the non-loaded ctxs[]
+	 * on the next context switch.
+	 *
+	 * The current ctx was kept up-to-date as it ran and does not
+	 * need to be invalidated.
+	 */
+	bool invalidate_other;
+
 	/*
 	 * Access to this CR4 shadow and to H/W CR4 is protected by
 	 * disabling interrupts when modifying either one.
@@ -201,6 +212,14 @@ static inline unsigned long cr4_read_shadow(void)
 	return this_cpu_read(cpu_tlbstate.cr4);
 }
 
+/*
+ * Mark all other ASIDs as invalid, preserves the current.
+ */
+static inline void invalidate_other_asid(void)
+{
+	this_cpu_write(cpu_tlbstate.invalidate_other, true);
+}
+
 /*
  * Save some of cr4 feature set we're using (e.g. Pentium 4MB
  * enable and PPro Global page enable), so that any CPU's that boot
@@ -287,14 +306,6 @@ static inline void __flush_tlb_all(void)
 		 */
 		__flush_tlb();
 	}
-
-	/*
-	 * Note: if we somehow had PCID but not PGE, then this wouldn't work --
-	 * we'd end up flushing kernel translations for the current ASID but
-	 * we might fail to flush kernel translations for other cached ASIDs.
-	 *
-	 * To avoid this issue, we force PCID off if PGE is off.
-	 */
 }
 
 /*
@@ -304,6 +315,16 @@ static inline void __flush_tlb_one(unsigned long addr)
 {
 	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 	__flush_tlb_single(addr);
+
+	if (!static_cpu_has(X86_FEATURE_PTI))
+		return;
+
+	/*
+	 * __flush_tlb_single() will have cleared the TLB entry for this ASID,
+	 * but since kernel space is replicated across all, we must also
+	 * invalidate all others.
+	 */
+	invalidate_other_asid();
 }
 
 #define TLB_FLUSH_ALL	-1UL
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 87d4f961bcb4..ce87b69fb4e0 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,38 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+/*
+ * We get here when we do something requiring a TLB invalidation
+ * but could not go invalidate all of the contexts.  We do the
+ * necessary invalidation by clearing out the 'ctx_id' which
+ * forces a TLB flush when the context is loaded.
+ */
+void clear_asid_other(void)
+{
+	u16 asid;
+
+	/*
+	 * This is only expected to be set if we have disabled
+	 * kernel _PAGE_GLOBAL pages.
+	 */
+	if (!static_cpu_has(X86_FEATURE_PTI)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
+		/* Do not need to flush the current asid */
+		if (asid == this_cpu_read(cpu_tlbstate.loaded_mm_asid))
+			continue;
+		/*
+		 * Make sure the next time we go to switch to
+		 * this asid, we do a flush:
+		 */
+		this_cpu_write(cpu_tlbstate.ctxs[asid].ctx_id, 0);
+	}
+	this_cpu_write(cpu_tlbstate.invalidate_other, false);
+}
+
 atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
 
 DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode);
@@ -43,6 +75,9 @@ static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen,
 		return;
 	}
 
+	if (this_cpu_read(cpu_tlbstate.invalidate_other))
+		clear_asid_other();
+
 	for (asid = 0; asid < TLB_NR_DYN_ASIDS; asid++) {
 		if (this_cpu_read(cpu_tlbstate.ctxs[asid].ctx_id) !=
 		    next->context.ctx_id)
--
2.14.2