0039-x86-mm-Give-each-mm-TLB-flush-generation-a-unique-ID.patch

From 09fedd9befc7affbfa9490ef3993d60c7d582a6f Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <[email protected]>
Date: Thu, 29 Jun 2017 08:53:15 -0700
Subject: [PATCH 039/231] x86/mm: Give each mm TLB flush generation a unique ID
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

This adds two new variables to mmu_context_t: ctx_id and tlb_gen.
ctx_id uniquely identifies the mm_struct and will never be reused.
For a given mm_struct (and hence ctx_id), tlb_gen is a monotonic
count of the number of times that a TLB flush has been requested.
The pair (ctx_id, tlb_gen) can be used as an identifier for TLB
flush actions and will be used in subsequent patches to reliably
determine whether all needed TLB flushes have occurred on a given
CPU.

This patch is split out for ease of review. By itself, it has no
real effect other than creating and updating the new variables.

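To make the (ctx_id, tlb_gen) pair concrete: a CPU that keeps a private
snapshot of the pair can later decide whether its TLB is stale for a given
mm. A minimal sketch of that check follows; the struct and function names
are illustrative only and are not introduced by this patch:

	/* Hypothetical per-CPU record of the last flush this CPU performed. */
	struct tlb_snapshot {
		u64 ctx_id;	/* which mm the cached TLB entries belong to */
		u64 tlb_gen;	/* generation this CPU has flushed up to */
	};

	static bool needs_flush(struct tlb_snapshot *snap, struct mm_struct *mm)
	{
		/* A different mm entirely: whatever is cached is unrelated. */
		if (snap->ctx_id != mm->context.ctx_id)
			return true;

		/* Same mm: stale only if flushes were requested since our last one. */
		return snap->tlb_gen < (u64)atomic64_read(&mm->context.tlb_gen);
	}
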
Signed-off-by: Andy Lutomirski <[email protected]>
Reviewed-by: Nadav Amit <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Arjan van de Ven <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/413a91c24dab3ed0caa5f4e4d017d87b0857f920.1498751203.git.luto@kernel.org
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit f39681ed0f48498b80455095376f11535feea332)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit e566a0dfbb2a5f7ea90dd66ce384740372739e14)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/mmu.h         | 25 +++++++++++++++++++++++--
 arch/x86/include/asm/mmu_context.h |  6 ++++++
 arch/x86/include/asm/tlbflush.h    | 18 ++++++++++++++++++
 arch/x86/mm/tlb.c                  |  6 ++++--
 4 files changed, 51 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 79b647a7ebd0..bb8c597c2248 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -3,12 +3,28 @@
 
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
+#include <linux/atomic.h>
 
 /*
- * The x86 doesn't have a mmu context, but
- * we put the segment information here.
+ * x86 has arch-specific MMU state beyond what lives in mm_struct.
  */
 typedef struct {
+	/*
+	 * ctx_id uniquely identifies this mm_struct. A ctx_id will never
+	 * be reused, and zero is not a valid ctx_id.
+	 */
+	u64 ctx_id;
+
+	/*
+	 * Any code that needs to do any sort of TLB flushing for this
+	 * mm will first make its changes to the page tables, then
+	 * increment tlb_gen, then flush. This lets the low-level
+	 * flushing code keep track of what needs flushing.
+	 *
+	 * This is not used on Xen PV.
+	 */
+	atomic64_t tlb_gen;
+
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
 	struct ldt_struct *ldt;
 #endif
@@ -37,6 +53,11 @@ typedef struct {
 #endif
 } mm_context_t;
 
+#define INIT_MM_CONTEXT(mm)						\
+	.context = {							\
+		.ctx_id = 1,						\
+	}
+
 void leave_mm(int cpu);
 
 #endif /* _ASM_X86_MMU_H */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 7a234be7e298..6c05679c715b 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -12,6 +12,9 @@
 #include <asm/tlbflush.h>
 #include <asm/paravirt.h>
 #include <asm/mpx.h>
+
+extern atomic64_t last_mm_ctx_id;
+
 #ifndef CONFIG_PARAVIRT
 static inline void paravirt_activate_mm(struct mm_struct *prev,
 					struct mm_struct *next)
@@ -132,6 +135,9 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
+	atomic64_set(&mm->context.tlb_gen, 0);
+
 #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
 	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
 		/* pkey 0 is the default and always allocated */
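
A worked example of the numbering (an editorial sketch; the call sites are
assumed, only the values follow from this patch): INIT_MM_CONTEXT() above is
meant to give init_mm ctx_id 1, last_mm_ctx_id (added to tlb.c below) also
starts at 1, and atomic64_inc_return() returns the post-increment value, so
ctx_id 0 is never handed out:

	/* First mm set up through init_new_context(), e.g. at the first fork(): */
	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);	/* -> 2 */

	/* Each later mm gets the next value: 3, 4, ... with no reuse. */
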
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 2b3d68093235..f1f2e73b7b77 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -57,6 +57,23 @@ static inline void invpcid_flush_all_nonglobals(void)
 	__invpcid(0, 0, INVPCID_TYPE_ALL_NON_GLOBAL);
 }
 
+static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
+{
+	u64 new_tlb_gen;
+
+	/*
+	 * Bump the generation count. This also serves as a full barrier
+	 * that synchronizes with switch_mm(): callers are required to order
+	 * their read of mm_cpumask after their writes to the paging
+	 * structures.
+	 */
+	smp_mb__before_atomic();
+	new_tlb_gen = atomic64_inc_return(&mm->context.tlb_gen);
+	smp_mb__after_atomic();
+
+	return new_tlb_gen;
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
@@ -270,6 +287,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
 					struct mm_struct *mm)
 {
+	inc_mm_tlb_gen(mm);
 	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
 }
 
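
The tlb_gen comment in mmu.h above describes an ordering contract: page-table
changes first, then the generation bump, then the flush. A minimal sketch of a
caller honoring that contract (the two *_somehow() helpers are hypothetical;
in this patch the bump itself is done inside flush_tlb_mm_range() and
arch_tlbbatch_add_mm(), as the hunks show):

	static void unmap_and_flush(struct mm_struct *mm, unsigned long start,
				    unsigned long end)
	{
		/* 1) Update the paging structures first. */
		clear_ptes_somehow(mm, start, end);

		/* 2) Publish that a flush is now required for this mm. */
		inc_mm_tlb_gen(mm);

		/* 3) Flush; CPUs behind the new generation can detect they are stale. */
		flush_tlbs_somehow(mm, start, end);
	}
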
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 014d07a80053..14f4f8f66aa8 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -28,6 +28,8 @@
  *	Implement flush IPI by CALL_FUNCTION_VECTOR, Alex Shi
  */
 
+atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1);
+
 void leave_mm(int cpu)
 {
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
@@ -250,8 +252,8 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 
 	cpu = get_cpu();
 
-	/* Synchronize with switch_mm. */
-	smp_mb();
+	/* This is also a barrier that synchronizes with switch_mm(). */
+	inc_mm_tlb_gen(mm);
 
 	/* Should we flush just the requested range? */
 	if ((end != TLB_FLUSH_ALL) &&
-- 
2.14.2