0041-x86-mm-Rework-lazy-TLB-mode-and-TLB-freshness-tracki.patch

From caa3549fe709971498eaf080c1710ef627a0df5a Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <[email protected]>
Date: Thu, 29 Jun 2017 08:53:17 -0700
Subject: [PATCH 041/242] x86/mm: Rework lazy TLB mode and TLB freshness
 tracking
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

x86's lazy TLB mode used to be fairly weak -- it would switch to
init_mm the first time it tried to flush a lazy TLB. This meant an
unnecessary CR3 write and, if the flush was remote, an unnecessary
IPI.

Rewrite it entirely. When we enter lazy mode, we simply remove the
CPU from mm_cpumask. This means that we need a way to figure out
whether we've missed a flush when we switch back out of lazy mode.
I use the tlb_gen machinery to track whether a context is up to
date.
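
To make the bookkeeping concrete, here is a minimal userspace model of
the idea (a sketch only -- the struct and function names below are
illustrative stand-ins, not the code added by this patch): every flush
request bumps the mm's generation counter, and a CPU coming back out of
lazy mode flushes only if its cached generation has fallen behind.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for mm->context and cpu_tlbstate.ctxs[0]. */
struct mm_ctx  { unsigned long ctx_id; atomic_ulong tlb_gen; };
struct cpu_ctx { unsigned long ctx_id; unsigned long tlb_gen; };

/* A flush request only bumps the mm's generation counter. */
static void request_flush(struct mm_ctx *mm)
{
        atomic_fetch_add(&mm->tlb_gen, 1);
}

/* On switching back to a non-lazy mm: flush only if this CPU fell behind. */
static bool needs_flush(struct cpu_ctx *cpu, struct mm_ctx *mm)
{
        unsigned long next_gen = atomic_load(&mm->tlb_gen);

        if (cpu->ctx_id == mm->ctx_id && cpu->tlb_gen >= next_gen)
                return false;           /* nothing was missed while lazy */

        cpu->ctx_id  = mm->ctx_id;
        cpu->tlb_gen = next_gen;        /* catch up; the kernel reloads CR3 here */
        return true;
}

int main(void)
{
        struct mm_ctx  mm  = { .ctx_id = 1, .tlb_gen = 1 };
        struct cpu_ctx cpu = { .ctx_id = 1, .tlb_gen = 1 };

        printf("no flush missed:  %d\n", needs_flush(&cpu, &mm));      /* 0 */
        request_flush(&mm);             /* a flush arrives while we're lazy */
        printf("flush was missed: %d\n", needs_flush(&cpu, &mm));      /* 1 */
        return 0;
}

switch_mm_irqs_off() in the hunks below does the real equivalent of
needs_flush(): it compares cpu_tlbstate.ctxs[0].tlb_gen against
next->context.tlb_gen before deciding how to handle the CR3 write.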

Note to reviewers: this patch, by itself, looks a bit odd. I'm
using an array of length 1 containing (ctx_id, tlb_gen) rather than
just storing tlb_gen, and making it an array isn't necessary yet.
I'm doing this because the next few patches add PCID support, and,
with PCID, we need ctx_id, and the array will end up with a length
greater than 1. Making it an array now means that there will be
less churn and therefore less stress on your eyeballs.
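
For reference, the per-CPU state being discussed has roughly the
following shape (a sketch, not a verbatim copy of
arch/x86/include/asm/tlbflush.h; the typedef, the struct name, and the
comments are mine):

#include <stdint.h>

typedef uint64_t u64;
struct mm_struct;                       /* opaque here; defined in the kernel */

struct tlb_context {
        u64 ctx_id;                     /* which mm this slot describes */
        u64 tlb_gen;                    /* last mm tlb_gen this CPU caught up to */
};

struct tlb_state_sketch {
        struct mm_struct *loaded_mm;    /* the mm whose page tables are in CR3 */
        /*
         * Length 1 for now; the PCID patches later in the series grow
         * this to one entry per usable ASID, hence an array from day one.
         */
        struct tlb_context ctxs[1];
        unsigned long cr4;              /* shadow of the CR4 register */
};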

NB: This is dubious but, AFAICT, still correct on Xen and UV.
xen_exit_mmap() uses mm_cpumask() for nefarious purposes and this
patch changes the way that mm_cpumask() works. This should be okay,
since Xen *also* iterates all online CPUs to find all the CPUs it
needs to twiddle.

The UV tlbflush code is rather dated and should be changed.

Here are some benchmark results, done on a Skylake laptop at 2.3 GHz
(turbo off, intel_pstate requesting max performance) under KVM with
the guest using idle=poll (to avoid artifacts when bouncing between
CPUs). I haven't done any real statistics here -- I just ran them
in a loop and picked the fastest results that didn't look like
outliers. Unpatched means commit a4eb8b993554, so all the
bookkeeping overhead is gone.

MADV_DONTNEED; touch the page; switch CPUs using sched_setaffinity. In
an unpatched kernel, MADV_DONTNEED will send an IPI to the previous CPU.
This is intended to be a nearly worst-case test.

  patched:   13.4µs
  unpatched: 21.6µs
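
In case it helps reviewers reproduce this, the worst-case test boils
down to a loop like the one below (a reconstruction, not the exact
benchmark source; the iteration count, mapping size, and CPU numbers
are arbitrary):

#define _GNU_SOURCE
#include <sched.h>
#include <stddef.h>
#include <sys/mman.h>

/* Pin the calling thread to a single CPU. */
static void pin_to_cpu(int cpu)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(cpu, &set);
        sched_setaffinity(0, sizeof(set), &set);
}

int main(void)
{
        size_t len = 4096;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        for (int i = 0; i < 100000; i++) {
                pin_to_cpu(i & 1);              /* bounce between CPU 0 and CPU 1 */
                madvise(p, len, MADV_DONTNEED); /* unpatched: IPIs the previous CPU */
                p[0] = 1;                       /* touch the page to map it back in */
        }

        munmap(p, len);
        return 0;
}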

Vitaly's pthread_mmap microbenchmark with 8 threads (on four cores),
nrounds = 100, 256M data

  patched:   1.1 seconds or so
  unpatched: 1.9 seconds or so

The speedup on Vitaly's test appears to be because it spends a lot
of time blocked on mmap_sem, and this patch avoids sending IPIs to
blocked CPUs.

Signed-off-by: Andy Lutomirski <[email protected]>
Reviewed-by: Nadav Amit <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Cc: Andrew Banman <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Arjan van de Ven <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Dimitri Sivanich <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Mike Travis <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/ddf2c92962339f4ba39d8fc41b853936ec0b44f1.1498751203.git.luto@kernel.org
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 94b1b03b519b81c494900cb112aa00ed205cc2d9)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit b381b7ae452f2bc6384507a897247be7c93a71cc)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/mmu_context.h |   6 +-
 arch/x86/include/asm/tlbflush.h    |   4 -
 arch/x86/mm/init.c                 |   1 -
 arch/x86/mm/tlb.c                  | 197 ++++++++++++++++++++++---------------
 arch/x86/xen/mmu_pv.c              |   5 +-
 5 files changed, 124 insertions(+), 89 deletions(-)

diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 6c05679c715b..d6b055b328f2 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -128,8 +128,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
- if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
- this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
+ int cpu = smp_processor_id();
+
+ if (cpumask_test_cpu(cpu, mm_cpumask(mm)))
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
}
static inline int init_new_context(struct task_struct *tsk,
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 3a167c214560..6397275008db 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -95,7 +95,6 @@ struct tlb_state {
* mode even if we've already switched back to swapper_pg_dir.
*/
struct mm_struct *loaded_mm;
- int state;
/*
* Access to this CR4 shadow and to H/W CR4 is protected by
@@ -318,9 +317,6 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
void native_flush_tlb_others(const struct cpumask *cpumask,
const struct flush_tlb_info *info);
-#define TLBSTATE_OK 1
-#define TLBSTATE_LAZY 2
-
static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
struct mm_struct *mm)
{
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index df2624b091a7..c86dc071bb10 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -849,7 +849,6 @@ void __init zone_sizes_init(void)
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
.loaded_mm = &init_mm,
- .state = 0,
.cr4 = ~0UL, /* fail hard if we screw up cr4 shadow initialization */
};
EXPORT_SYMBOL_GPL(cpu_tlbstate);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 4e5a5ddb9e4d..0982c997d36f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -45,8 +45,8 @@ void leave_mm(int cpu)
if (loaded_mm == &init_mm)
return;
- if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
- BUG();
+ /* Warn if we're not lazy. */
+ WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm)));
switch_mm(NULL, &init_mm, NULL);
}
@@ -65,94 +65,117 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
- unsigned cpu = smp_processor_id();
struct mm_struct *real_prev = this_cpu_read(cpu_tlbstate.loaded_mm);
+ unsigned cpu = smp_processor_id();
+ u64 next_tlb_gen;
/*
- * NB: The scheduler will call us with prev == next when
- * switching from lazy TLB mode to normal mode if active_mm
- * isn't changing. When this happens, there is no guarantee
- * that CR3 (and hence cpu_tlbstate.loaded_mm) matches next.
+ * NB: The scheduler will call us with prev == next when switching
+ * from lazy TLB mode to normal mode if active_mm isn't changing.
+ * When this happens, we don't assume that CR3 (and hence
+ * cpu_tlbstate.loaded_mm) matches next.
*
* NB: leave_mm() calls us with prev == NULL and tsk == NULL.
*/
- this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
+ /* We don't want flush_tlb_func_* to run concurrently with us. */
+ if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+ WARN_ON_ONCE(!irqs_disabled());
+
+ /*
+ * Verify that CR3 is what we think it is. This will catch
+ * hypothetical buggy code that directly switches to swapper_pg_dir
+ * without going through leave_mm() / switch_mm_irqs_off().
+ */
+ VM_BUG_ON(read_cr3_pa() != __pa(real_prev->pgd));
if (real_prev == next) {
- /*
- * There's nothing to do: we always keep the per-mm control
- * regs in sync with cpu_tlbstate.loaded_mm. Just
- * sanity-check mm_cpumask.
- */
- if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(next))))
- cpumask_set_cpu(cpu, mm_cpumask(next));
- return;
- }
+ VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[0].ctx_id) !=
+ next->context.ctx_id);
+
+ if (cpumask_test_cpu(cpu, mm_cpumask(next))) {
+ /*
+ * There's nothing to do: we weren't lazy, and we
+ * aren't changing our mm. We don't need to flush
+ * anything, nor do we need to update CR3, CR4, or
+ * LDTR.
+ */
+ return;
+ }
+
+ /* Resume remote flushes and then read tlb_gen. */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
+
+ if (this_cpu_read(cpu_tlbstate.ctxs[0].tlb_gen) < next_tlb_gen) {
+ /*
+ * Ideally, we'd have a flush_tlb() variant that
+ * takes the known CR3 value as input. This would
+ * be faster on Xen PV and on hypothetical CPUs
+ * on which INVPCID is fast.
+ */
+ this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen,
+ next_tlb_gen);
+ write_cr3(__pa(next->pgd));
+
+ /*
+ * This gets called via leave_mm() in the idle path
+ * where RCU functions differently. Tracing normally
+ * uses RCU, so we have to call the tracepoint
+ * specially here.
+ */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH,
+ TLB_FLUSH_ALL);
+ }
- if (IS_ENABLED(CONFIG_VMAP_STACK)) {
/*
- * If our current stack is in vmalloc space and isn't
- * mapped in the new pgd, we'll double-fault. Forcibly
- * map it.
+ * We just exited lazy mode, which means that CR4 and/or LDTR
+ * may be stale. (Changes to the required CR4 and LDTR states
+ * are not reflected in tlb_gen.)
*/
- unsigned int stack_pgd_index = pgd_index(current_stack_pointer());
-
- pgd_t *pgd = next->pgd + stack_pgd_index;
-
- if (unlikely(pgd_none(*pgd)))
- set_pgd(pgd, init_mm.pgd[stack_pgd_index]);
- }
+ } else {
+ VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[0].ctx_id) ==
+ next->context.ctx_id);
+
+ if (IS_ENABLED(CONFIG_VMAP_STACK)) {
+ /*
+ * If our current stack is in vmalloc space and isn't
+ * mapped in the new pgd, we'll double-fault. Forcibly
+ * map it.
+ */
+ unsigned int index = pgd_index(current_stack_pointer());
+ pgd_t *pgd = next->pgd + index;
+
+ if (unlikely(pgd_none(*pgd)))
+ set_pgd(pgd, init_mm.pgd[index]);
+ }
- this_cpu_write(cpu_tlbstate.loaded_mm, next);
- this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, next->context.ctx_id);
- this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, atomic64_read(&next->context.tlb_gen));
+ /* Stop remote flushes for the previous mm */
+ if (cpumask_test_cpu(cpu, mm_cpumask(real_prev)))
+ cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
- WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
- cpumask_set_cpu(cpu, mm_cpumask(next));
+ VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next)));
- /*
- * Re-load page tables.
- *
- * This logic has an ordering constraint:
- *
- * CPU 0: Write to a PTE for 'next'
- * CPU 0: load bit 1 in mm_cpumask. if nonzero, send IPI.
- * CPU 1: set bit 1 in next's mm_cpumask
- * CPU 1: load from the PTE that CPU 0 writes (implicit)
- *
- * We need to prevent an outcome in which CPU 1 observes
- * the new PTE value and CPU 0 observes bit 1 clear in
- * mm_cpumask. (If that occurs, then the IPI will never
- * be sent, and CPU 0's TLB will contain a stale entry.)
- *
- * The bad outcome can occur if either CPU's load is
- * reordered before that CPU's store, so both CPUs must
- * execute full barriers to prevent this from happening.
- *
- * Thus, switch_mm needs a full barrier between the
- * store to mm_cpumask and any operation that could load
- * from next->pgd. TLB fills are special and can happen
- * due to instruction fetches or for no reason at all,
- * and neither LOCK nor MFENCE orders them.
- * Fortunately, load_cr3() is serializing and gives the
- * ordering guarantee we need.
- */
- load_cr3(next->pgd);
+ /*
+ * Start remote flushes and then read tlb_gen.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ next_tlb_gen = atomic64_read(&next->context.tlb_gen);
- /*
- * This gets called via leave_mm() in the idle path where RCU
- * functions differently. Tracing normally uses RCU, so we have to
- * call the tracepoint specially here.
- */
- trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+ this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, next->context.ctx_id);
+ this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, next_tlb_gen);
+ this_cpu_write(cpu_tlbstate.loaded_mm, next);
+ write_cr3(__pa(next->pgd));
- /* Stop flush ipis for the previous mm */
- WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) &&
- real_prev != &init_mm);
- cpumask_clear_cpu(cpu, mm_cpumask(real_prev));
+ /*
+ * This gets called via leave_mm() in the idle path where RCU
+ * functions differently. Tracing normally uses RCU, so we
+ * have to call the tracepoint specially here.
+ */
+ trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH,
+ TLB_FLUSH_ALL);
+ }
- /* Load per-mm CR4 and LDTR state */
load_mm_cr4(next);
switch_ldt(real_prev, next);
}
@@ -186,13 +209,13 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[0].ctx_id) !=
loaded_mm->context.ctx_id);
- if (this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) {
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) {
/*
- * leave_mm() is adequate to handle any type of flush, and
- * we would prefer not to receive further IPIs. leave_mm()
- * clears this CPU's bit in mm_cpumask().
+ * We're in lazy mode -- don't flush. We can get here on
+ * remote flushes due to races and on local flushes if a
+ * kernel thread coincidentally flushes the mm it's lazily
+ * still using.
*/
- leave_mm(smp_processor_id());
return;
}
@@ -203,6 +226,7 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f,
* be handled can catch us all the way up, leaving no work for
* the second flush.
*/
+ trace_tlb_flush(reason, 0);
return;
}
@@ -304,6 +328,21 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
(info->end - info->start) >> PAGE_SHIFT);
if (is_uv_system()) {
+ /*
+ * This whole special case is confused. UV has a "Broadcast
+ * Assist Unit", which seems to be a fancy way to send IPIs.
+ * Back when x86 used an explicit TLB flush IPI, UV was
+ * optimized to use its own mechanism. These days, x86 uses
+ * smp_call_function_many(), but UV still uses a manual IPI,
+ * and that IPI's action is out of date -- it does a manual
+ * flush instead of calling flush_tlb_func_remote(). This
+ * means that the percpu tlb_gen variables won't be updated
+ * and we'll do pointless flushes on future context switches.
+ *
+ * Rather than hooking native_flush_tlb_others() here, I think
+ * that UV should be updated so that smp_call_function_many(),
+ * etc, are optimal on UV.
+ */
unsigned int cpu;
cpu = smp_processor_id();
@@ -363,6 +402,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
if (cpumask_any_but(mm_cpumask(mm), cpu) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), &info);
+
put_cpu();
}
@@ -371,8 +411,6 @@ static void do_flush_tlb_all(void *info)
{
count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
__flush_tlb_all();
- if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
- leave_mm(smp_processor_id());
}
void flush_tlb_all(void)
@@ -425,6 +463,7 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids)
flush_tlb_others(&batch->cpumask, &info);
+
cpumask_clear(&batch->cpumask);
put_cpu();
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index 5f61b7e2e6b2..ba76f3ce997f 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -1005,14 +1005,12 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
/* Get the "official" set of cpus referring to our pagetable. */
if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
for_each_online_cpu(cpu) {
- if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
- && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
+ if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
continue;
smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1);
}
return;
}
- cpumask_copy(mask, mm_cpumask(mm));
/*
* It's possible that a vcpu may have a stale reference to our
@@ -1021,6 +1019,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm)
* look at its actual current cr3 value, and force it to flush
* if needed.
*/
+ cpumask_clear(mask);
for_each_online_cpu(cpu) {
if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
cpumask_set_cpu(cpu, mask);
--
2.14.2