0235-x86-events-intel-ds-Use-the-proper-cache-flush-metho.patch 3.3 KB

From 35f252cc11a7fc0009caf1a088cbb5d47a60ab50 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <[email protected]>
Date: Thu, 4 Jan 2018 18:07:12 +0100
Subject: [PATCH 235/242] x86/events/intel/ds: Use the proper cache flush
 method for mapping ds buffers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 42f3bdc5dd962a5958bc024c1e1444248a6b8b4a upstream.

Thomas reported the following warning:

 BUG: using smp_processor_id() in preemptible [00000000] code: ovsdb-server/4498
 caller is native_flush_tlb_single+0x57/0xc0
 native_flush_tlb_single+0x57/0xc0
 __set_pte_vaddr+0x2d/0x40
 set_pte_vaddr+0x2f/0x40
 cea_set_pte+0x30/0x40
 ds_update_cea.constprop.4+0x4d/0x70
 reserve_ds_buffers+0x159/0x410
 x86_reserve_hardware+0x150/0x160
 x86_pmu_event_init+0x3e/0x1f0
 perf_try_init_event+0x69/0x80
 perf_event_alloc+0x652/0x740
 SyS_perf_event_open+0x3f6/0xd60
 do_syscall_64+0x5c/0x190

set_pte_vaddr is used to map the ds buffers into the cpu entry area, but
there are two problems with that:

 1) The resulting flush is not supposed to be called in preemptible context

 2) The cpu entry area is supposed to be per CPU, but the debug store
    buffers are mapped for all CPUs so these mappings need to be flushed
    globally.

Add the necessary preemption protection across the mapping code and flush
TLBs globally.
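
Condensed, the fixed update path has the following shape (a sketch only,
reusing the helper names from arch/x86/events/intel/ds.c; the real hunk is
below):

        /*
         * cea_set_pte() ends in a strictly CPU-local flush, which must
         * not run preemptibly, and which is not enough on its own for a
         * mapping that is visible to all CPUs.
         */
        preempt_disable();
        for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
                cea_set_pte(cea, pa, prot);
        flush_tlb_kernel_range(start, start + size);
        preempt_enable();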

Fixes: c1961a4631da ("x86/events/intel/ds: Map debug buffers in cpu_entry_area")
Reported-by: Thomas Zeitlhofer <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Thomas Zeitlhofer <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/events/intel/ds.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 85df1f12c49e..1d236666ee0e 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -4,6 +4,7 @@
 
 #include <asm/cpu_entry_area.h>
 #include <asm/perf_event.h>
+#include <asm/tlbflush.h>
 #include <asm/insn.h>
 
 #include "../perf_event.h"
@@ -282,20 +283,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
 
 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
 {
+        unsigned long start = (unsigned long)cea;
         phys_addr_t pa;
         size_t msz = 0;
 
         pa = virt_to_phys(addr);
+
+        preempt_disable();
         for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
                 cea_set_pte(cea, pa, prot);
+
+        /*
+         * This is a cross-CPU update of the cpu_entry_area, we must shoot down
+         * all TLB entries for it.
+         */
+        flush_tlb_kernel_range(start, start + size);
+        preempt_enable();
 }
 
 static void ds_clear_cea(void *cea, size_t size)
 {
+        unsigned long start = (unsigned long)cea;
         size_t msz = 0;
 
+        preempt_disable();
         for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
                 cea_set_pte(cea, 0, PAGE_NONE);
+
+        flush_tlb_kernel_range(start, start + size);
+        preempt_enable();
 }
 
 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
-- 
2.14.2
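
For reference, the helper patched above is driven from the DS buffer
allocation paths; a condensed sketch of such a call site (field names as
added by c1961a4631da, abridged here) looks roughly like:

        /* Map one CPU's BTS buffer at its fixed cpu_entry_area slot. */
        void *cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers.bts_buffer;

        ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);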