From 35f252cc11a7fc0009caf1a088cbb5d47a60ab50 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <[email protected]>
Date: Thu, 4 Jan 2018 18:07:12 +0100
Subject: [PATCH 235/242] x86/events/intel/ds: Use the proper cache flush
 method for mapping ds buffers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

commit 42f3bdc5dd962a5958bc024c1e1444248a6b8b4a upstream.

Thomas reported the following warning:

 BUG: using smp_processor_id() in preemptible [00000000] code: ovsdb-server/4498
 caller is native_flush_tlb_single+0x57/0xc0
 native_flush_tlb_single+0x57/0xc0
 __set_pte_vaddr+0x2d/0x40
 set_pte_vaddr+0x2f/0x40
 cea_set_pte+0x30/0x40
 ds_update_cea.constprop.4+0x4d/0x70
 reserve_ds_buffers+0x159/0x410
 x86_reserve_hardware+0x150/0x160
 x86_pmu_event_init+0x3e/0x1f0
 perf_try_init_event+0x69/0x80
 perf_event_alloc+0x652/0x740
 SyS_perf_event_open+0x3f6/0xd60
 do_syscall_64+0x5c/0x190

set_pte_vaddr() is used to map the ds buffers into the cpu entry area, but
there are two problems with that:

 1) The resulting flush is not supposed to be called in preemptible context.

 2) The cpu entry area is supposed to be per CPU, but the debug store
    buffers are mapped for all CPUs, so these mappings need to be flushed
    globally.

Add the necessary preemption protection across the mapping code and flush
TLBs globally.
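
The splat quoted above comes from the CONFIG_DEBUG_PREEMPT instrumentation
of smp_processor_id(). As a rough, hypothetical reduction of that check,
assuming kernel context (checked_smp_processor_id() is an illustrative
name, not a kernel symbol):

#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

/*
 * Sketch only: with CONFIG_DEBUG_PREEMPT, smp_processor_id() verifies
 * that the caller cannot migrate to another CPU. Calling it while
 * preemptible produces the "BUG: using smp_processor_id() in
 * preemptible" message seen in the report.
 */
static inline int checked_smp_processor_id(void)
{
	if (preemptible())	/* preempt_count() == 0 and IRQs enabled */
		pr_err("BUG: using smp_processor_id() in preemptible code\n");
	return raw_smp_processor_id();
}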

Fixes: c1961a4631da ("x86/events/intel/ds: Map debug buffers in cpu_entry_area")
Reported-by: Thomas Zeitlhofer <[email protected]>
Signed-off-by: Peter Zijlstra <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Tested-by: Thomas Zeitlhofer <[email protected]>
Cc: Greg Kroah-Hartman <[email protected]>
Cc: Hugh Dickins <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/events/intel/ds.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
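
For reviewers skimming the hunks below, the post-patch ds_update_cea() is
reconstructed here from the diff; ds_clear_cea() follows the same pattern.
The bracketed comment is annotation added for this note, not part of the
patch:

static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long)cea;
	phys_addr_t pa;
	size_t msz = 0;

	pa = virt_to_phys(addr);

	/* [annotation] cea_set_pte() ends in a single-CPU TLB flush, which
	 * must not run preemptible, hence the preempt_disable() region. */
	preempt_disable();
	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
		cea_set_pte(cea, pa, prot);

	/*
	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
	 * all TLB entries for it.
	 */
	flush_tlb_kernel_range(start, start + size);
	preempt_enable();
}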

diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 85df1f12c49e..1d236666ee0e 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -4,6 +4,7 @@
 
 #include <asm/cpu_entry_area.h>
 #include <asm/perf_event.h>
+#include <asm/tlbflush.h>
 #include <asm/insn.h>
 
 #include "../perf_event.h"
@@ -282,20 +283,35 @@ static DEFINE_PER_CPU(void *, insn_buffer);
 
 static void ds_update_cea(void *cea, void *addr, size_t size, pgprot_t prot)
 {
+	unsigned long start = (unsigned long)cea;
 	phys_addr_t pa;
 	size_t msz = 0;
 
 	pa = virt_to_phys(addr);
+
+	preempt_disable();
 	for (; msz < size; msz += PAGE_SIZE, pa += PAGE_SIZE, cea += PAGE_SIZE)
 		cea_set_pte(cea, pa, prot);
+
+	/*
+	 * This is a cross-CPU update of the cpu_entry_area, we must shoot down
+	 * all TLB entries for it.
+	 */
+	flush_tlb_kernel_range(start, start + size);
+	preempt_enable();
 }
 
 static void ds_clear_cea(void *cea, size_t size)
 {
+	unsigned long start = (unsigned long)cea;
 	size_t msz = 0;
 
+	preempt_disable();
 	for (; msz < size; msz += PAGE_SIZE, cea += PAGE_SIZE)
 		cea_set_pte(cea, 0, PAGE_NONE);
+
+	flush_tlb_kernel_range(start, start + size);
+	preempt_enable();
 }
 
 static void *dsalloc_pages(size_t size, gfp_t flags, int cpu)
-- 
2.14.2