 
- From a1ccda197e7a758c8e9b7be299e9beaf3ca3ed51 Mon Sep 17 00:00:00 2001
- From: Thomas Gleixner <[email protected]>
- Date: Mon, 4 Dec 2017 15:07:49 +0100
- Subject: [PATCH 202/231] x86/cpu_entry_area: Add debugstore entries to
- cpu_entry_area
- MIME-Version: 1.0
- Content-Type: text/plain; charset=UTF-8
- Content-Transfer-Encoding: 8bit
- CVE-2017-5754
- The Intel PEBS/BTS debug store is a design trainwreck as it expects virtual
- addresses which must be visible in any execution context.
- So it is required to make these mappings visible to user space when kernel
- page table isolation is active.
- Provide enough room for the buffer mappings in the cpu_entry_area so the
- buffers are available in the user space visible page tables.
- At the point where the kernel side entry area is populated there is no
- buffer available yet, but the kernel PMD must be populated. To achieve this
- set the entries for these buffers to non present.
- Signed-off-by: Thomas Gleixner <[email protected]>
- Cc: Andy Lutomirski <[email protected]>
- Cc: Boris Ostrovsky <[email protected]>
- Cc: Borislav Petkov <[email protected]>
- Cc: Brian Gerst <[email protected]>
- Cc: Dave Hansen <[email protected]>
- Cc: David Laight <[email protected]>
- Cc: Denys Vlasenko <[email protected]>
- Cc: Eduardo Valentin <[email protected]>
- Cc: Greg KH <[email protected]>
- Cc: H. Peter Anvin <[email protected]>
- Cc: Josh Poimboeuf <[email protected]>
- Cc: Juergen Gross <[email protected]>
- Cc: Linus Torvalds <[email protected]>
- Cc: Peter Zijlstra <[email protected]>
- Cc: Will Deacon <[email protected]>
- Cc: [email protected]
- Cc: [email protected]
- Cc: [email protected]
- Cc: [email protected]
- Signed-off-by: Ingo Molnar <[email protected]>
- (cherry picked from commit 10043e02db7f8a4161f76434931051e7d797a5f6)
- Signed-off-by: Andy Whitcroft <[email protected]>
- Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
- (cherry picked from commit 4b9996f9c2d35d23a9fa2afe4f161402e6f28309)
- Signed-off-by: Fabian Grünbichler <[email protected]>
- ---
- arch/x86/events/perf_event.h | 21 ++------------------
- arch/x86/include/asm/cpu_entry_area.h | 13 +++++++++++++
- arch/x86/include/asm/intel_ds.h | 36 +++++++++++++++++++++++++++++++++++
- arch/x86/events/intel/ds.c | 5 +++--
- arch/x86/mm/cpu_entry_area.c | 27 ++++++++++++++++++++++++++
- 5 files changed, 81 insertions(+), 21 deletions(-)
- create mode 100644 arch/x86/include/asm/intel_ds.h
- diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
- index 590eaf7c2c3e..308bc14f58af 100644
- --- a/arch/x86/events/perf_event.h
- +++ b/arch/x86/events/perf_event.h
- @@ -14,6 +14,8 @@
-
- #include <linux/perf_event.h>
-
- +#include <asm/intel_ds.h>
- +
- /* To enable MSR tracing please use the generic trace points. */
-
- /*
- @@ -77,8 +79,6 @@ struct amd_nb {
- struct event_constraint event_constraints[X86_PMC_IDX_MAX];
- };
-
- -/* The maximal number of PEBS events: */
- -#define MAX_PEBS_EVENTS 8
- #define PEBS_COUNTER_MASK ((1ULL << MAX_PEBS_EVENTS) - 1)
-
- /*
- @@ -95,23 +95,6 @@ struct amd_nb {
- PERF_SAMPLE_TRANSACTION | \
- PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)
-
- -/*
- - * A debug store configuration.
- - *
- - * We only support architectures that use 64bit fields.
- - */
- -struct debug_store {
- - u64 bts_buffer_base;
- - u64 bts_index;
- - u64 bts_absolute_maximum;
- - u64 bts_interrupt_threshold;
- - u64 pebs_buffer_base;
- - u64 pebs_index;
- - u64 pebs_absolute_maximum;
- - u64 pebs_interrupt_threshold;
- - u64 pebs_event_reset[MAX_PEBS_EVENTS];
- -};
- -
- #define PEBS_REGS \
- (PERF_REG_X86_AX | \
- PERF_REG_X86_BX | \
- diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
- index 2fbc69a0916e..4a7884b8dca5 100644
- --- a/arch/x86/include/asm/cpu_entry_area.h
- +++ b/arch/x86/include/asm/cpu_entry_area.h
- @@ -5,6 +5,7 @@
-
- #include <linux/percpu-defs.h>
- #include <asm/processor.h>
- +#include <asm/intel_ds.h>
-
- /*
- * cpu_entry_area is a percpu region that contains things needed by the CPU
- @@ -40,6 +41,18 @@ struct cpu_entry_area {
- */
- char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
- #endif
- +#ifdef CONFIG_CPU_SUP_INTEL
- + /*
- + * Per CPU debug store for Intel performance monitoring. Wastes a
- + * full page at the moment.
- + */
- + struct debug_store cpu_debug_store;
- + /*
- + * The actual PEBS/BTS buffers must be mapped to user space
- + * Reserve enough fixmap PTEs.
- + */
- + struct debug_store_buffers cpu_debug_buffers;
- +#endif
- };
-
- #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
- diff --git a/arch/x86/include/asm/intel_ds.h b/arch/x86/include/asm/intel_ds.h
- new file mode 100644
- index 000000000000..62a9f4966b42
- --- /dev/null
- +++ b/arch/x86/include/asm/intel_ds.h
- @@ -0,0 +1,36 @@
- +#ifndef _ASM_INTEL_DS_H
- +#define _ASM_INTEL_DS_H
- +
- +#include <linux/percpu-defs.h>
- +
- +#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
- +#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4)
- +
- +/* The maximal number of PEBS events: */
- +#define MAX_PEBS_EVENTS 8
- +
- +/*
- + * A debug store configuration.
- + *
- + * We only support architectures that use 64bit fields.
- + */
- +struct debug_store {
- + u64 bts_buffer_base;
- + u64 bts_index;
- + u64 bts_absolute_maximum;
- + u64 bts_interrupt_threshold;
- + u64 pebs_buffer_base;
- + u64 pebs_index;
- + u64 pebs_absolute_maximum;
- + u64 pebs_interrupt_threshold;
- + u64 pebs_event_reset[MAX_PEBS_EVENTS];
- +} __aligned(PAGE_SIZE);
- +
- +DECLARE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
- +
- +struct debug_store_buffers {
- + char bts_buffer[BTS_BUFFER_SIZE];
- + char pebs_buffer[PEBS_BUFFER_SIZE];
- +};
- +
- +#endif
- diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
- index 98e36e0c791c..21a4ed789ec0 100644
- --- a/arch/x86/events/intel/ds.c
- +++ b/arch/x86/events/intel/ds.c
- @@ -7,11 +7,12 @@
-
- #include "../perf_event.h"
-
- +/* Waste a full page so it can be mapped into the cpu_entry_area */
- +DEFINE_PER_CPU_PAGE_ALIGNED(struct debug_store, cpu_debug_store);
- +
- /* The size of a BTS record in bytes: */
- #define BTS_RECORD_SIZE 24
-
- -#define BTS_BUFFER_SIZE (PAGE_SIZE << 4)
- -#define PEBS_BUFFER_SIZE (PAGE_SIZE << 4)
- #define PEBS_FIXUP_SIZE PAGE_SIZE
-
- /*
- diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
- index fe814fd5e014..b9283cc27622 100644
- --- a/arch/x86/mm/cpu_entry_area.c
- +++ b/arch/x86/mm/cpu_entry_area.c
- @@ -38,6 +38,32 @@ cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
- cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
- }
-
- +static void percpu_setup_debug_store(int cpu)
- +{
- +#ifdef CONFIG_CPU_SUP_INTEL
- + int npages;
- + void *cea;
- +
- + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
- + return;
- +
- + cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
- + npages = sizeof(struct debug_store) / PAGE_SIZE;
- + BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
- + cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
- + PAGE_KERNEL);
- +
- + cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
- + /*
- + * Force the population of PMDs for not yet allocated per cpu
- + * memory like debug store buffers.
- + */
- + npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
- + for (; npages; npages--, cea += PAGE_SIZE)
- + cea_set_pte(cea, 0, PAGE_NONE);
- +#endif
- +}
- +
- /* Setup the fixmap mappings only once per-processor */
- static void __init setup_cpu_entry_area(int cpu)
- {
- @@ -109,6 +135,7 @@ static void __init setup_cpu_entry_area(int cpu)
- cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
- __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
- #endif
- + percpu_setup_debug_store(cpu);
- }
-
- static __init void setup_cpu_entry_area_ptes(void)
- --
- 2.14.2
|