From c2cd64d7bc24a46e3192246a97b30ca5a9692d42 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <[email protected]>
Date: Mon, 4 Dec 2017 15:07:21 +0100
Subject: [PATCH 151/232] x86/entry/64: Separate cpu_current_top_of_stack from
 TSS.sp0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

On 64-bit kernels, we used to assume that TSS.sp0 was the current
top of stack. With the addition of an entry trampoline, this will
no longer be the case. Store the current top of stack in TSS.sp1,
which is otherwise unused but shares the same cacheline.
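
The change itself is in the diff below; as a rough, self-contained
illustration of why sp1 is a convenient home for the top-of-stack
pointer, the sketch that follows mirrors the trimmed field layout of
x86_hw_tss (sp0 and sp1 are adjacent, so a read of sp1 touches the
cache line the entry code already uses for sp0) and fakes the per-CPU
accessors in plain userspace C. The names tss_demo, demo_tss,
set_task_top_of_stack() and current_top_of_stack_demo() are made up
for the example and are not kernel API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Trimmed copy of the 64-bit x86_hw_tss layout from the patch: sp1
 * sits right next to sp0 inside the first 64 bytes of the structure.
 */
struct tss_demo {
	uint32_t reserved1;
	uint64_t sp0;	/* with an entry trampoline, may no longer be the task stack */
	uint64_t sp1;	/* repurposed: current top of the task stack */
	uint64_t sp2;
} __attribute__((packed, aligned(64)));

static struct tss_demo demo_tss;	/* stand-in for the per-CPU cpu_tss */

/* Rough analogue of the cpu_current_top_of_stack write in __switch_to(). */
static void set_task_top_of_stack(uint64_t top)
{
	demo_tss.sp1 = top;
}

/* Rough analogue of current_top_of_stack() reading sp1 instead of sp0. */
static uint64_t current_top_of_stack_demo(void)
{
	return demo_tss.sp1;
}

int main(void)
{
	set_task_top_of_stack(0xffffc90000004000ULL);

	printf("sp0 at offset %zu, sp1 at offset %zu: same 64-byte cache line\n",
	       offsetof(struct tss_demo, sp0),
	       offsetof(struct tss_demo, sp1));
	printf("top of stack: 0x%llx\n",
	       (unsigned long long)current_top_of_stack_demo());
	return 0;
}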

Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Laight <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Eduardo Valentin <[email protected]>
Cc: Greg KH <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 9aaefe7b59ae00605256a7d6bd1c1456432495fc)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 281be4ff07f7c67dc2a9c75ab24a7b9ff25544ae)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/processor.h   | 18 +++++++++++++-----
 arch/x86/include/asm/thread_info.h |  2 +-
 arch/x86/kernel/asm-offsets_64.c   |  1 +
 arch/x86/kernel/process.c          | 10 ++++++++++
 arch/x86/kernel/process_64.c       |  1 +
 5 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 55885465c3a7..1bfe4bad797a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -303,7 +303,13 @@ struct x86_hw_tss {
 struct x86_hw_tss {
 	u32			reserved1;
 	u64			sp0;
+
+	/*
+	 * We store cpu_current_top_of_stack in sp1 so it's always accessible.
+	 * Linux does not use ring 1, so sp1 is not otherwise needed.
+	 */
 	u64			sp1;
+
 	u64			sp2;
 	u64			reserved2;
 	u64			ist[7];
@@ -362,6 +368,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#else
+#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
 #endif
 
 /*
@@ -533,12 +541,12 @@ static inline void native_swapgs(void)
 
 static inline unsigned long current_top_of_stack(void)
 {
-#ifdef CONFIG_X86_64
-	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
-#else
-	/* sp0 on x86_32 is special in and around vm86 mode. */
+	/*
+	 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
+	 * and around vm86 mode and sp0 on x86_64 is special because of the
+	 * entry trampoline.
+	 */
 	return this_cpu_read_stable(cpu_current_top_of_stack);
-#endif
 }
 
 static inline bool on_thread_stack(void)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ec8ef3bbb7dc..760dd8a73927 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -214,7 +214,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
 #endif
 
 #endif
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index c21a5315b38e..048f68ff3396 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -65,6 +65,7 @@ int main(void)
 
 	OFFSET(TSS_ist, tss_struct, x86_tss.ist);
 	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 	BLANK();
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index aa86e810fb54..407fc37a8718 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -55,6 +55,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 		 * Poison it.
 		 */
 		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+		/*
+		 * .sp1 is cpu_current_top_of_stack. The init task never
+		 * runs user code, but cpu_current_top_of_stack should still
+		 * be well defined before the first context switch.
+		 */
+		.sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
 		.ss0 = __KERNEL_DS,
 		.ss1 = __KERNEL_CS,
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 01b119bebb68..157f81816915 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -461,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch the PDA and FPU contexts.
 	 */
 	this_cpu_write(current_task, next_p);
+	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
 	/* Reload sp0. */
 	update_sp0(next_p);
-- 
2.14.2