- From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
- From: Andy Lutomirski <[email protected]>
- Date: Thu, 2 Nov 2017 00:59:16 -0700
- Subject: [PATCH] x86/entry/64: Remove thread_struct::sp0
- MIME-Version: 1.0
- Content-Type: text/plain; charset=UTF-8
- Content-Transfer-Encoding: 8bit
- CVE-2017-5754
- On x86_64, we can easily calculate sp0 when needed instead of
- storing it in thread_struct.
- On x86_32, a similar cleanup would be possible, but it would require
- cleaning up the vm86 code first, and that can wait for a later
- cleanup series.
- Signed-off-by: Andy Lutomirski <[email protected]>
- Cc: Borislav Petkov <[email protected]>
- Cc: Brian Gerst <[email protected]>
- Cc: Dave Hansen <[email protected]>
- Cc: Linus Torvalds <[email protected]>
- Cc: Peter Zijlstra <[email protected]>
- Cc: Thomas Gleixner <[email protected]>
- Link: http://lkml.kernel.org/r/719cd9c66c548c4350d98a90f050aee8b17f8919.1509609304.git.luto@kernel.org
- Signed-off-by: Ingo Molnar <[email protected]>
- (cherry picked from commit d375cf1530595e33961a8844192cddab913650e3)
- Signed-off-by: Andy Whitcroft <[email protected]>
- Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
- (cherry picked from commit 4910af19c69a87e9432467f4d7cb78da5fbcc30a)
- Signed-off-by: Fabian Grünbichler <[email protected]>
- ---
- arch/x86/include/asm/compat.h | 1 +
- arch/x86/include/asm/processor.h | 28 +++++++++-------------------
- arch/x86/include/asm/switch_to.h | 6 ++++++
- arch/x86/kernel/process_64.c | 1 -
- 4 files changed, 16 insertions(+), 20 deletions(-)
- diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
- index 5343c19814b3..948b6d8ec46f 100644
- --- a/arch/x86/include/asm/compat.h
- +++ b/arch/x86/include/asm/compat.h
- @@ -6,6 +6,7 @@
- */
- #include <linux/types.h>
- #include <linux/sched.h>
- +#include <linux/sched/task_stack.h>
- #include <asm/processor.h>
- #include <asm/user32.h>
- #include <asm/unistd.h>
- diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
- index f83fbf1b6dd9..cec9a329c0f1 100644
- --- a/arch/x86/include/asm/processor.h
- +++ b/arch/x86/include/asm/processor.h
- @@ -423,7 +423,9 @@ typedef struct {
- struct thread_struct {
- /* Cached TLS descriptors: */
- struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
- +#ifdef CONFIG_X86_32
- unsigned long sp0;
- +#endif
- unsigned long sp;
- #ifdef CONFIG_X86_32
- unsigned long sysenter_cs;
- @@ -790,6 +792,13 @@ static inline void spin_lock_prefetch(const void *x)
-
- #define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))
-
- +#define task_pt_regs(task) \
- +({ \
- + unsigned long __ptr = (unsigned long)task_stack_page(task); \
- + __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
- + ((struct pt_regs *)__ptr) - 1; \
- +})
- +
- #ifdef CONFIG_X86_32
- /*
- * User space process size: 3GB (default).
- @@ -807,23 +816,6 @@ static inline void spin_lock_prefetch(const void *x)
- .addr_limit = KERNEL_DS, \
- }
-
- -/*
- - * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
- - * This is necessary to guarantee that the entire "struct pt_regs"
- - * is accessible even if the CPU haven't stored the SS/ESP registers
- - * on the stack (interrupt gate does not save these registers
- - * when switching to the same priv ring).
- - * Therefore beware: accessing the ss/esp fields of the
- - * "struct pt_regs" is possible, but they may contain the
- - * completely wrong values.
- - */
- -#define task_pt_regs(task) \
- -({ \
- - unsigned long __ptr = (unsigned long)task_stack_page(task); \
- - __ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
- - ((struct pt_regs *)__ptr) - 1; \
- -})
- -
- #define KSTK_ESP(task) (task_pt_regs(task)->sp)
-
- #else
- @@ -853,11 +845,9 @@ static inline void spin_lock_prefetch(const void *x)
- #define STACK_TOP_MAX TASK_SIZE_MAX
-
- #define INIT_THREAD { \
- - .sp0 = TOP_OF_INIT_STACK, \
- .addr_limit = KERNEL_DS, \
- }
-
- -#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
- extern unsigned long KSTK_ESP(struct task_struct *task);
-
- #endif /* CONFIG_X86_64 */
- diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
- index 54e64d909725..010cd6e4eafc 100644
- --- a/arch/x86/include/asm/switch_to.h
- +++ b/arch/x86/include/asm/switch_to.h
- @@ -1,6 +1,8 @@
- #ifndef _ASM_X86_SWITCH_TO_H
- #define _ASM_X86_SWITCH_TO_H
-
- +#include <linux/sched/task_stack.h>
- +
- struct task_struct; /* one of the stranger aspects of C forward declarations */
-
- struct task_struct *__switch_to_asm(struct task_struct *prev,
- @@ -87,7 +89,11 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
- /* This is used when switching tasks or entering/exiting vm86 mode. */
- static inline void update_sp0(struct task_struct *task)
- {
- +#ifdef CONFIG_X86_32
- load_sp0(task->thread.sp0);
- +#else
- + load_sp0(task_top_of_stack(task));
- +#endif
- }
-
- #endif /* _ASM_X86_SWITCH_TO_H */
- diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
- index 8a748e17bf6e..b08b9b6c40eb 100644
- --- a/arch/x86/kernel/process_64.c
- +++ b/arch/x86/kernel/process_64.c
- @@ -275,7 +275,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
- struct inactive_task_frame *frame;
- struct task_struct *me = current;
-
- - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
- childregs = task_pt_regs(p);
- fork_frame = container_of(childregs, struct fork_frame, regs);
- frame = &fork_frame->frame;
- --
- 2.14.2