- From a4da7aed31f0355b881bdeeb3d269a20759f16a8 Mon Sep 17 00:00:00 2001
- From: Andy Lutomirski <[email protected]>
- Date: Mon, 4 Dec 2017 15:07:29 +0100
- Subject: [PATCH 159/241] x86/entry/64: Make cpu_entry_area.tss read-only
- MIME-Version: 1.0
- Content-Type: text/plain; charset=UTF-8
- Content-Transfer-Encoding: 8bit
- CVE-2017-5754
- 
- The TSS is a fairly juicy target for exploits, and, now that the TSS
- is in the cpu_entry_area, it's no longer protected by kASLR. Make it
- read-only on x86_64.
- 
- On x86_32, it can't be RO because it's written by the CPU during task
- switches, and we use a task gate for double faults. I'd also be
- nervous about errata if we tried to make it RO even on configurations
- without double fault handling.
- 
- [ tglx: AMD confirmed that there is no problem on 64-bit with TSS RO. So
-   it's probably safe to assume that it's a non-issue, though Intel
-   might have been creative in that area. Still waiting for
-   confirmation. ]
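- 
- [ note: the protection model here is the classic "two virtual aliases
-   of one physical page" arrangement: the kernel keeps a writable
-   mapping for its own updates and exposes only a read-only alias at
-   the fixed, non-kASLR address that the entry code and hardware use.
-   A minimal userspace analogy, not kernel code (shm_open may need
-   -lrt on older glibc; error handling omitted for brevity):
- 
-       /* alias_demo.c - RW and RO aliases of one physical page */
-       #include <fcntl.h>
-       #include <stdio.h>
-       #include <string.h>
-       #include <sys/mman.h>
-       #include <unistd.h>
- 
-       int main(void)
-       {
-               int fd = shm_open("/tss_demo", O_CREAT | O_RDWR, 0600);
- 
-               ftruncate(fd, 4096);    /* one page of backing store */
-               char *rw = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
-                               MAP_SHARED, fd, 0);
-               char *ro = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
- 
-               strcpy(rw, "sp0");      /* write via the RW alias      */
-               printf("%s\n", ro);     /* same bytes via the RO alias */
-               /* *ro = 'x';  would SIGSEGV, just as a stray kernel
-                * write to the read-only TSS mapping faults after
-                * this patch. */
-               shm_unlink("/tss_demo");
-               return 0;
-       }
- ]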
- Signed-off-by: Andy Lutomirski <[email protected]>
- Signed-off-by: Thomas Gleixner <[email protected]>
- Reviewed-by: Borislav Petkov <[email protected]>
- Cc: Boris Ostrovsky <[email protected]>
- Cc: Borislav Petkov <[email protected]>
- Cc: Brian Gerst <[email protected]>
- Cc: Dave Hansen <[email protected]>
- Cc: Dave Hansen <[email protected]>
- Cc: David Laight <[email protected]>
- Cc: Denys Vlasenko <[email protected]>
- Cc: Eduardo Valentin <[email protected]>
- Cc: Greg KH <[email protected]>
- Cc: H. Peter Anvin <[email protected]>
- Cc: Josh Poimboeuf <[email protected]>
- Cc: Juergen Gross <[email protected]>
- Cc: Kees Cook <[email protected]>
- Cc: Linus Torvalds <[email protected]>
- Cc: Peter Zijlstra <[email protected]>
- Cc: Rik van Riel <[email protected]>
- Cc: Will Deacon <[email protected]>
- Cc: [email protected]
- Cc: [email protected]
- Cc: [email protected]
- Cc: [email protected]
- Link: https://lkml.kernel.org/r/[email protected]
- Signed-off-by: Ingo Molnar <[email protected]>
- (backported from commit c482feefe1aeb150156248ba0fd3e029bc886605)
- Signed-off-by: Andy Whitcroft <[email protected]>
- Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
- (cherry picked from commit 785be108f90cd62eab2da17490714085ef752538)
- Signed-off-by: Fabian Grünbichler <[email protected]>
- ---
- arch/x86/include/asm/fixmap.h | 13 +++++++++----
- arch/x86/include/asm/processor.h | 17 ++++++++---------
- arch/x86/include/asm/switch_to.h | 4 ++--
- arch/x86/include/asm/thread_info.h | 2 +-
- arch/x86/kernel/asm-offsets.c | 5 ++---
- arch/x86/kernel/asm-offsets_32.c | 4 ++--
- arch/x86/kernel/cpu/common.c | 29 +++++++++++++++++++----------
- arch/x86/kernel/ioport.c | 2 +-
- arch/x86/kernel/process.c | 6 +++---
- arch/x86/kernel/process_32.c | 2 +-
- arch/x86/kernel/process_64.c | 2 +-
- arch/x86/kernel/traps.c | 4 ++--
- arch/x86/lib/delay.c | 4 ++--
- arch/x86/xen/enlighten_pv.c | 2 +-
- arch/x86/entry/entry_32.S | 4 ++--
- arch/x86/entry/entry_64.S | 8 ++++----
- 16 files changed, 60 insertions(+), 48 deletions(-)
- diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
- index 56aaffbbffd6..5dc269ff4085 100644
- --- a/arch/x86/include/asm/fixmap.h
- +++ b/arch/x86/include/asm/fixmap.h
- @@ -56,9 +56,14 @@ struct cpu_entry_area {
- char gdt[PAGE_SIZE];
-
- /*
- - * The GDT is just below cpu_tss and thus serves (on x86_64) as a
- - * a read-only guard page for the SYSENTER stack at the bottom
- - * of the TSS region.
- + * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
- + * a read-only guard page.
- + */
- + struct SYSENTER_stack_page SYSENTER_stack_page;
- +
- + /*
- + * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
- + * we need task switches to work, and task switches write to the TSS.
- */
- struct tss_struct tss;
-
- @@ -227,7 +232,7 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
-
- static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
- {
- - return &get_cpu_entry_area(cpu)->tss.SYSENTER_stack;
- + return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
- }
-
- #endif /* !__ASSEMBLY__ */
- diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
- index 2d489a414a86..bccec7ed1676 100644
- --- a/arch/x86/include/asm/processor.h
- +++ b/arch/x86/include/asm/processor.h
- @@ -334,13 +334,11 @@ struct SYSENTER_stack {
- unsigned long words[64];
- };
-
- -struct tss_struct {
- - /*
- - * Space for the temporary SYSENTER stack, used for SYSENTER
- - * and the entry trampoline as well.
- - */
- - struct SYSENTER_stack SYSENTER_stack;
- +struct SYSENTER_stack_page {
- + struct SYSENTER_stack stack;
- +} __aligned(PAGE_SIZE);
-
- +struct tss_struct {
- /*
- * The fixed hardware portion. This must not cross a page boundary
- * at risk of violating the SDM's advice and potentially triggering
- @@ -357,7 +355,7 @@ struct tss_struct {
- unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
- } __aligned(PAGE_SIZE);
-
- -DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
- +DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
-
- /*
- * sizeof(unsigned long) coming from an extra "long" at the end
- @@ -372,7 +370,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
- #ifdef CONFIG_X86_32
- DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
- #else
- -#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
- +/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
- +#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
- #endif
-
- /*
- @@ -532,7 +531,7 @@ static inline void native_set_iopl_mask(unsigned mask)
- static inline void
- native_load_sp0(unsigned long sp0)
- {
- - this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
- + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
- }
-
- static inline void native_swapgs(void)
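- 
- [ note: why the RW copy is needed above: this_cpu_read() and
-   this_cpu_write() compile to %gs-relative accesses and therefore only
-   work on percpu symbols such as cpu_tss_rw; the read-only alias lives
-   at a fixmap address and has to be reached through an ordinary
-   pointer. A sketch of the two access paths (the *_demo helpers are
-   hypothetical, but the accessors are the ones used in this patch):
- 
-       /* Percpu path: a single %gs-relative load of the RW copy. */
-       static inline unsigned long top_of_stack_demo(void)
-       {
-               return this_cpu_read(cpu_tss_rw.x86_tss.sp1);
-       }
- 
-       /* Fixmap path: plain dereference of the read-only alias. */
-       static inline unsigned long ro_top_of_stack_demo(int cpu)
-       {
-               return get_cpu_entry_area(cpu)->tss.x86_tss.sp1;
-       }
- ]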
- diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
- index ca2fc84ad278..cfb6dfe4c457 100644
- --- a/arch/x86/include/asm/switch_to.h
- +++ b/arch/x86/include/asm/switch_to.h
- @@ -78,10 +78,10 @@ do { \
- static inline void refresh_sysenter_cs(struct thread_struct *thread)
- {
- /* Only happens when SEP is enabled, no need to test "SEP"arately: */
- - if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
- + if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
- return;
-
- - this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
- + this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
- wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
- }
- #endif
- diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
- index 760dd8a73927..6275b391ac61 100644
- --- a/arch/x86/include/asm/thread_info.h
- +++ b/arch/x86/include/asm/thread_info.h
- @@ -214,7 +214,7 @@ static inline int arch_within_stack_frames(const void * const stack,
- #else /* !__ASSEMBLY__ */
-
- #ifdef CONFIG_X86_64
- -# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
- +# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
- #endif
-
- #endif
- diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
- index 00ea20bfa857..40c3fab107ac 100644
- --- a/arch/x86/kernel/asm-offsets.c
- +++ b/arch/x86/kernel/asm-offsets.c
- @@ -93,10 +93,9 @@ void common(void) {
- BLANK();
- DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
-
- - OFFSET(TSS_STRUCT_SYSENTER_stack, tss_struct, SYSENTER_stack);
- - DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
- -
- /* Layout info for cpu_entry_area */
- OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
- OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
- + OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page);
- + DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
- }
- diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
- index d09b161a3bd0..c4f23da7a0f0 100644
- --- a/arch/x86/kernel/asm-offsets_32.c
- +++ b/arch/x86/kernel/asm-offsets_32.c
- @@ -49,8 +49,8 @@ void foo(void)
- BLANK();
-
- /* Offset from the sysenter stack to tss.sp0 */
- - DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
- - offsetofend(struct tss_struct, SYSENTER_stack));
- + DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
- + offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack));
-
- #ifdef CONFIG_CC_STACKPROTECTOR
- BLANK();
- diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
- index f9541c48c290..7992e5a8076c 100644
- --- a/arch/x86/kernel/cpu/common.c
- +++ b/arch/x86/kernel/cpu/common.c
- @@ -487,6 +487,9 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
- [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
- #endif
-
- +static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page,
- + SYSENTER_stack_storage);
- +
- static void __init
- set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
- {
- @@ -500,23 +503,29 @@ static void __init setup_cpu_entry_area(int cpu)
- #ifdef CONFIG_X86_64
- extern char _entry_trampoline[];
-
- - /* On 64-bit systems, we use a read-only fixmap GDT. */
- + /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
- pgprot_t gdt_prot = PAGE_KERNEL_RO;
- + pgprot_t tss_prot = PAGE_KERNEL_RO;
- #else
- /*
- * On native 32-bit systems, the GDT cannot be read-only because
- * our double fault handler uses a task gate, and entering through
- - * a task gate needs to change an available TSS to busy. If the GDT
- - * is read-only, that will triple fault.
- + * a task gate needs to change an available TSS to busy. If the
- + * GDT is read-only, that will triple fault. The TSS cannot be
- + * read-only because the CPU writes to it on task switches.
- *
- - * On Xen PV, the GDT must be read-only because the hypervisor requires
- - * it.
- + * On Xen PV, the GDT must be read-only because the hypervisor
- + * requires it.
- */
- pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
- PAGE_KERNEL_RO : PAGE_KERNEL;
- + pgprot_t tss_prot = PAGE_KERNEL;
- #endif
-
- __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
- + set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page),
- + per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1,
- + PAGE_KERNEL);
-
- /*
- * The Intel SDM says (Volume 3, 7.2.1):
- @@ -539,9 +548,9 @@ static void __init setup_cpu_entry_area(int cpu)
- offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
- BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
- set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
- - &per_cpu(cpu_tss, cpu),
- + &per_cpu(cpu_tss_rw, cpu),
- sizeof(struct tss_struct) / PAGE_SIZE,
- - PAGE_KERNEL);
- + tss_prot);
-
- #ifdef CONFIG_X86_32
- per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
- @@ -1297,7 +1306,7 @@ void enable_sep_cpu(void)
- return;
-
- cpu = get_cpu();
- - tss = &per_cpu(cpu_tss, cpu);
- + tss = &per_cpu(cpu_tss_rw, cpu);
-
- /*
- * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
- @@ -1576,7 +1585,7 @@ void cpu_init(void)
- if (cpu)
- load_ucode_ap();
-
- - t = &per_cpu(cpu_tss, cpu);
- + t = &per_cpu(cpu_tss_rw, cpu);
- oist = &per_cpu(orig_ist, cpu);
-
- #ifdef CONFIG_NUMA
- @@ -1667,7 +1676,7 @@ void cpu_init(void)
- {
- int cpu = smp_processor_id();
- struct task_struct *curr = current;
- - struct tss_struct *t = &per_cpu(cpu_tss, cpu);
- + struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
-
- wait_for_master_cpu(cpu);
-
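- 
- [ note: condensed view of the setup_cpu_entry_area() change above.
-   map_cpu_tss() is a hypothetical wrapper, but the calls and
-   protection flags are exactly the ones added by this hunk:
- 
-       static void __init map_cpu_tss(int cpu)
-       {
-       #ifdef CONFIG_X86_64
-               pgprot_t tss_prot = PAGE_KERNEL_RO; /* CPU never writes it */
-       #else
-               pgprot_t tss_prot = PAGE_KERNEL;    /* task switches write */
-       #endif
-               set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
-                                       &per_cpu(cpu_tss_rw, cpu),
-                                       sizeof(struct tss_struct) / PAGE_SIZE,
-                                       tss_prot);
-       }
- ]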
- diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
- index 4a613fed94b6..d13777d49d8b 100644
- --- a/arch/x86/kernel/ioport.c
- +++ b/arch/x86/kernel/ioport.c
- @@ -66,7 +66,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
- * because the ->io_bitmap_max value must match the bitmap
- * contents:
- */
- - tss = &per_cpu(cpu_tss, get_cpu());
- + tss = &per_cpu(cpu_tss_rw, get_cpu());
-
- if (turn_on)
- bitmap_clear(t->io_bitmap_ptr, from, num);
- diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
- index ec758390d24e..3688a7b9d055 100644
- --- a/arch/x86/kernel/process.c
- +++ b/arch/x86/kernel/process.c
- @@ -46,7 +46,7 @@
- * section. Since TSS's are completely CPU-local, we want them
- * on exact cacheline boundaries, to eliminate cacheline ping-pong.
- */
- -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
- +__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
- .x86_tss = {
- /*
- * .sp0 is only used when entering ring 0 from a lower
- @@ -81,7 +81,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
- .io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
- #endif
- };
- -EXPORT_PER_CPU_SYMBOL(cpu_tss);
- +EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
-
- DEFINE_PER_CPU(bool, __tss_limit_invalid);
- EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
- @@ -110,7 +110,7 @@ void exit_thread(struct task_struct *tsk)
- struct fpu *fpu = &t->fpu;
-
- if (bp) {
- - struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
- + struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
-
- t->io_bitmap_ptr = NULL;
- clear_thread_flag(TIF_IO_BITMAP);
- diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
- index c0d60420466c..784ff9147172 100644
- --- a/arch/x86/kernel/process_32.c
- +++ b/arch/x86/kernel/process_32.c
- @@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- struct fpu *prev_fpu = &prev->fpu;
- struct fpu *next_fpu = &next->fpu;
- int cpu = smp_processor_id();
- - struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
- + struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
-
- /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
-
- diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
- index 157f81816915..c75466232016 100644
- --- a/arch/x86/kernel/process_64.c
- +++ b/arch/x86/kernel/process_64.c
- @@ -399,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
- struct fpu *prev_fpu = &prev->fpu;
- struct fpu *next_fpu = &next->fpu;
- int cpu = smp_processor_id();
- - struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
- + struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
-
- WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
- this_cpu_read(irq_count) != -1);
- diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
- index 2818c83892b3..14b462eefa17 100644
- --- a/arch/x86/kernel/traps.c
- +++ b/arch/x86/kernel/traps.c
- @@ -376,7 +376,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
- regs->cs == __KERNEL_CS &&
- regs->ip == (unsigned long)native_irq_return_iret)
- {
- - struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
- + struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
-
- /*
- * regs->sp points to the failing IRET frame on the
- @@ -661,7 +661,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
- * exception came from the IRET target.
- */
- struct bad_iret_stack *new_stack =
- - (struct bad_iret_stack *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
- + (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
-
- /* Copy the IRET target to the new stack. */
- memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
- diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
- index 29df077cb089..cf2ac227c2ac 100644
- --- a/arch/x86/lib/delay.c
- +++ b/arch/x86/lib/delay.c
- @@ -106,10 +106,10 @@ static void delay_mwaitx(unsigned long __loops)
- delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
-
- /*
- - * Use cpu_tss as a cacheline-aligned, seldomly
- + * Use cpu_tss_rw as a cacheline-aligned, seldom
- * accessed per-cpu variable as the monitor target.
- */
- - __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
- + __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
-
- /*
- * AMD, like Intel, supports the EAX hint and EAX=0xf
- diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
- index 63c81154083b..3b76cf85e306 100644
- --- a/arch/x86/xen/enlighten_pv.c
- +++ b/arch/x86/xen/enlighten_pv.c
- @@ -817,7 +817,7 @@ static void xen_load_sp0(unsigned long sp0)
- mcs = xen_mc_entry(0);
- MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
- xen_mc_issue(PARAVIRT_LAZY_CPU);
- - this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
- + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
- }
-
- void xen_set_iopl_mask(unsigned mask)
- diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
- index 04abcd3f8e2d..3ef7800007f8 100644
- --- a/arch/x86/entry/entry_32.S
- +++ b/arch/x86/entry/entry_32.S
- @@ -949,7 +949,7 @@ ENTRY(debug)
-
- /* Are we currently on the SYSENTER stack? */
- movl PER_CPU_VAR(cpu_entry_area), %ecx
- - addl $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
- + addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
- subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
- cmpl $SIZEOF_SYSENTER_stack, %ecx
- jb .Ldebug_from_sysenter_stack
- @@ -993,7 +993,7 @@ ENTRY(nmi)
-
- /* Are we currently on the SYSENTER stack? */
- movl PER_CPU_VAR(cpu_entry_area), %ecx
- - addl $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
- + addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
- subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
- cmpl $SIZEOF_SYSENTER_stack, %ecx
- jb .Lnmi_from_sysenter_stack
- diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
- index 7a5e9edcdaf4..157860b3569f 100644
- --- a/arch/x86/entry/entry_64.S
- +++ b/arch/x86/entry/entry_64.S
- @@ -153,7 +153,7 @@ END(native_usergs_sysret64)
- _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
-
- /* The top word of the SYSENTER stack is hot and is usable as scratch space. */
- -#define RSP_SCRATCH CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + \
- +#define RSP_SCRATCH CPU_ENTRY_AREA_SYSENTER_stack + \
- SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
-
- ENTRY(entry_SYSCALL_64_trampoline)
- @@ -389,7 +389,7 @@ syscall_return_via_sysret:
- * Save old stack pointer and switch to trampoline stack.
- */
- movq %rsp, %rdi
- - movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
- + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
-
- pushq RSP-RDI(%rdi) /* RSP */
- pushq (%rdi) /* RDI */
- @@ -718,7 +718,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
- * Save old stack pointer and switch to trampoline stack.
- */
- movq %rsp, %rdi
- - movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
- + movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
-
- /* Copy the IRET frame to the trampoline stack. */
- pushq 6*8(%rdi) /* SS */
- @@ -946,7 +946,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
- /*
- * Exception entry points.
- */
- -#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
- +#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
-
- /*
- * Switch to the thread stack. This is called with the IRET frame and
- --
- 2.14.2
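- 
- [ note: the post-patch invariant, as an illustrative fragment rather
-   than code from the patch: all software updates go through the
-   writable percpu copy, while the entry-area alias (the address the
-   hardware uses via TR) faults on any stray write:
- 
-       struct tss_struct *tss = &get_cpu_entry_area(cpu)->tss;
- 
-       tss->x86_tss.sp0 = 0;                      /* faults: PAGE_KERNEL_RO */
-       this_cpu_write(cpu_tss_rw.x86_tss.sp0, 0); /* OK: RW percpu copy     */
- ]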
|