0159-x86-entry-64-Make-cpu_entry_area.tss-read-only.patch

From a4da7aed31f0355b881bdeeb3d269a20759f16a8 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <[email protected]>
Date: Mon, 4 Dec 2017 15:07:29 +0100
Subject: [PATCH 159/241] x86/entry/64: Make cpu_entry_area.tss read-only
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

The TSS is a fairly juicy target for exploits, and, now that the TSS
is in the cpu_entry_area, it's no longer protected by kASLR. Make it
read-only on x86_64.

On x86_32, it can't be RO because it's written by the CPU during task
switches, and we use a task gate for double faults. I'd also be
nervous about errata if we tried to make it RO even on configurations
without double fault handling.

[ tglx: AMD confirmed that there is no problem on 64-bit with TSS RO. So
  it's probably safe to assume that it's a non issue, though Intel
  might have been creative in that area. Still waiting for
  confirmation. ]

Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Laight <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Eduardo Valentin <[email protected]>
Cc: Greg KH <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(backported from commit c482feefe1aeb150156248ba0fd3e029bc886605)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 785be108f90cd62eab2da17490714085ef752538)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
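
A quick illustration of the mapping trick this patch relies on (not kernel
code, and not part of the patch): the per-CPU variable, renamed cpu_tss_rw
below, stays the copy the kernel writes through, while the cpu_entry_area
fixmap alias of the same pages becomes PAGE_KERNEL_RO on x86_64. The
userspace sketch below only mimics that "one backing store, two mappings
with different protections" idea; the names tss_demo, rw and ro are
invented for the example.

/*
 * Userspace sketch only -- NOT kernel code.  One writable mapping plays
 * the role of cpu_tss_rw; a read-only alias of the same pages plays the
 * role of the cpu_entry_area TSS mapping on x86_64.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("tss_demo", 0);	/* anonymous backing store */

	if (fd < 0 || ftruncate(fd, page) < 0)
		return 1;

	/* The mapping the owner writes through (like cpu_tss_rw). */
	char *rw = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* A read-only alias of the same pages (like the fixmap TSS). */
	char *ro = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);

	if (rw == MAP_FAILED || ro == MAP_FAILED)
		return 1;

	strcpy(rw, "updated through the RW mapping");
	printf("seen through the RO alias: %s\n", ro);

	/* A store through 'ro' would SIGSEGV -- which is the point here. */
	return 0;
}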
 arch/x86/include/asm/fixmap.h      | 13 +++++++++----
 arch/x86/include/asm/processor.h   | 17 ++++++++---------
 arch/x86/include/asm/switch_to.h   |  4 ++--
 arch/x86/include/asm/thread_info.h |  2 +-
 arch/x86/kernel/asm-offsets.c      |  5 ++---
 arch/x86/kernel/asm-offsets_32.c   |  4 ++--
 arch/x86/kernel/cpu/common.c       | 29 +++++++++++++++++++----------
 arch/x86/kernel/ioport.c           |  2 +-
 arch/x86/kernel/process.c          |  6 +++---
 arch/x86/kernel/process_32.c       |  2 +-
 arch/x86/kernel/process_64.c       |  2 +-
 arch/x86/kernel/traps.c            |  4 ++--
 arch/x86/lib/delay.c               |  4 ++--
 arch/x86/xen/enlighten_pv.c        |  2 +-
 arch/x86/entry/entry_32.S          |  4 ++--
 arch/x86/entry/entry_64.S          |  8 ++++----
 16 files changed, 60 insertions(+), 48 deletions(-)
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 56aaffbbffd6..5dc269ff4085 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -56,9 +56,14 @@ struct cpu_entry_area {
char gdt[PAGE_SIZE];
/*
- * The GDT is just below cpu_tss and thus serves (on x86_64) as a
- * a read-only guard page for the SYSENTER stack at the bottom
- * of the TSS region.
+ * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
+ * a a read-only guard page.
+ */
+ struct SYSENTER_stack_page SYSENTER_stack_page;
+
+ /*
+ * On x86_64, the TSS is mapped RO. On x86_32, it's mapped RW because
+ * we need task switches to work, and task switches write to the TSS.
*/
struct tss_struct tss;
@@ -227,7 +232,7 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
{
- return &get_cpu_entry_area(cpu)->tss.SYSENTER_stack;
+ return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2d489a414a86..bccec7ed1676 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -334,13 +334,11 @@ struct SYSENTER_stack {
unsigned long words[64];
};
-struct tss_struct {
- /*
- * Space for the temporary SYSENTER stack, used for SYSENTER
- * and the entry trampoline as well.
- */
- struct SYSENTER_stack SYSENTER_stack;
+struct SYSENTER_stack_page {
+ struct SYSENTER_stack stack;
+} __aligned(PAGE_SIZE);
+struct tss_struct {
/*
* The fixed hardware portion. This must not cross a page boundary
* at risk of violating the SDM's advice and potentially triggering
@@ -357,7 +355,7 @@ struct tss_struct {
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __aligned(PAGE_SIZE);
-DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
/*
* sizeof(unsigned long) coming from an extra "long" at the end
@@ -372,7 +370,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
#ifdef CONFIG_X86_32
DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
#else
-#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
#endif
/*
@@ -532,7 +531,7 @@ static inline void native_set_iopl_mask(unsigned mask)
static inline void
native_load_sp0(unsigned long sp0)
{
- this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+ this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
static inline void native_swapgs(void)
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index ca2fc84ad278..cfb6dfe4c457 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -78,10 +78,10 @@ do { \
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
/* Only happens when SEP is enabled, no need to test "SEP"arately: */
- if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+ if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
return;
- this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+ this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 760dd8a73927..6275b391ac61 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -214,7 +214,7 @@ static inline int arch_within_stack_frames(const void * const stack,
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
#endif
#endif
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 00ea20bfa857..40c3fab107ac 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -93,10 +93,9 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
- OFFSET(TSS_STRUCT_SYSENTER_stack, tss_struct, SYSENTER_stack);
- DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
-
/* Layout info for cpu_entry_area */
OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+ OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page);
+ DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
}
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index d09b161a3bd0..c4f23da7a0f0 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -49,8 +49,8 @@ void foo(void)
BLANK();
/* Offset from the sysenter stack to tss.sp0 */
- DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
- offsetofend(struct tss_struct, SYSENTER_stack));
+ DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+ offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack));
#ifdef CONFIG_CC_STACKPROTECTOR
BLANK();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f9541c48c290..7992e5a8076c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -487,6 +487,9 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page,
+ SYSENTER_stack_storage);
+
static void __init
set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
{
@@ -500,23 +503,29 @@ static void __init setup_cpu_entry_area(int cpu)
#ifdef CONFIG_X86_64
extern char _entry_trampoline[];
- /* On 64-bit systems, we use a read-only fixmap GDT. */
+ /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
pgprot_t gdt_prot = PAGE_KERNEL_RO;
+ pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
/*
* On native 32-bit systems, the GDT cannot be read-only because
* our double fault handler uses a task gate, and entering through
- * a task gate needs to change an available TSS to busy. If the GDT
- * is read-only, that will triple fault.
+ * a task gate needs to change an available TSS to busy. If the
+ * GDT is read-only, that will triple fault. The TSS cannot be
+ * read-only because the CPU writes to it on task switches.
*
- * On Xen PV, the GDT must be read-only because the hypervisor requires
- * it.
+ * On Xen PV, the GDT must be read-only because the hypervisor
+ * requires it.
*/
pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
PAGE_KERNEL_RO : PAGE_KERNEL;
+ pgprot_t tss_prot = PAGE_KERNEL;
#endif
__set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
+ set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page),
+ per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1,
+ PAGE_KERNEL);
/*
* The Intel SDM says (Volume 3, 7.2.1):
@@ -539,9 +548,9 @@ static void __init setup_cpu_entry_area(int cpu)
offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
- &per_cpu(cpu_tss, cpu),
+ &per_cpu(cpu_tss_rw, cpu),
sizeof(struct tss_struct) / PAGE_SIZE,
- PAGE_KERNEL);
+ tss_prot);
#ifdef CONFIG_X86_32
per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
@@ -1297,7 +1306,7 @@ void enable_sep_cpu(void)
return;
cpu = get_cpu();
- tss = &per_cpu(cpu_tss, cpu);
+ tss = &per_cpu(cpu_tss_rw, cpu);
/*
* We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1576,7 +1585,7 @@ void cpu_init(void)
if (cpu)
load_ucode_ap();
- t = &per_cpu(cpu_tss, cpu);
+ t = &per_cpu(cpu_tss_rw, cpu);
oist = &per_cpu(orig_ist, cpu);
#ifdef CONFIG_NUMA
@@ -1667,7 +1676,7 @@ void cpu_init(void)
{
int cpu = smp_processor_id();
struct task_struct *curr = current;
- struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+ struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
wait_for_master_cpu(cpu);
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 4a613fed94b6..d13777d49d8b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -66,7 +66,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
* because the ->io_bitmap_max value must match the bitmap
* contents:
*/
- tss = &per_cpu(cpu_tss, get_cpu());
+ tss = &per_cpu(cpu_tss_rw, get_cpu());
if (turn_on)
bitmap_clear(t->io_bitmap_ptr, from, num);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ec758390d24e..3688a7b9d055 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -46,7 +46,7 @@
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
.x86_tss = {
/*
* .sp0 is only used when entering ring 0 from a lower
@@ -81,7 +81,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -110,7 +110,7 @@ void exit_thread(struct task_struct *tsk)
struct fpu *fpu = &t->fpu;
if (bp) {
- struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+ struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
t->io_bitmap_ptr = NULL;
clear_thread_flag(TIF_IO_BITMAP);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c0d60420466c..784ff9147172 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *prev_fpu = &prev->fpu;
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+ struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 157f81816915..c75466232016 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -399,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct fpu *prev_fpu = &prev->fpu;
struct fpu *next_fpu = &next->fpu;
int cpu = smp_processor_id();
- struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+ struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
this_cpu_read(irq_count) != -1);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 2818c83892b3..14b462eefa17 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -376,7 +376,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
regs->cs == __KERNEL_CS &&
regs->ip == (unsigned long)native_irq_return_iret)
{
- struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+ struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
/*
* regs->sp points to the failing IRET frame on the
@@ -661,7 +661,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
* exception came from the IRET target.
*/
struct bad_iret_stack *new_stack =
- (struct bad_iret_stack *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+ (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
/* Copy the IRET target to the new stack. */
memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 29df077cb089..cf2ac227c2ac 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -106,10 +106,10 @@ static void delay_mwaitx(unsigned long __loops)
delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
/*
- * Use cpu_tss as a cacheline-aligned, seldomly
+ * Use cpu_tss_rw as a cacheline-aligned, seldomly
* accessed per-cpu variable as the monitor target.
*/
- __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+ __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
/*
* AMD, like Intel, supports the EAX hint and EAX=0xf
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 63c81154083b..3b76cf85e306 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -817,7 +817,7 @@ static void xen_load_sp0(unsigned long sp0)
mcs = xen_mc_entry(0);
MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
xen_mc_issue(PARAVIRT_LAZY_CPU);
- this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+ this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
void xen_set_iopl_mask(unsigned mask)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 04abcd3f8e2d..3ef7800007f8 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -949,7 +949,7 @@ ENTRY(debug)
/* Are we currently on the SYSENTER stack? */
movl PER_CPU_VAR(cpu_entry_area), %ecx
- addl $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
+ addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
cmpl $SIZEOF_SYSENTER_stack, %ecx
jb .Ldebug_from_sysenter_stack
@@ -993,7 +993,7 @@ ENTRY(nmi)
/* Are we currently on the SYSENTER stack? */
movl PER_CPU_VAR(cpu_entry_area), %ecx
- addl $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
+ addl $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
subl %eax, %ecx /* ecx = (end of SYSENTER_stack) - esp */
cmpl $SIZEOF_SYSENTER_stack, %ecx
jb .Lnmi_from_sysenter_stack
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 7a5e9edcdaf4..157860b3569f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -153,7 +153,7 @@ END(native_usergs_sysret64)
_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
/* The top word of the SYSENTER stack is hot and is usable as scratch space. */
-#define RSP_SCRATCH CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + \
+#define RSP_SCRATCH CPU_ENTRY_AREA_SYSENTER_stack + \
SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
ENTRY(entry_SYSCALL_64_trampoline)
@@ -389,7 +389,7 @@ syscall_return_via_sysret:
* Save old stack pointer and switch to trampoline stack.
*/
movq %rsp, %rdi
- movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+ movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
pushq RSP-RDI(%rdi) /* RSP */
pushq (%rdi) /* RDI */
@@ -718,7 +718,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
* Save old stack pointer and switch to trampoline stack.
*/
movq %rsp, %rdi
- movq PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+ movq PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
/* Copy the IRET frame to the trampoline stack. */
pushq 6*8(%rdi) /* SS */
@@ -946,7 +946,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
/*
* Exception entry points.
*/
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
/*
* Switch to the thread stack. This is called with the IRET frame and
-- 
2.14.2
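
As an aside on the RSP_SCRATCH change in entry_64.S above: the new
definition resolves to the address of the top (highest) 8-byte word of the
SYSENTER stack inside the cpu_entry_area. The standalone sketch below
mirrors that offset arithmetic with simplified stand-in types; the field
layouts (including the placeholder tss_struct) are invented for the
illustration and are not the real kernel definitions.

/* Stand-in types only; the real kernel structs carry more members. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

struct SYSENTER_stack { unsigned long words[64]; };

struct SYSENTER_stack_page {
	struct SYSENTER_stack stack;
} __attribute__((aligned(PAGE_SIZE)));

/* Placeholder for the hardware TSS plus io_bitmap portion. */
struct tss_struct {
	char x86_tss[PAGE_SIZE];
} __attribute__((aligned(PAGE_SIZE)));

struct cpu_entry_area {
	char gdt[PAGE_SIZE];
	struct SYSENTER_stack_page SYSENTER_stack_page;
	struct tss_struct tss;
};

int main(void)
{
	/*
	 * Mirrors the new RSP_SCRATCH definition (minus the CPU_ENTRY_AREA
	 * base): offset of the SYSENTER stack page plus the stack size,
	 * minus 8, i.e. the top word of the SYSENTER stack.
	 */
	size_t rsp_scratch = offsetof(struct cpu_entry_area, SYSENTER_stack_page)
			     + sizeof(struct SYSENTER_stack) - 8;

	printf("RSP_SCRATCH offset into cpu_entry_area: %zu bytes\n", rsp_scratch);
	return 0;
}

With a 4096-byte page and a 512-byte SYSENTER stack this prints 4600,
i.e. the GDT page (4096) plus the stack size (512) minus 8.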