0151-x86-entry-64-Separate-cpu_current_top_of_stack-from-TSS.sp0.patch

  1. From c2cd64d7bc24a46e3192246a97b30ca5a9692d42 Mon Sep 17 00:00:00 2001
  2. From: Andy Lutomirski <[email protected]>
  3. Date: Mon, 4 Dec 2017 15:07:21 +0100
  4. Subject: [PATCH 151/232] x86/entry/64: Separate cpu_current_top_of_stack from
  5. TSS.sp0
  6. MIME-Version: 1.0
  7. Content-Type: text/plain; charset=UTF-8
  8. Content-Transfer-Encoding: 8bit
  9. CVE-2017-5754
  10. On 64-bit kernels, we used to assume that TSS.sp0 was the current
  11. top of stack. With the addition of an entry trampoline, this will
  12. no longer be the case. Store the current top of stack in TSS.sp1,
  13. which is otherwise unused but shares the same cacheline.
  14. Signed-off-by: Andy Lutomirski <[email protected]>
  15. Signed-off-by: Thomas Gleixner <[email protected]>
  16. Reviewed-by: Thomas Gleixner <[email protected]>
  17. Reviewed-by: Borislav Petkov <[email protected]>
  18. Cc: Boris Ostrovsky <[email protected]>
  19. Cc: Borislav Petkov <[email protected]>
  20. Cc: Borislav Petkov <[email protected]>
  21. Cc: Brian Gerst <[email protected]>
  22. Cc: Dave Hansen <[email protected]>
  23. Cc: Dave Hansen <[email protected]>
  24. Cc: David Laight <[email protected]>
  25. Cc: Denys Vlasenko <[email protected]>
  26. Cc: Eduardo Valentin <[email protected]>
  27. Cc: Greg KH <[email protected]>
  28. Cc: H. Peter Anvin <[email protected]>
  29. Cc: Josh Poimboeuf <[email protected]>
  30. Cc: Juergen Gross <[email protected]>
  31. Cc: Linus Torvalds <[email protected]>
  32. Cc: Peter Zijlstra <[email protected]>
  33. Cc: Rik van Riel <[email protected]>
  34. Cc: Will Deacon <[email protected]>
  35. Cc: [email protected]
  36. Cc: [email protected]
  37. Cc: [email protected]
  38. Cc: [email protected]
  39. Link: https://lkml.kernel.org/r/[email protected]
  40. Signed-off-by: Ingo Molnar <[email protected]>
  41. (cherry picked from commit 9aaefe7b59ae00605256a7d6bd1c1456432495fc)
  42. Signed-off-by: Andy Whitcroft <[email protected]>
  43. Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
  44. (cherry picked from commit 281be4ff07f7c67dc2a9c75ab24a7b9ff25544ae)
  45. Signed-off-by: Fabian Grünbichler <[email protected]>
  46. ---
  47. arch/x86/include/asm/processor.h | 18 +++++++++++++-----
  48. arch/x86/include/asm/thread_info.h | 2 +-
  49. arch/x86/kernel/asm-offsets_64.c | 1 +
  50. arch/x86/kernel/process.c | 10 ++++++++++
  51. arch/x86/kernel/process_64.c | 1 +
  52. 5 files changed, 26 insertions(+), 6 deletions(-)
  53. diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
  54. index 55885465c3a7..1bfe4bad797a 100644
  55. --- a/arch/x86/include/asm/processor.h
  56. +++ b/arch/x86/include/asm/processor.h
  57. @@ -303,7 +303,13 @@ struct x86_hw_tss {
  58. struct x86_hw_tss {
  59. u32 reserved1;
  60. u64 sp0;
  61. +
  62. + /*
  63. + * We store cpu_current_top_of_stack in sp1 so it's always accessible.
  64. + * Linux does not use ring 1, so sp1 is not otherwise needed.
  65. + */
  66. u64 sp1;
  67. +
  68. u64 sp2;
  69. u64 reserved2;
  70. u64 ist[7];
  71. @@ -362,6 +368,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
  72. #ifdef CONFIG_X86_32
  73. DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
  74. +#else
  75. +#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
  76. #endif
  77. /*
  78. @@ -533,12 +541,12 @@ static inline void native_swapgs(void)
  79. static inline unsigned long current_top_of_stack(void)
  80. {
  81. -#ifdef CONFIG_X86_64
  82. - return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
  83. -#else
  84. - /* sp0 on x86_32 is special in and around vm86 mode. */
  85. + /*
  86. + * We can't read directly from tss.sp0: sp0 on x86_32 is special in
  87. + * and around vm86 mode and sp0 on x86_64 is special because of the
  88. + * entry trampoline.
  89. + */
  90. return this_cpu_read_stable(cpu_current_top_of_stack);
  91. -#endif
  92. }
  93. static inline bool on_thread_stack(void)
  94. diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
  95. index ec8ef3bbb7dc..760dd8a73927 100644
  96. --- a/arch/x86/include/asm/thread_info.h
  97. +++ b/arch/x86/include/asm/thread_info.h
  98. @@ -214,7 +214,7 @@ static inline int arch_within_stack_frames(const void * const stack,
  99. #else /* !__ASSEMBLY__ */
  100. #ifdef CONFIG_X86_64
  101. -# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
  102. +# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
  103. #endif
  104. #endif
  105. diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
  106. index c21a5315b38e..048f68ff3396 100644
  107. --- a/arch/x86/kernel/asm-offsets_64.c
  108. +++ b/arch/x86/kernel/asm-offsets_64.c
  109. @@ -65,6 +65,7 @@ int main(void)
  110. OFFSET(TSS_ist, tss_struct, x86_tss.ist);
  111. OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
  112. + OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
  113. BLANK();
  114. #ifdef CONFIG_CC_STACKPROTECTOR
  115. diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
  116. index aa86e810fb54..407fc37a8718 100644
  117. --- a/arch/x86/kernel/process.c
  118. +++ b/arch/x86/kernel/process.c
  119. @@ -55,6 +55,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
  120. * Poison it.
  121. */
  122. .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
  123. +
  124. +#ifdef CONFIG_X86_64
  125. + /*
  126. + * .sp1 is cpu_current_top_of_stack. The init task never
  127. + * runs user code, but cpu_current_top_of_stack should still
  128. + * be well defined before the first context switch.
  129. + */
  130. + .sp1 = TOP_OF_INIT_STACK,
  131. +#endif
  132. +
  133. #ifdef CONFIG_X86_32
  134. .ss0 = __KERNEL_DS,
  135. .ss1 = __KERNEL_CS,
  136. diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
  137. index 01b119bebb68..157f81816915 100644
  138. --- a/arch/x86/kernel/process_64.c
  139. +++ b/arch/x86/kernel/process_64.c
  140. @@ -461,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
  141. * Switch the PDA and FPU contexts.
  142. */
  143. this_cpu_write(current_task, next_p);
  144. + this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
  145. /* Reload sp0. */
  146. update_sp0(next_p);
  147. --
  148. 2.14.2