0156-x86-entry-64-Move-the-IST-stacks-into-struct-cpu_ent.patch 7.6 KB

From 548bfd4d539c4e13eb86236f8f09596e3663c38b Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <[email protected]>
Date: Mon, 4 Dec 2017 15:07:26 +0100
Subject: [PATCH 156/242] x86/entry/64: Move the IST stacks into struct
 cpu_entry_area
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

The IST stacks are needed when an IST exception occurs and are accessed
before any kernel code at all runs. Move them into struct cpu_entry_area.

The IST stacks are unlike the rest of cpu_entry_area: they're used even for
entries from kernel mode. This means that they should be set up before we
load the final IDT. Move cpu_entry_area setup to trap_init() for the boot
CPU and set it up for all possible CPUs at once in native_smp_prepare_cpus().

Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Thomas Gleixner <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Laight <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Eduardo Valentin <[email protected]>
Cc: Greg KH <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(backported from commit 40e7f949e0d9a33968ebde5d67f7e3a47c97742a)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 88e7277709f2e7c023e66ff9ae158aeff4cf7c8f)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/fixmap.h | 12 +++++++
 arch/x86/kernel/cpu/common.c  | 74 ++++++++++++++++++++++++-------------------
 arch/x86/kernel/traps.c       |  3 ++
 3 files changed, 57 insertions(+), 32 deletions(-)
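
As an aside for readers following the diff below, here is a small standalone
sketch (not part of the patch) of the layout it moves around: the flat
exception_stacks[] array is carved into per-vector IST stacks, and the loop in
cpu_init() stores the *top* of each stack, since x86 stacks grow down. The
constant values are assumptions mirroring this era's asm/page_64_types.h
(4K exception stacks, an 8K debug stack, four stacks total), and DEBUG_STACK's
value is illustrative; the range-designator initializer is a GNU C extension,
so build it with GCC.

/*
 * Standalone sketch: how exception_stacks[] maps onto per-vector
 * IST stack tops.  Sizes are assumed, not taken from kernel headers.
 */
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define EXCEPTION_STKSZ		PAGE_SIZE	/* each IST stack: 4K */
#define DEBUG_STKSZ		(2 * PAGE_SIZE)	/* debug stack: 8K    */
#define N_EXCEPTION_STACKS	4
#define DEBUG_STACK		3		/* illustrative 1-based index */

static const unsigned long stack_sizes[N_EXCEPTION_STACKS] = {
	[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,	/* GNU range init */
	[DEBUG_STACK - 1] = DEBUG_STKSZ,
};

int main(void)
{
	unsigned long top = 0;	/* offset from the base of exception_stacks[] */
	int v;

	for (v = 0; v < N_EXCEPTION_STACKS; v++) {
		top += stack_sizes[v];	/* stacks grow down: record each top */
		printf("ist[%d] = base + 0x%lx\n", v, top);
	}
	/* total: (N-1)*4K + 8K = 20K, a whole number of pages */
	return 0;
}

Note also that the reworked set_percpu_fixmap_pages() in the common.c hunk
decrements the fixmap index while the pointer advances: fixmap indices grow
toward lower virtual addresses, so this keeps the mapped pages virtually
contiguous.
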
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 189d12d8afe0..953aed54cb5e 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -63,10 +63,22 @@ struct cpu_entry_area {
 	struct tss_struct tss;
 
 	char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Exception stacks used for IST entries.
+	 *
+	 * In the future, this should have a separate slot for each stack
+	 * with guard pages between them.
+	 */
+	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
 };
 
 #define CPU_ENTRY_AREA_PAGES (sizeof(struct cpu_entry_area) / PAGE_SIZE)
 
+extern void setup_cpu_entry_areas(void);
+
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c2b2ee73b8a1..f487766855d3 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -466,24 +466,36 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-static void set_percpu_fixmap_pages(int fixmap_index, void *ptr,
-				    int pages, pgprot_t prot)
-{
-	int i;
-
-	for (i = 0; i < pages; i++) {
-		__set_fixmap(fixmap_index - i,
-			     per_cpu_ptr_to_phys(ptr + i * PAGE_SIZE), prot);
-	}
-}
-
 #ifdef CONFIG_X86_32
 /* The 32-bit entry code needs to find cpu_entry_area. */
 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 #endif
 
+#ifdef CONFIG_X86_64
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+	[DEBUG_STACK - 1] = DEBUG_STKSZ
+};
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+static void __init
+set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
+{
+	for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
+		__set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
+}
+
 /* Setup the fixmap mappings only once per-processor */
-static inline void setup_cpu_entry_area(int cpu)
+static void __init setup_cpu_entry_area(int cpu)
 {
 #ifdef CONFIG_X86_64
 	extern char _entry_trampoline[];
@@ -532,15 +544,31 @@ static inline void setup_cpu_entry_area(int cpu)
 				PAGE_KERNEL);
 
 #ifdef CONFIG_X86_32
-	this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu));
+	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
 #endif
 
 #ifdef CONFIG_X86_64
+	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+	BUILD_BUG_ON(sizeof(exception_stacks) !=
+		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
+				&per_cpu(exception_stacks, cpu),
+				sizeof(exception_stacks) / PAGE_SIZE,
+				PAGE_KERNEL);
+
 	__set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
 		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
 #endif
 }
 
+void __init setup_cpu_entry_areas(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		setup_cpu_entry_area(cpu);
+}
+
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
 {
@@ -1386,20 +1414,6 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-	[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-	[DEBUG_STACK - 1] = DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -1608,7 +1622,7 @@ void cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!oist->ist[0]) {
-		char *estacks = per_cpu(exception_stacks, cpu);
+		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
 
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 			estacks += exception_stack_sizes[v];
@@ -1633,8 +1647,6 @@ void cpu_init(void)
 	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
 
-	setup_cpu_entry_area(cpu);
-
 	/*
 	 * Initialize the TSS. sp0 points to the entry trampoline stack
 	 * regardless of what task is running.
@@ -1693,8 +1705,6 @@ void cpu_init(void)
 	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
 
-	setup_cpu_entry_area(cpu);
-
 	/*
 	 * Initialize the TSS. Don't bother initializing sp0, as the initial
 	 * task never enters user mode.
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d9debdafe7a6..fd4d47e8672e 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -992,6 +992,9 @@ void __init trap_init(void)
 {
 	int i;
 
+	/* Init cpu_entry_area before IST entries are set up */
+	setup_cpu_entry_areas();
+
 #ifdef CONFIG_EISA
 	void __iomem *p = early_ioremap(0x0FFFD9, 4);
 
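
For context, the two BUILD_BUG_ON()s added to setup_cpu_entry_area() in the
common.c hunk guard the assumptions the new mapping code depends on. A
standalone illustration of the same invariants (again not part of the patch,
with assumed sizes rather than the kernel's headers), written as C11 static
asserts:

/*
 * Standalone illustration of the invariants behind the two
 * BUILD_BUG_ON()s in setup_cpu_entry_area().  Sizes are assumptions
 * for a 4K-page kernel of this era.
 */
#define PAGE_SIZE		4096UL
#define EXCEPTION_STKSZ		PAGE_SIZE
#define DEBUG_STKSZ		(2 * PAGE_SIZE)
#define N_EXCEPTION_STACKS	4
#define STACKS_SIZE \
	((N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ)

static char exception_stacks[STACKS_SIZE];	/* stands in for the per-cpu array */
struct cpu_entry_area { char exception_stacks[STACKS_SIZE]; };

/* set_percpu_fixmap_pages() maps whole pages, so the backing array must
 * be an exact multiple of PAGE_SIZE ... */
_Static_assert(STACKS_SIZE % PAGE_SIZE == 0,
	       "exception stacks must cover whole pages");

/* ... and it must be exactly as large as the slot it is mapped into, or
 * the fixmap indices derived from struct cpu_entry_area would not line
 * up with the mapped pages. */
_Static_assert(sizeof(exception_stacks) ==
	       sizeof(((struct cpu_entry_area *)0)->exception_stacks),
	       "backing store must match the cpu_entry_area slot");

int main(void) { return (int)exception_stacks[0]; }

If either condition fails, the build breaks at compile time instead of the
kernel silently mapping a partial page or a mismatched slot.
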
--
2.14.2