0184-x86-cpu_entry_area-Move-it-out-of-the-fixmap.patch 21 KB

  1. From 1463b91d69f9e8ce61d264c4d108251192a9afbf Mon Sep 17 00:00:00 2001
  2. From: Thomas Gleixner <[email protected]>
  3. Date: Wed, 20 Dec 2017 18:51:31 +0100
  4. Subject: [PATCH 184/241] x86/cpu_entry_area: Move it out of the fixmap
  5. MIME-Version: 1.0
  6. Content-Type: text/plain; charset=UTF-8
  7. Content-Transfer-Encoding: 8bit
  8. CVE-2017-5754
  9. Put the cpu_entry_area into a separate P4D entry. The fixmap gets too big
  10. and 0-day already hit a case where the fixmap PTEs were cleared by
  11. cleanup_highmap().
  12. Aside of that the fixmap API is a pain as it's all backwards.
  13. Signed-off-by: Thomas Gleixner <[email protected]>
  14. Cc: Andy Lutomirski <[email protected]>
  15. Cc: Borislav Petkov <[email protected]>
  16. Cc: Dave Hansen <[email protected]>
  17. Cc: H. Peter Anvin <[email protected]>
  18. Cc: Josh Poimboeuf <[email protected]>
  19. Cc: Juergen Gross <[email protected]>
  20. Cc: Linus Torvalds <[email protected]>
  21. Cc: Peter Zijlstra <[email protected]>
  22. Cc: [email protected]
  23. Signed-off-by: Ingo Molnar <[email protected]>
  24. (backported from commit 92a0f81d89571e3e8759366e050ee05cc545ef99)
  25. Signed-off-by: Andy Whitcroft <[email protected]>
  26. Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
  27. (cherry picked from commit bda9eb328d9ce3757f22794f79da73dd5886c93a)
  28. Signed-off-by: Fabian Grünbichler <[email protected]>
  29. ---
  30. Documentation/x86/x86_64/mm.txt | 2 +
  31. arch/x86/include/asm/cpu_entry_area.h | 18 ++++++++-
  32. arch/x86/include/asm/desc.h | 2 +
  33. arch/x86/include/asm/fixmap.h | 32 +---------------
  34. arch/x86/include/asm/pgtable_32_types.h | 15 ++++++--
  35. arch/x86/include/asm/pgtable_64_types.h | 47 +++++++++++++----------
  36. arch/x86/kernel/dumpstack.c | 1 +
  37. arch/x86/kernel/traps.c | 5 ++-
  38. arch/x86/mm/cpu_entry_area.c | 66 +++++++++++++++++++++++++--------
  39. arch/x86/mm/dump_pagetables.c | 6 ++-
  40. arch/x86/mm/init_32.c | 6 +++
  41. arch/x86/mm/kasan_init_64.c | 30 ++++++++-------
  42. arch/x86/mm/pgtable_32.c | 1 +
  43. arch/x86/xen/mmu_pv.c | 2 -
  44. 14 files changed, 145 insertions(+), 88 deletions(-)
  45. diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
  46. index 63a41671d25b..51101708a03a 100644
  47. --- a/Documentation/x86/x86_64/mm.txt
  48. +++ b/Documentation/x86/x86_64/mm.txt
  49. @@ -12,6 +12,7 @@ ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
  50. ... unused hole ...
  51. ffffec0000000000 - fffffbffffffffff (=44 bits) kasan shadow memory (16TB)
  52. ... unused hole ...
  53. +fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
  54. ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
  55. ... unused hole ...
  56. ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
  57. @@ -35,6 +36,7 @@ ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
  58. ... unused hole ...
  59. ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
  60. ... unused hole ...
  61. +fffffe8000000000 - fffffeffffffffff (=39 bits) cpu_entry_area mapping
  62. ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
  63. ... unused hole ...
  64. ffffffef00000000 - fffffffeffffffff (=64 GB) EFI region mapping space
  65. diff --git a/arch/x86/include/asm/cpu_entry_area.h b/arch/x86/include/asm/cpu_entry_area.h
  66. index 5471826803af..2fbc69a0916e 100644
  67. --- a/arch/x86/include/asm/cpu_entry_area.h
  68. +++ b/arch/x86/include/asm/cpu_entry_area.h
  69. @@ -43,10 +43,26 @@ struct cpu_entry_area {
  70. };
  71. #define CPU_ENTRY_AREA_SIZE (sizeof(struct cpu_entry_area))
  72. -#define CPU_ENTRY_AREA_PAGES (CPU_ENTRY_AREA_SIZE / PAGE_SIZE)
  73. +#define CPU_ENTRY_AREA_TOT_SIZE (CPU_ENTRY_AREA_SIZE * NR_CPUS)
  74. DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
  75. extern void setup_cpu_entry_areas(void);
  76. +extern void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags);
  77. +
  78. +#define CPU_ENTRY_AREA_RO_IDT CPU_ENTRY_AREA_BASE
  79. +#define CPU_ENTRY_AREA_PER_CPU (CPU_ENTRY_AREA_RO_IDT + PAGE_SIZE)
  80. +
  81. +#define CPU_ENTRY_AREA_RO_IDT_VADDR ((void *)CPU_ENTRY_AREA_RO_IDT)
  82. +
  83. +#define CPU_ENTRY_AREA_MAP_SIZE \
  84. + (CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_TOT_SIZE - CPU_ENTRY_AREA_BASE)
  85. +
  86. +extern struct cpu_entry_area *get_cpu_entry_area(int cpu);
  87. +
  88. +static inline struct entry_stack *cpu_entry_stack(int cpu)
  89. +{
  90. + return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
  91. +}
  92. #endif
  93. diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
  94. index b817fe247506..de40c514ba25 100644
  95. --- a/arch/x86/include/asm/desc.h
  96. +++ b/arch/x86/include/asm/desc.h
  97. @@ -5,6 +5,8 @@
  98. #include <asm/ldt.h>
  99. #include <asm/mmu.h>
  100. #include <asm/fixmap.h>
  101. +#include <asm/pgtable.h>
  102. +#include <asm/cpu_entry_area.h>
  103. #include <linux/smp.h>
  104. #include <linux/percpu.h>
  105. diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
  106. index 1b2521473480..a6ff9e1a6189 100644
  107. --- a/arch/x86/include/asm/fixmap.h
  108. +++ b/arch/x86/include/asm/fixmap.h
  109. @@ -25,7 +25,6 @@
  110. #else
  111. #include <uapi/asm/vsyscall.h>
  112. #endif
  113. -#include <asm/cpu_entry_area.h>
  114. /*
  115. * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
  116. @@ -84,7 +83,6 @@ enum fixed_addresses {
  117. FIX_IO_APIC_BASE_0,
  118. FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
  119. #endif
  120. - FIX_RO_IDT, /* Virtual mapping for read-only IDT */
  121. #ifdef CONFIG_X86_32
  122. FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
  123. FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
  124. @@ -100,9 +98,6 @@ enum fixed_addresses {
  125. #ifdef CONFIG_X86_INTEL_MID
  126. FIX_LNW_VRTC,
  127. #endif
  128. - /* Fixmap entries to remap the GDTs, one per processor. */
  129. - FIX_CPU_ENTRY_AREA_TOP,
  130. - FIX_CPU_ENTRY_AREA_BOTTOM = FIX_CPU_ENTRY_AREA_TOP + (CPU_ENTRY_AREA_PAGES * NR_CPUS) - 1,
  131. #ifdef CONFIG_ACPI_APEI_GHES
  132. /* Used for GHES mapping from assorted contexts */
  133. @@ -143,7 +138,7 @@ enum fixed_addresses {
  134. extern void reserve_top_address(unsigned long reserve);
  135. #define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
  136. -#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
  137. +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
  138. extern int fixmaps_set;
  139. @@ -171,30 +166,5 @@ static inline void __set_fixmap(enum fixed_addresses idx,
  140. void __early_set_fixmap(enum fixed_addresses idx,
  141. phys_addr_t phys, pgprot_t flags);
  142. -static inline unsigned int __get_cpu_entry_area_page_index(int cpu, int page)
  143. -{
  144. - BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
  145. -
  146. - return FIX_CPU_ENTRY_AREA_BOTTOM - cpu*CPU_ENTRY_AREA_PAGES - page;
  147. -}
  148. -
  149. -#define __get_cpu_entry_area_offset_index(cpu, offset) ({ \
  150. - BUILD_BUG_ON(offset % PAGE_SIZE != 0); \
  151. - __get_cpu_entry_area_page_index(cpu, offset / PAGE_SIZE); \
  152. - })
  153. -
  154. -#define get_cpu_entry_area_index(cpu, field) \
  155. - __get_cpu_entry_area_offset_index((cpu), offsetof(struct cpu_entry_area, field))
  156. -
  157. -static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
  158. -{
  159. - return (struct cpu_entry_area *)__fix_to_virt(__get_cpu_entry_area_page_index(cpu, 0));
  160. -}
  161. -
  162. -static inline struct entry_stack *cpu_entry_stack(int cpu)
  163. -{
  164. - return &get_cpu_entry_area(cpu)->entry_stack_page.stack;
  165. -}
  166. -
  167. #endif /* !__ASSEMBLY__ */
  168. #endif /* _ASM_X86_FIXMAP_H */
  169. diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
  170. index 9fb2f2bc8245..67b60e11b70d 100644
  171. --- a/arch/x86/include/asm/pgtable_32_types.h
  172. +++ b/arch/x86/include/asm/pgtable_32_types.h
  173. @@ -37,13 +37,22 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
  174. #define LAST_PKMAP 1024
  175. #endif
  176. -#define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE * (LAST_PKMAP + 1)) \
  177. - & PMD_MASK)
  178. +/*
  179. + * Define this here and validate with BUILD_BUG_ON() in pgtable_32.c
  180. + * to avoid include recursion hell
  181. + */
  182. +#define CPU_ENTRY_AREA_PAGES (NR_CPUS * 40)
  183. +
  184. +#define CPU_ENTRY_AREA_BASE \
  185. + ((FIXADDR_START - PAGE_SIZE * (CPU_ENTRY_AREA_PAGES + 1)) & PMD_MASK)
  186. +
  187. +#define PKMAP_BASE \
  188. + ((CPU_ENTRY_AREA_BASE - PAGE_SIZE) & PMD_MASK)
  189. #ifdef CONFIG_HIGHMEM
  190. # define VMALLOC_END (PKMAP_BASE - 2 * PAGE_SIZE)
  191. #else
  192. -# define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
  193. +# define VMALLOC_END (CPU_ENTRY_AREA_BASE - 2 * PAGE_SIZE)
  194. #endif
  195. #define MODULES_VADDR VMALLOC_START
  196. diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
  197. index 06470da156ba..42e2750da525 100644
  198. --- a/arch/x86/include/asm/pgtable_64_types.h
  199. +++ b/arch/x86/include/asm/pgtable_64_types.h
  200. @@ -75,32 +75,41 @@ typedef struct { pteval_t pte; } pte_t;
  201. #define PGDIR_MASK (~(PGDIR_SIZE - 1))
  202. /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
  203. -#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
  204. +#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL)
  205. +
  206. #ifdef CONFIG_X86_5LEVEL
  207. -#define VMALLOC_SIZE_TB _AC(16384, UL)
  208. -#define __VMALLOC_BASE _AC(0xff92000000000000, UL)
  209. -#define __VMEMMAP_BASE _AC(0xffd4000000000000, UL)
  210. +# define VMALLOC_SIZE_TB _AC(16384, UL)
  211. +# define __VMALLOC_BASE _AC(0xff92000000000000, UL)
  212. +# define __VMEMMAP_BASE _AC(0xffd4000000000000, UL)
  213. #else
  214. -#define VMALLOC_SIZE_TB _AC(32, UL)
  215. -#define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
  216. -#define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
  217. +# define VMALLOC_SIZE_TB _AC(32, UL)
  218. +# define __VMALLOC_BASE _AC(0xffffc90000000000, UL)
  219. +# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL)
  220. #endif
  221. +
  222. #ifdef CONFIG_RANDOMIZE_MEMORY
  223. -#define VMALLOC_START vmalloc_base
  224. -#define VMEMMAP_START vmemmap_base
  225. +# define VMALLOC_START vmalloc_base
  226. +# define VMEMMAP_START vmemmap_base
  227. #else
  228. -#define VMALLOC_START __VMALLOC_BASE
  229. -#define VMEMMAP_START __VMEMMAP_BASE
  230. +# define VMALLOC_START __VMALLOC_BASE
  231. +# define VMEMMAP_START __VMEMMAP_BASE
  232. #endif /* CONFIG_RANDOMIZE_MEMORY */
  233. -#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
  234. -#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
  235. +
  236. +#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL))
  237. +
  238. +#define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE)
  239. /* The module sections ends with the start of the fixmap */
  240. -#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1)
  241. -#define MODULES_LEN (MODULES_END - MODULES_VADDR)
  242. -#define ESPFIX_PGD_ENTRY _AC(-2, UL)
  243. -#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
  244. -#define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
  245. -#define EFI_VA_END (-68 * (_AC(1, UL) << 30))
  246. +#define MODULES_END __fix_to_virt(__end_of_fixed_addresses + 1)
  247. +#define MODULES_LEN (MODULES_END - MODULES_VADDR)
  248. +
  249. +#define ESPFIX_PGD_ENTRY _AC(-2, UL)
  250. +#define ESPFIX_BASE_ADDR (ESPFIX_PGD_ENTRY << P4D_SHIFT)
  251. +
  252. +#define CPU_ENTRY_AREA_PGD _AC(-3, UL)
  253. +#define CPU_ENTRY_AREA_BASE (CPU_ENTRY_AREA_PGD << P4D_SHIFT)
  254. +
  255. +#define EFI_VA_START ( -4 * (_AC(1, UL) << 30))
  256. +#define EFI_VA_END (-68 * (_AC(1, UL) << 30))
  257. #define EARLY_DYNAMIC_PAGE_TABLES 64
  258. diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
  259. index 55bf1c3b5319..2bdeb983b9d8 100644
  260. --- a/arch/x86/kernel/dumpstack.c
  261. +++ b/arch/x86/kernel/dumpstack.c
  262. @@ -18,6 +18,7 @@
  263. #include <linux/nmi.h>
  264. #include <linux/sysfs.h>
  265. +#include <asm/cpu_entry_area.h>
  266. #include <asm/stacktrace.h>
  267. #include <asm/unwind.h>
  268. diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
  269. index ef2d1b8a0516..5808ccb59266 100644
  270. --- a/arch/x86/kernel/traps.c
  271. +++ b/arch/x86/kernel/traps.c
  272. @@ -1041,8 +1041,9 @@ void __init trap_init(void)
  273. * "sidt" instruction will not leak the location of the kernel, and
  274. * to defend the IDT against arbitrary memory write vulnerabilities.
  275. * It will be reloaded in cpu_init() */
  276. - __set_fixmap(FIX_RO_IDT, __pa_symbol(idt_table), PAGE_KERNEL_RO);
  277. - idt_descr.address = fix_to_virt(FIX_RO_IDT);
  278. + cea_set_pte(CPU_ENTRY_AREA_RO_IDT_VADDR, __pa_symbol(idt_table),
  279. + PAGE_KERNEL_RO);
  280. + idt_descr.address = CPU_ENTRY_AREA_RO_IDT;
  281. /*
  282. * Should be a barrier for any external CPU state:
  283. diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
  284. index 235ff9cfaaf4..21e8b595cbb1 100644
  285. --- a/arch/x86/mm/cpu_entry_area.c
  286. +++ b/arch/x86/mm/cpu_entry_area.c
  287. @@ -15,11 +15,27 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
  288. [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
  289. #endif
  290. +struct cpu_entry_area *get_cpu_entry_area(int cpu)
  291. +{
  292. + unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
  293. + BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
  294. +
  295. + return (struct cpu_entry_area *) va;
  296. +}
  297. +EXPORT_SYMBOL(get_cpu_entry_area);
  298. +
  299. +void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
  300. +{
  301. + unsigned long va = (unsigned long) cea_vaddr;
  302. +
  303. + set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
  304. +}
  305. +
  306. static void __init
  307. -set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
  308. +cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
  309. {
  310. - for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
  311. - __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
  312. + for ( ; pages; pages--, cea_vaddr+= PAGE_SIZE, ptr += PAGE_SIZE)
  313. + cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
  314. }
  315. /* Setup the fixmap mappings only once per-processor */
  316. @@ -47,10 +63,12 @@ static void __init setup_cpu_entry_area(int cpu)
  317. pgprot_t tss_prot = PAGE_KERNEL;
  318. #endif
  319. - __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
  320. - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page),
  321. - per_cpu_ptr(&entry_stack_storage, cpu), 1,
  322. - PAGE_KERNEL);
  323. + cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
  324. + gdt_prot);
  325. +
  326. + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
  327. + per_cpu_ptr(&entry_stack_storage, cpu), 1,
  328. + PAGE_KERNEL);
  329. /*
  330. * The Intel SDM says (Volume 3, 7.2.1):
  331. @@ -72,10 +90,9 @@ static void __init setup_cpu_entry_area(int cpu)
  332. BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
  333. offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
  334. BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
  335. - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
  336. - &per_cpu(cpu_tss_rw, cpu),
  337. - sizeof(struct tss_struct) / PAGE_SIZE,
  338. - tss_prot);
  339. + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
  340. + &per_cpu(cpu_tss_rw, cpu),
  341. + sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
  342. #ifdef CONFIG_X86_32
  343. per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
  344. @@ -85,20 +102,37 @@ static void __init setup_cpu_entry_area(int cpu)
  345. BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
  346. BUILD_BUG_ON(sizeof(exception_stacks) !=
  347. sizeof(((struct cpu_entry_area *)0)->exception_stacks));
  348. - set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
  349. - &per_cpu(exception_stacks, cpu),
  350. - sizeof(exception_stacks) / PAGE_SIZE,
  351. - PAGE_KERNEL);
  352. + cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
  353. + &per_cpu(exception_stacks, cpu),
  354. + sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
  355. - __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
  356. + cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
  357. __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
  358. #endif
  359. }
  360. +static __init void setup_cpu_entry_area_ptes(void)
  361. +{
  362. +#ifdef CONFIG_X86_32
  363. + unsigned long start, end;
  364. +
  365. + BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
  366. + BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
  367. +
  368. + start = CPU_ENTRY_AREA_BASE;
  369. + end = start + CPU_ENTRY_AREA_MAP_SIZE;
  370. +
  371. + for (; start < end; start += PMD_SIZE)
  372. + populate_extra_pte(start);
  373. +#endif
  374. +}
  375. +
  376. void __init setup_cpu_entry_areas(void)
  377. {
  378. unsigned int cpu;
  379. + setup_cpu_entry_area_ptes();
  380. +
  381. for_each_possible_cpu(cpu)
  382. setup_cpu_entry_area(cpu);
  383. }
  384. diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
  385. index 318a7c30e87e..3b7720404a9f 100644
  386. --- a/arch/x86/mm/dump_pagetables.c
  387. +++ b/arch/x86/mm/dump_pagetables.c
  388. @@ -58,6 +58,7 @@ enum address_markers_idx {
  389. KASAN_SHADOW_START_NR,
  390. KASAN_SHADOW_END_NR,
  391. #endif
  392. + CPU_ENTRY_AREA_NR,
  393. #ifdef CONFIG_X86_ESPFIX64
  394. ESPFIX_START_NR,
  395. #endif
  396. @@ -81,6 +82,7 @@ static struct addr_marker address_markers[] = {
  397. [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" },
  398. [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" },
  399. #endif
  400. + [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" },
  401. #ifdef CONFIG_X86_ESPFIX64
  402. [ESPFIX_START_NR] = { ESPFIX_BASE_ADDR, "ESPfix Area", 16 },
  403. #endif
  404. @@ -104,6 +106,7 @@ enum address_markers_idx {
  405. #ifdef CONFIG_HIGHMEM
  406. PKMAP_BASE_NR,
  407. #endif
  408. + CPU_ENTRY_AREA_NR,
  409. FIXADDR_START_NR,
  410. END_OF_SPACE_NR,
  411. };
  412. @@ -116,6 +119,7 @@ static struct addr_marker address_markers[] = {
  413. #ifdef CONFIG_HIGHMEM
  414. [PKMAP_BASE_NR] = { 0UL, "Persistent kmap() Area" },
  415. #endif
  416. + [CPU_ENTRY_AREA_NR] = { 0UL, "CPU entry area" },
  417. [FIXADDR_START_NR] = { 0UL, "Fixmap area" },
  418. [END_OF_SPACE_NR] = { -1, NULL }
  419. };
  420. @@ -522,8 +526,8 @@ static int __init pt_dump_init(void)
  421. address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
  422. # endif
  423. address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
  424. + address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
  425. #endif
  426. -
  427. return 0;
  428. }
  429. __initcall(pt_dump_init);
  430. diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
  431. index 8a64a6f2848d..135c9a7898c7 100644
  432. --- a/arch/x86/mm/init_32.c
  433. +++ b/arch/x86/mm/init_32.c
  434. @@ -50,6 +50,7 @@
  435. #include <asm/setup.h>
  436. #include <asm/set_memory.h>
  437. #include <asm/page_types.h>
  438. +#include <asm/cpu_entry_area.h>
  439. #include <asm/init.h>
  440. #include "mm_internal.h"
  441. @@ -766,6 +767,7 @@ void __init mem_init(void)
  442. mem_init_print_info(NULL);
  443. printk(KERN_INFO "virtual kernel memory layout:\n"
  444. " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
  445. + " cpu_entry : 0x%08lx - 0x%08lx (%4ld kB)\n"
  446. #ifdef CONFIG_HIGHMEM
  447. " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
  448. #endif
  449. @@ -777,6 +779,10 @@ void __init mem_init(void)
  450. FIXADDR_START, FIXADDR_TOP,
  451. (FIXADDR_TOP - FIXADDR_START) >> 10,
  452. + CPU_ENTRY_AREA_BASE,
  453. + CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE,
  454. + CPU_ENTRY_AREA_MAP_SIZE >> 10,
  455. +
  456. #ifdef CONFIG_HIGHMEM
  457. PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
  458. (LAST_PKMAP*PAGE_SIZE) >> 10,
  459. diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
  460. index d8836e45bc07..4cd556a30ee1 100644
  461. --- a/arch/x86/mm/kasan_init_64.c
  462. +++ b/arch/x86/mm/kasan_init_64.c
  463. @@ -13,6 +13,8 @@
  464. #include <asm/pgalloc.h>
  465. #include <asm/tlbflush.h>
  466. #include <asm/sections.h>
  467. +#include <asm/pgtable.h>
  468. +#include <asm/cpu_entry_area.h>
  469. extern pgd_t early_top_pgt[PTRS_PER_PGD];
  470. extern struct range pfn_mapped[E820_MAX_ENTRIES];
  471. @@ -321,31 +323,33 @@ void __init kasan_init(void)
  472. map_range(&pfn_mapped[i]);
  473. }
  474. - kasan_populate_zero_shadow(
  475. - kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
  476. - kasan_mem_to_shadow((void *)__START_KERNEL_map));
  477. -
  478. - kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
  479. - (unsigned long)kasan_mem_to_shadow(_end),
  480. - early_pfn_to_nid(__pa(_stext)));
  481. -
  482. - shadow_cpu_entry_begin = (void *)__fix_to_virt(FIX_CPU_ENTRY_AREA_BOTTOM);
  483. + shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
  484. shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
  485. shadow_cpu_entry_begin = (void *)round_down((unsigned long)shadow_cpu_entry_begin,
  486. PAGE_SIZE);
  487. - shadow_cpu_entry_end = (void *)(__fix_to_virt(FIX_CPU_ENTRY_AREA_TOP) + PAGE_SIZE);
  488. + shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
  489. + CPU_ENTRY_AREA_MAP_SIZE);
  490. shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
  491. shadow_cpu_entry_end = (void *)round_up((unsigned long)shadow_cpu_entry_end,
  492. PAGE_SIZE);
  493. - kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
  494. - shadow_cpu_entry_begin);
  495. + kasan_populate_zero_shadow(
  496. + kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
  497. + shadow_cpu_entry_begin);
  498. kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
  499. (unsigned long)shadow_cpu_entry_end, 0);
  500. - kasan_populate_zero_shadow(shadow_cpu_entry_end, (void *)KASAN_SHADOW_END);
  501. + kasan_populate_zero_shadow(shadow_cpu_entry_end,
  502. + kasan_mem_to_shadow((void *)__START_KERNEL_map));
  503. +
  504. + kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
  505. + (unsigned long)kasan_mem_to_shadow(_end),
  506. + early_pfn_to_nid(__pa(_stext)));
  507. +
  508. + kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
  509. + (void *)KASAN_SHADOW_END);
  510. load_cr3(init_top_pgt);
  511. __flush_tlb_all();
  512. diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
  513. index b9bd5b8b14fa..77909bae5943 100644
  514. --- a/arch/x86/mm/pgtable_32.c
  515. +++ b/arch/x86/mm/pgtable_32.c
  516. @@ -9,6 +9,7 @@
  517. #include <linux/pagemap.h>
  518. #include <linux/spinlock.h>
  519. +#include <asm/cpu_entry_area.h>
  520. #include <asm/pgtable.h>
  521. #include <asm/pgalloc.h>
  522. #include <asm/fixmap.h>
  523. diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
  524. index 53e65f605bdd..cd4b91b8d614 100644
  525. --- a/arch/x86/xen/mmu_pv.c
  526. +++ b/arch/x86/xen/mmu_pv.c
  527. @@ -2286,7 +2286,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
  528. switch (idx) {
  529. case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
  530. - case FIX_RO_IDT:
  531. #ifdef CONFIG_X86_32
  532. case FIX_WP_TEST:
  533. # ifdef CONFIG_HIGHMEM
  534. @@ -2297,7 +2296,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
  535. #endif
  536. case FIX_TEXT_POKE0:
  537. case FIX_TEXT_POKE1:
  538. - case FIX_CPU_ENTRY_AREA_TOP ... FIX_CPU_ENTRY_AREA_BOTTOM:
  539. /* All local page mappings */
  540. pte = pfn_pte(phys, prot);
  541. break;
  542. --
  543. 2.14.2