From f6bb8e560b2229af5dcf3127fc92e732539b4823 Mon Sep 17 00:00:00 2001
From: Andrey Ryabinin <[email protected]>
Date: Fri, 29 Sep 2017 17:08:18 +0300
Subject: [PATCH 075/242] x86/kasan: Use the same shadow offset for 4- and
 5-level paging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

We are going to support boot-time switching between 4- and 5-level
paging. For KASAN this means we cannot have different KASAN_SHADOW_OFFSET
values for the two paging modes: the constant is passed to gcc to
generate code and cannot be changed at runtime.

This patch changes the KASAN code to use 0xdffffc0000000000 as the
shadow offset for both 4- and 5-level paging.

For 5-level paging this means that the shadow memory region is no
longer aligned to a PGD boundary, and we have to handle the unaligned
parts of the region properly.
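
To see why one constant can serve both modes, recall how KASAN maps an
address to its shadow byte:

	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET

The arithmetic can be checked in userspace. A minimal sketch follows
(illustration only, not part of the patch): mem_to_shadow() is a local
stand-in for the kernel's kasan_mem_to_shadow(), assuming the x86-64
scale shift of 3.

#include <stdio.h>

#define KASAN_SHADOW_OFFSET	0xdffffc0000000000UL

/* Stand-in for kasan_mem_to_shadow(), KASAN_SHADOW_SCALE_SHIFT == 3. */
static unsigned long mem_to_shadow(unsigned long addr)
{
	return (addr >> 3) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	/* Base of the kernel half of the address space per paging mode. */
	unsigned long start_4lvl = -1UL << 47;	/* 0xffff800000000000 */
	unsigned long start_5lvl = -1UL << 56;	/* 0xff00000000000000 */

	/* Prints 0xffffec0000000000: aligned to the 512G 4-level PGD size. */
	printf("4-level shadow start: 0x%lx\n", mem_to_shadow(start_4lvl));
	/* Prints 0xffdffc0000000000: NOT aligned to the 256T 5-level PGD size. */
	printf("5-level shadow start: 0x%lx\n", mem_to_shadow(start_5lvl));
	return 0;
}

With per-mode offsets each shadow start could be made PGD-aligned; with
the shared offset the 5-level start and end are not, which is what the
unaligned-parts handling below deals with.
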
In addition, we have to exclude paravirt code from KASAN instrumentation
as we now use set_pgd() before KASAN is fully ready.
  21. [[email protected]: clenaup, changelog message]
Signed-off-by: Andrey Ryabinin <[email protected]>
Signed-off-by: Kirill A. Shutemov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Cyrill Gorcunov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 12a8cc7fcf54a8575f094be1e99032ec38aa045c)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 2ce428150e002623aa0ed2a1ab840fde5f860f32)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 Documentation/x86/x86_64/mm.txt |   2 +-
 arch/x86/kernel/Makefile        |   3 +-
 arch/x86/mm/kasan_init_64.c     | 101 +++++++++++++++++++++++++++++++---------
 arch/x86/Kconfig                |   1 -
 4 files changed, 83 insertions(+), 24 deletions(-)

diff --git a/Documentation/x86/x86_64/mm.txt b/Documentation/x86/x86_64/mm.txt
index b0798e281aa6..3448e675b462 100644
--- a/Documentation/x86/x86_64/mm.txt
+++ b/Documentation/x86/x86_64/mm.txt
@@ -34,7 +34,7 @@ ff92000000000000 - ffd1ffffffffffff (=54 bits) vmalloc/ioremap space
 ffd2000000000000 - ffd3ffffffffffff (=49 bits) hole
 ffd4000000000000 - ffd5ffffffffffff (=49 bits) virtual memory map (512TB)
 ... unused hole ...
-ffd8000000000000 - fff7ffffffffffff (=53 bits) kasan shadow memory (8PB)
+ffdf000000000000 - fffffc0000000000 (=53 bits) kasan shadow memory (8PB)
 ... unused hole ...
 ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ... unused hole ...
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 5bf0d5a473b4..aa059806201d 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -24,7 +24,8 @@ endif
 KASAN_SANITIZE_head$(BITS).o				:= n
 KASAN_SANITIZE_dumpstack.o				:= n
 KASAN_SANITIZE_dumpstack_$(BITS).o			:= n
-KASAN_SANITIZE_stacktrace.o := n
+KASAN_SANITIZE_stacktrace.o				:= n
+KASAN_SANITIZE_paravirt.o				:= n
 
 OBJECT_FILES_NON_STANDARD_relocate_kernel_$(BITS).o	:= y
 OBJECT_FILES_NON_STANDARD_ftrace_$(BITS).o		:= y
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 02c9d7553409..464089f33e80 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -15,6 +15,8 @@
 extern pgd_t early_top_pgt[PTRS_PER_PGD];
 extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
+static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
+
 static int __init map_range(struct range *range)
 {
 	unsigned long start;
@@ -30,8 +32,10 @@ static void __init clear_pgds(unsigned long start,
 			unsigned long end)
 {
 	pgd_t *pgd;
+	/* See comment in kasan_init() */
+	unsigned long pgd_end = end & PGDIR_MASK;
 
-	for (; start < end; start += PGDIR_SIZE) {
+	for (; start < pgd_end; start += PGDIR_SIZE) {
 		pgd = pgd_offset_k(start);
 		/*
 		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
@@ -42,29 +46,61 @@ static void __init clear_pgds(unsigned long start,
 		else
 			pgd_clear(pgd);
 	}
+
+	pgd = pgd_offset_k(start);
+	for (; start < end; start += P4D_SIZE)
+		p4d_clear(p4d_offset(pgd, start));
+}
+
+static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
+{
+	unsigned long p4d;
+
+	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
+		return (p4d_t *)pgd;
+
+	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+	p4d += __START_KERNEL_map - phys_base;
+	return (p4d_t *)p4d + p4d_index(addr);
+}
+
+static void __init kasan_early_p4d_populate(pgd_t *pgd,
+		unsigned long addr,
+		unsigned long end)
+{
+	pgd_t pgd_entry;
+	p4d_t *p4d, p4d_entry;
+	unsigned long next;
+
+	if (pgd_none(*pgd)) {
+		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
+		set_pgd(pgd, pgd_entry);
+	}
+
+	p4d = early_p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+
+		if (!p4d_none(*p4d))
+			continue;
+
+		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
+		set_p4d(p4d, p4d_entry);
+	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
 }
 
 static void __init kasan_map_early_shadow(pgd_t *pgd)
 {
-	int i;
-	unsigned long start = KASAN_SHADOW_START;
+	/* See comment in kasan_init() */
+	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
 	unsigned long end = KASAN_SHADOW_END;
+	unsigned long next;
 
-	for (i = pgd_index(start); start < end; i++) {
-		switch (CONFIG_PGTABLE_LEVELS) {
-		case 4:
-			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
-					_KERNPG_TABLE);
-			break;
-		case 5:
-			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
-					_KERNPG_TABLE);
-			break;
-		default:
-			BUILD_BUG();
-		}
-		start += PGDIR_SIZE;
-	}
+	pgd += pgd_index(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		kasan_early_p4d_populate(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
 }
 
 #ifdef CONFIG_KASAN_INLINE
@@ -101,7 +137,7 @@ void __init kasan_early_init(void)
 	for (i = 0; i < PTRS_PER_PUD; i++)
 		kasan_zero_pud[i] = __pud(pud_val);
 
-	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
+	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
 		kasan_zero_p4d[i] = __p4d(p4d_val);
 
 	kasan_map_early_shadow(early_top_pgt);
@@ -117,12 +153,35 @@ void __init kasan_init(void)
 #endif
 
 	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
+
+	/*
+	 * We use the same shadow offset for 4- and 5-level paging to
+	 * facilitate boot-time switching between paging modes.
+	 * As result in 5-level paging mode KASAN_SHADOW_START and
+	 * KASAN_SHADOW_END are not aligned to PGD boundary.
+	 *
+	 * KASAN_SHADOW_START doesn't share PGD with anything else.
+	 * We claim whole PGD entry to make things easier.
+	 *
+	 * KASAN_SHADOW_END lands in the last PGD entry and it collides with
+	 * bunch of things like kernel code, modules, EFI mapping, etc.
+	 * We need to take extra steps to not overwrite them.
+	 */
+	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
+		void *ptr;
+
+		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
+		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
+				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
+	}
+
 	load_cr3(early_top_pgt);
 	__flush_tlb_all();
 
-	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);
 
-	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
+	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
 			kasan_mem_to_shadow((void *)PAGE_OFFSET));
 
 	for (i = 0; i < E820_MAX_ENTRIES; i++) {
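
The comment in kasan_init() above notes that KASAN_SHADOW_END collides
with the kernel mappings in the last PGD entry; that is why the live
p4d page is copied into tmp_p4d_table rather than clobbered. A quick
userspace check (illustration only, not part of the patch; the
constants mirror the kernel's PGDIR_SHIFT and PTRS_PER_PGD under
X86_5LEVEL):

#include <stdio.h>

#define PGDIR_SHIFT	48	/* 5-level paging */
#define PTRS_PER_PGD	512

/* Stand-in for the kernel's pgd_index() with the constants above. */
static unsigned long pgd_index(unsigned long addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

int main(void)
{
	unsigned long shadow_end = 0xfffffc0000000000UL;	/* KASAN_SHADOW_END */
	unsigned long kernel_map = 0xffffffff80000000UL;	/* __START_KERNEL_map */

	/*
	 * Both print 511: the shadow region's end shares the last PGD
	 * slot with the kernel image, modules and the EFI mapping, so
	 * kasan_init() must preserve that entry's p4d contents.
	 */
	printf("pgd_index(KASAN_SHADOW_END)   = %lu\n", pgd_index(shadow_end));
	printf("pgd_index(__START_KERNEL_map) = %lu\n", pgd_index(kernel_map));
	return 0;
}
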
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index bf9f03740c30..67d07802ae95 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -300,7 +300,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 config KASAN_SHADOW_OFFSET
 	hex
 	depends on KASAN
-	default 0xdff8000000000000 if X86_5LEVEL
 	default 0xdffffc0000000000
 
 config HAVE_INTEL_TXT
-- 
2.14.2