0127-x86-mm-kasan-Don-t-use-vmemmap_populate-to-initializ.patch

From 95ee3aee92e32b90ff10f47cb6cfc414e1fd92b2 Mon Sep 17 00:00:00 2001
From: Andrey Ryabinin <[email protected]>
Date: Wed, 15 Nov 2017 17:36:35 -0800
Subject: [PATCH 127/242] x86/mm/kasan: Don't use vmemmap_populate() to
 initialize shadow
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

[ Note, this is a Git cherry-pick of the following commit:

    d17a1d97dc20: ("x86/mm/kasan: don't use vmemmap_populate() to initialize shadow")

  ... for easier x86 PTI code testing and back-porting. ]

The KASAN shadow is currently mapped using vmemmap_populate() since that
provides a semi-convenient way to map pages into init_top_pgt. However,
since that no longer zeroes the mapped pages, it is not suitable for
KASAN, which requires zeroed shadow memory.

Add kasan_populate_shadow() interface and use it instead of
vmemmap_populate(). Besides, this allows us to take advantage of
gigantic pages and use them to populate the shadow, which should save us
some memory wasted on page tables and reduce TLB pressure.
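
[ Background note, not part of the upstream commit message: KASAN tracks
  every 8 bytes of kernel address space with one shadow byte, so the shadow
  range returned by kasan_mem_to_shadow() is one eighth the size of the
  memory it covers and must start out zeroed. The small standalone C sketch
  below illustrates that arithmetic only; the shadow offset and example
  addresses are the usual x86-64 4-level-paging values and are shown purely
  for illustration. ]

    /*
     * Illustrative sketch only -- not from this patch.  It shows the address
     * arithmetic behind the shadow region that kasan_populate_shadow() has to
     * map and zero: one shadow byte covers eight bytes of kernel memory.  The
     * offset and example addresses are the conventional x86-64 (4-level
     * paging) values, used here purely as an example.
     */
    #include <stdio.h>

    #define KASAN_SHADOW_SCALE_SHIFT 3                     /* 8 bytes per shadow byte */
    #define KASAN_SHADOW_OFFSET      0xdffffc0000000000UL  /* illustrative x86-64 offset */

    static unsigned long mem_to_shadow(unsigned long addr)
    {
        return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
    }

    int main(void)
    {
        unsigned long start = 0xffff880000000000UL;  /* example: base of the direct map */
        unsigned long end = start + (1UL << 30);     /* 1 GiB of kernel memory */

        /* 1 GiB of mapped memory needs 128 MiB of zero-initialized shadow. */
        printf("shadow: %#lx - %#lx (%lu MiB)\n",
               mem_to_shadow(start), mem_to_shadow(end),
               (mem_to_shadow(end) - mem_to_shadow(start)) >> 20);
        return 0;
    }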
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Andrey Ryabinin <[email protected]>
Signed-off-by: Pavel Tatashin <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Steven Sistare <[email protected]>
Cc: Daniel Jordan <[email protected]>
Cc: Bob Picco <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Christian Borntraeger <[email protected]>
Cc: David S. Miller <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Heiko Carstens <[email protected]>
Cc: "H. Peter Anvin" <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Mark Rutland <[email protected]>
Cc: Matthew Wilcox <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Sam Ravnborg <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 2aeb07365bcd489620f71390a7d2031cd4dfb83e)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit f60ab0015a57d9fbf659b212d504682f069b0590)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/mm/kasan_init_64.c | 143 +++++++++++++++++++++++++++++++++++++++++---
 arch/x86/Kconfig            |   2 +-
 2 files changed, 137 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 464089f33e80..3d7341986e13 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -3,12 +3,14 @@
 #include <linux/bootmem.h>
 #include <linux/kasan.h>
 #include <linux/kdebug.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/sched/task.h>
 #include <linux/vmalloc.h>
 
 #include <asm/e820/types.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 
@@ -17,7 +19,134 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES];
 
 static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
 
-static int __init map_range(struct range *range)
+static __init void *early_alloc(size_t size, int nid)
+{
+	return memblock_virt_alloc_try_nid_nopanic(size, size,
+		__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
+}
+
+static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pte_t *pte;
+
+	if (pmd_none(*pmd)) {
+		void *p;
+
+		if (boot_cpu_has(X86_FEATURE_PSE) &&
+		    ((end - addr) == PMD_SIZE) &&
+		    IS_ALIGNED(addr, PMD_SIZE)) {
+			p = early_alloc(PMD_SIZE, nid);
+			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
+				return;
+			else if (p)
+				memblock_free(__pa(p), PMD_SIZE);
+		}
+
+		p = early_alloc(PAGE_SIZE, nid);
+		pmd_populate_kernel(&init_mm, pmd, p);
+	}
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t entry;
+		void *p;
+
+		if (!pte_none(*pte))
+			continue;
+
+		p = early_alloc(PAGE_SIZE, nid);
+		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
+		set_pte_at(&init_mm, addr, pte, entry);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	if (pud_none(*pud)) {
+		void *p;
+
+		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
+		    ((end - addr) == PUD_SIZE) &&
+		    IS_ALIGNED(addr, PUD_SIZE)) {
+			p = early_alloc(PUD_SIZE, nid);
+			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
+				return;
+			else if (p)
+				memblock_free(__pa(p), PUD_SIZE);
+		}
+
+		p = early_alloc(PAGE_SIZE, nid);
+		pud_populate(&init_mm, pud, p);
+	}
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (!pmd_large(*pmd))
+			kasan_populate_pmd(pmd, addr, next, nid);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	if (p4d_none(*p4d)) {
+		void *p = early_alloc(PAGE_SIZE, nid);
+
+		p4d_populate(&init_mm, p4d, p);
+	}
+
+	pud = pud_offset(p4d, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (!pud_large(*pud))
+			kasan_populate_pud(pud, addr, next, nid);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
+				      unsigned long end, int nid)
+{
+	void *p;
+	p4d_t *p4d;
+	unsigned long next;
+
+	if (pgd_none(*pgd)) {
+		p = early_alloc(PAGE_SIZE, nid);
+		pgd_populate(&init_mm, pgd, p);
+	}
+
+	p4d = p4d_offset(pgd, addr);
+	do {
+		next = p4d_addr_end(addr, end);
+		kasan_populate_p4d(p4d, addr, next, nid);
+	} while (p4d++, addr = next, addr != end);
+}
+
+static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
+					 int nid)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	addr = addr & PAGE_MASK;
+	end = round_up(end, PAGE_SIZE);
+	pgd = pgd_offset_k(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		kasan_populate_pgd(pgd, addr, next, nid);
+	} while (pgd++, addr = next, addr != end);
+}
+
+static void __init map_range(struct range *range)
 {
 	unsigned long start;
 	unsigned long end;
@@ -25,7 +154,7 @@ static int __init map_range(struct range *range)
 	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
 	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));
 
-	return vmemmap_populate(start, end, NUMA_NO_NODE);
+	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
 }
 
 static void __init clear_pgds(unsigned long start,
@@ -188,16 +317,16 @@ void __init kasan_init(void)
 		if (pfn_mapped[i].end == 0)
 			break;
 
-		if (map_range(&pfn_mapped[i]))
-			panic("kasan: unable to allocate shadow!");
+		map_range(&pfn_mapped[i]);
 	}
+
 	kasan_populate_zero_shadow(
 		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
 		kasan_mem_to_shadow((void *)__START_KERNEL_map));
 
-	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
-			(unsigned long)kasan_mem_to_shadow(_end),
-			NUMA_NO_NODE);
+	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
+			      (unsigned long)kasan_mem_to_shadow(_end),
+			      early_pfn_to_nid(__pa(_stext)));
 
 	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
 			(void *)KASAN_SHADOW_END);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 67d07802ae95..8b5499bb24bb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -106,7 +106,7 @@ config X86
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_ARCH_HUGE_VMAP		if X86_64 || X86_PAE
 	select HAVE_ARCH_JUMP_LABEL
-	select HAVE_ARCH_KASAN			if X86_64 && SPARSEMEM_VMEMMAP
+	select HAVE_ARCH_KASAN			if X86_64
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_KMEMCHECK
 	select HAVE_ARCH_MMAP_RND_BITS		if MMU
-- 
2.14.2