0080-x86-xen-Drop-5-level-paging-support-code-from-the-XE.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov" <[email protected]>
Date: Fri, 29 Sep 2017 17:08:20 +0300
Subject: [PATCH] x86/xen: Drop 5-level paging support code from the XEN_PV
 code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

It was decided 5-level paging is not going to be supported in XEN_PV.

Let's drop the dead code from the XEN_PV code.
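
For background on why this code is dead: with XEN_PV limited to 4-level
paging, the p4d level is folded, i.e. PTRS_PER_P4D == 1, so the p4d
level contributes no index bits and walking it degenerates to looking
at a single entry. A minimal userspace sketch of the resulting 4-level
address split (illustration only, not kernel code; the shift constants
assume x86-64 with 4 KiB pages):

  #include <stdio.h>
  #include <stdint.h>

  #define PAGE_SHIFT 12                /* 4 KiB pages */
  #define PMD_SHIFT  (PAGE_SHIFT + 9)  /* 9 index bits per level */
  #define PUD_SHIFT  (PAGE_SHIFT + 18)
  #define PGD_SHIFT  (PAGE_SHIFT + 27)
  #define IDX_MASK   0x1ffULL          /* 9-bit table index */

  int main(void)
  {
          uint64_t va = 0xffff888012345678ULL;  /* arbitrary example */

          printf("pgd idx: %llu\n", (unsigned long long)((va >> PGD_SHIFT) & IDX_MASK));
          /* a p4d index would sit here; it is always 0 with 4-level paging */
          printf("pud idx: %llu\n", (unsigned long long)((va >> PUD_SHIFT) & IDX_MASK));
          printf("pmd idx: %llu\n", (unsigned long long)((va >> PMD_SHIFT) & IDX_MASK));
          printf("pte idx: %llu\n", (unsigned long long)((va >> PAGE_SHIFT) & IDX_MASK));
          printf("offset:  0x%llx\n", (unsigned long long)(va & ((1ULL << PAGE_SHIFT) - 1)));
          return 0;
  }

This folding is what lets xen_p4d_walk() below test *p4d directly
instead of looping over PTRS_PER_P4D entries.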

Tested-by: Juergen Gross <[email protected]>
Signed-off-by: Kirill A. Shutemov <[email protected]>
Reviewed-by: Juergen Gross <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Andy Lutomirski <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Cyrill Gorcunov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 773dd2fca581b0a80e5a33332cc8ee67e5a79cba)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 3fd0b7ef0094fd8bb3c8172d9b137ebe0d81ecbc)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/xen/mmu_pv.c | 159 +++++++++++++++++++-------------------------------
 1 file changed, 60 insertions(+), 99 deletions(-)

diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
index ba76f3ce997f..45bb2d462e44 100644
--- a/arch/x86/xen/mmu_pv.c
+++ b/arch/x86/xen/mmu_pv.c
@@ -469,7 +469,7 @@ __visible pmd_t xen_make_pmd(pmdval_t pmd)
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
-#if CONFIG_PGTABLE_LEVELS == 4
+#ifdef CONFIG_X86_64
 __visible pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
@@ -558,7 +558,7 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val)
 
 	xen_mc_issue(PARAVIRT_LAZY_MMU);
 }
-#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
+#endif	/* CONFIG_X86_64 */
 
 static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
@@ -600,21 +600,17 @@ static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
 		int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
 		bool last, unsigned long limit)
 {
-	int i, nr, flush = 0;
+	int flush = 0;
+	pud_t *pud;
 
-	nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
-	for (i = 0; i < nr; i++) {
-		pud_t *pud;
 
-		if (p4d_none(p4d[i]))
-			continue;
+	if (p4d_none(*p4d))
+		return flush;
 
-		pud = pud_offset(&p4d[i], 0);
-		if (PTRS_PER_PUD > 1)
-			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
-		flush |= xen_pud_walk(mm, pud, func,
-				last && i == nr - 1, limit);
-	}
+	pud = pud_offset(p4d, 0);
+	if (PTRS_PER_PUD > 1)
+		flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
+	flush |= xen_pud_walk(mm, pud, func, last, limit);
 	return flush;
 }
 
@@ -664,8 +660,6 @@ static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
 			continue;
 
 		p4d = p4d_offset(&pgd[i], 0);
-		if (PTRS_PER_P4D > 1)
-			flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
 		flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
 	}
 
@@ -1196,22 +1190,14 @@ static void __init xen_cleanmfnmap(unsigned long vaddr)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
-	unsigned int i;
 	bool unpin;
 
 	unpin = (vaddr == 2 * PGDIR_SIZE);
 	vaddr &= PMD_MASK;
 	pgd = pgd_offset_k(vaddr);
 	p4d = p4d_offset(pgd, 0);
-	for (i = 0; i < PTRS_PER_P4D; i++) {
-		if (p4d_none(p4d[i]))
-			continue;
-		xen_cleanmfnmap_p4d(p4d + i, unpin);
-	}
-	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
-		set_pgd(pgd, __pgd(0));
-		xen_cleanmfnmap_free_pgtbl(p4d, unpin);
-	}
+	if (!p4d_none(*p4d))
+		xen_cleanmfnmap_p4d(p4d, unpin);
 }
 
 static void __init xen_pagetable_p2m_free(void)
@@ -1717,7 +1703,7 @@ static void xen_release_pmd(unsigned long pfn)
 	xen_release_ptpage(pfn, PT_PMD);
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
 {
 	xen_alloc_ptpage(mm, pfn, PT_PUD);
@@ -2054,13 +2040,12 @@ static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
  */
 void __init xen_relocate_p2m(void)
 {
-	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys;
+	phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys;
 	unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end;
-	int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d;
+	int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud;
 	pte_t *pt;
 	pmd_t *pmd;
 	pud_t *pud;
-	p4d_t *p4d = NULL;
 	pgd_t *pgd;
 	unsigned long *new_p2m;
 	int save_pud;
@@ -2070,11 +2055,7 @@ void __init xen_relocate_p2m(void)
 	n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT;
 	n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;
 	n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT;
-	if (PTRS_PER_P4D > 1)
-		n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT;
-	else
-		n_p4d = 0;
-	n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d;
+	n_frames = n_pte + n_pt + n_pmd + n_pud;
 
 	new_area = xen_find_free_area(PFN_PHYS(n_frames));
 	if (!new_area) {
@@ -2090,76 +2071,56 @@ void __init xen_relocate_p2m(void)
 	 * To avoid any possible virtual address collision, just use
 	 * 2 * PUD_SIZE for the new area.
 	 */
-	p4d_phys = new_area;
-	pud_phys = p4d_phys + PFN_PHYS(n_p4d);
+	pud_phys = new_area;
 	pmd_phys = pud_phys + PFN_PHYS(n_pud);
 	pt_phys = pmd_phys + PFN_PHYS(n_pmd);
 	p2m_pfn = PFN_DOWN(pt_phys) + n_pt;
 
 	pgd = __va(read_cr3_pa());
 	new_p2m = (unsigned long *)(2 * PGDIR_SIZE);
-	idx_p4d = 0;
 	save_pud = n_pud;
-	do {
-		if (n_p4d > 0) {
-			p4d = early_memremap(p4d_phys, PAGE_SIZE);
-			clear_page(p4d);
-			n_pud = min(save_pud, PTRS_PER_P4D);
-		}
-		for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
-			pud = early_memremap(pud_phys, PAGE_SIZE);
-			clear_page(pud);
-			for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
-				 idx_pmd++) {
-				pmd = early_memremap(pmd_phys, PAGE_SIZE);
-				clear_page(pmd);
-				for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
-					 idx_pt++) {
-					pt = early_memremap(pt_phys, PAGE_SIZE);
-					clear_page(pt);
-					for (idx_pte = 0;
-						 idx_pte < min(n_pte, PTRS_PER_PTE);
-						 idx_pte++) {
-						set_pte(pt + idx_pte,
-							pfn_pte(p2m_pfn, PAGE_KERNEL));
-						p2m_pfn++;
-					}
-					n_pte -= PTRS_PER_PTE;
-					early_memunmap(pt, PAGE_SIZE);
-					make_lowmem_page_readonly(__va(pt_phys));
-					pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
-							PFN_DOWN(pt_phys));
-					set_pmd(pmd + idx_pt,
-						__pmd(_PAGE_TABLE | pt_phys));
-					pt_phys += PAGE_SIZE;
+	for (idx_pud = 0; idx_pud < n_pud; idx_pud++) {
+		pud = early_memremap(pud_phys, PAGE_SIZE);
+		clear_page(pud);
+		for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD);
+				idx_pmd++) {
+			pmd = early_memremap(pmd_phys, PAGE_SIZE);
+			clear_page(pmd);
+			for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD);
+					idx_pt++) {
+				pt = early_memremap(pt_phys, PAGE_SIZE);
+				clear_page(pt);
+				for (idx_pte = 0;
+				     idx_pte < min(n_pte, PTRS_PER_PTE);
+				     idx_pte++) {
+					set_pte(pt + idx_pte,
+						pfn_pte(p2m_pfn, PAGE_KERNEL));
+					p2m_pfn++;
 				}
-				n_pt -= PTRS_PER_PMD;
-				early_memunmap(pmd, PAGE_SIZE);
-				make_lowmem_page_readonly(__va(pmd_phys));
-				pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
-						PFN_DOWN(pmd_phys));
-				set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
-				pmd_phys += PAGE_SIZE;
+				n_pte -= PTRS_PER_PTE;
+				early_memunmap(pt, PAGE_SIZE);
+				make_lowmem_page_readonly(__va(pt_phys));
+				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE,
+						  PFN_DOWN(pt_phys));
+				set_pmd(pmd + idx_pt,
+					__pmd(_PAGE_TABLE | pt_phys));
+				pt_phys += PAGE_SIZE;
 			}
-			n_pmd -= PTRS_PER_PUD;
-			early_memunmap(pud, PAGE_SIZE);
-			make_lowmem_page_readonly(__va(pud_phys));
-			pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
-			if (n_p4d > 0)
-				set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys));
-			else
-				set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
-			pud_phys += PAGE_SIZE;
-		}
-		if (n_p4d > 0) {
-			save_pud -= PTRS_PER_P4D;
-			early_memunmap(p4d, PAGE_SIZE);
-			make_lowmem_page_readonly(__va(p4d_phys));
-			pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys));
-			set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys));
-			p4d_phys += PAGE_SIZE;
+			n_pt -= PTRS_PER_PMD;
+			early_memunmap(pmd, PAGE_SIZE);
+			make_lowmem_page_readonly(__va(pmd_phys));
+			pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE,
+					  PFN_DOWN(pmd_phys));
+			set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys));
+			pmd_phys += PAGE_SIZE;
 		}
-	} while (++idx_p4d < n_p4d);
+		n_pmd -= PTRS_PER_PUD;
+		early_memunmap(pud, PAGE_SIZE);
+		make_lowmem_page_readonly(__va(pud_phys));
+		pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys));
+		set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys));
+		pud_phys += PAGE_SIZE;
+	}
 
 	/* Now copy the old p2m info to the new area. */
 	memcpy(new_p2m, xen_p2m_addr, size);
@@ -2386,7 +2347,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.set_pte = xen_set_pte;
 	pv_mmu_ops.set_pmd = xen_set_pmd;
 	pv_mmu_ops.set_pud = xen_set_pud;
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	pv_mmu_ops.set_p4d = xen_set_p4d;
 #endif
 
@@ -2396,7 +2357,7 @@ static void __init xen_post_allocator_init(void)
 	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
 	pv_mmu_ops.release_pte = xen_release_pte;
 	pv_mmu_ops.release_pmd = xen_release_pmd;
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	pv_mmu_ops.alloc_pud = xen_alloc_pud;
 	pv_mmu_ops.release_pud = xen_release_pud;
 #endif
@@ -2460,14 +2421,14 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
 	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
 	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
 
-#if CONFIG_PGTABLE_LEVELS >= 4
+#ifdef CONFIG_X86_64
 	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
 	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
 	.set_p4d = xen_set_p4d_hyper,
 
 	.alloc_pud = xen_alloc_pmd_init,
 	.release_pud = xen_release_pmd_init,
-#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
+#endif	/* CONFIG_X86_64 */
 
 	.activate_mm = xen_activate_mm,
 	.dup_mmap = xen_dup_mmap,
-- 
2.14.2