From 7505dd405211a42c3abf52ef33b97eea470aaf60 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <[email protected]>
Date: Tue, 12 Dec 2017 07:56:42 -0800
Subject: [PATCH 206/242] x86/pti: Map the vsyscall page if needed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Make VSYSCALLs work fully in PTI mode by mapping them properly to the user
space visible page tables.

[ tglx: Hide unused functions (Patch by Arnd Bergmann) ]

Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Laight <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Kees Cook <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 85900ea51577e31b186e523c8f4e068c79ecc7d3)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 7a2ba0ea0a18cfc1f18c3f1389ef85f2a0d3227d)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/vsyscall.h       |  1 +
 arch/x86/entry/vsyscall/vsyscall_64.c |  6 ++--
 arch/x86/mm/pti.c                     | 65 +++++++++++++++++++++++++++++++++++
 3 files changed, 69 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
index 6ba66ee79710..0eaeb223d692 100644
--- a/arch/x86/include/asm/vsyscall.h
+++ b/arch/x86/include/asm/vsyscall.h
@@ -6,6 +6,7 @@
 
 #ifdef CONFIG_X86_VSYSCALL_EMULATION
 extern void map_vsyscall(void);
+extern void set_vsyscall_pgtable_user_bits(pgd_t *root);
 
 /*
  * Called on instruction fetch fault in vsyscall page.
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
index 5e56a4ced848..238b4bcd3c47 100644
--- a/arch/x86/entry/vsyscall/vsyscall_64.c
+++ b/arch/x86/entry/vsyscall/vsyscall_64.c
@@ -343,14 +343,14 @@ int in_gate_area_no_mm(unsigned long addr)
  * vsyscalls but leave the page not present.  If so, we skip calling
  * this.
  */
-static void __init set_vsyscall_pgtable_user_bits(void)
+void __init set_vsyscall_pgtable_user_bits(pgd_t *root)
 {
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
-	pgd = pgd_offset_k(VSYSCALL_ADDR);
+	pgd = pgd_offset_pgd(root, VSYSCALL_ADDR);
 	set_pgd(pgd, __pgd(pgd_val(*pgd) | _PAGE_USER));
 	p4d = p4d_offset(pgd, VSYSCALL_ADDR);
 #if CONFIG_PGTABLE_LEVELS >= 5
@@ -372,7 +372,7 @@ void __init map_vsyscall(void)
 			     vsyscall_mode == NATIVE
 			     ? PAGE_KERNEL_VSYSCALL
 			     : PAGE_KERNEL_VVAR);
-		set_vsyscall_pgtable_user_bits();
+		set_vsyscall_pgtable_user_bits(swapper_pg_dir);
 	}
 
 	BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index b1c38ef9fbbb..bce8aea65606 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -38,6 +38,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/hypervisor.h>
+#include <asm/vsyscall.h>
 #include <asm/cmdline.h>
 #include <asm/pti.h>
 #include <asm/pgtable.h>
@@ -223,6 +224,69 @@ static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
 	return pmd_offset(pud, address);
 }
 
+#ifdef CONFIG_X86_VSYSCALL_EMULATION
+/*
+ * Walk the shadow copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.  Does not support large pages.
+ *
+ * Note: this is only used when mapping *new* kernel data into the
+ * user/shadow page tables.  It is never used for userspace data.
+ *
+ * Returns a pointer to a PTE on success, or NULL on failure.
+ */
+static __init pte_t *pti_user_pagetable_walk_pte(unsigned long address)
+{
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	pmd_t *pmd = pti_user_pagetable_walk_pmd(address);
+	pte_t *pte;
+
+	/* We can't do anything sensible if we hit a large mapping. */
+	if (pmd_large(*pmd)) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (pmd_none(*pmd)) {
+		unsigned long new_pte_page = __get_free_page(gfp);
+		if (!new_pte_page)
+			return NULL;
+
+		if (pmd_none(*pmd)) {
+			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
+			new_pte_page = 0;
+		}
+		if (new_pte_page)
+			free_page(new_pte_page);
+	}
+
+	pte = pte_offset_kernel(pmd, address);
+	if (pte_flags(*pte) & _PAGE_USER) {
+		WARN_ONCE(1, "attempt to walk to user pte\n");
+		return NULL;
+	}
+	return pte;
+}
+
+static void __init pti_setup_vsyscall(void)
+{
+	pte_t *pte, *target_pte;
+	unsigned int level;
+
+	pte = lookup_address(VSYSCALL_ADDR, &level);
+	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
+		return;
+
+	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
+	if (WARN_ON(!target_pte))
+		return;
+
+	*target_pte = *pte;
+	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
+}
+#else
+static void __init pti_setup_vsyscall(void) { }
+#endif
+
 static void __init
 pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
 {
@@ -319,4 +383,5 @@ void __init pti_init(void)
 	pti_clone_user_shared();
 	pti_clone_entry_text();
 	pti_setup_espfix64();
+	pti_setup_vsyscall();
 }
-- 
2.14.2