From bcb5ffbfba8c6c557ad536eb9084040b8e52923e Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <[email protected]>
Date: Mon, 4 Dec 2017 15:07:42 +0100
Subject: [PATCH 196/232] x86/mm/pti: Add functions to clone kernel PMDs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Provide infrastructure to:

 - find a kernel PMD for a mapping which must be visible to user space
   for the entry/exit code to work.

 - walk an address range and share the kernel PMD with it.

This reuses a small part of the original KAISER patches to populate the
user space page table.

[ tglx: Made it universally usable so it can be used for any kind of shared
	mapping. Add a mechanism to clear specific bits in the user space
	visible PMD entry. Folded Andy's simplifications ]

Originally-by: Dave Hansen <[email protected]>
Signed-off-by: Andy Lutomirski <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Cc: Boris Ostrovsky <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Borislav Petkov <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: David Laight <[email protected]>
Cc: Denys Vlasenko <[email protected]>
Cc: Eduardo Valentin <[email protected]>
Cc: Greg KH <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Juergen Gross <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Signed-off-by: Ingo Molnar <[email protected]>
(cherry picked from commit 03f4424f348e8be95eb1bbeba09461cd7b867828)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit 262ab7e8665e88581d20ccaefa107340457224bb)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/mm/pti.c | 127 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 127 insertions(+)
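
Note on intended use (illustration only, not part of this patch): callers of
pti_clone_pmds() are wired up by later patches in the series. A minimal sketch
of such a caller is shown below; the __entry_text_start/__entry_text_end
symbols and the choice of _PAGE_GLOBAL as the clear mask are assumptions made
for this sketch, not taken from this patch.

	/* Hypothetical caller, for illustration only. */
	static void __init pti_clone_entry_text_example(void)
	{
		/*
		 * Share the kernel's last-level page tables covering the
		 * entry/exit text with the user copy of the page tables.
		 * Clearing _PAGE_GLOBAL in the user-visible copy is an
		 * assumption of this sketch.
		 */
		pti_clone_pmds((unsigned long) __entry_text_start,
			       (unsigned long) __entry_text_end, _PAGE_GLOBAL);
	}
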
diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
index 69a983365392..d58bcee470fc 100644
--- a/arch/x86/mm/pti.c
+++ b/arch/x86/mm/pti.c
@@ -48,6 +48,11 @@
 #undef pr_fmt
 #define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt
 
+/* Backporting helper */
+#ifndef __GFP_NOTRACK
+#define __GFP_NOTRACK	0
+#endif
+
 static void __init pti_print_if_insecure(const char *reason)
 {
 	if (boot_cpu_has_bug(X86_BUG_CPU_INSECURE))
@@ -137,6 +142,128 @@ pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 	return pgd;
 }
 
+/*
+ * Walk the user copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.
+ *
+ * Returns a pointer to a P4D on success, or NULL on failure.
+ */
+static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
+{
+	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+
+	if (address < PAGE_OFFSET) {
+		WARN_ONCE(1, "attempt to walk user address\n");
+		return NULL;
+	}
+
+	if (pgd_none(*pgd)) {
+		unsigned long new_p4d_page = __get_free_page(gfp);
+		if (!new_p4d_page)
+			return NULL;
+
+		if (pgd_none(*pgd)) {
+			set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
+			new_p4d_page = 0;
+		}
+		if (new_p4d_page)
+			free_page(new_p4d_page);
+	}
+	BUILD_BUG_ON(pgd_large(*pgd) != 0);
+
+	return p4d_offset(pgd, address);
+}
+
+/*
+ * Walk the user copy of the page tables (optionally) trying to allocate
+ * page table pages on the way down.
+ *
+ * Returns a pointer to a PMD on success, or NULL on failure.
+ */
+static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
+{
+	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+	p4d_t *p4d = pti_user_pagetable_walk_p4d(address);
+	pud_t *pud;
+
+	BUILD_BUG_ON(p4d_large(*p4d) != 0);
+	if (p4d_none(*p4d)) {
+		unsigned long new_pud_page = __get_free_page(gfp);
+		if (!new_pud_page)
+			return NULL;
+
+		if (p4d_none(*p4d)) {
+			set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
+			new_pud_page = 0;
+		}
+		if (new_pud_page)
+			free_page(new_pud_page);
+	}
+
+	pud = pud_offset(p4d, address);
+	/* The user page tables do not use large mappings: */
+	if (pud_large(*pud)) {
+		WARN_ON(1);
+		return NULL;
+	}
+	if (pud_none(*pud)) {
+		unsigned long new_pmd_page = __get_free_page(gfp);
+		if (!new_pmd_page)
+			return NULL;
+
+		if (pud_none(*pud)) {
+			set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
+			new_pmd_page = 0;
+		}
+		if (new_pmd_page)
+			free_page(new_pmd_page);
+	}
+
+	return pmd_offset(pud, address);
+}
+
+static void __init
+pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
+{
+	unsigned long addr;
+
+	/*
+	 * Clone the populated PMDs which cover start to end. These PMD areas
+	 * can have holes.
+	 */
+	for (addr = start; addr < end; addr += PMD_SIZE) {
+		pmd_t *pmd, *target_pmd;
+		pgd_t *pgd;
+		p4d_t *p4d;
+		pud_t *pud;
+
+		pgd = pgd_offset_k(addr);
+		if (WARN_ON(pgd_none(*pgd)))
+			return;
+		p4d = p4d_offset(pgd, addr);
+		if (WARN_ON(p4d_none(*p4d)))
+			return;
+		pud = pud_offset(p4d, addr);
+		if (pud_none(*pud))
+			continue;
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd))
+			continue;
+
+		target_pmd = pti_user_pagetable_walk_pmd(addr);
+		if (WARN_ON(!target_pmd))
+			return;
+
+		/*
+		 * Copy the PMD. That is, the kernelmode and usermode
+		 * tables will share the last-level page tables of this
+		 * address range
+		 */
+		*target_pmd = pmd_clear_flags(*pmd, clear);
+	}
+}
+
 /*
  * Initialize kernel page table isolation
  */
-- 
2.14.2
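
Addendum (illustration only, not part of this patch): the same walk pattern can
be extended one level further when a single page, rather than a whole PMD
range, has to be made visible to user space. A rough sketch built on
pti_user_pagetable_walk_pmd() is below; the function name and the pte-level
step are assumptions of this sketch, as this patch itself stops at the PMD
level.

	/* Hypothetical pte-level walk, for illustration only. */
	static pte_t *pti_user_pagetable_walk_pte_example(unsigned long address)
	{
		gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
		pmd_t *pmd = pti_user_pagetable_walk_pmd(address);

		if (!pmd)
			return NULL;

		/* Allocate a zeroed PTE page if this PMD slot is still empty. */
		if (pmd_none(*pmd)) {
			unsigned long new_pte_page = __get_free_page(gfp);
			if (!new_pte_page)
				return NULL;
			set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
		}

		return pte_offset_kernel(pmd, address);
	}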