From 9e6bc95ae1c4b92d9838ee8d2ee8b0e65f4e4469 Mon Sep 17 00:00:00 2001
From: Ricardo Neri <[email protected]>
Date: Fri, 27 Oct 2017 13:25:28 -0700
Subject: [PATCH 083/242] x86/mm: Relocate page fault error codes to traps.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Up to this point, only fault.c used the definitions of the page fault error
codes. Thus, it made sense to keep them within that file. However, other
portions of code might be interested in those definitions too. For
instance, the User-Mode Instruction Prevention emulation code will use
these definitions to emulate a page fault when it is unable to successfully
copy the results of the emulated instructions to user space.
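
As a minimal sketch of that use case (not part of this patch; the helper
name is hypothetical), such emulation code could fabricate the error code
of a user-mode write to a not-present page from these bits:

        /* Hypothetical helper, illustrative only: a failed copy of
         * emulation results to user space looks like a user-mode write
         * to a not-present page (X86_PF_PROT clear). */
        static unsigned long emulated_pf_error_code(void)
        {
                return X86_PF_USER | X86_PF_WRITE;
        }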

While relocating the error code enumeration, the prefix X86_ is used to
make it consistent with the rest of the definitions in traps.h. Of course,
code using the enumeration had to be updated as well. No functional changes
were performed.

Signed-off-by: Ricardo Neri <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Reviewed-by: Andy Lutomirski <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: [email protected]
Cc: Paul Gortmaker <[email protected]>
Cc: Huang Rui <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jiri Slaby <[email protected]>
Cc: "Ravi V. Shankar" <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Chen Yucong <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Masami Hiramatsu <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Link: https://lkml.kernel.org/r/1509135945-13762-2-git-send-email-ricardo.neri-calderon@linux.intel.com
(cherry picked from commit 1067f030994c69ca1fba8c607437c8895dcf8509)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit a85a07ab9111e3c78797c20b60a664dbd5db4981)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/traps.h | 18 +++++++++
 arch/x86/mm/fault.c          | 88 +++++++++++++++++---------------------------
 2 files changed, 52 insertions(+), 54 deletions(-)

diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index feb89dbe359d..8e5bf86f87e5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -162,4 +162,22 @@ enum {
         X86_TRAP_IRET = 32,    /* 32, IRET Exception */
 };
 
+/*
+ * Page fault error code bits:
+ *
+ *   bit 0 ==  0: no page found        1: protection fault
+ *   bit 1 ==  0: read access          1: write access
+ *   bit 2 ==  0: kernel-mode access   1: user-mode access
+ *   bit 3 ==                          1: use of reserved bit detected
+ *   bit 4 ==                          1: fault was an instruction fetch
+ *   bit 5 ==                          1: protection keys block access
+ */
+enum x86_pf_error_code {
+        X86_PF_PROT  = 1 << 0,
+        X86_PF_WRITE = 1 << 1,
+        X86_PF_USER  = 1 << 2,
+        X86_PF_RSVD  = 1 << 3,
+        X86_PF_INSTR = 1 << 4,
+        X86_PF_PK    = 1 << 5,
+};
 #endif /* _ASM_X86_TRAPS_H */
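
To make the bit layout above concrete, a small decoder over the relocated
names might look as follows. This is a sketch, not part of the patch; the
function is hypothetical and only pr_info(), the usual kernel logging
helper, and the X86_PF_* flags are assumed from context:

        static void describe_pf_error_code(unsigned long error_code)
        {
                pr_info("#PF: %s, %s, %s mode%s%s%s\n",
                        (error_code & X86_PF_WRITE) ? "write" : "read",
                        (error_code & X86_PF_PROT) ?
                                "protection fault" : "page not present",
                        (error_code & X86_PF_USER) ? "user" : "kernel",
                        (error_code & X86_PF_RSVD) ? ", reserved bit set" : "",
                        (error_code & X86_PF_INSTR) ? ", instruction fetch" : "",
                        (error_code & X86_PF_PK) ? ", protection-key block" : "");
        }

For example, a user-space write to an unmapped address arrives with
error_code == (X86_PF_USER | X86_PF_WRITE) and would be reported as
"write, page not present, user mode".
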
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4ee9eb916826..d3a57e7ad311 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -28,26 +28,6 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
 
-/*
- * Page fault error code bits:
- *
- *   bit 0 ==  0: no page found        1: protection fault
- *   bit 1 ==  0: read access          1: write access
- *   bit 2 ==  0: kernel-mode access   1: user-mode access
- *   bit 3 ==                          1: use of reserved bit detected
- *   bit 4 ==                          1: fault was an instruction fetch
- *   bit 5 ==                          1: protection keys block access
- */
-enum x86_pf_error_code {
-
-        PF_PROT  = 1 << 0,
-        PF_WRITE = 1 << 1,
-        PF_USER  = 1 << 2,
-        PF_RSVD  = 1 << 3,
-        PF_INSTR = 1 << 4,
-        PF_PK    = 1 << 5,
-};
-
 /*
  * Returns 0 if mmiotrace is disabled, or if the fault is not
  * handled by mmiotrace:
@@ -149,7 +129,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
          * If it was a exec (instruction fetch) fault on NX page, then
          * do not ignore the fault:
          */
-        if (error_code & PF_INSTR)
+        if (error_code & X86_PF_INSTR)
                 return 0;
 
         instr = (void *)convert_ip_to_linear(current, regs);
@@ -179,7 +159,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
          * siginfo so userspace can discover which protection key was set
          * on the PTE.
          *
-         * If we get here, we know that the hardware signaled a PF_PK
+         * If we get here, we know that the hardware signaled a X86_PF_PK
          * fault and that there was a VMA once we got in the fault
          * handler. It does *not* guarantee that the VMA we find here
          * was the one that we faulted on.
@@ -204,7 +184,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
         /*
          * force_sig_info_fault() is called from a number of
          * contexts, some of which have a VMA and some of which
-         * do not. The PF_PK handing happens after we have a
+         * do not. The X86_PF_PK handing happens after we have a
          * valid VMA, so we should never reach this without a
          * valid VMA.
          */
@@ -693,7 +673,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
         if (!oops_may_print())
                 return;
 
-        if (error_code & PF_INSTR) {
+        if (error_code & X86_PF_INSTR) {
                 unsigned int level;
                 pgd_t *pgd;
                 pte_t *pte;
@@ -775,7 +755,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
          */
         if (current->thread.sig_on_uaccess_err && signal) {
                 tsk->thread.trap_nr = X86_TRAP_PF;
-                tsk->thread.error_code = error_code | PF_USER;
+                tsk->thread.error_code = error_code | X86_PF_USER;
                 tsk->thread.cr2 = address;
 
                 /* XXX: hwpoison faults will set the wrong code. */
@@ -894,7 +874,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
         struct task_struct *tsk = current;
 
         /* User mode accesses just cause a SIGSEGV */
-        if (error_code & PF_USER) {
+        if (error_code & X86_PF_USER) {
                 /*
                  * It's possible to have interrupts off here:
                  */
@@ -915,7 +895,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
                  * Instruction fetch faults in the vsyscall page might need
                  * emulation.
                  */
-                if (unlikely((error_code & PF_INSTR) &&
+                if (unlikely((error_code & X86_PF_INSTR) &&
                              ((address & ~0xfff) == VSYSCALL_ADDR))) {
                         if (emulate_vsyscall(regs, address))
                                 return;
@@ -928,7 +908,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
          * are always protection faults.
          */
         if (address >= TASK_SIZE_MAX)
-                error_code |= PF_PROT;
+                error_code |= X86_PF_PROT;
 
         if (likely(show_unhandled_signals))
                 show_signal_msg(regs, error_code, address, tsk);
@@ -989,11 +969,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 
         if (!boot_cpu_has(X86_FEATURE_OSPKE))
                 return false;
-        if (error_code & PF_PK)
+        if (error_code & X86_PF_PK)
                 return true;
         /* this checks permission keys on the VMA: */
-        if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
-                                       (error_code & PF_INSTR), foreign))
+        if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+                                       (error_code & X86_PF_INSTR), foreign))
                 return true;
         return false;
 }
@@ -1021,7 +1001,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
         int code = BUS_ADRERR;
 
         /* Kernel mode? Handle exceptions or die: */
-        if (!(error_code & PF_USER)) {
+        if (!(error_code & X86_PF_USER)) {
                 no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
                 return;
         }
@@ -1049,14 +1029,14 @@ static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                unsigned long address, u32 *pkey, unsigned int fault)
 {
-        if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+        if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
                 no_context(regs, error_code, address, 0, 0);
                 return;
         }
 
         if (fault & VM_FAULT_OOM) {
                 /* Kernel mode? Handle exceptions or die: */
-                if (!(error_code & PF_USER)) {
+                if (!(error_code & X86_PF_USER)) {
                         no_context(regs, error_code, address,
                                    SIGSEGV, SEGV_MAPERR);
                         return;
@@ -1081,16 +1061,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
-        if ((error_code & PF_WRITE) && !pte_write(*pte))
+        if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
                 return 0;
 
-        if ((error_code & PF_INSTR) && !pte_exec(*pte))
+        if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
                 return 0;
         /*
          * Note: We do not do lazy flushing on protection key
-         * changes, so no spurious fault will ever set PF_PK.
+         * changes, so no spurious fault will ever set X86_PF_PK.
          */
-        if ((error_code & PF_PK))
+        if ((error_code & X86_PF_PK))
                 return 1;
 
         return 1;
@@ -1136,8 +1116,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
          * change, so user accesses are not expected to cause spurious
          * faults.
          */
-        if (error_code != (PF_WRITE | PF_PROT)
-            && error_code != (PF_INSTR | PF_PROT))
+        if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
+            error_code != (X86_PF_INSTR | X86_PF_PROT))
                 return 0;
 
         pgd = init_mm.pgd + pgd_index(address);
@@ -1197,19 +1177,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
          * always an unconditional error and can never result in
          * a follow-up action to resolve the fault, like a COW.
          */
-        if (error_code & PF_PK)
+        if (error_code & X86_PF_PK)
                 return 1;
 
         /*
          * Make sure to check the VMA so that we do not perform
-         * faults just to hit a PF_PK as soon as we fill in a
+         * faults just to hit a X86_PF_PK as soon as we fill in a
          * page.
          */
-        if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
-                                       (error_code & PF_INSTR), foreign))
+        if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+                                       (error_code & X86_PF_INSTR), foreign))
                 return 1;
 
-        if (error_code & PF_WRITE) {
+        if (error_code & X86_PF_WRITE) {
                 /* write, present and write, not present: */
                 if (unlikely(!(vma->vm_flags & VM_WRITE)))
                         return 1;
@@ -1217,7 +1197,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
         }
 
         /* read, present: */
-        if (unlikely(error_code & PF_PROT))
+        if (unlikely(error_code & X86_PF_PROT))
                 return 1;
 
         /* read, not present: */
@@ -1240,7 +1220,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
         if (!static_cpu_has(X86_FEATURE_SMAP))
                 return false;
 
-        if (error_code & PF_USER)
+        if (error_code & X86_PF_USER)
                 return false;
 
         if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
@@ -1293,7 +1273,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
          * protection error (error_code & 9) == 0.
          */
         if (unlikely(fault_in_kernel_space(address))) {
-                if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+                if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
                         if (vmalloc_fault(address) >= 0)
                                 return;
 
@@ -1321,7 +1301,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
         if (unlikely(kprobes_fault(regs)))
                 return;
 
-        if (unlikely(error_code & PF_RSVD))
+        if (unlikely(error_code & X86_PF_RSVD))
                 pgtable_bad(regs, error_code, address);
 
         if (unlikely(smap_violation(error_code, regs))) {
@@ -1347,7 +1327,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
          */
         if (user_mode(regs)) {
                 local_irq_enable();
-                error_code |= PF_USER;
+                error_code |= X86_PF_USER;
                 flags |= FAULT_FLAG_USER;
         } else {
                 if (regs->flags & X86_EFLAGS_IF)
@@ -1356,9 +1336,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
         perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-        if (error_code & PF_WRITE)
+        if (error_code & X86_PF_WRITE)
                 flags |= FAULT_FLAG_WRITE;
-        if (error_code & PF_INSTR)
+        if (error_code & X86_PF_INSTR)
                 flags |= FAULT_FLAG_INSTRUCTION;
 
         /*
@@ -1378,7 +1358,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
          * space check, thus avoiding the deadlock:
          */
         if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
-                if ((error_code & PF_USER) == 0 &&
+                if (!(error_code & X86_PF_USER) &&
                     !search_exception_tables(regs->ip)) {
                         bad_area_nosemaphore(regs, error_code, address, NULL);
                         return;
@@ -1405,7 +1385,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
                 bad_area(regs, error_code, address);
                 return;
         }
-        if (error_code & PF_USER) {
+        if (error_code & X86_PF_USER) {
                 /*
                  * Accessing the stack below %sp is always a bug.
                  * The large cushion allows instructions like enter
-- 
2.14.2