0083-x86-mm-Relocate-page-fault-error-codes-to-traps.h.patch

From 9e6bc95ae1c4b92d9838ee8d2ee8b0e65f4e4469 Mon Sep 17 00:00:00 2001
From: Ricardo Neri <[email protected]>
Date: Fri, 27 Oct 2017 13:25:28 -0700
Subject: [PATCH 083/242] x86/mm: Relocate page fault error codes to traps.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Up to this point, only fault.c used the definitions of the page fault error
codes. Thus, it made sense to keep them within such file. Other portions of
code might be interested in those definitions too. For instance, the User-
Mode Instruction Prevention emulation code will use such definitions to
emulate a page fault when it is unable to successfully copy the results
of the emulated instructions to user space.

While relocating the error code enumeration, the prefix X86_ is used to
make it consistent with the rest of the definitions in traps.h. Of course,
code using the enumeration had to be updated as well. No functional changes
were performed.
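
[Editorial illustration, not part of this patch or its commit message: the
kind of use outside fault.c described above could look roughly like the
sketch below. The function name is hypothetical, the chosen bits are only an
example, and this is not the actual UMIP emulation code.]

    /*
     * Illustration only: with the X86_PF_* definitions available through
     * <asm/traps.h>, code outside fault.c can describe a synthetic page
     * fault, e.g. after failing to copy emulated results to user space.
     */
    #include <linux/sched/signal.h>
    #include <asm/traps.h>

    static void hypothetical_report_emulated_fault(unsigned long addr)
    {
            struct task_struct *tsk = current;

            /* Report the failed copy as a user-mode write fault at addr. */
            tsk->thread.cr2        = addr;
            tsk->thread.error_code = X86_PF_USER | X86_PF_WRITE;
            tsk->thread.trap_nr    = X86_TRAP_PF;

            force_sig(SIGSEGV, tsk); /* two-argument form, as in v4.x kernels */
    }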
Signed-off-by: Ricardo Neri <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Borislav Petkov <[email protected]>
Reviewed-by: Andy Lutomirski <[email protected]>
Cc: "Michael S. Tsirkin" <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: [email protected]
Cc: Paul Gortmaker <[email protected]>
Cc: Huang Rui <[email protected]>
Cc: Shuah Khan <[email protected]>
Cc: Jonathan Corbet <[email protected]>
Cc: Jiri Slaby <[email protected]>
Cc: "Ravi V. Shankar" <[email protected]>
Cc: Chris Metcalf <[email protected]>
Cc: Brian Gerst <[email protected]>
Cc: Josh Poimboeuf <[email protected]>
Cc: Chen Yucong <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Cc: Masami Hiramatsu <[email protected]>
Cc: Paolo Bonzini <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: "Kirill A. Shutemov" <[email protected]>
Link: https://lkml.kernel.org/r/1509135945-13762-2-git-send-email-ricardo.neri-calderon@linux.intel.com
(cherry picked from commit 1067f030994c69ca1fba8c607437c8895dcf8509)
Signed-off-by: Andy Whitcroft <[email protected]>
Signed-off-by: Kleber Sacilotto de Souza <[email protected]>
(cherry picked from commit a85a07ab9111e3c78797c20b60a664dbd5db4981)
Signed-off-by: Fabian Grünbichler <[email protected]>
---
 arch/x86/include/asm/traps.h | 18 +++++++++
 arch/x86/mm/fault.c | 88 +++++++++++++++++---------------------------
 2 files changed, 52 insertions(+), 54 deletions(-)

diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index feb89dbe359d..8e5bf86f87e5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -162,4 +162,22 @@ enum {
 X86_TRAP_IRET = 32, /* 32, IRET Exception */
 };
+/*
+ * Page fault error code bits:
+ *
+ * bit 0 == 0: no page found 1: protection fault
+ * bit 1 == 0: read access 1: write access
+ * bit 2 == 0: kernel-mode access 1: user-mode access
+ * bit 3 == 1: use of reserved bit detected
+ * bit 4 == 1: fault was an instruction fetch
+ * bit 5 == 1: protection keys block access
+ */
+enum x86_pf_error_code {
+ X86_PF_PROT = 1 << 0,
+ X86_PF_WRITE = 1 << 1,
+ X86_PF_USER = 1 << 2,
+ X86_PF_RSVD = 1 << 3,
+ X86_PF_INSTR = 1 << 4,
+ X86_PF_PK = 1 << 5,
+};
 #endif /* _ASM_X86_TRAPS_H */
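
[Editorial illustration, not part of this patch: the bit layout documented in
the hunk above means that, for example, a user-mode write to a present but
read-only page is reported as X86_PF_PROT | X86_PF_WRITE | X86_PF_USER, i.e.
error_code == 0x7. Below is a minimal sketch that decodes a raw error code
using the new names; the helper is hypothetical and exists only for
illustration.]

    /* Hypothetical pretty-printer for a #PF error code, illustration only. */
    #include <linux/printk.h>
    #include <asm/traps.h>

    static void hypothetical_dump_pf_error_code(unsigned long error_code)
    {
            pr_info("#PF error code %#lx: %s-mode %s, %s\n",
                    error_code,
                    (error_code & X86_PF_USER)  ? "user" : "kernel",
                    (error_code & X86_PF_INSTR) ? "instruction fetch" :
                    (error_code & X86_PF_WRITE) ? "write" : "read",
                    (error_code & X86_PF_PROT)  ? "protection violation"
                                                : "page not present");

            if (error_code & X86_PF_RSVD)
                    pr_info("  reserved bit set in a page table entry\n");
            if (error_code & X86_PF_PK)
                    pr_info("  access blocked by a protection key\n");
    }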
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4ee9eb916826..d3a57e7ad311 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -28,26 +28,6 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
-/*
- * Page fault error code bits:
- *
- * bit 0 == 0: no page found 1: protection fault
- * bit 1 == 0: read access 1: write access
- * bit 2 == 0: kernel-mode access 1: user-mode access
- * bit 3 == 1: use of reserved bit detected
- * bit 4 == 1: fault was an instruction fetch
- * bit 5 == 1: protection keys block access
- */
-enum x86_pf_error_code {
-
- PF_PROT = 1 << 0,
- PF_WRITE = 1 << 1,
- PF_USER = 1 << 2,
- PF_RSVD = 1 << 3,
- PF_INSTR = 1 << 4,
- PF_PK = 1 << 5,
-};
-
 /*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
@@ -149,7 +129,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 * If it was a exec (instruction fetch) fault on NX page, then
 * do not ignore the fault:
 */
- if (error_code & PF_INSTR)
+ if (error_code & X86_PF_INSTR)
 return 0;
 instr = (void *)convert_ip_to_linear(current, regs);
@@ -179,7 +159,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
- * If we get here, we know that the hardware signaled a PF_PK
+ * If we get here, we know that the hardware signaled a X86_PF_PK
 * fault and that there was a VMA once we got in the fault
 * handler. It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
@@ -204,7 +184,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
 /*
 * force_sig_info_fault() is called from a number of
 * contexts, some of which have a VMA and some of which
- * do not. The PF_PK handing happens after we have a
+ * do not. The X86_PF_PK handing happens after we have a
 * valid VMA, so we should never reach this without a
 * valid VMA.
 */
@@ -693,7 +673,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 if (!oops_may_print())
 return;
- if (error_code & PF_INSTR) {
+ if (error_code & X86_PF_INSTR) {
 unsigned int level;
 pgd_t *pgd;
 pte_t *pte;
@@ -775,7 +755,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 */
 if (current->thread.sig_on_uaccess_err && signal) {
 tsk->thread.trap_nr = X86_TRAP_PF;
- tsk->thread.error_code = error_code | PF_USER;
+ tsk->thread.error_code = error_code | X86_PF_USER;
 tsk->thread.cr2 = address;
 /* XXX: hwpoison faults will set the wrong code. */
@@ -894,7 +874,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 struct task_struct *tsk = current;
 /* User mode accesses just cause a SIGSEGV */
- if (error_code & PF_USER) {
+ if (error_code & X86_PF_USER) {
 /*
 * It's possible to have interrupts off here:
 */
@@ -915,7 +895,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 * Instruction fetch faults in the vsyscall page might need
 * emulation.
 */
- if (unlikely((error_code & PF_INSTR) &&
+ if (unlikely((error_code & X86_PF_INSTR) &&
 ((address & ~0xfff) == VSYSCALL_ADDR))) {
 if (emulate_vsyscall(regs, address))
 return;
@@ -928,7 +908,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 * are always protection faults.
 */
 if (address >= TASK_SIZE_MAX)
- error_code |= PF_PROT;
+ error_code |= X86_PF_PROT;
 if (likely(show_unhandled_signals))
 show_signal_msg(regs, error_code, address, tsk);
@@ -989,11 +969,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 if (!boot_cpu_has(X86_FEATURE_OSPKE))
 return false;
- if (error_code & PF_PK)
+ if (error_code & X86_PF_PK)
 return true;
 /* this checks permission keys on the VMA: */
- if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
- (error_code & PF_INSTR), foreign))
+ if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+ (error_code & X86_PF_INSTR), foreign))
 return true;
 return false;
 }
@@ -1021,7 +1001,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 int code = BUS_ADRERR;
 /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
+ if (!(error_code & X86_PF_USER)) {
 no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 return;
 }
@@ -1049,14 +1029,14 @@ static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 unsigned long address, u32 *pkey, unsigned int fault)
 {
- if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+ if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
 no_context(regs, error_code, address, 0, 0);
 return;
 }
 if (fault & VM_FAULT_OOM) {
 /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
+ if (!(error_code & X86_PF_USER)) {
 no_context(regs, error_code, address,
 SIGSEGV, SEGV_MAPERR);
 return;
@@ -1081,16 +1061,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
- if ((error_code & PF_WRITE) && !pte_write(*pte))
+ if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
 return 0;
- if ((error_code & PF_INSTR) && !pte_exec(*pte))
+ if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
 return 0;
 /*
 * Note: We do not do lazy flushing on protection key
- * changes, so no spurious fault will ever set PF_PK.
+ * changes, so no spurious fault will ever set X86_PF_PK.
 */
- if ((error_code & PF_PK))
+ if ((error_code & X86_PF_PK))
 return 1;
 return 1;
@@ -1136,8 +1116,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
 * change, so user accesses are not expected to cause spurious
 * faults.
 */
- if (error_code != (PF_WRITE | PF_PROT)
- && error_code != (PF_INSTR | PF_PROT))
+ if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
+ error_code != (X86_PF_INSTR | X86_PF_PROT))
 return 0;
 pgd = init_mm.pgd + pgd_index(address);
@@ -1197,19 +1177,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 * always an unconditional error and can never result in
 * a follow-up action to resolve the fault, like a COW.
 */
- if (error_code & PF_PK)
+ if (error_code & X86_PF_PK)
 return 1;
 /*
 * Make sure to check the VMA so that we do not perform
- * faults just to hit a PF_PK as soon as we fill in a
+ * faults just to hit a X86_PF_PK as soon as we fill in a
 * page.
 */
- if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
- (error_code & PF_INSTR), foreign))
+ if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+ (error_code & X86_PF_INSTR), foreign))
 return 1;
- if (error_code & PF_WRITE) {
+ if (error_code & X86_PF_WRITE) {
 /* write, present and write, not present: */
 if (unlikely(!(vma->vm_flags & VM_WRITE)))
 return 1;
@@ -1217,7 +1197,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 }
 /* read, present: */
- if (unlikely(error_code & PF_PROT))
+ if (unlikely(error_code & X86_PF_PROT))
 return 1;
 /* read, not present: */
@@ -1240,7 +1220,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
 if (!static_cpu_has(X86_FEATURE_SMAP))
 return false;
- if (error_code & PF_USER)
+ if (error_code & X86_PF_USER)
 return false;
 if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
@@ -1293,7 +1273,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 * protection error (error_code & 9) == 0.
 */
 if (unlikely(fault_in_kernel_space(address))) {
- if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+ if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
 if (vmalloc_fault(address) >= 0)
 return;
@@ -1321,7 +1301,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 if (unlikely(kprobes_fault(regs)))
 return;
- if (unlikely(error_code & PF_RSVD))
+ if (unlikely(error_code & X86_PF_RSVD))
 pgtable_bad(regs, error_code, address);
 if (unlikely(smap_violation(error_code, regs))) {
@@ -1347,7 +1327,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 */
 if (user_mode(regs)) {
 local_irq_enable();
- error_code |= PF_USER;
+ error_code |= X86_PF_USER;
 flags |= FAULT_FLAG_USER;
 } else {
 if (regs->flags & X86_EFLAGS_IF)
@@ -1356,9 +1336,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
- if (error_code & PF_WRITE)
+ if (error_code & X86_PF_WRITE)
 flags |= FAULT_FLAG_WRITE;
- if (error_code & PF_INSTR)
+ if (error_code & X86_PF_INSTR)
 flags |= FAULT_FLAG_INSTRUCTION;
 /*
@@ -1378,7 +1358,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 * space check, thus avoiding the deadlock:
 */
 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
- if ((error_code & PF_USER) == 0 &&
+ if (!(error_code & X86_PF_USER) &&
 !search_exception_tables(regs->ip)) {
 bad_area_nosemaphore(regs, error_code, address, NULL);
 return;
@@ -1405,7 +1385,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 bad_area(regs, error_code, address);
 return;
 }
- if (error_code & PF_USER) {
+ if (error_code & X86_PF_USER) {
 /*
 * Accessing the stack below %sp is always a bug.
 * The large cushion allows instructions like enter
-- 
2.14.2