@@ -0,0 +1,127 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Thomas Lamprecht <[email protected]>
+Date: Wed, 3 Apr 2019 18:41:50 +0200
+Subject: [PATCH] x86/fpu: backport copy_kernel_to_XYZ_err helpers
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Partial cherry-pick from the upstream 5.2 commit "x86/fpu: Restore from
+kernel memory on the 64-bit path too":
+commit 926b21f37b072ae4c117052de45a975c6d468fec
+Author: Sebastian Andrzej Siewior <[email protected]>
+
+Namely, only backport the added helpers, none of the semantic changes.
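+
+The added helpers are kernel_insn_err(), copy_kernel_to_fregs_err(),
+copy_kernel_to_fxregs_err() and copy_kernel_to_xregs_err(); a short
+usage sketch follows the quoted message below.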
+
+Relevant parts of the original commit message:
+> In order to avoid that mess, copy the FPU state from userland, validate
+> it and then load it. The copy_kernel_…() helpers are basically just
+> like the old helpers except that they operate on kernel memory and the
+> fault handler just sets the error value and the caller handles it.
+
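+As an illustration only, not part of the backported change: a caller
+that has copied and validated an FPU state into a kernel buffer could
+restore it along these lines. The wrapper function below is
+hypothetical; use_xsave() and use_fxsr() are the existing feature
+checks from fpu/internal.h.
+
+	/* Hypothetical caller, for illustration only. */
+	static int restore_validated_fpstate(union fpregs_state *st, u64 mask)
+	{
+		if (use_xsave())
+			return copy_kernel_to_xregs_err(&st->xsave, mask);
+		if (use_fxsr())
+			return copy_kernel_to_fxregs_err(&st->fxsave);
+		return copy_kernel_to_fregs_err(&st->fsave);
+	}
+
+Each helper returns 0 on success and a negative value if the restore
+instruction faults; the fixup handler just sets the error value and
+the caller decides how to recover.
+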
+Link: https://lkml.kernel.org/r/[email protected]
+(partially cherry picked from commit 926b21f37b072ae4c117052de45a975c6d468fec)
+Signed-off-by: Thomas Lamprecht <[email protected]>
+---
+ arch/x86/include/asm/fpu/internal.h | 43 +++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index fa2c93cb42a2..f3193ab0a2fb 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -122,6 +122,21 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
+ 	err;								\
+ })
+
++#define kernel_insn_err(insn, output, input...)			\
++({									\
++	int err;							\
++	asm volatile("1:" #insn "\n\t"					\
++		     "2:\n"						\
++		     ".section .fixup,\"ax\"\n"				\
++		     "3:  movl $-1,%[err]\n"				\
++		     "    jmp  2b\n"					\
++		     ".previous\n"					\
++		     _ASM_EXTABLE(1b, 3b)				\
++		     : [err] "=r" (err), output				\
++		     : "0"(0), input);					\
++	err;								\
++})
++
+ #define kernel_insn(insn, output, input...)				\
+ 	asm volatile("1:" #insn "\n\t"					\
+ 		     "2:\n"						\
+@@ -158,6 +173,14 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
+ 	}
+ }
+
++static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
++{
++	if (IS_ENABLED(CONFIG_X86_32))
++		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
++	else
++		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
++}
++
+ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
+ {
+ 	if (IS_ENABLED(CONFIG_X86_32))
+@@ -175,6 +198,11 @@ static inline void copy_kernel_to_fregs(struct fregs_state *fx)
+ 	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+ }
+
++static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
++{
++	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
++}
++
+ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
+ {
+ 	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+@@ -400,6 +428,21 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
+ 	return err;
+ }
+
++/*
++ * Restore xstate from kernel space xsave area, return an error code instead of
++ * an exception.
++ */
++static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
++{
++	u32 lmask = mask;
++	u32 hmask = mask >> 32;
++	int err;
++
++	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
++
++	return err;
++}
++
+ /*
+  * These must be called with preempt disabled. Returns
+  * 'true' if the FPU state is still intact and we can