0007-x86-fpu-backport-copy_kernel_to_XYZ_err-helpers.patch

From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Thomas Lamprecht <[email protected]>
Date: Wed, 3 Apr 2019 18:41:50 +0200
Subject: [PATCH] x86/fpu: backport copy_kernel_to_XYZ_err helpers
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Partial cherry-pick from the upstream 5.2 commit "x86/fpu: Restore from
kernel memory on the 64-bit path too":

commit 926b21f37b072ae4c117052de45a975c6d468fec
Author: Sebastian Andrzej Siewior <[email protected]>

Namely, only backport the added helpers, none of the semantic changes.

Relevant parts of the original commit message:

> In order to avoid that mess, copy the FPU state from userland, validate
> it and then load it. The copy_kernel_…() helpers are basically just
> like the old helpers except that they operate on kernel memory and the
> fault handler just sets the error value and the caller handles it.

Link: https://lkml.kernel.org/r/[email protected]
(partially cherry picked from commit 926b21f37b072ae4c117052de45a975c6d468fec)
Signed-off-by: Thomas Lamprecht <[email protected]>
---
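Note (below the "---" separator, so not applied by git am): this backport adds
only the helpers and no callers. As a rough orientation for reviewers, a minimal
usage sketch could look like the following; the caller name is hypothetical,
while use_xsave()/use_fxsr() and union fpregs_state are the existing definitions
in this header.

/*
 * Illustration only, not part of this patch: pick the *_err variant that
 * matches the CPU's FPU facilities and hand the fixup-provided error code
 * back to the caller instead of taking an unhandled fault.
 */
static int restore_fpregs_from_kernel_buf(union fpregs_state *state, u64 mask)
{
	if (use_xsave())
		return copy_kernel_to_xregs_err(&state->xsave, mask);
	if (use_fxsr())
		return copy_kernel_to_fxregs_err(&state->fxsave);
	return copy_kernel_to_fregs_err(&state->fsave);
}
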
 arch/x86/include/asm/fpu/internal.h | 43 +++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index fa2c93cb42a2..f3193ab0a2fb 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -122,6 +122,21 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
 	err;								\
 })
 
+#define kernel_insn_err(insn, output, input...)				\
+({									\
+	int err;							\
+	asm volatile("1:" #insn "\n\t"					\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:  movl $-1,%[err]\n"				\
+		     "    jmp  2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : [err] "=r" (err), output				\
+		     : "0"(0), input);					\
+	err;								\
+})
+
 #define kernel_insn(insn, output, input...)				\
 	asm volatile("1:" #insn "\n\t"					\
 		     "2:\n"						\
@@ -158,6 +173,14 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
 	}
 }
 
+static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
+{
+	if (IS_ENABLED(CONFIG_X86_32))
+		return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+	else
+		return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 {
 	if (IS_ENABLED(CONFIG_X86_32))
@@ -175,6 +198,11 @@ static inline void copy_kernel_to_fregs(struct fregs_state *fx)
 	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
+static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
+{
+	return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
 {
 	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
@@ -400,6 +428,21 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 	return err;
 }
 
+/*
+ * Restore xstate from kernel space xsave area, return an error code instead of
+ * an exception.
+ */
+static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
+{
+	u32 lmask = mask;
+	u32 hmask = mask >> 32;
+	int err;
+
+	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+
+	return err;
+}
+
 /*
  * These must be called with preempt disabled. Returns
  * 'true' if the FPU state is still intact and we can