Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/fpu/internal.h | 43 ++++++++++++++++
-rw-r--r--  arch/x86/kernel/fpu/signal.c        | 62 +++++++++++++-----
 2 files changed, 92 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index da75d7b3e37d..2cf04fbcba5d 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -121,6 +121,21 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
         err; \
 })
 
+#define kernel_insn_err(insn, output, input...) \
+({ \
+        int err; \
+        asm volatile("1:" #insn "\n\t" \
+                     "2:\n" \
+                     ".section .fixup,\"ax\"\n" \
+                     "3: movl $-1,%[err]\n" \
+                     "   jmp 2b\n" \
+                     ".previous\n" \
+                     _ASM_EXTABLE(1b, 3b) \
+                     : [err] "=r" (err), output \
+                     : "0"(0), input); \
+        err; \
+})
+
 #define kernel_insn(insn, output, input...) \
         asm volatile("1:" #insn "\n\t" \
                      "2:\n" \
@@ -149,6 +164,14 @@ static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
                 kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
+static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
+{
+        if (IS_ENABLED(CONFIG_X86_32))
+                return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+        else
+                return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
 static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
 {
         if (IS_ENABLED(CONFIG_X86_32))
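
The copy_kernel_to_fxregs_err() wrapper above builds on the kernel_insn_err() macro from the first hunk: a fault raised by the restore instruction is routed through the exception table into the .fixup stub, which stores -1 in err and resumes right after the faulting instruction. A minimal sketch of what the 64-bit branch expands to (illustrative only, not part of the patch; get_fx_buf() is a hypothetical source of the kernel copy):

        struct fxregs_state *fx = get_fx_buf(); /* hypothetical kernel buffer */
        int err;

        asm volatile("1: fxrstorq %[fx]\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"      /* fault path: report -1 ... */
                     "   jmp 2b\n"               /* ... and resume at 2:      */
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)        /* a fault at 1: lands on 3: */
                     : [err] "=r" (err), "=m" (*fx)
                     : "0"(0), [fx] "m" (*fx));  /* "0"(0) preloads err with 0 */
        return err;                              /* 0 on success, -1 on fault */
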
@@ -162,6 +185,11 @@ static inline void copy_kernel_to_fregs(struct fregs_state *fx)
         kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
+static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
+{
+        return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
 static inline int copy_user_to_fregs(struct fregs_state __user *fx)
 {
         return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
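
Both new _err wrappers in this file return 0 on success and -1 if the restore instruction faulted, so a caller can reject a corrupt frame instead of relying on a fixup-only exception. A hedged usage sketch (the surrounding caller is hypothetical, not from the patch):

        /* state holds a frame already copied in from user space */
        if (copy_kernel_to_fregs_err(&state->fsave)) {
                /* the CPU rejected the frame: reset to a clean FPU
                 * state and report the failure, much as the signal
                 * code below does with fpu__clear()                */
                fpu__clear(fpu);
                return -1;
        }
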
@@ -362,6 +390,21 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 }
 
 /*
+ * Restore xstate from kernel space xsave area, return an error code instead of
+ * an exception.
+ */
+static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
+{
+        u32 lmask = mask;
+        u32 hmask = mask >> 32;
+        int err;
+
+        XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
+
+        return err;
+}
+
+/*
  * These must be called with preempt disabled. Returns
  * 'true' if the FPU state is still intact and we can
  * keep registers active.
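
XRSTOR, like XSAVE, takes its requested-feature bitmap in EDX:EAX, which is why copy_kernel_to_xregs_err() splits the 64-bit mask into two 32-bit halves before handing them to XSTATE_OP(). A small worked example with illustrative values:

        u64 mask  = XFEATURE_MASK_FP | XFEATURE_MASK_SSE;  /* bits 0 and 1 */
        u32 lmask = mask;       /* low  half -> EAX = 0x00000003 */
        u32 hmask = mask >> 32; /* high half -> EDX = 0x00000000 */
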
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index c2ff43fbbd07..9ea1eaa4c9b1 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -234,7 +234,8 @@ sanitize_restored_xstate(union fpregs_state *state,
                  */
                 xsave->i387.mxcsr &= mxcsr_feature_mask;
 
-                convert_to_fxsr(&state->fxsave, ia32_env);
+                if (ia32_env)
+                        convert_to_fxsr(&state->fxsave, ia32_env);
         }
 }
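
The NULL check matters because the rewritten 64-bit path in the next hunk calls sanitize_restored_xstate() without a 32-bit environment block; only 32-bit fxstate frames still need the conversion back to the legacy FXSR layout. The two call shapes side by side (the &env form is the existing ia32 path, shown for contrast):

        /* ia32 frame: env was filled from the user frame, so convert */
        sanitize_restored_xstate(state, &env, xfeatures, fx_only);

        /* 64-bit path added below: no ia32 env, conversion is skipped */
        sanitize_restored_xstate(state, NULL, xfeatures, fx_only);
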
@@ -337,28 +338,63 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
                 kfree(tmp);
                 return err;
         } else {
+                union fpregs_state *state;
+                void *tmp;
                 int ret;
 
+                tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
+                if (!tmp)
+                        return -ENOMEM;
+                state = PTR_ALIGN(tmp, 64);
+
                 /*
                  * For 64-bit frames and 32-bit fsave frames, restore the user
                  * state to the registers directly (with exceptions handled).
                  */
-                if (use_xsave()) {
-                        if ((unsigned long)buf_fx % 64 || fx_only) {
-                                u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-                                copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-                                ret = copy_user_to_fxregs(buf_fx);
+                if ((unsigned long)buf_fx % 64)
+                        fx_only = 1;
+
+                if (use_xsave() && !fx_only) {
+                        u64 init_bv = xfeatures_mask & ~xfeatures;
+
+                        if (using_compacted_format()) {
+                                ret = copy_user_to_xstate(&state->xsave, buf_fx);
                         } else {
-                                u64 init_bv = xfeatures_mask & ~xfeatures;
-                                if (unlikely(init_bv))
-                                        copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-                                ret = copy_user_to_xregs(buf_fx, xfeatures);
+                                ret = __copy_from_user(&state->xsave, buf_fx, state_size);
+
+                                if (!ret && state_size > offsetof(struct xregs_state, header))
+                                        ret = validate_xstate_header(&state->xsave.header);
                         }
+                        if (ret)
+                                goto err_out;
+
+                        sanitize_restored_xstate(state, NULL, xfeatures, fx_only);
+
+                        if (unlikely(init_bv))
+                                copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                        ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
+
                 } else if (use_fxsr()) {
-                        ret = copy_user_to_fxregs(buf_fx);
-                } else
-                        ret = copy_user_to_fregs(buf_fx);
+                        ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
+                        if (ret)
+                                goto err_out;
+                        if (use_xsave()) {
+                                u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+                                copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                        }
+                        state->fxsave.mxcsr &= mxcsr_feature_mask;
+
+                        ret = copy_kernel_to_fxregs_err(&state->fxsave);
+                } else {
+                        ret = __copy_from_user(&state->fsave, buf_fx, state_size);
+                        if (ret)
+                                goto err_out;
+                        ret = copy_kernel_to_fregs_err(&state->fsave);
+                }
+
+err_out:
+                kfree(tmp);
                 if (ret) {
                         fpu__clear(fpu);
                         return -1;
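
Taken together, the rewritten slow path copies the signal frame into a 64-byte-aligned kernel buffer first and only then loads it into the registers: a page fault on user memory is taken safely inside __copy_from_user(), and a frame the CPU rejects comes back as an error code from the _err helpers instead of an in-kernel exception mid-restore. A condensed sketch of the xsave flow, assuming the helpers above (illustrative only):

        tmp = kzalloc(sizeof(*state) + fpu_kernel_xstate_size + 64, GFP_KERNEL);
        state = PTR_ALIGN(tmp, 64);     /* XRSTOR requires 64-byte alignment */

        ret = __copy_from_user(&state->xsave, buf_fx, state_size); /* faults handled here */
        if (!ret)
                ret = validate_xstate_header(&state->xsave.header); /* reject bogus masks */
        if (!ret)
                ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures); /* nonzero on fault */

        kfree(tmp);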