 arch/x86/include/asm/fpu/internal.h | 23
 arch/x86/kernel/fpu/core.c          |  6
 arch/x86/kernel/fpu/xstate.c        |  6
 arch/x86/kernel/process.c           |  2
 arch/x86/kernel/signal.c            |  2
 5 files changed, 22 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 0f17cd4e4e58..31bfda818f30 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -382,11 +382,17 @@ static inline void fpregs_deactivate(struct fpu *fpu)
__fpregs_deactivate_hw();
}
-static inline void drop_fpu(struct fpu *fpu)
+/*
+ * Drops current FPU state: deactivates the fpregs and
+ * the fpstate. NOTE: it still leaves previous contents
+ * in the fpregs in the eager-FPU case.
+ *
+ * This function can be used in cases where we know that
+ * a state-restore is coming: either an explicit one,
+ * or a reschedule.
+ */
+static inline void fpu__drop(struct fpu *fpu)
{
- /*
- * Forget coprocessor state..
- */
preempt_disable();
fpu->counter = 0;
@@ -412,13 +418,12 @@ static inline void restore_init_xstate(void)
}
/*
- * Reset the FPU state in the eager case and drop it in the lazy case (later use
- * will reinit it).
+ * Reset the FPU state back to init state.
*/
-static inline void fpu_reset_state(struct fpu *fpu)
+static inline void fpu__reset(struct fpu *fpu)
{
if (!use_eager_fpu())
- drop_fpu(fpu);
+ fpu__drop(fpu);
else
restore_init_xstate();
}
@@ -516,7 +521,7 @@ static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switc
{
if (fpu_switch.preload) {
if (unlikely(restore_fpu_checking(new_fpu)))
- fpu_reset_state(new_fpu);
+ fpu__reset(new_fpu);
}
}
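
[Illustrative sketch, not part of the patch: the new comments above describe the fpu__drop()/fpu__reset() contract. The following freestanding C model condenses that contract so it can be compiled and run in user space; struct fpu here is a hypothetical stand-in with only the fields the sketch needs, and use_eager_fpu() is stubbed as a fixed flag.]

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's per-task FPU bookkeeping. */
struct fpu {
	bool fpregs_active;	/* registers hold this task's state */
	bool fpstate_active;	/* in-memory fpstate is valid */
	unsigned int counter;	/* lazy-restore heuristic counter */
};

static bool use_eager_fpu(void) { return true; }	/* stub: eagerfpu=on */

/*
 * Model of fpu__drop(): only the bookkeeping is cleared; in the
 * eager case the hardware registers still hold stale contents, so
 * a state-restore must follow before the task uses the FPU again.
 */
static void fpu__drop(struct fpu *fpu)
{
	fpu->counter = 0;
	fpu->fpregs_active = false;
	fpu->fpstate_active = false;
}

/*
 * Model of fpu__reset(): lazy mode can simply drop the state (the
 * next use re-initializes it), while eager mode must load the init
 * state immediately, because no lazy-restore trap will follow.
 */
static void fpu__reset(struct fpu *fpu)
{
	if (!use_eager_fpu())
		fpu__drop(fpu);
	else
		printf("restore_init_xstate(): load clean init state\n");
}

int main(void)
{
	struct fpu f = { true, true, 3 };

	fpu__reset(&f);
	fpu__drop(&f);
	printf("fpregs_active=%d fpstate_active=%d counter=%u\n",
	       f.fpregs_active, f.fpstate_active, f.counter);
	return 0;
}
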
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index a2e2da2b08c5..bf217cde114d 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -116,7 +116,7 @@ void __kernel_fpu_end(void)
if (fpu->fpregs_active) {
if (WARN_ON(restore_fpu_checking(fpu)))
- fpu_reset_state(fpu);
+ fpu__reset(fpu);
} else {
__fpregs_deactivate_hw();
}
@@ -339,7 +339,7 @@ void fpu__restore(void)
kernel_fpu_disable();
fpregs_activate(fpu);
if (unlikely(restore_fpu_checking(fpu))) {
- fpu_reset_state(fpu);
+ fpu__reset(fpu);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
} else {
tsk->thread.fpu.counter++;
@@ -360,7 +360,7 @@ void fpu__clear(struct task_struct *tsk)
if (!use_eager_fpu()) {
/* FPU state will be reallocated lazily at the first use. */
- drop_fpu(fpu);
+ fpu__drop(fpu);
} else {
if (!fpu->fpstate_active) {
fpu__activate_curr(fpu);
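
[Illustrative sketch, not part of the patch: both __kernel_fpu_end() and fpu__restore() above use the same recovery pattern, which is modeled here. restore_state() and its int return convention are hypothetical stand-ins for restore_fpu_checking().]

#include <stdio.h>

static int restore_state(void) { return -1; }	/* stub: simulate failure */
static void reset_to_init(void) { puts("fall back to init state"); }

int main(void)
{
	/*
	 * If restoring the saved registers fails (e.g. the in-memory
	 * image is invalid), never continue with stale registers:
	 * reset to the init state instead.  In fpu__restore() the task
	 * additionally gets a SIGSEGV.
	 */
	if (restore_state())
		reset_to_init();
	return 0;
}
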
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index b8e5fee2aef3..5e3d9242bb95 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -401,7 +401,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
config_enabled(CONFIG_IA32_EMULATION));
if (!buf) {
- fpu_reset_state(fpu);
+ fpu__reset(fpu);
return 0;
}
@@ -449,7 +449,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
* We will be ready to restore/save the state only after
* fpu->fpstate_active is again set.
*/
- drop_fpu(fpu);
+ fpu__drop(fpu);
if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
__copy_from_user(&env, buf, sizeof(env))) {
@@ -474,7 +474,7 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
*/
user_fpu_begin();
if (restore_user_xstate(buf_fx, xfeatures, fx_only)) {
- fpu_reset_state(fpu);
+ fpu__reset(fpu);
return -1;
}
}
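
[Illustrative sketch, not part of the patch: the xstate.c hunk above drops the FPU state before copying the new state from the signal frame, and only marks the state valid again afterwards. The names below (xsave_buf, copy_from_user_model) are hypothetical stand-ins; the buffer size is arbitrary.]

#include <stdio.h>
#include <string.h>

struct xsave_buf { unsigned char bytes[64]; };	/* hypothetical size */

static int copy_from_user_model(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);	/* stand-in for __copy_from_user() */
	return 0;		/* 0 == success, as in the kernel */
}

int main(void)
{
	struct xsave_buf kernel_fpstate, user_frame = { { 0x7f } };
	int fpstate_active = 1;

	/*
	 * Order matters: deactivate (fpu__drop()) *before* copying, so
	 * that a context switch during the sleepable user-memory copy
	 * cannot save live registers over the half-filled fpstate.
	 * Only after the copy succeeds is fpstate_active set again.
	 */
	fpstate_active = 0;				/* fpu__drop() */
	if (copy_from_user_model(&kernel_fpstate, &user_frame,
				 sizeof(kernel_fpstate)) == 0)
		fpstate_active = 1;			/* state valid again */

	printf("fpstate_active=%d first_byte=%#x\n",
	       fpstate_active, kernel_fpstate.bytes[0]);
	return 0;
}
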
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5d37c26fa89f..dde263fb2031 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -110,7 +110,7 @@ void exit_thread(void)
kfree(bp);
}
- drop_fpu(fpu);
+ fpu__drop(fpu);
}
void flush_thread(void)
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 59cfc9c97491..6bf512390536 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -667,7 +667,7 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* Ensure the signal handler starts with the new fpu state.
*/
if (fpu->fpstate_active)
- fpu_reset_state(fpu);
+ fpu__reset(fpu);
}
signal_setup_done(failed, ksig, stepping);
}
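
[Illustrative sketch, not part of the patch: the signal.c hunk above ensures a signal handler starts with a clean FPU state once the interrupted context has been saved into the signal frame. setup_handler_frame() is a hypothetical stand-in for the frame-setup path in handle_signal().]

#include <stdbool.h>
#include <stdio.h>

static bool fpstate_active = true;

static void setup_handler_frame(void) { puts("signal frame set up"); }
static void reset_for_handler(void)   { puts("handler sees init FPU state"); }

int main(void)
{
	setup_handler_frame();
	/*
	 * The handler must not inherit the interrupted context's FPU
	 * registers; the old state lives in the signal frame and is
	 * restored at sigreturn, so reset the live state here.
	 */
	if (fpstate_active)
		reset_for_handler();
	return 0;
}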