-rw-r--r--  arch/x86/include/asm/fpu/internal.h  8
-rw-r--r--  arch/x86/kernel/fpu/core.c           4
-rw-r--r--  arch/x86/kvm/x86.c                   2
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 31bfda818f30..c09aea145e09 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -289,7 +289,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
extern void fpu__save(struct fpu *fpu);
-static inline int fpu_restore_checking(struct fpu *fpu)
+static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
{
if (use_xsave())
return fpu_xrstor_checking(&fpu->state.xsave);
@@ -299,7 +299,7 @@ static inline int fpu_restore_checking(struct fpu *fpu)
return frstor_checking(&fpu->state.fsave);
}
-static inline int restore_fpu_checking(struct fpu *fpu)
+static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
{
/*
* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
@@ -314,7 +314,7 @@ static inline int restore_fpu_checking(struct fpu *fpu)
: : [addr] "m" (fpu->fpregs_active));
}
- return fpu_restore_checking(fpu);
+ return __copy_fpstate_to_fpregs(fpu);
}
/*
@@ -520,7 +520,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
if (fpu_switch.preload) {
- if (unlikely(restore_fpu_checking(new_fpu)))
+ if (unlikely(copy_fpstate_to_fpregs(new_fpu)))
fpu__reset(new_fpu);
}
}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index bf217cde114d..14d8e33d9fe0 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -115,7 +115,7 @@ void __kernel_fpu_end(void)
struct fpu *fpu = &current->thread.fpu;
if (fpu->fpregs_active) {
- if (WARN_ON(restore_fpu_checking(fpu)))
+ if (WARN_ON(copy_fpstate_to_fpregs(fpu)))
fpu__reset(fpu);
} else {
__fpregs_deactivate_hw();
@@ -338,7 +338,7 @@ void fpu__restore(void)
/* Avoid __kernel_fpu_begin() right after fpregs_activate() */
kernel_fpu_disable();
fpregs_activate(fpu);
- if (unlikely(restore_fpu_checking(fpu))) {
+ if (unlikely(copy_fpstate_to_fpregs(fpu))) {
fpu__reset(fpu);
force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
} else {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5c61aae277f9..f4438179398b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7030,7 +7030,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
- fpu_restore_checking(&vcpu->arch.guest_fpu);
+ __copy_fpstate_to_fpregs(&vcpu->arch.guest_fpu);
trace_kvm_fpu(1);
}
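
For orientation, here is a minimal sketch of how the two renamed helpers fit together after this patch, pieced together from the internal.h hunks above. The use_fxsr()/fxrstor_checking() branch and the CPU-bug check guarding the AMD FDP/FIP/FOP workaround are not visible in the diff context and are assumed here, so treat this as illustrative rather than the verbatim kernel source.

/*
 * Sketch (not verbatim kernel code): __copy_fpstate_to_fpregs() picks the
 * raw restore instruction for the active FPU state format, and
 * copy_fpstate_to_fpregs() wraps it with the AMD K7/K8 FDP/FIP/FOP
 * workaround before calling it.
 */
static inline int __copy_fpstate_to_fpregs(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(&fpu->state.xsave);
	else if (use_fxsr())			/* assumed branch, not in the hunk */
		return fxrstor_checking(&fpu->state.fxsave);
	else
		return frstor_checking(&fpu->state.fsave);
}

static inline int copy_fpstate_to_fpregs(struct fpu *fpu)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an
	 * exception is pending; the dummy fildl on fpu->fpregs_active
	 * (shown in the hunk above) clears that stale state first.
	 * The guarding CPU-bug check is omitted from this sketch.
	 */
	return __copy_fpstate_to_fpregs(fpu);
}

Call sites keep the "checking" semantics: switch_fpu_finish(), __kernel_fpu_end() and fpu__restore() test the return value and fall back to fpu__reset() if the restore faults, while kvm_load_guest_fpu() uses the bare __copy_fpstate_to_fpregs() variant, as the core.c and x86.c hunks show.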