author      Oleg Nesterov <oleg@redhat.com>          2015-01-15 20:19:43 +0100
committer   Thomas Gleixner <tglx@linutronix.de>     2015-01-20 13:53:07 +0100
commit      14e153ef75eecae8fd0738ffb42120f4962a00cd (patch)
tree        22fb7872fddb4c639d882ca602f4240c38acfaae /arch/x86
parent      6ca7a8a15035add0a4f9b2fd658118d41dbeb20c (diff)
x86, fpu: Introduce per-cpu in_kernel_fpu state
interrupted_kernel_fpu_idle() tries to detect whether kernel_fpu_begin() is safe or not. In particular, it should obviously deny a nested kernel_fpu_begin(), and this logic looks very confusing.

If use_eager_fpu() == T, we rely on a) the __thread_has_fpu() check in interrupted_kernel_fpu_idle(), and b) the fact that _begin() does __thread_clear_has_fpu().

Otherwise we demand that the interrupted task has no FPU if it is in kernel mode; this works because __kernel_fpu_begin() does clts() and interrupted_kernel_fpu_idle() checks X86_CR0_TS.

Add the per-cpu "bool in_kernel_fpu" variable, and change this code to check/set/clear it. This allows us to do more cleanups and fixes; see the next changes.

The patch also moves WARN_ON_ONCE() under preempt_disable(), just to make this_cpu_read() look better; this is not really needed. In fact, I think we should move it into __kernel_fpu_begin().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: matt.fleming@intel.com
Cc: bp@suse.de
Cc: pbonzini@redhat.com
Cc: luto@amacapital.net
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Suresh Siddha <sbsiddha@gmail.com>
Link: http://lkml.kernel.org/r/20150115191943.GB27332@redhat.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
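[Editorial sketch] For reference, a minimal reconstruction of the i387.c functions as they read after this patch, pieced together from the hunks below. Context lines that the diff does not show are abbreviated, and the lazy-FPU return expression is recalled from the surrounding code of this era rather than taken from this patch, so treat it as a hedged sketch:

static DEFINE_PER_CPU(bool, in_kernel_fpu);

static inline bool interrupted_kernel_fpu_idle(void)
{
        /* A nested kernel_fpu_begin() on this CPU is now denied explicitly. */
        if (this_cpu_read(in_kernel_fpu))
                return false;

        if (use_eager_fpu())
                return __thread_has_fpu(current);

        /* Lazy FPU (expression recalled, not shown in this diff): safe only
         * if the interrupted code does not own the FPU, i.e. CR0.TS is set. */
        return !__thread_has_fpu(current) && (read_cr0() & X86_CR0_TS);
}

void __kernel_fpu_begin(void)
{
        struct task_struct *me = current;

        this_cpu_write(in_kernel_fpu, true);    /* mark this CPU as inside a kernel-FPU section */

        if (__thread_has_fpu(me)) {
                __thread_clear_has_fpu(me);
                __save_init_fpu(me);
        }
        /* ... rest unchanged ... */
}

void __kernel_fpu_end(void)
{
        /* ... restore the task's FPU state or stts(), unchanged ... */

        this_cpu_write(in_kernel_fpu, false);   /* leaving the kernel-FPU section */
}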
Diffstat (limited to 'arch/x86')
-rw-r--r--   arch/x86/include/asm/i387.h   2
-rw-r--r--   arch/x86/kernel/i387.c        9
2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
index ed8089d69094..5e275d31802e 100644
--- a/arch/x86/include/asm/i387.h
+++ b/arch/x86/include/asm/i387.h
@@ -40,8 +40,8 @@ extern void __kernel_fpu_end(void);
 static inline void kernel_fpu_begin(void)
 {
-        WARN_ON_ONCE(!irq_fpu_usable());
         preempt_disable();
+        WARN_ON_ONCE(!irq_fpu_usable());
         __kernel_fpu_begin();
 }
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index a9a4229f6161..a81572338243 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -19,6 +19,8 @@
 #include <asm/fpu-internal.h>
 #include <asm/user.h>
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
 /*
  * Were we in an interrupt that interrupted kernel mode?
  *
@@ -33,6 +35,9 @@
  */
 static inline bool interrupted_kernel_fpu_idle(void)
 {
+        if (this_cpu_read(in_kernel_fpu))
+                return false;
+
         if (use_eager_fpu())
                 return __thread_has_fpu(current);
@@ -73,6 +78,8 @@ void __kernel_fpu_begin(void)
 {
         struct task_struct *me = current;
+        this_cpu_write(in_kernel_fpu, true);
+
         if (__thread_has_fpu(me)) {
                 __thread_clear_has_fpu(me);
                 __save_init_fpu(me);
@@ -99,6 +106,8 @@ void __kernel_fpu_end(void)
         } else {
                 stts();
         }
+
+        this_cpu_write(in_kernel_fpu, false);
 }
 EXPORT_SYMBOL(__kernel_fpu_end);
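[Editorial sketch] For illustration, how a typical kernel-FPU user interacts with these primitives; example_simd_work() and its fallback are hypothetical names, not part of this patch. Code that may run in interrupt context checks irq_fpu_usable() first, and it is exactly that check which the new per-cpu flag turns into a reliable "no nesting on this CPU" test:

/* Hypothetical caller, for illustration only; not part of this patch. */
static void example_simd_work(void)
{
        if (!irq_fpu_usable()) {
                /* e.g. fall back to a scalar implementation */
                return;
        }

        kernel_fpu_begin();     /* disables preemption, sets in_kernel_fpu */
        /* ... SSE/AVX work; the interrupted task's FPU state was saved ... */
        kernel_fpu_end();       /* clears in_kernel_fpu, re-enables preemption */
}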