 arch/x86/include/asm/fpu-internal.h | 10 +++++-----
 arch/x86/include/asm/processor.h    | 18 +++++++++---------
 arch/x86/kernel/process.c           |  2 +-
 arch/x86/kernel/traps.c             |  2 +-
 4 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 02e0e97d8be7..f85d21b68901 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -384,7 +384,7 @@ static inline void drop_fpu(struct task_struct *tsk)
 	 * Forget coprocessor state..
 	 */
 	preempt_disable();
-	tsk->thread.fpu_counter = 0;
+	tsk->thread.fpu.counter = 0;
 
 	if (__thread_has_fpu(tsk)) {
 		/* Ignore delayed exceptions from user space */
@@ -441,7 +441,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = tsk_used_math(new) &&
-		      (use_eager_fpu() || new->thread.fpu_counter > 5);
+		      (use_eager_fpu() || new->thread.fpu.counter > 5);
 
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
@@ -454,16 +454,16 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->thread.fpu_counter++;
+			new->thread.fpu.counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->thread.fpu_counter = 0;
+		old->thread.fpu.counter = 0;
 		task_disable_lazy_fpu_restore(old);
 		if (fpu.preload) {
-			new->thread.fpu_counter++;
+			new->thread.fpu.counter++;
 			if (fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2dc08c231a9a..64d6b5d97ce9 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -433,6 +433,15 @@ struct fpu {
 	unsigned int last_cpu;
 	unsigned int has_fpu;
 	union thread_xstate *state;
+	/*
+	 * This counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char counter;
 };
 
 #ifdef CONFIG_X86_64
@@ -535,15 +544,6 @@ struct thread_struct {
 	unsigned long		iopl;
 	/* Max allowed port in the bitmap, in bytes: */
 	unsigned		io_bitmap_max;
-	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
-	 */
-	unsigned char fpu_counter;
 };
 
 /*
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index fd4aa56335de..c7793addc237 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -87,7 +87,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
 	*dst = *src;
 
-	dst->thread.fpu_counter = 0;
+	dst->thread.fpu.counter = 0;
 	dst->thread.fpu.has_fpu = 0;
 	dst->thread.fpu.state = NULL;
 	task_disable_lazy_fpu_restore(dst);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index cf9c9627be19..231aa579d9cd 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -863,7 +863,7 @@ void math_state_restore(void)
 		fpu_reset_state(tsk);
 		force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
 	} else {
-		tsk->thread.fpu_counter++;
+		tsk->thread.fpu.counter++;
 	}
 	kernel_fpu_enable();
 }
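
The comment block moved by this patch describes the heuristic the counter implements: both the lazy trap path (math_state_restore) and the context-switch preload path bump the counter when the task uses math, preloading kicks in once the counter exceeds 5, and the unsigned char wrap returns to lazy behavior after 256 consecutive FPU-using switches. The standalone sketch below is a minimal user-space model of that heuristic only; struct fpu_model, the stub predicates, and the demo loop are hypothetical names invented for illustration, with just the "counter > 5" threshold and the unsigned char wrap taken from the diff above.

#include <stdio.h>
#include <stdbool.h>

/*
 * Minimal user-space model of the lazy-FPU preload heuristic.
 * Only the "counter > 5" threshold and the unsigned char wrap-around
 * come from the kernel code above; everything else is a stand-in.
 */
struct fpu_model {
	unsigned char counter;	/* wraps to 0 after 256 FPU-using switches */
};

static bool use_eager_fpu(void) { return false; }	/* assume lazy mode */
static bool tsk_used_math(void) { return true; }	/* task does use FPU */

/* One context switch: decide eager preload vs. lazy trap-on-first-use. */
static bool context_switch(struct fpu_model *fpu)
{
	bool preload = tsk_used_math() &&
		       (use_eager_fpu() || fpu->counter > 5);

	/*
	 * Both paths bump the counter when math is used: the preload path
	 * in switch_fpu_prepare(), the lazy path in math_state_restore().
	 */
	fpu->counter++;
	return preload;
}

int main(void)
{
	struct fpu_model fpu = { .counter = 0 };
	int i;

	for (i = 0; i < 300; i++) {
		bool preload = context_switch(&fpu);

		/* Show the lazy->eager transition and the wrap-around. */
		if (i < 8 || (i > 252 && i < 259))
			printf("switch %3d: counter=%3u preload=%d\n",
			       i, fpu.counter, preload);
	}
	return 0;
}

Running the sketch, switches 0-5 take the lazy path, switch 6 onward preloads eagerly, and after the counter wraps at switch 255 the behavior turns lazy again for a few switches, which is the bursty-app case the comment describes.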