author     Anton Blanchard <anton@samba.org>      2015-10-29 11:44:08 +1100
committer  Michael Ellerman <mpe@ellerman.id.au>  2015-12-02 19:34:26 +1100
commit     c208505900b232ecdc81dee54cb3a032e75d88d6 (patch)
tree       e63fe696c5a8404bc4257204502f0d6f9ed3ccaf /arch/powerpc/kernel
parent     1f2e25b2d552cade43eacb2edc4e7f01c1cfecb3 (diff)
powerpc: create giveup_all()
Create a single function that gives everything up (FP, VMX, VSX, SPE).

Doing this all at once means we only do one MSR write.

A context switch microbenchmark using yield():

http://ozlabs.org/~anton/junkcode/context_switch2.c

./context_switch2 --test=yield --fp --altivec --vector 0 0

shows an improvement of 3% on POWER8.

Signed-off-by: Anton Blanchard <anton@samba.org>
[mpe: giveup_all() needs to be EXPORT_SYMBOL'ed]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
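For reference, a minimal sketch of what a yield()-based context switch microbenchmark can look like. This is an illustrative stand-in for the linked context_switch2.c, not the actual tool; the names and structure here are assumptions, and the --fp/--altivec/--vector register-dirtying options of the real benchmark (which make each switch exercise the giveup paths changed below) are omitted.

/*
 * Illustrative sketch only, not the benchmark cited above: two tasks
 * pinned to one CPU ping-pong via sched_yield(), so each iteration
 * tends to force a context switch through __switch_to().
 */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void pin_to_cpu(int cpu)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (sched_setaffinity(0, sizeof(mask), &mask))
		perror("sched_setaffinity");
}

int main(void)
{
	unsigned long i, iterations = 10000000;
	pid_t child;

	pin_to_cpu(0);

	child = fork();
	if (child == 0) {
		/* Partner task: yield back to the parent forever. */
		while (1)
			sched_yield();
	}

	/* Time this loop externally (e.g. with time(1)) to compare kernels. */
	for (i = 0; i < iterations; i++)
		sched_yield();

	printf("%lu yields completed\n", iterations);
	kill(child, SIGKILL);
	return 0;
}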
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/process.c | 76
1 file changed, 61 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9f8444b84dde..4c087b9ed2d6 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -308,6 +308,65 @@ void flush_spe_to_thread(struct task_struct *tsk)
}
#endif /* CONFIG_SPE */
+static unsigned long msr_all_available;
+
+static int __init init_msr_all_available(void)
+{
+#ifdef CONFIG_PPC_FPU
+ msr_all_available |= MSR_FP;
+#endif
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ msr_all_available |= MSR_VEC;
+#endif
+#ifdef CONFIG_VSX
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr_all_available |= MSR_VSX;
+#endif
+#ifdef CONFIG_SPE
+ if (cpu_has_feature(CPU_FTR_SPE))
+ msr_all_available |= MSR_SPE;
+#endif
+
+ return 0;
+}
+early_initcall(init_msr_all_available);
+
+void giveup_all(struct task_struct *tsk)
+{
+ unsigned long usermsr;
+
+ if (!tsk->thread.regs)
+ return;
+
+ usermsr = tsk->thread.regs->msr;
+
+ if ((usermsr & msr_all_available) == 0)
+ return;
+
+ msr_check_and_set(msr_all_available);
+
+#ifdef CONFIG_PPC_FPU
+ if (usermsr & MSR_FP)
+ __giveup_fpu(tsk);
+#endif
+#ifdef CONFIG_ALTIVEC
+ if (usermsr & MSR_VEC)
+ __giveup_altivec(tsk);
+#endif
+#ifdef CONFIG_VSX
+ if (usermsr & MSR_VSX)
+ __giveup_vsx(tsk);
+#endif
+#ifdef CONFIG_SPE
+ if (usermsr & MSR_SPE)
+ __giveup_spe(tsk);
+#endif
+
+ msr_check_and_clear(msr_all_available);
+}
+EXPORT_SYMBOL(giveup_all);
+
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
unsigned long error_code, int signal_code, int breakpt)
@@ -839,21 +898,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
__switch_to_tm(prev);
- if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
- giveup_fpu(prev);
-#ifdef CONFIG_ALTIVEC
- if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
- giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
- if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
- /* VMX and FPU registers are already saved here */
- __giveup_vsx(prev);
-#endif /* CONFIG_VSX */
-#ifdef CONFIG_SPE
- if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
- giveup_spe(prev);
-#endif /* CONFIG_SPE */
+ /* Save FPU, Altivec, VSX and SPE state */
+ giveup_all(prev);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
switch_booke_debug_regs(&new->thread.debug);