author     James Hogan <james.hogan@imgtec.com>      2016-06-23 17:34:43 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>       2016-07-05 16:08:51 +0200
commit     38ea7a715d43752e1c53d5a0c3cbab5e321f22f7 (patch)
tree       618dd624c887e5c9a24c0fd3b5ec07832418c7df /arch/mips
parent     d37f4038d16273087bdc60387807b90a8c06da7f (diff)
MIPS: KVM: Check MSA presence at uasm time
Check for presence of MSA at uasm assembly time rather than at runtime in
the generated KVM host entry code. This optimises the guest exit path by
eliminating the MSA code entirely if not present, and eliminating the read
of Config3.MSAP and conditional branch if MSA is present.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
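For context, a minimal standalone sketch of the pattern this commit applies:
specialising the generated entry code while it is being assembled, instead of
emitting a runtime feature probe. The emit(), build_exit_msa_runtime_check(),
build_exit_msa_uasm_time() helpers and the host_has_msa flag below are
hypothetical, illustration-only names, not kernel interfaces; the real entry
code uses the uasm_* helpers and cpu_has_msa as shown in the diff further down.

/*
 * Illustration only: a toy instruction "emitter" contrasting a runtime
 * MSA probe with a decision taken while the code is generated.
 * emit(), build_exit_msa_runtime_check(), build_exit_msa_uasm_time()
 * and host_has_msa are hypothetical names, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

static void emit(const char *insn)
{
	printf("\t%s\n", insn);
}

/* Old shape: always emit the MSA path, guarded by Config3.MSAP at runtime. */
static void build_exit_msa_runtime_check(void)
{
	emit("mfc0   $t0, Config3");
	emit("ext    $t0, $t0, 28, 1   # MIPS_CONF3_MSAP");
	emit("beqz   $t0, 1f           # no MSA on this CPU");
	emit("nop");
	emit("mfc0   $t0, Config5");
	emit("ext    $t0, $t0, 27, 1   # MIPS_CONF5_MSAEN");
	emit("beqz   $t0, 1f           # MSA not enabled");
	emit("nop");
	emit("cfcmsa $t0, MSACSR");
	emit("sw     $t0, msacsr_off($k1)");
	emit("ctcmsa MSACSR, $zero");
	emit("1:");
}

/* New shape: if the host lacks MSA, generate no MSA code at all. */
static void build_exit_msa_uasm_time(bool host_has_msa)
{
	if (!host_has_msa)
		return;
	emit("mfc0   $t0, Config5");
	emit("ext    $t0, $t0, 27, 1   # MIPS_CONF5_MSAEN");
	emit("beqz   $t0, 1f           # MSA present but not enabled");
	emit("nop");
	emit("cfcmsa $t0, MSACSR");
	emit("sw     $t0, msacsr_off($k1)");
	emit("ctcmsa MSACSR, $zero");
	emit("1:");
}

int main(void)
{
	puts("runtime Config3.MSAP check (old):");
	build_exit_msa_runtime_check();
	puts("\nuasm-time check, host without MSA (new):");
	build_exit_msa_uasm_time(false);
	return 0;
}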
Diffstat (limited to 'arch/mips')
-rw-r--r--   arch/mips/kvm/entry.c   35
1 file changed, 15 insertions(+), 20 deletions(-)
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index c0d9f551c1c1..53e1e576d18a 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -55,7 +55,6 @@
#define C0_CAUSE 13, 0
#define C0_EPC 14, 0
#define C0_EBASE 15, 1
-#define C0_CONFIG3 16, 3
#define C0_CONFIG5 16, 5
#define C0_DDATA_LO 28, 3
#define C0_ERROREPC 30, 0
@@ -409,25 +408,21 @@ void *kvm_mips_build_exit(void *addr)
uasm_l_fpu_1(&l, p);
}
-#ifdef CONFIG_CPU_HAS_MSA
- /*
- * If MSA is enabled, save MSACSR and clear it so that later
- * instructions don't trigger MSAFPE for pending exceptions.
- */
- uasm_i_mfc0(&p, T0, C0_CONFIG3);
- uasm_i_ext(&p, T0, T0, 28, 1); /* MIPS_CONF3_MSAP */
- uasm_il_beqz(&p, &r, T0, label_msa_1);
- uasm_i_nop(&p);
- uasm_i_mfc0(&p, T0, C0_CONFIG5);
- uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
- uasm_il_beqz(&p, &r, T0, label_msa_1);
- uasm_i_nop(&p);
- uasm_i_cfcmsa(&p, T0, MSA_CSR);
- uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
- K1);
- uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
- uasm_l_msa_1(&l, p);
-#endif
+ if (cpu_has_msa) {
+ /*
+ * If MSA is enabled, save MSACSR and clear it so that later
+ * instructions don't trigger MSAFPE for pending exceptions.
+ */
+ uasm_i_mfc0(&p, T0, C0_CONFIG5);
+ uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
+ uasm_il_beqz(&p, &r, T0, label_msa_1);
+ uasm_i_nop(&p);
+ uasm_i_cfcmsa(&p, T0, MSA_CSR);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
+ K1);
+ uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
+ uasm_l_msa_1(&l, p);
+ }
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));