path: root/arch/powerpc/kvm
author	Bin Lu <lblulb@linux.vnet.ibm.com>	2017-02-21 21:12:36 +0800
committer	Paul Mackerras <paulus@ozlabs.org>	2017-04-20 11:36:41 +1000
commit	6f63e81bda98cbb549b01faf978884692ded438d (patch)
tree	ed26d38aacc94c14990ebf6ee2979530bc96e7b3 /arch/powerpc/kvm
parent	307d927967007acef98cfd3f0639c7a4bf234ede (diff)
KVM: PPC: Book3S: Add MMIO emulation for FP and VSX instructions
This patch provides MMIO load/store emulation for the FP and VSX data types: double, vector unsigned/signed char, vector unsigned/signed short, vector unsigned/signed int, and vector double.

The instructions that this adds emulation for are:

- ldx, ldux, lwax,
- lfs, lfsx, lfsu, lfsux, lfd, lfdx, lfdu, lfdux,
- stfs, stfsx, stfsu, stfsux, stfd, stfdx, stfdu, stfdux, stfiwx,
- lxsdx, lxsspx, lxsiwax, lxsiwzx, lxvd2x, lxvw4x, lxvdsx,
- stxsdx, stxsspx, stxsiwx, stxvd2x, stxvw4x

[paulus@ozlabs.org - some cleanups, fixes and rework; make it compile for Book E; fix build when PR KVM is built in]

Signed-off-by: Bin Lu <lblulb@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
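As an aside for readers (not part of the patch): the core idea behind the multi-copy VSX handling -- mmio_vsx_copy_nums / mmio_vsx_offset plus the endian-dependent doubleword placement done by kvmppc_get_vsr_dword_offset() -- can be shown with a small, self-contained C sketch. The names fake_vsr, vsr_dword_offset() and mmio_read8() below are hypothetical stand-ins, used only to illustrate how a 16-byte lxvd2x-style access is split into two 8-byte MMIO copies.

/*
 * Illustrative sketch only -- not part of the patch.  It mimics the
 * idea behind kvmppc_get_vsr_dword_offset() and the copy loop in
 * kvmppc_handle_vsx_load(): a 16-byte lxvd2x-style access is emulated
 * as two 8-byte MMIO copies, and the destination doubleword index
 * depends on host endianness.  All names below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_vsr {
	uint64_t dw[2];          /* one 128-bit VSX register, as two doublewords */
};

static int vsr_dword_offset(int index)
{
	if (index != 0 && index != 1)
		return -1;
#ifdef __BIG_ENDIAN__
	return index;            /* BE: element 0 is the high doubleword */
#else
	return 1 - index;        /* LE: element order is reversed */
#endif
}

/* Stand-in for completing one MMIO read: return 8 bytes of "device" memory. */
static uint64_t mmio_read8(const uint8_t *dev, unsigned long off)
{
	uint64_t v;

	memcpy(&v, dev + off, sizeof(v));
	return v;
}

int main(void)
{
	uint8_t device[16];
	struct fake_vsr vsr = { { 0, 0 } };
	int copies = 2;          /* like mmio_vsx_copy_nums for lxvd2x */
	int elem = 0;            /* like mmio_vsx_offset, bumped per copy */
	unsigned long paddr = 0; /* like paddr_accessed */
	int i;

	for (i = 0; i < 16; i++)
		device[i] = (uint8_t)i;

	while (copies) {
		int off = vsr_dword_offset(elem);

		if (off < 0)
			return 1;
		vsr.dw[off] = mmio_read8(device, paddr);
		paddr += 8;      /* advance by run->mmio.len per completed copy */
		copies--;
		elem++;
	}

	printf("dw[0]=%016llx dw[1]=%016llx\n",
	       (unsigned long long)vsr.dw[0],
	       (unsigned long long)vsr.dw[1]);
	return 0;
}

On a little-endian host the first 8-byte copy lands in dw[1] and the second in dw[0], which is the swapped doubleword placement that the LXVD2X comment in emulate_loadstore.c describes.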
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--	arch/powerpc/kvm/emulate_loadstore.c	335
-rw-r--r--	arch/powerpc/kvm/powerpc.c	318
2 files changed, 646 insertions, 7 deletions
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 6d3c0ee1d744..9cda1b9dbc51 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -34,6 +34,30 @@
#include "timing.h"
#include "trace.h"
+#ifdef CONFIG_PPC_FPU
+static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
+{
+ if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
+ kvmppc_core_queue_fpunavail(vcpu);
+ return true;
+ }
+
+ return false;
+}
+#endif /* CONFIG_PPC_FPU */
+
+#ifdef CONFIG_VSX
+static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
+{
+ if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
+ kvmppc_core_queue_vsx_unavail(vcpu);
+ return true;
+ }
+
+ return false;
+}
+#endif /* CONFIG_VSX */
+
/* XXX to do:
* lhax
* lhaux
@@ -66,6 +90,19 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
rs = get_rs(inst);
rt = get_rt(inst);
+ /*
+ * if mmio_vsx_tx_sx_enabled == 0, copy data between
+ * VSR[0..31] and memory
+ * if mmio_vsx_tx_sx_enabled == 1, copy data between
+ * VSR[32..63] and memory
+ */
+ vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
+ vcpu->arch.mmio_vsx_copy_nums = 0;
+ vcpu->arch.mmio_vsx_offset = 0;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
+ vcpu->arch.mmio_sp64_extend = 0;
+ vcpu->arch.mmio_sign_extend = 0;
+
switch (get_op(inst)) {
case 31:
switch (get_xop(inst)) {
@@ -157,6 +194,230 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
2, 0);
break;
+ case OP_31_XOP_LDX:
+ emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+ break;
+
+ case OP_31_XOP_LDUX:
+ emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_LWAX:
+ emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
+ break;
+
+#ifdef CONFIG_PPC_FPU
+ case OP_31_XOP_LFSX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ break;
+
+ case OP_31_XOP_LFSUX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_LFDX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 8, 1);
+ break;
+
+ case OP_31_XOP_LFDUX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_STFSX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs), 4, 1);
+ break;
+
+ case OP_31_XOP_STFSUX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs), 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_STFDX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 8, 1);
+ break;
+
+ case OP_31_XOP_STFDUX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_31_XOP_STFIWX:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 4, 1);
+ break;
+#endif
+
+#ifdef CONFIG_VSX
+ case OP_31_XOP_LXSDX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+ break;
+
+ case OP_31_XOP_LXSSPX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+ break;
+
+ case OP_31_XOP_LXSIWAX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 4, 1, 1);
+ break;
+
+ case OP_31_XOP_LXSIWZX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+ break;
+
+ case OP_31_XOP_LXVD2X:
+ /*
+ * In this case, MMIO emulation takes the following path:
+ *
+ * Step 1: exit from the guest on the page fault; KVM then saves
+ * the guest VSRs (see guest_exit_cont->store_fp_state->SAVE_32VSRS
+ * for reference).
+ *
+ * Step 2: copy the data between memory and the vcpu.
+ * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X we use
+ * 2 copies * 8 bytes or 4 copies * 4 bytes
+ * to emulate one 16-byte access.
+ * There is also an endianness issue here: mind the memory
+ * layout. See the LXVD2X_ROT/STXVD2X_ROT macros for reference.
+ * If the host is little-endian, KVM calls XXSWAPD for
+ * LXVD2X_ROT/STXVD2X_ROT, so the doubleword positions in
+ * memory are swapped.
+ *
+ * Step 3: return to the guest; KVM restores the registers
+ * (see kvmppc_hv_entry->load_fp_state->REST_32VSRS
+ * for reference).
+ */
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 2;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+ break;
+
+ case OP_31_XOP_LXVW4X:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 4;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 4, 1, 0);
+ break;
+
+ case OP_31_XOP_LXVDSX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type =
+ KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
+ emulated = kvmppc_handle_vsx_load(run, vcpu,
+ KVM_MMIO_REG_VSX|rt, 8, 1, 0);
+ break;
+
+ case OP_31_XOP_STXSDX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 8, 1);
+ break;
+
+ case OP_31_XOP_STXSSPX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 4, 1);
+ break;
+
+ case OP_31_XOP_STXSIWX:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_offset = 1;
+ vcpu->arch.mmio_vsx_copy_nums = 1;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 4, 1);
+ break;
+
+ case OP_31_XOP_STXVD2X:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 2;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 8, 1);
+ break;
+
+ case OP_31_XOP_STXVW4X:
+ if (kvmppc_check_vsx_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_vsx_copy_nums = 4;
+ vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ rs, 4, 1);
+ break;
+#endif /* CONFIG_VSX */
default:
emulated = EMULATE_FAIL;
break;
@@ -167,7 +428,45 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
break;
- /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
+#ifdef CONFIG_PPC_FPU
+ case OP_STFS:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 4, 1);
+ break;
+
+ case OP_STFSU:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_STFD:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 8, 1);
+ break;
+
+ case OP_STFDU:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_store(run, vcpu,
+ VCPU_FPR(vcpu, rs),
+ 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+#endif
+
+ /* TBD: Add support for other 64 bit load variants like ldu etc. */
case OP_LD:
rt = get_rt(inst);
emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
@@ -252,6 +551,40 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
break;
+#ifdef CONFIG_PPC_FPU
+ case OP_LFS:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ break;
+
+ case OP_LFSU:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ vcpu->arch.mmio_sp64_extend = 1;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 4, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+
+ case OP_LFD:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 8, 1);
+ break;
+
+ case OP_LFDU:
+ if (kvmppc_check_fp_disabled(vcpu))
+ return EMULATE_DONE;
+ emulated = kvmppc_handle_load(run, vcpu,
+ KVM_MMIO_REG_FPR|rt, 8, 1);
+ kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
+ break;
+#endif
+
default:
emulated = EMULATE_FAIL;
break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 0e42aa8a279f..6c7244879bfe 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -37,6 +37,7 @@
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
+#include <asm/switch_to.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -801,6 +802,129 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
+#ifdef CONFIG_VSX
+static inline int kvmppc_get_vsr_dword_offset(int index)
+{
+ int offset;
+
+ if ((index != 0) && (index != 1))
+ return -1;
+
+#ifdef __BIG_ENDIAN
+ offset = index;
+#else
+ offset = 1 - index;
+#endif
+
+ return offset;
+}
+
+static inline int kvmppc_get_vsr_word_offset(int index)
+{
+ int offset;
+
+ if ((index > 3) || (index < 0))
+ return -1;
+
+#ifdef __BIG_ENDIAN
+ offset = index;
+#else
+ offset = 3 - index;
+#endif
+ return offset;
+}
+
+static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
+ u64 gpr)
+{
+ union kvmppc_one_reg val;
+ int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+ if (offset == -1)
+ return;
+
+ if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsxval[offset] = gpr;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ } else {
+ VCPU_VSX_FPR(vcpu, index, offset) = gpr;
+ }
+}
+
+static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
+ u64 gpr)
+{
+ union kvmppc_one_reg val;
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+
+ if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsxval[0] = gpr;
+ val.vsxval[1] = gpr;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ } else {
+ VCPU_VSX_FPR(vcpu, index, 0) = gpr;
+ VCPU_VSX_FPR(vcpu, index, 1) = gpr;
+ }
+}
+
+static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
+ u32 gpr32)
+{
+ union kvmppc_one_reg val;
+ int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
+ int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
+ int dword_offset, word_offset;
+
+ if (offset == -1)
+ return;
+
+ if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ val.vval = VCPU_VSX_VR(vcpu, index);
+ val.vsx32val[offset] = gpr32;
+ VCPU_VSX_VR(vcpu, index) = val.vval;
+ } else {
+ dword_offset = offset / 2;
+ word_offset = offset % 2;
+ val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
+ val.vsx32val[word_offset] = gpr32;
+ VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
+ }
+}
+#endif /* CONFIG_VSX */
+
+#ifdef CONFIG_PPC_FPU
+static inline u64 sp_to_dp(u32 fprs)
+{
+ u64 fprd;
+
+ preempt_disable();
+ enable_kernel_fp();
+ asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
+ : "fr0");
+ preempt_enable();
+ return fprd;
+}
+
+static inline u32 dp_to_sp(u64 fprd)
+{
+ u32 fprs;
+
+ preempt_disable();
+ enable_kernel_fp();
+ asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
+ : "fr0");
+ preempt_enable();
+ return fprs;
+}
+
+#else
+#define sp_to_dp(x) (x)
+#define dp_to_sp(x) (x)
+#endif /* CONFIG_PPC_FPU */
+
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run)
{
@@ -827,6 +951,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
+ /* conversion between single and double precision */
+ if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
+ gpr = sp_to_dp(gpr);
+
if (vcpu->arch.mmio_sign_extend) {
switch (run->mmio.len) {
#ifdef CONFIG_PPC64
@@ -843,8 +971,6 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
}
}
- kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
-
switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
case KVM_MMIO_REG_GPR:
kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
@@ -861,6 +987,17 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
break;
#endif
+#ifdef CONFIG_VSX
+ case KVM_MMIO_REG_VSX:
+ if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
+ kvmppc_set_vsr_dword(vcpu, gpr);
+ else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
+ kvmppc_set_vsr_word(vcpu, gpr);
+ else if (vcpu->arch.mmio_vsx_copy_type ==
+ KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
+ kvmppc_set_vsr_dword_dump(vcpu, gpr);
+ break;
+#endif
default:
BUG();
}
@@ -927,6 +1064,35 @@ int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}
+#ifdef CONFIG_VSX
+int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ unsigned int rt, unsigned int bytes,
+ int is_default_endian, int mmio_sign_extend)
+{
+ enum emulation_result emulated = EMULATE_DONE;
+
+ /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
+ if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
+ (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+ return EMULATE_FAIL;
+ }
+
+ while (vcpu->arch.mmio_vsx_copy_nums) {
+ emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
+ is_default_endian, mmio_sign_extend);
+
+ if (emulated != EMULATE_DONE)
+ break;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+
+ vcpu->arch.mmio_vsx_copy_nums--;
+ vcpu->arch.mmio_vsx_offset++;
+ }
+ return emulated;
+}
+#endif /* CONFIG_VSX */
+
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
u64 val, unsigned int bytes, int is_default_endian)
{
@@ -952,6 +1118,9 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
+ if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
+ val = dp_to_sp(val);
+
/* Store the value at the lowest bytes in 'data'. */
if (!host_swabbed) {
switch (bytes) {
@@ -985,6 +1154,129 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
+#ifdef CONFIG_VSX
+static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
+{
+ u32 dword_offset, word_offset;
+ union kvmppc_one_reg reg;
+ int vsx_offset = 0;
+ int copy_type = vcpu->arch.mmio_vsx_copy_type;
+ int result = 0;
+
+ switch (copy_type) {
+ case KVMPPC_VSX_COPY_DWORD:
+ vsx_offset =
+ kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
+
+ if (vsx_offset == -1) {
+ result = -1;
+ break;
+ }
+
+ if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ *val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
+ } else {
+ reg.vval = VCPU_VSX_VR(vcpu, rs);
+ *val = reg.vsxval[vsx_offset];
+ }
+ break;
+
+ case KVMPPC_VSX_COPY_WORD:
+ vsx_offset =
+ kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
+
+ if (vsx_offset == -1) {
+ result = -1;
+ break;
+ }
+
+ if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
+ dword_offset = vsx_offset / 2;
+ word_offset = vsx_offset % 2;
+ reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
+ *val = reg.vsx32val[word_offset];
+ } else {
+ reg.vval = VCPU_VSX_VR(vcpu, rs);
+ *val = reg.vsx32val[vsx_offset];
+ }
+ break;
+
+ default:
+ result = -1;
+ break;
+ }
+
+ return result;
+}
+
+int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ int rs, unsigned int bytes, int is_default_endian)
+{
+ u64 val;
+ enum emulation_result emulated = EMULATE_DONE;
+
+ vcpu->arch.io_gpr = rs;
+
+ /* Currently, mmio_vsx_copy_nums is only allowed to be 4 or less */
+ if ( (vcpu->arch.mmio_vsx_copy_nums > 4) ||
+ (vcpu->arch.mmio_vsx_copy_nums < 0) ) {
+ return EMULATE_FAIL;
+ }
+
+ while (vcpu->arch.mmio_vsx_copy_nums) {
+ if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
+ return EMULATE_FAIL;
+
+ emulated = kvmppc_handle_store(run, vcpu,
+ val, bytes, is_default_endian);
+
+ if (emulated != EMULATE_DONE)
+ break;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+
+ vcpu->arch.mmio_vsx_copy_nums--;
+ vcpu->arch.mmio_vsx_offset++;
+ }
+
+ return emulated;
+}
+
+static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
+ struct kvm_run *run)
+{
+ enum emulation_result emulated = EMULATE_FAIL;
+ int r;
+
+ vcpu->arch.paddr_accessed += run->mmio.len;
+
+ if (!vcpu->mmio_is_write) {
+ emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
+ run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
+ } else {
+ emulated = kvmppc_handle_vsx_store(run, vcpu,
+ vcpu->arch.io_gpr, run->mmio.len, 1);
+ }
+
+ switch (emulated) {
+ case EMULATE_DO_MMIO:
+ run->exit_reason = KVM_EXIT_MMIO;
+ r = RESUME_HOST;
+ break;
+ case EMULATE_FAIL:
+ pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ r = RESUME_HOST;
+ break;
+ default:
+ r = RESUME_GUEST;
+ break;
+ }
+ return r;
+}
+#endif /* CONFIG_VSX */
+
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
int r = 0;
@@ -1087,13 +1379,24 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
int r;
sigset_t sigsaved;
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
if (vcpu->mmio_needed) {
+ vcpu->mmio_needed = 0;
if (!vcpu->mmio_is_write)
kvmppc_complete_mmio_load(vcpu, run);
- vcpu->mmio_needed = 0;
+#ifdef CONFIG_VSX
+ if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+ vcpu->arch.mmio_vsx_copy_nums--;
+ vcpu->arch.mmio_vsx_offset++;
+ }
+
+ if (vcpu->arch.mmio_vsx_copy_nums > 0) {
+ r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
+ if (r == RESUME_HOST) {
+ vcpu->mmio_needed = 1;
+ return r;
+ }
+ }
+#endif
} else if (vcpu->arch.osi_needed) {
u64 *gprs = run->osi.gprs;
int i;
@@ -1115,6 +1418,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
#endif
}
+ if (vcpu->sigset_active)
+ sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
if (run->immediate_exit)
r = -EINTR;
else