author     Linus Torvalds <torvalds@linux-foundation.org>  2019-05-17 10:33:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-17 10:33:30 -0700
commit     0ef0fd351550130129bbdb77362488befd7b69d2 (patch)
tree       23186172f5f85c06e18e3ee1a9619879df03c5df /arch/powerpc/kvm
parent     4489da7183099f569a7d3dd819c975073c04bc72 (diff)
parent     c011d23ba046826ccf8c4a4a6c1d01c9ccaa1403 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM updates from Paolo Bonzini:
"ARM:
- support for SVE and Pointer Authentication in guests
- PMU improvements
POWER:
- support for direct access to the POWER9 XIVE interrupt controller
- memory and performance optimizations
x86:
- support for accessing memory not backed by struct page
- fixes and refactoring
Generic:
- dirty page tracking improvements"
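The "dirty page tracking improvements" refer to the KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 capability in the shortlog below, which decouples fetching the dirty bitmap (KVM_GET_DIRTY_LOG) from re-write-protecting the pages (KVM_CLEAR_DIRTY_LOG). A minimal userspace sketch of the flow, not taken from the patch set itself; `vm_fd`, `bitmap` and `npages` are illustrative and assumed to be set up by the caller:

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>

/*
 * Sketch: enable manual dirty-log protection on a VM fd (done once),
 * then fetch and clear the dirty bitmap for memslot 0.  In real code
 * KVM_CHECK_EXTENSION should probe for the capability first.
 */
static int manual_dirty_log_round(int vm_fd, void *bitmap, __u32 npages)
{
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
		.args = { 1 },			/* turn manual protect on */
	};
	struct kvm_dirty_log log = { .slot = 0 };
	struct kvm_clear_dirty_log clear = {
		.slot = 0,
		.first_page = 0,		/* must be 64-page aligned */
		.num_pages = npages,		/* multiple of 64, or up to slot end */
	};

	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;

	/* With manual protect on, GET no longer write-protects pages... */
	log.dirty_bitmap = bitmap;
	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
		return -1;

	/* ...userspace re-protects explicitly, as late as it likes. */
	clear.dirty_bitmap = bitmap;
	return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
}
```

Splitting get from clear lets a live-migration loop re-protect pages in small chunks close to when they are re-sent, shrinking the window in which a page can be dirtied again.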
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (155 commits)
kvm: fix compilation on aarch64
Revert "KVM: nVMX: Expose RDPMC-exiting only when guest supports PMU"
kvm: x86: Fix L1TF mitigation for shadow MMU
KVM: nVMX: Disable intercept for FS/GS base MSRs in vmcs02 when possible
KVM: PPC: Book3S: Remove useless checks in 'release' method of KVM device
KVM: PPC: Book3S HV: XIVE: Fix spelling mistake "acessing" -> "accessing"
KVM: PPC: Book3S HV: Make sure to load LPID for radix VCPUs
kvm: nVMX: Set nested_run_pending in vmx_set_nested_state after checks complete
tests: kvm: Add tests for KVM_SET_NESTED_STATE
KVM: nVMX: KVM_SET_NESTED_STATE - Tear down old EVMCS state before setting new state
tests: kvm: Add tests for KVM_CAP_MAX_VCPUS and KVM_CAP_MAX_CPU_ID
tests: kvm: Add tests to .gitignore
KVM: Introduce KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
KVM: Fix kvm_clear_dirty_log_protect off-by-(minus-)one
KVM: Fix the bitmap range to copy during clear dirty
KVM: arm64: Fix ptrauth ID register masking logic
KVM: x86: use direct accessors for RIP and RSP
KVM: VMX: Use accessors for GPRs outside of dedicated caching logic
KVM: x86: Omit caching logic for always-available GPRs
kvm, x86: Properly check whether a pfn is an MMIO or not
...
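Several of the XIVE entries above land the new KVM_DEV_TYPE_XIVE device implemented in book3s_xive_native.c below, whose mmap handler exposes the thread interrupt management area (TIMA) and the ESB pages at fixed file offsets. A rough userspace sketch under those assumptions (`vm_fd` is illustrative; the offsets follow the KVM_XIVE_TIMA_PAGE_OFFSET / KVM_XIVE_ESB_PAGE_OFFSET layout enforced by kvmppc_xive_native_mmap() in the diff):

```c
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

/*
 * Sketch: create the XIVE native device on an existing VM fd and
 * mmap its TIMA range through the device fd.
 */
static void *xive_native_map_tima(int vm_fd, int *dev_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XIVE };
	long pgsz = sysconf(_SC_PAGESIZE);
	void *tima;

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return NULL;

	/*
	 * Up to 4 pages at KVM_XIVE_TIMA_PAGE_OFFSET; per
	 * xive_native_tima_fault() only page 2 (the OS ring) is
	 * populated, the HW/HV/USER pages SIGBUS on access.
	 */
	tima = mmap(NULL, 4 * pgsz, PROT_READ | PROT_WRITE, MAP_SHARED,
		    cd.fd, (off_t)KVM_XIVE_TIMA_PAGE_OFFSET * pgsz);
	if (tima == MAP_FAILED)
		return NULL;

	*dev_fd = cd.fd;
	return tima;
}
```

Keeping the mapping on the device fd is what lets kvmppc_xive_native_reset_mapped() below call unmap_mapping_range() to invalidate stale ESB mappings when a passthrough interrupt is remapped.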
Diffstat (limited to 'arch/powerpc/kvm')

 arch/powerpc/kvm/Makefile               |    2
 arch/powerpc/kvm/book3s.c               |   42
 arch/powerpc/kvm/book3s_64_vio.c        |   96
 arch/powerpc/kvm/book3s_64_vio_hv.c     |  105
 arch/powerpc/kvm/book3s_hv.c            |  152
 arch/powerpc/kvm/book3s_hv_builtin.c    |   57
 arch/powerpc/kvm/book3s_hv_rm_mmu.c     |  144
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |   86
 arch/powerpc/kvm/book3s_xive.c          |  250
 arch/powerpc/kvm/book3s_xive.h          |   37
 arch/powerpc/kvm/book3s_xive_native.c   | 1249
 arch/powerpc/kvm/book3s_xive_template.c |   78
 arch/powerpc/kvm/powerpc.c              |   40

 13 files changed, 2069 insertions(+), 269 deletions(-)
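Among the changes below, book3s_hv.c and book3s_hv_rm_mmu.c add virtual-mode and real-mode handlers for the H_PAGE_INIT hcall (the H_ZERO_PAGE and H_COPY_PAGE flags). From a pseries guest the call would look roughly like this sketch (guest kernel code, not part of this patch set; `dest` and `src` are 4K-aligned guest real addresses):

```c
#include <asm/hvcall.h>

/*
 * Guest-side sketch: invoke H_PAGE_INIT with the flags checked by
 * kvmppc_h_page_init() and kvmppc_rm_h_page_init() below.
 */
static long guest_h_page_zero(unsigned long dest)
{
	return plpar_hcall_norets(H_PAGE_INIT, H_ZERO_PAGE, dest, 0);
}

static long guest_h_page_copy(unsigned long dest, unsigned long src)
{
	return plpar_hcall_norets(H_PAGE_INIT, H_COPY_PAGE, dest, src);
}
```

Note from the diff that the real-mode handler returns H_TOO_HARD for radix guests, deferring to the virtual-mode handler, which copies via kvmppc_copy_guest() or clears via kvm_clear_guest().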
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index 3223aec88b2c..4c67cc79de7c 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile @@ -94,7 +94,7 @@ endif kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \ book3s_xics.o -kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o +kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o book3s_xive_native.o kvm-book3s_64-objs-$(CONFIG_SPAPR_TCE_IOMMU) += book3s_64_vio.o kvm-book3s_64-module-objs := \ diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index 10c5579d20ce..61a212d0daf0 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c @@ -651,6 +651,18 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu)); break; #endif /* CONFIG_KVM_XICS */ +#ifdef CONFIG_KVM_XIVE + case KVM_REG_PPC_VP_STATE: + if (!vcpu->arch.xive_vcpu) { + r = -ENXIO; + break; + } + if (xive_enabled()) + r = kvmppc_xive_native_get_vp(vcpu, val); + else + r = -ENXIO; + break; +#endif /* CONFIG_KVM_XIVE */ case KVM_REG_PPC_FSCR: *val = get_reg_val(id, vcpu->arch.fscr); break; @@ -724,6 +736,18 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val)); break; #endif /* CONFIG_KVM_XICS */ +#ifdef CONFIG_KVM_XIVE + case KVM_REG_PPC_VP_STATE: + if (!vcpu->arch.xive_vcpu) { + r = -ENXIO; + break; + } + if (xive_enabled()) + r = kvmppc_xive_native_set_vp(vcpu, val); + else + r = -ENXIO; + break; +#endif /* CONFIG_KVM_XIVE */ case KVM_REG_PPC_FSCR: vcpu->arch.fscr = set_reg_val(id, *val); break; @@ -891,6 +915,17 @@ void kvmppc_core_destroy_vm(struct kvm *kvm) kvmppc_rtas_tokens_free(kvm); WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); #endif + +#ifdef CONFIG_KVM_XICS + /* + * Free the XIVE devices which are not directly freed by the + * device 'release' method + */ + kfree(kvm->arch.xive_devices.native); + kvm->arch.xive_devices.native = NULL; + kfree(kvm->arch.xive_devices.xics_on_xive); + kvm->arch.xive_devices.xics_on_xive = NULL; +#endif /* CONFIG_KVM_XICS */ } int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu) @@ -1050,6 +1085,9 @@ static int kvmppc_book3s_init(void) if (xics_on_xive()) { kvmppc_xive_init_module(); kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS); + kvmppc_xive_native_init_module(); + kvm_register_device_ops(&kvm_xive_native_ops, + KVM_DEV_TYPE_XIVE); } else #endif kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS); @@ -1060,8 +1098,10 @@ static int kvmppc_book3s_init(void) static void kvmppc_book3s_exit(void) { #ifdef CONFIG_KVM_XICS - if (xics_on_xive()) + if (xics_on_xive()) { kvmppc_xive_exit_module(); + kvmppc_xive_native_exit_module(); + } #endif #ifdef CONFIG_KVM_BOOK3S_32_HANDLER kvmppc_book3s_exit_pr(); diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index f100e331e69b..66270e07449a 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c @@ -228,11 +228,33 @@ static void release_spapr_tce_table(struct rcu_head *head) unsigned long i, npages = kvmppc_tce_pages(stt->size); for (i = 0; i < npages; i++) - __free_page(stt->pages[i]); + if (stt->pages[i]) + __free_page(stt->pages[i]); kfree(stt); } +static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt, + unsigned long sttpage) +{ + struct page *page = stt->pages[sttpage]; + + if (page) + return page; + + mutex_lock(&stt->alloc_lock); + page = stt->pages[sttpage]; + if (!page) { + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + 
WARN_ON_ONCE(!page); + if (page) + stt->pages[sttpage] = page; + } + mutex_unlock(&stt->alloc_lock); + + return page; +} + static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) { struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data; @@ -241,7 +263,10 @@ static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf) if (vmf->pgoff >= kvmppc_tce_pages(stt->size)) return VM_FAULT_SIGBUS; - page = stt->pages[vmf->pgoff]; + page = kvm_spapr_get_tce_page(stt, vmf->pgoff); + if (!page) + return VM_FAULT_OOM; + get_page(page); vmf->page = page; return 0; @@ -296,7 +321,6 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, struct kvmppc_spapr_tce_table *siter; unsigned long npages, size = args->size; int ret = -ENOMEM; - int i; if (!args->size || args->page_shift < 12 || args->page_shift > 34 || (args->offset + args->size > (ULLONG_MAX >> args->page_shift))) @@ -318,14 +342,9 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, stt->offset = args->offset; stt->size = size; stt->kvm = kvm; + mutex_init(&stt->alloc_lock); INIT_LIST_HEAD_RCU(&stt->iommu_tables); - for (i = 0; i < npages; i++) { - stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!stt->pages[i]) - goto fail; - } - mutex_lock(&kvm->lock); /* Check this LIOBN hasn't been previously allocated */ @@ -352,17 +371,28 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, if (ret >= 0) return ret; - fail: - for (i = 0; i < npages; i++) - if (stt->pages[i]) - __free_page(stt->pages[i]); - kfree(stt); fail_acct: kvmppc_account_memlimit(kvmppc_stt_pages(npages), false); return ret; } +static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, + unsigned long *ua) +{ + unsigned long gfn = tce >> PAGE_SHIFT; + struct kvm_memory_slot *memslot; + + memslot = search_memslots(kvm_memslots(kvm), gfn); + if (!memslot) + return -EINVAL; + + *ua = __gfn_to_hva_memslot(memslot, gfn) | + (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); + + return 0; +} + static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce) { @@ -378,7 +408,7 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_TOO_HARD; - if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL)) + if (kvmppc_tce_to_ua(stt->kvm, tce, &ua)) return H_TOO_HARD; list_for_each_entry_rcu(stit, &stt->iommu_tables, next) { @@ -397,6 +427,36 @@ static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, return H_SUCCESS; } +/* + * Handles TCE requests for emulated devices. + * Puts guest TCE values to the table and expects user space to convert them. + * Cannot fail so kvmppc_tce_validate must be called before it. 
+ */ +static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, + unsigned long idx, unsigned long tce) +{ + struct page *page; + u64 *tbl; + unsigned long sttpage; + + idx -= stt->offset; + sttpage = idx / TCES_PER_PAGE; + page = stt->pages[sttpage]; + + if (!page) { + /* We allow any TCE, not just with read|write permissions */ + if (!tce) + return; + + page = kvm_spapr_get_tce_page(stt, sttpage); + if (!page) + return; + } + tbl = page_to_virt(page); + + tbl[idx % TCES_PER_PAGE] = tce; +} + static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry) { @@ -551,7 +611,7 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, dir = iommu_tce_direction(tce); - if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) { + if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) { ret = H_PARAMETER; goto unlock_exit; } @@ -612,7 +672,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, return ret; idx = srcu_read_lock(&vcpu->kvm->srcu); - if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) { + if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) { ret = H_TOO_HARD; goto unlock_exit; } @@ -647,7 +707,7 @@ long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu, } tce = be64_to_cpu(tce); - if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) + if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c index 2206bc729b9a..484b47fa3960 100644 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c @@ -66,8 +66,6 @@ #endif -#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) - /* * Finds a TCE table descriptor by LIOBN. * @@ -88,6 +86,25 @@ struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm, EXPORT_SYMBOL_GPL(kvmppc_find_table); #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE +static long kvmppc_rm_tce_to_ua(struct kvm *kvm, unsigned long tce, + unsigned long *ua, unsigned long **prmap) +{ + unsigned long gfn = tce >> PAGE_SHIFT; + struct kvm_memory_slot *memslot; + + memslot = search_memslots(kvm_memslots_raw(kvm), gfn); + if (!memslot) + return -EINVAL; + + *ua = __gfn_to_hva_memslot(memslot, gfn) | + (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); + + if (prmap) + *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; + + return 0; +} + /* * Validates TCE address. * At the moment flags and page mask are validated. @@ -111,7 +128,7 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, if (iommu_tce_check_gpa(stt->page_shift, gpa)) return H_PARAMETER; - if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL)) + if (kvmppc_rm_tce_to_ua(stt->kvm, tce, &ua, NULL)) return H_TOO_HARD; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -129,7 +146,6 @@ static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt, return H_SUCCESS; } -#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */ /* Note on the use of page_address() in real mode, * @@ -161,13 +177,9 @@ static u64 *kvmppc_page_address(struct page *page) /* * Handles TCE requests for emulated devices. * Puts guest TCE values to the table and expects user space to convert them. - * Called in both real and virtual modes. - * Cannot fail so kvmppc_tce_validate must be called before it. - * - * WARNING: This will be called in real-mode on HV KVM and virtual - * mode on PR KVM + * Cannot fail so kvmppc_rm_tce_validate must be called before it. 
*/ -void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, +static void kvmppc_rm_tce_put(struct kvmppc_spapr_tce_table *stt, unsigned long idx, unsigned long tce) { struct page *page; @@ -175,35 +187,48 @@ void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt, idx -= stt->offset; page = stt->pages[idx / TCES_PER_PAGE]; + /* + * page must not be NULL in real mode, + * kvmppc_rm_ioba_validate() must have taken care of this. + */ + WARN_ON_ONCE_RM(!page); tbl = kvmppc_page_address(page); tbl[idx % TCES_PER_PAGE] = tce; } -EXPORT_SYMBOL_GPL(kvmppc_tce_put); -long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce, - unsigned long *ua, unsigned long **prmap) +/* + * TCEs pages are allocated in kvmppc_rm_tce_put() which won't be able to do so + * in real mode. + * Check if kvmppc_rm_tce_put() can succeed in real mode, i.e. a TCEs page is + * allocated or not required (when clearing a tce entry). + */ +static long kvmppc_rm_ioba_validate(struct kvmppc_spapr_tce_table *stt, + unsigned long ioba, unsigned long npages, bool clearing) { - unsigned long gfn = tce >> PAGE_SHIFT; - struct kvm_memory_slot *memslot; + unsigned long i, idx, sttpage, sttpages; + unsigned long ret = kvmppc_ioba_validate(stt, ioba, npages); - memslot = search_memslots(kvm_memslots(kvm), gfn); - if (!memslot) - return -EINVAL; - - *ua = __gfn_to_hva_memslot(memslot, gfn) | - (tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE)); + if (ret) + return ret; + /* + * clearing==true says kvmppc_rm_tce_put won't be allocating pages + * for empty tces. + */ + if (clearing) + return H_SUCCESS; -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE - if (prmap) - *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn]; -#endif + idx = (ioba >> stt->page_shift) - stt->offset; + sttpage = idx / TCES_PER_PAGE; + sttpages = _ALIGN_UP(idx % TCES_PER_PAGE + npages, TCES_PER_PAGE) / + TCES_PER_PAGE; + for (i = sttpage; i < sttpage + sttpages; ++i) + if (!stt->pages[i]) + return H_TOO_HARD; - return 0; + return H_SUCCESS; } -EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua); -#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl, unsigned long entry, unsigned long *hpa, enum dma_data_direction *direction) @@ -381,7 +406,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, if (!stt) return H_TOO_HARD; - ret = kvmppc_ioba_validate(stt, ioba, 1); + ret = kvmppc_rm_ioba_validate(stt, ioba, 1, tce == 0); if (ret != H_SUCCESS) return ret; @@ -390,7 +415,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, return ret; dir = iommu_tce_direction(tce); - if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) + if ((dir != DMA_NONE) && kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; entry = ioba >> stt->page_shift; @@ -409,7 +434,7 @@ long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, } } - kvmppc_tce_put(stt, entry, tce); + kvmppc_rm_tce_put(stt, entry, tce); return H_SUCCESS; } @@ -480,7 +505,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, if (tce_list & (SZ_4K - 1)) return H_PARAMETER; - ret = kvmppc_ioba_validate(stt, ioba, npages); + ret = kvmppc_rm_ioba_validate(stt, ioba, npages, false); if (ret != H_SUCCESS) return ret; @@ -492,7 +517,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, */ struct mm_iommu_table_group_mem_t *mem; - if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL)) return H_TOO_HARD; mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, 
IOMMU_PAGE_SIZE_4K); @@ -508,7 +533,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, * We do not require memory to be preregistered in this case * so lock rmap and do __find_linux_pte_or_hugepte(). */ - if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap)) return H_TOO_HARD; rmap = (void *) vmalloc_to_phys(rmap); @@ -542,7 +567,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, unsigned long tce = be64_to_cpu(((u64 *)tces)[i]); ua = 0; - if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) + if (kvmppc_rm_tce_to_ua(vcpu->kvm, tce, &ua, NULL)) return H_PARAMETER; list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { @@ -557,7 +582,7 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, } } - kvmppc_tce_put(stt, entry + i, tce); + kvmppc_rm_tce_put(stt, entry + i, tce); } unlock_exit: @@ -583,7 +608,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, if (!stt) return H_TOO_HARD; - ret = kvmppc_ioba_validate(stt, ioba, npages); + ret = kvmppc_rm_ioba_validate(stt, ioba, npages, tce_value == 0); if (ret != H_SUCCESS) return ret; @@ -610,7 +635,7 @@ long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu, } for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift)) - kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value); + kvmppc_rm_tce_put(stt, ioba >> stt->page_shift, tce_value); return H_SUCCESS; } @@ -635,6 +660,10 @@ long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn, idx = (ioba >> stt->page_shift) - stt->offset; page = stt->pages[idx / TCES_PER_PAGE]; + if (!page) { + vcpu->arch.regs.gpr[4] = 0; + return H_SUCCESS; + } tbl = (u64 *)page_address(page); vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE]; diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 7bdcd4d7a9f0..d5fc624e0655 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -750,7 +750,7 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu) /* * Ensure that the read of vcore->dpdes comes after the read * of vcpu->doorbell_request. This barrier matches the - * smb_wmb() in kvmppc_guest_entry_inject(). + * smp_wmb() in kvmppc_guest_entry_inject(). 
*/ smp_rmb(); vc = vcpu->arch.vcore; @@ -802,6 +802,80 @@ static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags, } } +/* Copy guest memory in place - must reside within a single memslot */ +static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from, + unsigned long len) +{ + struct kvm_memory_slot *to_memslot = NULL; + struct kvm_memory_slot *from_memslot = NULL; + unsigned long to_addr, from_addr; + int r; + + /* Get HPA for from address */ + from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT); + if (!from_memslot) + return -EFAULT; + if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) + << PAGE_SHIFT)) + return -EINVAL; + from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT); + if (kvm_is_error_hva(from_addr)) + return -EFAULT; + from_addr |= (from & (PAGE_SIZE - 1)); + + /* Get HPA for to address */ + to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT); + if (!to_memslot) + return -EFAULT; + if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) + << PAGE_SHIFT)) + return -EINVAL; + to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT); + if (kvm_is_error_hva(to_addr)) + return -EFAULT; + to_addr |= (to & (PAGE_SIZE - 1)); + + /* Perform copy */ + r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr, + len); + if (r) + return -EFAULT; + mark_page_dirty(kvm, to >> PAGE_SHIFT); + return 0; +} + +static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, + unsigned long dest, unsigned long src) +{ + u64 pg_sz = SZ_4K; /* 4K page size */ + u64 pg_mask = SZ_4K - 1; + int ret; + + /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ + if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | + H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) + return H_PARAMETER; + + /* dest (and src if copy_page flag set) must be page aligned */ + if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) + return H_PARAMETER; + + /* zero and/or copy the page as determined by the flags */ + if (flags & H_COPY_PAGE) { + ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); + if (ret < 0) + return H_PARAMETER; + } else if (flags & H_ZERO_PAGE) { + ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); + if (ret < 0) + return H_PARAMETER; + } + + /* We can ignore the remaining flags */ + + return H_SUCCESS; +} + static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target) { struct kvmppc_vcore *vcore = target->arch.vcore; @@ -1004,6 +1078,11 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) if (nesting_enabled(vcpu->kvm)) ret = kvmhv_copy_tofrom_guest_nested(vcpu); break; + case H_PAGE_INIT: + ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4), + kvmppc_get_gpr(vcpu, 5), + kvmppc_get_gpr(vcpu, 6)); + break; default: return RESUME_HOST; } @@ -1048,6 +1127,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd) case H_IPOLL: case H_XIRR_X: #endif + case H_PAGE_INIT: return 1; } @@ -2505,37 +2585,6 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) } } -static void kvmppc_radix_check_need_tlb_flush(struct kvm *kvm, int pcpu, - struct kvm_nested_guest *nested) -{ - cpumask_t *need_tlb_flush; - int lpid; - - if (!cpu_has_feature(CPU_FTR_HVMODE)) - return; - - if (cpu_has_feature(CPU_FTR_ARCH_300)) - pcpu &= ~0x3UL; - - if (nested) { - lpid = nested->shadow_lpid; - need_tlb_flush = &nested->need_tlb_flush; - } else { - lpid = kvm->arch.lpid; - need_tlb_flush = &kvm->arch.need_tlb_flush; - } - - mtspr(SPRN_LPID, lpid); - isync(); - smp_mb(); - - if 
(cpumask_test_cpu(pcpu, need_tlb_flush)) { - radix__local_flush_tlb_lpid_guest(lpid); - /* Clear the bit after the TLB flush */ - cpumask_clear_cpu(pcpu, need_tlb_flush); - } -} - static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) { int cpu; @@ -3229,19 +3278,11 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) for (sub = 0; sub < core_info.n_subcores; ++sub) spin_unlock(&core_info.vc[sub]->lock); - if (kvm_is_radix(vc->kvm)) { - /* - * Do we need to flush the process scoped TLB for the LPAR? - * - * On POWER9, individual threads can come in here, but the - * TLB is shared between the 4 threads in a core, hence - * invalidating on one thread invalidates for all. - * Thus we make all 4 threads use the same bit here. - * - * Hash must be flushed in realmode in order to use tlbiel. - */ - kvmppc_radix_check_need_tlb_flush(vc->kvm, pcpu, NULL); - } + guest_enter_irqoff(); + + srcu_idx = srcu_read_lock(&vc->kvm->srcu); + + this_cpu_disable_ftrace(); /* * Interrupts will be enabled once we get into the guest, @@ -3249,19 +3290,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) */ trace_hardirqs_on(); - guest_enter_irqoff(); - - srcu_idx = srcu_read_lock(&vc->kvm->srcu); - - this_cpu_disable_ftrace(); - trap = __kvmppc_vcore_entry(); + trace_hardirqs_off(); + this_cpu_enable_ftrace(); srcu_read_unlock(&vc->kvm->srcu, srcu_idx); - trace_hardirqs_off(); set_irq_happened(trap); spin_lock(&vc->lock); @@ -3514,6 +3550,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, #ifdef CONFIG_ALTIVEC load_vr_state(&vcpu->arch.vr); #endif + mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); mtspr(SPRN_DSCR, vcpu->arch.dscr); mtspr(SPRN_IAMR, vcpu->arch.iamr); @@ -3605,6 +3642,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, #ifdef CONFIG_ALTIVEC store_vr_state(&vcpu->arch.vr); #endif + vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); if (cpu_has_feature(CPU_FTR_TM) || cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) @@ -3970,7 +4008,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, unsigned long lpcr) { int trap, r, pcpu; - int srcu_idx; + int srcu_idx, lpid; struct kvmppc_vcore *vc; struct kvm *kvm = vcpu->kvm; struct kvm_nested_guest *nested = vcpu->arch.nested; @@ -4046,8 +4084,12 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, vc->vcore_state = VCORE_RUNNING; trace_kvmppc_run_core(vc, 0); - if (cpu_has_feature(CPU_FTR_HVMODE)) - kvmppc_radix_check_need_tlb_flush(kvm, pcpu, nested); + if (cpu_has_feature(CPU_FTR_HVMODE)) { + lpid = nested ? 
nested->shadow_lpid : kvm->arch.lpid; + mtspr(SPRN_LPID, lpid); + isync(); + kvmppc_check_need_tlb_flush(kvm, pcpu, nested); + } trace_hardirqs_on(); guest_enter_irqoff(); diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index b0cf22477e87..6035d24f1d1d 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -805,3 +805,60 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu) vcpu->arch.doorbell_request = 0; } } + +static void flush_guest_tlb(struct kvm *kvm) +{ + unsigned long rb, set; + + rb = PPC_BIT(52); /* IS = 2 */ + if (kvm_is_radix(kvm)) { + /* R=1 PRS=1 RIC=2 */ + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r" (rb), "i" (1), "i" (1), "i" (2), + "r" (0) : "memory"); + for (set = 1; set < kvm->arch.tlb_sets; ++set) { + rb += PPC_BIT(51); /* increment set number */ + /* R=1 PRS=1 RIC=0 */ + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r" (rb), "i" (1), "i" (1), "i" (0), + "r" (0) : "memory"); + } + } else { + for (set = 0; set < kvm->arch.tlb_sets; ++set) { + /* R=0 PRS=0 RIC=0 */ + asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) + : : "r" (rb), "i" (0), "i" (0), "i" (0), + "r" (0) : "memory"); + rb += PPC_BIT(51); /* increment set number */ + } + } + asm volatile("ptesync": : :"memory"); +} + +void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu, + struct kvm_nested_guest *nested) +{ + cpumask_t *need_tlb_flush; + + /* + * On POWER9, individual threads can come in here, but the + * TLB is shared between the 4 threads in a core, hence + * invalidating on one thread invalidates for all. + * Thus we make all 4 threads use the same bit. + */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) + pcpu = cpu_first_thread_sibling(pcpu); + + if (nested) + need_tlb_flush = &nested->need_tlb_flush; + else + need_tlb_flush = &kvm->arch.need_tlb_flush; + + if (cpumask_test_cpu(pcpu, need_tlb_flush)) { + flush_guest_tlb(kvm); + + /* Clear the bit after the TLB flush */ + cpumask_clear_cpu(pcpu, need_tlb_flush); + } +} +EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush); diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 3b3791ed74a6..8431ad1e8391 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -13,6 +13,7 @@ #include <linux/hugetlb.h> #include <linux/module.h> #include <linux/log2.h> +#include <linux/sizes.h> #include <asm/trace.h> #include <asm/kvm_ppc.h> @@ -867,6 +868,149 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, return ret; } +static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa, + int writing, unsigned long *hpa, + struct kvm_memory_slot **memslot_p) +{ + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *memslot; + unsigned long gfn, hva, pa, psize = PAGE_SHIFT; + unsigned int shift; + pte_t *ptep, pte; + + /* Find the memslot for this address */ + gfn = gpa >> PAGE_SHIFT; + memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); + if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) + return H_PARAMETER; + + /* Translate to host virtual address */ + hva = __gfn_to_hva_memslot(memslot, gfn); + + /* Try to find the host pte for that virtual address */ + ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift); + if (!ptep) + return H_TOO_HARD; + pte = kvmppc_read_update_linux_pte(ptep, writing); + if (!pte_present(pte)) + return H_TOO_HARD; + + /* Convert to a physical address */ + if (shift) + psize = 1UL << shift; + pa = pte_pfn(pte) << PAGE_SHIFT; + pa |= hva & 
(psize - 1); + pa |= gpa & ~PAGE_MASK; + + if (hpa) + *hpa = pa; + if (memslot_p) + *memslot_p = memslot; + + return H_SUCCESS; +} + +static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, + unsigned long dest) +{ + struct kvm_memory_slot *memslot; + struct kvm *kvm = vcpu->kvm; + unsigned long pa, mmu_seq; + long ret = H_SUCCESS; + int i; + + /* Used later to detect if we might have been invalidated */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + ret = kvmppc_get_hpa(vcpu, dest, 1, &pa, &memslot); + if (ret != H_SUCCESS) + return ret; + + /* Check if we've been invalidated */ + raw_spin_lock(&kvm->mmu_lock.rlock); + if (mmu_notifier_retry(kvm, mmu_seq)) { + ret = H_TOO_HARD; + goto out_unlock; + } + + /* Zero the page */ + for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES) + dcbz((void *)pa); + kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE); + +out_unlock: + raw_spin_unlock(&kvm->mmu_lock.rlock); + return ret; +} + +static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, + unsigned long dest, unsigned long src) +{ + unsigned long dest_pa, src_pa, mmu_seq; + struct kvm_memory_slot *dest_memslot; + struct kvm *kvm = vcpu->kvm; + long ret = H_SUCCESS; + + /* Used later to detect if we might have been invalidated */ + mmu_seq = kvm->mmu_notifier_seq; + smp_rmb(); + + ret = kvmppc_get_hpa(vcpu, dest, 1, &dest_pa, &dest_memslot); + if (ret != H_SUCCESS) + return ret; + ret = kvmppc_get_hpa(vcpu, src, 0, &src_pa, NULL); + if (ret != H_SUCCESS) + return ret; + + /* Check if we've been invalidated */ + raw_spin_lock(&kvm->mmu_lock.rlock); + if (mmu_notifier_retry(kvm, mmu_seq)) { + ret = H_TOO_HARD; + goto out_unlock; + } + + /* Copy the page */ + memcpy((void *)dest_pa, (void *)src_pa, SZ_4K); + + kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE); + +out_unlock: + raw_spin_unlock(&kvm->mmu_lock.rlock); + return ret; +} + +long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, + unsigned long dest, unsigned long src) +{ + struct kvm *kvm = vcpu->kvm; + u64 pg_mask = SZ_4K - 1; /* 4K page size */ + long ret = H_SUCCESS; + + /* Don't handle radix mode here, go up to the virtual mode handler */ + if (kvm_is_radix(kvm)) + return H_TOO_HARD; + + /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ + if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | + H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) + return H_PARAMETER; + + /* dest (and src if copy_page flag set) must be page aligned */ + if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) + return H_PARAMETER; + + /* zero and/or copy the page as determined by the flags */ + if (flags & H_COPY_PAGE) + ret = kvmppc_do_h_page_init_copy(vcpu, dest, src); + else if (flags & H_ZERO_PAGE) + ret = kvmppc_do_h_page_init_zero(vcpu, dest); + + /* We can ignore the other flags */ + + return ret; +} + void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep, unsigned long pte_index) { diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index dd014308f065..f9b2620fbecd 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -589,11 +589,8 @@ kvmppc_hv_entry: 1: #endif - /* Use cr7 as an indication of radix mode */ ld r5, HSTATE_KVM_VCORE(r13) ld r9, VCORE_KVM(r5) /* pointer to struct kvm */ - lbz r0, KVM_RADIX(r9) - cmpwi cr7, r0, 0 /* * POWER7/POWER8 host -> guest partition switch code. 
@@ -616,9 +613,6 @@ kvmppc_hv_entry: cmpwi r6,0 bne 10f - /* Radix has already switched LPID and flushed core TLB */ - bne cr7, 22f - lwz r7,KVM_LPID(r9) BEGIN_FTR_SECTION ld r6,KVM_SDR1(r9) @@ -630,41 +624,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) mtspr SPRN_LPID,r7 isync - /* See if we need to flush the TLB. Hash has to be done in RM */ - lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */ -BEGIN_FTR_SECTION - /* - * On POWER9, individual threads can come in here, but the - * TLB is shared between the 4 threads in a core, hence - * invalidating on one thread invalidates for all. - * Thus we make all 4 threads use the same bit here. - */ - clrrdi r6,r6,2 -END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) - clrldi r7,r6,64-6 /* extract bit number (6 bits) */ - srdi r6,r6,6 /* doubleword number */ - sldi r6,r6,3 /* address offset */ - add r6,r6,r9 - addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */ - li r8,1 - sld r8,r8,r7 - ld r7,0(r6) - and. r7,r7,r8 - beq 22f - /* Flush the TLB of any entries for this LPID */ - lwz r0,KVM_TLB_SETS(r9) - mtctr r0 - li r7,0x800 /* IS field = 0b10 */ - ptesync - li r0,0 /* RS for P9 version of tlbiel */ -28: tlbiel r7 /* On P9, rs=0, RIC=0, PRS=0, R=0 */ - addi r7,r7,0x1000 - bdnz 28b - ptesync -23: ldarx r7,0,r6 /* clear the bit after TLB flushed */ - andc r7,r7,r8 - stdcx. r7,0,r6 - bne 23b + /* See if we need to flush the TLB. */ + mr r3, r9 /* kvm pointer */ + lhz r4, PACAPACAINDEX(r13) /* physical cpu number */ + li r5, 0 /* nested vcpu pointer */ + bl kvmppc_check_need_tlb_flush + nop + ld r5, HSTATE_KVM_VCORE(r13) /* Add timebase offset onto timebase */ 22: ld r8,VCORE_TB_OFFSET(r5) @@ -980,17 +946,27 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) #ifdef CONFIG_KVM_XICS /* We are entering the guest on that thread, push VCPU to XIVE */ - ld r10, HSTATE_XIVE_TIMA_PHYS(r13) - cmpldi cr0, r10, 0 - beq no_xive ld r11, VCPU_XIVE_SAVED_STATE(r4) li r9, TM_QW1_OS + lwz r8, VCPU_XIVE_CAM_WORD(r4) + li r7, TM_QW1_OS + TM_WORD2 + mfmsr r0 + andi. r0, r0, MSR_DR /* in real mode? */ + beq 2f + ld r10, HSTATE_XIVE_TIMA_VIRT(r13) + cmpldi cr1, r10, 0 + beq cr1, no_xive + eieio + stdx r11,r9,r10 + stwx r8,r7,r10 + b 3f +2: ld r10, HSTATE_XIVE_TIMA_PHYS(r13) + cmpldi cr1, r10, 0 + beq cr1, no_xive eieio stdcix r11,r9,r10 - lwz r11, VCPU_XIVE_CAM_WORD(r4) - li r9, TM_QW1_OS + TM_WORD2 - stwcix r11,r9,r10 - li r9, 1 + stwcix r8,r7,r10 +3: li r9, 1 stb r9, VCPU_XIVE_PUSHED(r4) eieio @@ -1009,12 +985,16 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) * on, we mask it. */ lbz r0, VCPU_XIVE_ESC_ON(r4) - cmpwi r0,0 - beq 1f - ld r10, VCPU_XIVE_ESC_RADDR(r4) + cmpwi cr1, r0,0 + beq cr1, 1f li r9, XIVE_ESB_SET_PQ_01 + beq 4f /* in real mode? 
*/ + ld r10, VCPU_XIVE_ESC_VADDR(r4) + ldx r0, r10, r9 + b 5f +4: ld r10, VCPU_XIVE_ESC_RADDR(r4) ldcix r0, r10, r9 - sync +5: sync /* We have a possible subtle race here: The escalation interrupt might * have fired and be on its way to the host queue while we mask it, @@ -2292,7 +2272,7 @@ hcall_real_table: #endif .long 0 /* 0x24 - H_SET_SPRG0 */ .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table - .long 0 /* 0x2c */ + .long DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table .long 0 /* 0x30 */ .long 0 /* 0x34 */ .long 0 /* 0x38 */ diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index f78d002f0fe0..4953957333b7 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c @@ -166,7 +166,8 @@ static irqreturn_t xive_esc_irq(int irq, void *data) return IRQ_HANDLED; } -static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio) +int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, + bool single_escalation) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct xive_q *q = &xc->queues[prio]; @@ -185,7 +186,7 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio) return -EIO; } - if (xc->xive->single_escalation) + if (single_escalation) name = kasprintf(GFP_KERNEL, "kvm-%d-%d", vcpu->kvm->arch.lpid, xc->server_num); else @@ -217,7 +218,7 @@ static int xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio) * interrupt, thus leaving it effectively masked after * it fires once. */ - if (xc->xive->single_escalation) { + if (single_escalation) { struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]); struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); @@ -291,7 +292,8 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio) continue; rc = xive_provision_queue(vcpu, prio); if (rc == 0 && !xive->single_escalation) - xive_attach_escalation(vcpu, prio); + kvmppc_xive_attach_escalation(vcpu, prio, + xive->single_escalation); if (rc) return rc; } @@ -342,7 +344,7 @@ static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio) return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY; } -static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio) +int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio) { struct kvm_vcpu *vcpu; int i, rc; @@ -380,11 +382,6 @@ static int xive_select_target(struct kvm *kvm, u32 *server, u8 prio) return -EBUSY; } -static u32 xive_vp(struct kvmppc_xive *xive, u32 server) -{ - return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server); -} - static u8 xive_lock_and_mask(struct kvmppc_xive *xive, struct kvmppc_xive_src_block *sb, struct kvmppc_xive_irq_state *state) @@ -430,8 +427,8 @@ static u8 xive_lock_and_mask(struct kvmppc_xive *xive, */ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { xive_native_configure_irq(hw_num, - xive_vp(xive, state->act_server), - MASKED, state->number); + kvmppc_xive_vp(xive, state->act_server), + MASKED, state->number); /* set old_p so we can track if an H_EOI was done */ state->old_p = true; state->old_q = false; @@ -486,8 +483,8 @@ static void xive_finish_unmask(struct kvmppc_xive *xive, */ if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) { xive_native_configure_irq(hw_num, - xive_vp(xive, state->act_server), - state->act_priority, state->number); + kvmppc_xive_vp(xive, state->act_server), + state->act_priority, state->number); /* If an EOI is needed, do it here */ if (!state->old_p) xive_vm_source_eoi(hw_num, xd); @@ -535,7 +532,7 @@ static int xive_target_interrupt(struct kvm *kvm, * priority. 
The count for that new target will have * already been incremented. */ - rc = xive_select_target(kvm, &server, prio); + rc = kvmppc_xive_select_target(kvm, &server, prio); /* * We failed to find a target ? Not much we can do @@ -563,7 +560,7 @@ static int xive_target_interrupt(struct kvm *kvm, kvmppc_xive_select_irq(state, &hw_num, NULL); return xive_native_configure_irq(hw_num, - xive_vp(xive, server), + kvmppc_xive_vp(xive, server), prio, state->number); } @@ -849,7 +846,8 @@ int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) /* * We can't update the state of a "pushed" VCPU, but that - * shouldn't happen. + * shouldn't happen because the vcpu->mutex makes running a + * vcpu mutually exclusive with doing one_reg get/set on it. */ if (WARN_ON(vcpu->arch.xive_pushed)) return -EIO; @@ -940,6 +938,13 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, /* Turn the IPI hard off */ xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01); + /* + * Reset ESB guest mapping. Needed when ESB pages are exposed + * to the guest in XIVE native mode + */ + if (xive->ops && xive->ops->reset_mapped) + xive->ops->reset_mapped(kvm, guest_irq); + /* Grab info about irq */ state->pt_number = hw_irq; state->pt_data = irq_data_get_irq_handler_data(host_data); @@ -951,7 +956,7 @@ int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq, * which is fine for a never started interrupt. */ xive_native_configure_irq(hw_irq, - xive_vp(xive, state->act_server), + kvmppc_xive_vp(xive, state->act_server), state->act_priority, state->number); /* @@ -1025,9 +1030,17 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, state->pt_number = 0; state->pt_data = NULL; + /* + * Reset ESB guest mapping. Needed when ESB pages are exposed + * to the guest in XIVE native mode + */ + if (xive->ops && xive->ops->reset_mapped) { + xive->ops->reset_mapped(kvm, guest_irq); + } + /* Reconfigure the IPI */ xive_native_configure_irq(state->ipi_number, - xive_vp(xive, state->act_server), + kvmppc_xive_vp(xive, state->act_server), state->act_priority, state->number); /* @@ -1049,7 +1062,7 @@ int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq, } EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped); -static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) +void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; struct kvm *kvm = vcpu->kvm; @@ -1083,14 +1096,35 @@ static void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu) arch_spin_unlock(&sb->lock); } } + + /* Disable vcpu's escalation interrupt */ + if (vcpu->arch.xive_esc_on) { + __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr + + XIVE_ESB_SET_PQ_01)); + vcpu->arch.xive_esc_on = false; + } + + /* + * Clear pointers to escalation interrupt ESB. + * This is safe because the vcpu->mutex is held, preventing + * any other CPU from concurrently executing a KVM_RUN ioctl. 
+ */ + vcpu->arch.xive_esc_vaddr = 0; + vcpu->arch.xive_esc_raddr = 0; } void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - struct kvmppc_xive *xive = xc->xive; + struct kvmppc_xive *xive = vcpu->kvm->arch.xive; int i; + if (!kvmppc_xics_enabled(vcpu)) + return; + + if (!xc) + return; + pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num); /* Ensure no interrupt is still routed to that VP */ @@ -1129,6 +1163,10 @@ void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) } /* Free the VP */ kfree(xc); + + /* Cleanup the vcpu */ + vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; + vcpu->arch.xive_vcpu = NULL; } int kvmppc_xive_connect_vcpu(struct kvm_device *dev, @@ -1146,7 +1184,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, } if (xive->kvm != vcpu->kvm) return -EPERM; - if (vcpu->arch.irq_type) + if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) return -EBUSY; if (kvmppc_xive_find_server(vcpu->kvm, cpu)) { pr_devel("Duplicate !\n"); @@ -1166,7 +1204,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, xc->xive = xive; xc->vcpu = vcpu; xc->server_num = cpu; - xc->vp_id = xive_vp(xive, cpu); + xc->vp_id = kvmppc_xive_vp(xive, cpu); xc->mfrr = 0xff; xc->valid = true; @@ -1219,7 +1257,8 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, if (xive->qmap & (1 << i)) { r = xive_provision_queue(vcpu, i); if (r == 0 && !xive->single_escalation) - xive_attach_escalation(vcpu, i); + kvmppc_xive_attach_escalation( + vcpu, i, xive->single_escalation); if (r) goto bail; } else { @@ -1234,7 +1273,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev, } /* If not done above, attach priority 0 escalation */ - r = xive_attach_escalation(vcpu, 0); + r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation); if (r) goto bail; @@ -1485,8 +1524,8 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) return 0; } -static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *xive, - int irq) +struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( + struct kvmppc_xive *xive, int irq) { struct kvm *kvm = xive->kvm; struct kvmppc_xive_src_block *sb; @@ -1509,6 +1548,7 @@ static struct kvmppc_xive_src_block *xive_create_src_block(struct kvmppc_xive *x for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) { sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i; + sb->irq_state[i].eisn = 0; sb->irq_state[i].guest_priority = MASKED; sb->irq_state[i].saved_priority = MASKED; sb->irq_state[i].act_priority = MASKED; @@ -1565,7 +1605,7 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) sb = kvmppc_xive_find_source(xive, irq, &idx); if (!sb) { pr_devel("No source, creating source block...\n"); - sb = xive_create_src_block(xive, irq); + sb = kvmppc_xive_create_src_block(xive, irq); if (!sb) { pr_devel("Failed to create block...\n"); return -ENOMEM; @@ -1789,7 +1829,7 @@ static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd) xive_cleanup_irq_data(xd); } -static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) +void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) { int i; @@ -1810,16 +1850,55 @@ static void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb) } } -static void kvmppc_xive_free(struct kvm_device *dev) +/* + * Called when device fd is closed. kvm->lock is held. 
+ */ +static void kvmppc_xive_release(struct kvm_device *dev) { struct kvmppc_xive *xive = dev->private; struct kvm *kvm = xive->kvm; + struct kvm_vcpu *vcpu; int i; + int was_ready; + + pr_devel("Releasing xive device\n"); debugfs_remove(xive->dentry); - if (kvm) - kvm->arch.xive = NULL; + /* + * Clearing mmu_ready temporarily while holding kvm->lock + * is a way of ensuring that no vcpus can enter the guest + * until we drop kvm->lock. Doing kick_all_cpus_sync() + * ensures that any vcpu executing inside the guest has + * exited the guest. Once kick_all_cpus_sync() has finished, + * we know that no vcpu can be executing the XIVE push or + * pull code, or executing a XICS hcall. + * + * Since this is the device release function, we know that + * userspace does not have any open fd referring to the + * device. Therefore there can not be any of the device + * attribute set/get functions being executed concurrently, + * and similarly, the connect_vcpu and set/clr_mapped + * functions also cannot be being executed. + */ + was_ready = kvm->arch.mmu_ready; + kvm->arch.mmu_ready = 0; + kick_all_cpus_sync(); + + /* + * We should clean up the vCPU interrupt presenters first. + */ + kvm_for_each_vcpu(i, vcpu, kvm) { + /* + * Take vcpu->mutex to ensure that no one_reg get/set ioctl + * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently. + */ + mutex_lock(&vcpu->mutex); + kvmppc_xive_cleanup_vcpu(vcpu); + mutex_unlock(&vcpu->mutex); + } + + kvm->arch.xive = NULL; /* Mask and free interrupts */ for (i = 0; i <= xive->max_sbid; i++) { @@ -1832,11 +1911,47 @@ static void kvmppc_xive_free(struct kvm_device *dev) if (xive->vp_base != XIVE_INVALID_VP) xive_native_free_vp_block(xive->vp_base); + kvm->arch.mmu_ready = was_ready; + + /* + * A reference of the kvmppc_xive pointer is now kept under + * the xive_devices struct of the machine for reuse. It is + * freed when the VM is destroyed for now until we fix all the + * execution paths. + */ - kfree(xive); kfree(dev); } +/* + * When the guest chooses the interrupt mode (XICS legacy or XIVE + * native), the VM will switch of KVM device. The previous device will + * be "released" before the new one is created. + * + * Until we are sure all execution paths are well protected, provide a + * fail safe (transitional) method for device destruction, in which + * the XIVE device pointer is recycled and not directly freed. + */ +struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type) +{ + struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ? + &kvm->arch.xive_devices.native : + &kvm->arch.xive_devices.xics_on_xive; + struct kvmppc_xive *xive = *kvm_xive_device; + + if (!xive) { + xive = kzalloc(sizeof(*xive), GFP_KERNEL); + *kvm_xive_device = xive; + } else { + memset(xive, 0, sizeof(*xive)); + } + + return xive; +} + +/* + * Create a XICS device with XIVE backend. kvm->lock is held. 
+ */ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) { struct kvmppc_xive *xive; @@ -1845,7 +1960,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) pr_devel("Creating xive for partition\n"); - xive = kzalloc(sizeof(*xive), GFP_KERNEL); + xive = kvmppc_xive_get_device(kvm, type); if (!xive) return -ENOMEM; @@ -1883,6 +1998,43 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type) return 0; } +int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + unsigned int i; + + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + struct xive_q *q = &xc->queues[i]; + u32 i0, i1, idx; + + if (!q->qpage && !xc->esc_virq[i]) + continue; + + seq_printf(m, " [q%d]: ", i); + + if (q->qpage) { + idx = q->idx; + i0 = be32_to_cpup(q->qpage + idx); + idx = (idx + 1) & q->msk; + i1 = be32_to_cpup(q->qpage + idx); + seq_printf(m, "T=%d %08x %08x...\n", q->toggle, + i0, i1); + } + if (xc->esc_virq[i]) { + struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); + struct xive_irq_data *xd = + irq_data_get_irq_handler_data(d); + u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); + + seq_printf(m, "E:%c%c I(%d:%llx:%llx)", + (pq & XIVE_ESB_VAL_P) ? 'P' : 'p', + (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q', + xc->esc_virq[i], pq, xd->eoi_page); + seq_puts(m, "\n"); + } + } + return 0; +} static int xive_debug_show(struct seq_file *m, void *private) { @@ -1908,7 +2060,6 @@ static int xive_debug_show(struct seq_file *m, void *private) kvm_for_each_vcpu(i, vcpu, kvm) { struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; - unsigned int i; if (!xc) continue; @@ -1918,33 +2069,8 @@ static int xive_debug_show(struct seq_file *m, void *private) xc->server_num, xc->cppr, xc->hw_cppr, xc->mfrr, xc->pending, xc->stat_rm_h_xirr, xc->stat_vm_h_xirr); - for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { - struct xive_q *q = &xc->queues[i]; - u32 i0, i1, idx; - - if (!q->qpage && !xc->esc_virq[i]) - continue; - seq_printf(m, " [q%d]: ", i); - - if (q->qpage) { - idx = q->idx; - i0 = be32_to_cpup(q->qpage + idx); - idx = (idx + 1) & q->msk; - i1 = be32_to_cpup(q->qpage + idx); - seq_printf(m, "T=%d %08x %08x... \n", q->toggle, i0, i1); - } - if (xc->esc_virq[i]) { - struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]); - struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); - u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET); - seq_printf(m, "E:%c%c I(%d:%llx:%llx)", - (pq & XIVE_ESB_VAL_P) ? 'P' : 'p', - (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q', - xc->esc_virq[i], pq, xd->eoi_page); - seq_printf(m, "\n"); - } - } + kvmppc_xive_debug_show_queues(m, vcpu); t_rm_h_xirr += xc->stat_rm_h_xirr; t_rm_h_ipoll += xc->stat_rm_h_ipoll; @@ -1999,7 +2125,7 @@ struct kvm_device_ops kvm_xive_ops = { .name = "kvm-xive", .create = kvmppc_xive_create, .init = kvmppc_xive_init, - .destroy = kvmppc_xive_free, + .release = kvmppc_xive_release, .set_attr = xive_set_attr, .get_attr = xive_get_attr, .has_attr = xive_has_attr, diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index a08ae6fd4c51..426146332984 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h @@ -13,6 +13,13 @@ #include "book3s_xics.h" /* + * The XIVE Interrupt source numbers are within the range 0 to + * KVMPPC_XICS_NR_IRQS. + */ +#define KVMPPC_XIVE_FIRST_IRQ 0 +#define KVMPPC_XIVE_NR_IRQS KVMPPC_XICS_NR_IRQS + +/* * State for one guest irq source. 
* * For each guest source we allocate a HW interrupt in the XIVE @@ -54,6 +61,9 @@ struct kvmppc_xive_irq_state { bool saved_p; bool saved_q; u8 saved_scan_prio; + + /* Xive native */ + u32 eisn; /* Guest Effective IRQ number */ }; /* Select the "right" interrupt (IPI vs. passthrough) */ @@ -84,6 +94,11 @@ struct kvmppc_xive_src_block { struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS]; }; +struct kvmppc_xive; + +struct kvmppc_xive_ops { + int (*reset_mapped)(struct kvm *kvm, unsigned long guest_irq); +}; struct kvmppc_xive { struct kvm *kvm; @@ -122,6 +137,10 @@ struct kvmppc_xive { /* Flags */ u8 single_escalation; + + struct kvmppc_xive_ops *ops; + struct address_space *mapping; + struct mutex mapping_lock; }; #define KVMPPC_XIVE_Q_COUNT 8 @@ -198,6 +217,11 @@ static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmpp return xive->src_blocks[bid]; } +static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server) +{ + return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server); +} + /* * Mapping between guest priorities and host priorities * is as follow. @@ -248,5 +272,18 @@ extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server, extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr); extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr); +/* + * Common Xive routines for XICS-over-XIVE and XIVE native + */ +void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu); +int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu); +struct kvmppc_xive_src_block *kvmppc_xive_create_src_block( + struct kvmppc_xive *xive, int irq); +void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb); +int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio); +int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio, + bool single_escalation); +struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type); + #endif /* CONFIG_KVM_XICS */ #endif /* _KVM_PPC_BOOK3S_XICS_H */ diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c new file mode 100644 index 000000000000..6a8e698c4b6e --- /dev/null +++ b/arch/powerpc/kvm/book3s_xive_native.c @@ -0,0 +1,1249 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2019, IBM Corporation. 
+ */ + +#define pr_fmt(fmt) "xive-kvm: " fmt + +#include <linux/kernel.h> +#include <linux/kvm_host.h> +#include <linux/err.h> +#include <linux/gfp.h> +#include <linux/spinlock.h> +#include <linux/delay.h> +#include <linux/file.h> +#include <asm/uaccess.h> +#include <asm/kvm_book3s.h> +#include <asm/kvm_ppc.h> +#include <asm/hvcall.h> +#include <asm/xive.h> +#include <asm/xive-regs.h> +#include <asm/debug.h> +#include <asm/debugfs.h> +#include <asm/opal.h> + +#include <linux/debugfs.h> +#include <linux/seq_file.h> + +#include "book3s_xive.h" + +static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset) +{ + u64 val; + + if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG) + offset |= offset << 4; + + val = in_be64(xd->eoi_mmio + offset); + return (u8)val; +} + +static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + struct xive_q *q = &xc->queues[prio]; + + xive_native_disable_queue(xc->vp_id, q, prio); + if (q->qpage) { + put_page(virt_to_page(q->qpage)); + q->qpage = NULL; + } +} + +void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) +{ + struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu; + int i; + + if (!kvmppc_xive_enabled(vcpu)) + return; + + if (!xc) + return; + + pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num); + + /* Ensure no interrupt is still routed to that VP */ + xc->valid = false; + kvmppc_xive_disable_vcpu_interrupts(vcpu); + + /* Disable the VP */ + xive_native_disable_vp(xc->vp_id); + + /* Free the queues & associated interrupts */ + for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) { + /* Free the escalation irq */ + if (xc->esc_virq[i]) { + free_irq(xc->esc_virq[i], vcpu); + irq_dispose_mapping(xc->esc_virq[i]); + kfree(xc->esc_virq_names[i]); + xc->esc_virq[i] = 0; + } + + /* Free the queue */ + kvmppc_xive_native_cleanup_queue(vcpu, i); + } + + /* Free the VP */ + kfree(xc); + + /* Cleanup the vcpu */ + vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT; + vcpu->arch.xive_vcpu = NULL; +} + +int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev, + struct kvm_vcpu *vcpu, u32 server_num) +{ + struct kvmppc_xive *xive = dev->private; + struct kvmppc_xive_vcpu *xc = NULL; + int rc; + + pr_devel("native_connect_vcpu(server=%d)\n", server_num); + + if (dev->ops != &kvm_xive_native_ops) { + pr_devel("Wrong ops !\n"); + return -EPERM; + } + if (xive->kvm != vcpu->kvm) + return -EPERM; + if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT) + return -EBUSY; + if (server_num >= KVM_MAX_VCPUS) { + pr_devel("Out of bounds !\n"); + return -EINVAL; + } + + mutex_lock(&vcpu->kvm->lock); + + if (kvmppc_xive_find_server(vcpu->kvm, server_num)) { + pr_devel("Duplicate !\n"); + rc = -EEXIST; + goto bail; + } + + xc = kzalloc(sizeof(*xc), GFP_KERNEL); + if (!xc) { + rc = -ENOMEM; + goto bail; + } + + vcpu->arch.xive_vcpu = xc; + xc->xive = xive; + xc->vcpu = vcpu; + xc->server_num = server_num; + + xc->vp_id = kvmppc_xive_vp(xive, server_num); + xc->valid = true; + vcpu->arch.irq_type = KVMPPC_IRQ_XIVE; + + rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id); + if (rc) { + pr_err("Failed to get VP info from OPAL: %d\n", rc); + goto bail; + } + + /* + * Enable the VP first as the single escalation mode will + * affect escalation interrupts numbering + */ + rc = xive_native_enable_vp(xc->vp_id, xive->single_escalation); + if (rc) { + pr_err("Failed to enable VP in OPAL: %d\n", rc); + goto bail; + } + + /* Configure VCPU fields for use by assembly push/pull */ + vcpu->arch.xive_saved_state.w01 = 
cpu_to_be64(0xff000000); + vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO); + + /* TODO: reset all queues to a clean state ? */ +bail: + mutex_unlock(&vcpu->kvm->lock); + if (rc) + kvmppc_xive_native_cleanup_vcpu(vcpu); + + return rc; +} + +/* + * Device passthrough support + */ +static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq) +{ + struct kvmppc_xive *xive = kvm->arch.xive; + + if (irq >= KVMPPC_XIVE_NR_IRQS) + return -EINVAL; + + /* + * Clear the ESB pages of the IRQ number being mapped (or + * unmapped) into the guest and let the the VM fault handler + * repopulate with the appropriate ESB pages (device or IC) + */ + pr_debug("clearing esb pages for girq 0x%lx\n", irq); + mutex_lock(&xive->mapping_lock); + if (xive->mapping) + unmap_mapping_range(xive->mapping, + irq * (2ull << PAGE_SHIFT), + 2ull << PAGE_SHIFT, 1); + mutex_unlock(&xive->mapping_lock); + return 0; +} + +static struct kvmppc_xive_ops kvmppc_xive_native_ops = { + .reset_mapped = kvmppc_xive_native_reset_mapped, +}; + +static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct kvm_device *dev = vma->vm_file->private_data; + struct kvmppc_xive *xive = dev->private; + struct kvmppc_xive_src_block *sb; + struct kvmppc_xive_irq_state *state; + struct xive_irq_data *xd; + u32 hw_num; + u16 src; + u64 page; + unsigned long irq; + u64 page_offset; + + /* + * Linux/KVM uses a two pages ESB setting, one for trigger and + * one for EOI + */ + page_offset = vmf->pgoff - vma->vm_pgoff; + irq = page_offset / 2; + + sb = kvmppc_xive_find_source(xive, irq, &src); + if (!sb) { + pr_devel("%s: source %lx not found !\n", __func__, irq); + return VM_FAULT_SIGBUS; + } + + state = &sb->irq_state[src]; + kvmppc_xive_select_irq(state, &hw_num, &xd); + + arch_spin_lock(&sb->lock); + + /* + * first/even page is for trigger + * second/odd page is for EOI and management. + */ + page = page_offset % 2 ? 
+
+/*
+ * Device passthrough support
+ */
+static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
+{
+	struct kvmppc_xive *xive = kvm->arch.xive;
+
+	if (irq >= KVMPPC_XIVE_NR_IRQS)
+		return -EINVAL;
+
+	/*
+	 * Clear the ESB pages of the IRQ number being mapped (or
+	 * unmapped) into the guest and let the VM fault handler
+	 * repopulate with the appropriate ESB pages (device or IC)
+	 */
+	pr_debug("clearing esb pages for girq 0x%lx\n", irq);
+	mutex_lock(&xive->mapping_lock);
+	if (xive->mapping)
+		unmap_mapping_range(xive->mapping,
+				    irq * (2ull << PAGE_SHIFT),
+				    2ull << PAGE_SHIFT, 1);
+	mutex_unlock(&xive->mapping_lock);
+	return 0;
+}
+
+static struct kvmppc_xive_ops kvmppc_xive_native_ops = {
+	.reset_mapped = kvmppc_xive_native_reset_mapped,
+};
+
+static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct kvm_device *dev = vma->vm_file->private_data;
+	struct kvmppc_xive *xive = dev->private;
+	struct kvmppc_xive_src_block *sb;
+	struct kvmppc_xive_irq_state *state;
+	struct xive_irq_data *xd;
+	u32 hw_num;
+	u16 src;
+	u64 page;
+	unsigned long irq;
+	u64 page_offset;
+
+	/*
+	 * Linux/KVM uses a two-page ESB setting, one for trigger and
+	 * one for EOI
+	 */
+	page_offset = vmf->pgoff - vma->vm_pgoff;
+	irq = page_offset / 2;
+
+	sb = kvmppc_xive_find_source(xive, irq, &src);
+	if (!sb) {
+		pr_devel("%s: source %lx not found!\n", __func__, irq);
+		return VM_FAULT_SIGBUS;
+	}
+
+	state = &sb->irq_state[src];
+	kvmppc_xive_select_irq(state, &hw_num, &xd);
+
+	arch_spin_lock(&sb->lock);
+
+	/*
+	 * first/even page is for trigger
+	 * second/odd page is for EOI and management.
+	 */
+	page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
+	arch_spin_unlock(&sb->lock);
+
+	if (WARN_ON(!page)) {
+		pr_err("%s: accessing invalid ESB page for source %lx!\n",
+		       __func__, irq);
+		return VM_FAULT_SIGBUS;
+	}
+
+	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct xive_native_esb_vmops = {
+	.fault = xive_native_esb_fault,
+};
+
+static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+
+	switch (vmf->pgoff - vma->vm_pgoff) {
+	case 0: /* HW - forbid access */
+	case 1: /* HV - forbid access */
+		return VM_FAULT_SIGBUS;
+	case 2: /* OS */
+		vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
+		return VM_FAULT_NOPAGE;
+	case 3: /* USER - TODO */
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
+static const struct vm_operations_struct xive_native_tima_vmops = {
+	.fault = xive_native_tima_fault,
+};
+
+static int kvmppc_xive_native_mmap(struct kvm_device *dev,
+				   struct vm_area_struct *vma)
+{
+	struct kvmppc_xive *xive = dev->private;
+
+	/* We only allow mappings at fixed offset for now */
+	if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
+		if (vma_pages(vma) > 4)
+			return -EINVAL;
+		vma->vm_ops = &xive_native_tima_vmops;
+	} else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
+		if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
+			return -EINVAL;
+		vma->vm_ops = &xive_native_esb_vmops;
+	} else {
+		return -EINVAL;
+	}
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP;
+	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);
+
+	/*
+	 * Grab the KVM device file address_space to be able to clear
+	 * the ESB pages mapping when a device is passed-through into
+	 * the guest.
+	 */
+	xive->mapping = vma->vm_file->f_mapping;
+	return 0;
+}
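+
+/*
+ * Mapping sketch for userspace (illustrative, pg being the host page
+ * size): the TIMA and the ESB pages sit at the fixed page offsets of
+ * the device fd checked above, so a VMM could issue:
+ *
+ *	tima = mmap(NULL, 4 * pg, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *		    xive_fd, KVM_XIVE_TIMA_PAGE_OFFSET * pg);
+ *	esb = mmap(NULL, nr_irqs * 2 * pg, PROT_READ | PROT_WRITE,
+ *		   MAP_SHARED, xive_fd, KVM_XIVE_ESB_PAGE_OFFSET * pg);
+ *
+ * The ESB area then holds two pages per interrupt, trigger page first,
+ * EOI/management page second, faulted in lazily by the handlers above.
+ */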
+
+static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
+					 u64 addr)
+{
+	struct kvmppc_xive_src_block *sb;
+	struct kvmppc_xive_irq_state *state;
+	u64 __user *ubufp = (u64 __user *) addr;
+	u64 val;
+	u16 idx;
+	int rc;
+
+	pr_devel("%s irq=0x%lx\n", __func__, irq);
+
+	if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
+		return -E2BIG;
+
+	sb = kvmppc_xive_find_source(xive, irq, &idx);
+	if (!sb) {
+		pr_debug("No source, creating source block...\n");
+		sb = kvmppc_xive_create_src_block(xive, irq);
+		if (!sb) {
+			pr_err("Failed to create block...\n");
+			return -ENOMEM;
+		}
+	}
+	state = &sb->irq_state[idx];
+
+	if (get_user(val, ubufp)) {
+		pr_err("fault getting user info!\n");
+		return -EFAULT;
+	}
+
+	arch_spin_lock(&sb->lock);
+
+	/*
+	 * If the source doesn't already have an IPI, allocate
+	 * one and get the corresponding data
+	 */
+	if (!state->ipi_number) {
+		state->ipi_number = xive_native_alloc_irq();
+		if (state->ipi_number == 0) {
+			pr_err("Failed to allocate IRQ!\n");
+			rc = -ENXIO;
+			goto unlock;
+		}
+		xive_native_populate_irq_data(state->ipi_number,
+					      &state->ipi_data);
+		pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
+			 state->ipi_number, irq);
+	}
+
+	/* Restore LSI state */
+	if (val & KVM_XIVE_LEVEL_SENSITIVE) {
+		state->lsi = true;
+		if (val & KVM_XIVE_LEVEL_ASSERTED)
+			state->asserted = true;
+		pr_devel("  LSI! Asserted=%d\n", state->asserted);
+	}
+
+	/* Mask IRQ to start with */
+	state->act_server = 0;
+	state->act_priority = MASKED;
+	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+	xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+
+	/* Increment the number of valid sources and mark this one valid */
+	if (!state->valid)
+		xive->src_count++;
+	state->valid = true;
+
+	rc = 0;
+
+unlock:
+	arch_spin_unlock(&sb->lock);
+
+	return rc;
+}
+
+static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
+					struct kvmppc_xive_src_block *sb,
+					struct kvmppc_xive_irq_state *state,
+					u32 server, u8 priority, bool masked,
+					u32 eisn)
+{
+	struct kvm *kvm = xive->kvm;
+	u32 hw_num;
+	int rc = 0;
+
+	arch_spin_lock(&sb->lock);
+
+	if (state->act_server == server && state->act_priority == priority &&
+	    state->eisn == eisn)
+		goto unlock;
+
+	pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
+		 priority, server, masked, state->act_server,
+		 state->act_priority);
+
+	kvmppc_xive_select_irq(state, &hw_num, NULL);
+
+	if (priority != MASKED && !masked) {
+		rc = kvmppc_xive_select_target(kvm, &server, priority);
+		if (rc)
+			goto unlock;
+
+		state->act_priority = priority;
+		state->act_server = server;
+		state->eisn = eisn;
+
+		rc = xive_native_configure_irq(hw_num,
+					       kvmppc_xive_vp(xive, server),
+					       priority, eisn);
+	} else {
+		state->act_priority = MASKED;
+		state->act_server = 0;
+		state->eisn = 0;
+
+		rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
+	}
+
+unlock:
+	arch_spin_unlock(&sb->lock);
+	return rc;
+}
+
+static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
+						long irq, u64 addr)
+{
+	struct kvmppc_xive_src_block *sb;
+	struct kvmppc_xive_irq_state *state;
+	u64 __user *ubufp = (u64 __user *) addr;
+	u16 src;
+	u64 kvm_cfg;
+	u32 server;
+	u8 priority;
+	bool masked;
+	u32 eisn;
+
+	sb = kvmppc_xive_find_source(xive, irq, &src);
+	if (!sb)
+		return -ENOENT;
+
+	state = &sb->irq_state[src];
+
+	if (!state->valid)
+		return -EINVAL;
+
+	if (get_user(kvm_cfg, ubufp))
+		return -EFAULT;
+
+	pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);
+
+	priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
+		KVM_XIVE_SOURCE_PRIORITY_SHIFT;
+	server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
+		KVM_XIVE_SOURCE_SERVER_SHIFT;
+	masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
+		KVM_XIVE_SOURCE_MASKED_SHIFT;
+	eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
+		KVM_XIVE_SOURCE_EISN_SHIFT;
+
+	if (priority != xive_prio_from_guest(priority)) {
+		pr_err("invalid priority for queue %d for VCPU %d\n",
+		       priority, server);
+		return -EINVAL;
+	}
+
+	return kvmppc_xive_native_update_source_config(xive, sb, state, server,
+						       priority, masked, eisn);
+}
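+
+/*
+ * For reference (illustrative): the 64-bit configuration word decoded
+ * above packs all four fields, so a VMM would build it roughly as:
+ *
+ *	kvm_cfg = ((u64)priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT) |
+ *		  ((u64)server << KVM_XIVE_SOURCE_SERVER_SHIFT) |
+ *		  ((u64)masked << KVM_XIVE_SOURCE_MASKED_SHIFT) |
+ *		  ((u64)eisn << KVM_XIVE_SOURCE_EISN_SHIFT);
+ *
+ * before storing it at the address passed in kvm_device_attr.addr.
+ */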
+
+static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
+					  long irq, u64 addr)
+{
+	struct kvmppc_xive_src_block *sb;
+	struct kvmppc_xive_irq_state *state;
+	struct xive_irq_data *xd;
+	u32 hw_num;
+	u16 src;
+	int rc = 0;
+
+	pr_devel("%s irq=0x%lx\n", __func__, irq);
+
+	sb = kvmppc_xive_find_source(xive, irq, &src);
+	if (!sb)
+		return -ENOENT;
+
+	state = &sb->irq_state[src];
+
+	rc = -EINVAL;
+
+	arch_spin_lock(&sb->lock);
+
+	if (state->valid) {
+		kvmppc_xive_select_irq(state, &hw_num, &xd);
+		xive_native_sync_source(hw_num);
+		rc = 0;
+	}
+
+	arch_spin_unlock(&sb->lock);
+	return rc;
+}
+
+static int xive_native_validate_queue_size(u32 qshift)
+{
+	/*
+	 * We only support 64K pages for the moment. This is also
+	 * advertised in the DT property "ibm,xive-eq-sizes"
+	 */
+	switch (qshift) {
+	case 0: /* EQ reset */
+	case 16:
+		return 0;
+	case 12:
+	case 21:
+	case 24:
+	default:
+		return -EINVAL;
+	}
+}
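+
+/*
+ * Put differently (illustrative): the only EQ sizes accepted are
+ * qshift == 16, a 64KB queue (16K 4-byte event entries), and
+ * qshift == 0, the "reset this queue" case handled by the caller
+ * below. The other sizes the XIVE architecture defines (4KB, 2MB,
+ * 16MB) are deliberately rejected.
+ */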
+
+static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
+					       long eq_idx, u64 addr)
+{
+	struct kvm *kvm = xive->kvm;
+	struct kvm_vcpu *vcpu;
+	struct kvmppc_xive_vcpu *xc;
+	void __user *ubufp = (void __user *) addr;
+	u32 server;
+	u8 priority;
+	struct kvm_ppc_xive_eq kvm_eq;
+	int rc;
+	__be32 *qaddr = NULL;
+	struct page *page;
+	struct xive_q *q;
+	gfn_t gfn;
+	unsigned long page_size;
+
+	/*
+	 * Demangle priority/server tuple from the EQ identifier
+	 */
+	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
+		KVM_XIVE_EQ_PRIORITY_SHIFT;
+	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
+		KVM_XIVE_EQ_SERVER_SHIFT;
+
+	if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
+		return -EFAULT;
+
+	vcpu = kvmppc_xive_find_server(kvm, server);
+	if (!vcpu) {
+		pr_err("Can't find server %d\n", server);
+		return -ENOENT;
+	}
+	xc = vcpu->arch.xive_vcpu;
+
+	if (priority != xive_prio_from_guest(priority)) {
+		pr_err("Trying to restore invalid queue %d for VCPU %d\n",
+		       priority, server);
+		return -EINVAL;
+	}
+	q = &xc->queues[priority];
+
+	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
+		 __func__, server, priority, kvm_eq.flags,
+		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
+
+	/*
+	 * sPAPR specifies an "Unconditional Notify (n)" flag for the
+	 * H_INT_SET_QUEUE_CONFIG hcall which forces notification
+	 * without using the coalescing mechanisms provided by the
+	 * XIVE END ESBs. This is required on KVM as notification
+	 * using the END ESBs is not supported.
+	 */
+	if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
+		pr_err("invalid flags %d\n", kvm_eq.flags);
+		return -EINVAL;
+	}
+
+	rc = xive_native_validate_queue_size(kvm_eq.qshift);
+	if (rc) {
+		pr_err("invalid queue size %d\n", kvm_eq.qshift);
+		return rc;
+	}
+
+	/* reset queue and disable queueing */
+	if (!kvm_eq.qshift) {
+		q->guest_qaddr = 0;
+		q->guest_qshift = 0;
+
+		rc = xive_native_configure_queue(xc->vp_id, q, priority,
+						 NULL, 0, true);
+		if (rc) {
+			pr_err("Failed to reset queue %d for VCPU %d: %d\n",
+			       priority, xc->server_num, rc);
+			return rc;
+		}
+
+		if (q->qpage) {
+			put_page(virt_to_page(q->qpage));
+			q->qpage = NULL;
+		}
+
+		return 0;
+	}
+
+	if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
+		pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
+		       1ull << kvm_eq.qshift);
+		return -EINVAL;
+	}
+
+	gfn = gpa_to_gfn(kvm_eq.qaddr);
+	page = gfn_to_page(kvm, gfn);
+	if (is_error_page(page)) {
+		pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
+		return -EINVAL;
+	}
+
+	page_size = kvm_host_page_size(kvm, gfn);
+	if (1ull << kvm_eq.qshift > page_size) {
+		pr_warn("Incompatible host page size %lx!\n", page_size);
+		return -EINVAL;
+	}
+
+	qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
+
+	/*
+	 * Back up the queue page guest address so the EQ page can be
+	 * marked dirty for migration.
+	 */
+	q->guest_qaddr = kvm_eq.qaddr;
+	q->guest_qshift = kvm_eq.qshift;
+
+	/*
+	 * Unconditional Notification is forced by default at the
+	 * OPAL level because the use of END ESBs is not supported by
+	 * Linux.
+	 */
+	rc = xive_native_configure_queue(xc->vp_id, q, priority,
+					 (__be32 *) qaddr, kvm_eq.qshift, true);
+	if (rc) {
+		pr_err("Failed to configure queue %d for VCPU %d: %d\n",
+		       priority, xc->server_num, rc);
+		put_page(page);
+		return rc;
+	}
+
+	/*
+	 * Only restore the queue state when needed. When doing the
+	 * H_INT_SET_QUEUE_CONFIG hcall, it should not.
+	 */
+	if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
+		rc = xive_native_set_queue_state(xc->vp_id, priority,
+						 kvm_eq.qtoggle,
+						 kvm_eq.qindex);
+		if (rc)
+			goto error;
+	}
+
+	rc = kvmppc_xive_attach_escalation(vcpu, priority,
+					   xive->single_escalation);
+error:
+	if (rc)
+		kvmppc_xive_native_cleanup_queue(vcpu, priority);
+	return rc;
+}
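+
+/*
+ * For reference (illustrative): the EQ identifier carried in the
+ * attribute number packs the server and priority, mirroring the
+ * demangling above:
+ *
+ *	eq_idx = (server << KVM_XIVE_EQ_SERVER_SHIFT) |
+ *		 (priority << KVM_XIVE_EQ_PRIORITY_SHIFT);
+ *
+ * while the queue parameters themselves travel in a struct
+ * kvm_ppc_xive_eq pointed to by kvm_device_attr.addr.
+ */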
+
+static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
+					       long eq_idx, u64 addr)
+{
+	struct kvm *kvm = xive->kvm;
+	struct kvm_vcpu *vcpu;
+	struct kvmppc_xive_vcpu *xc;
+	struct xive_q *q;
+	void __user *ubufp = (void __user *) addr;
+	u32 server;
+	u8 priority;
+	struct kvm_ppc_xive_eq kvm_eq;
+	u64 qaddr;
+	u64 qshift;
+	u64 qeoi_page;
+	u32 escalate_irq;
+	u64 qflags;
+	int rc;
+
+	/*
+	 * Demangle priority/server tuple from the EQ identifier
+	 */
+	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
+		KVM_XIVE_EQ_PRIORITY_SHIFT;
+	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
+		KVM_XIVE_EQ_SERVER_SHIFT;
+
+	vcpu = kvmppc_xive_find_server(kvm, server);
+	if (!vcpu) {
+		pr_err("Can't find server %d\n", server);
+		return -ENOENT;
+	}
+	xc = vcpu->arch.xive_vcpu;
+
+	if (priority != xive_prio_from_guest(priority)) {
+		pr_err("invalid priority for queue %d for VCPU %d\n",
+		       priority, server);
+		return -EINVAL;
+	}
+	q = &xc->queues[priority];
+
+	memset(&kvm_eq, 0, sizeof(kvm_eq));
+
+	if (!q->qpage)
+		return 0;
+
+	rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
+					&qeoi_page, &escalate_irq, &qflags);
+	if (rc)
+		return rc;
+
+	kvm_eq.flags = 0;
+	if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
+		kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;
+
+	kvm_eq.qshift = q->guest_qshift;
+	kvm_eq.qaddr = q->guest_qaddr;
+
+	rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
+					 &kvm_eq.qindex);
+	if (rc)
+		return rc;
+
+	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
+		 __func__, server, priority, kvm_eq.flags,
+		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);
+
+	if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
+		return -EFAULT;
+
+	return 0;
+}
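+
+/*
+ * Migration sketch: at save time a VMM is expected to read each EQ back
+ * with the getter above (after a KVM_DEV_XIVE_EQ_SYNC, below) so that
+ * the guest address, size, toggle bit and index can be replayed through
+ * kvmppc_xive_native_set_queue_config() on the destination.
+ */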
+
+static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
+{
+	int i;
+
+	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
+		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
+
+		if (!state->valid)
+			continue;
+
+		if (state->act_priority == MASKED)
+			continue;
+
+		state->eisn = 0;
+		state->act_server = 0;
+		state->act_priority = MASKED;
+		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
+		xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
+		if (state->pt_number) {
+			xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
+			xive_native_configure_irq(state->pt_number,
+						  0, MASKED, 0);
+		}
+	}
+}
+
+static int kvmppc_xive_reset(struct kvmppc_xive *xive)
+{
+	struct kvm *kvm = xive->kvm;
+	struct kvm_vcpu *vcpu;
+	unsigned int i;
+
+	pr_devel("%s\n", __func__);
+
+	mutex_lock(&kvm->lock);
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+		unsigned int prio;
+
+		if (!xc)
+			continue;
+
+		kvmppc_xive_disable_vcpu_interrupts(vcpu);
+
+		for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
+
+			/* Single escalation, no queue 7 */
+			if (prio == 7 && xive->single_escalation)
+				break;
+
+			if (xc->esc_virq[prio]) {
+				free_irq(xc->esc_virq[prio], vcpu);
+				irq_dispose_mapping(xc->esc_virq[prio]);
+				kfree(xc->esc_virq_names[prio]);
+				xc->esc_virq[prio] = 0;
+			}
+
+			kvmppc_xive_native_cleanup_queue(vcpu, prio);
+		}
+	}
+
+	for (i = 0; i <= xive->max_sbid; i++) {
+		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+		if (sb) {
+			arch_spin_lock(&sb->lock);
+			kvmppc_xive_reset_sources(sb);
+			arch_spin_unlock(&sb->lock);
+		}
+	}
+
+	mutex_unlock(&kvm->lock);
+
+	return 0;
+}
+
+static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
+{
+	int j;
+
+	for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
+		struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
+		struct xive_irq_data *xd;
+		u32 hw_num;
+
+		if (!state->valid)
+			continue;
+
+		/*
+		 * The struct kvmppc_xive_irq_state reflects the state
+		 * of the EAS configuration and not the state of the
+		 * source. The source is masked by setting the PQ bits
+		 * to '-Q', which is what is being done before calling
+		 * the KVM_DEV_XIVE_EQ_SYNC control.
+		 *
+		 * If a source EAS is configured, OPAL syncs the XIVE
+		 * IC of the source and the XIVE IC of the previous
+		 * target if any.
+		 *
+		 * So it should be fine ignoring MASKED sources as
+		 * they have been synced already.
+		 */
+		if (state->act_priority == MASKED)
+			continue;
+
+		kvmppc_xive_select_irq(state, &hw_num, &xd);
+		xive_native_sync_source(hw_num);
+		xive_native_sync_queue(hw_num);
+	}
+}
+
+static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
+{
+	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+	unsigned int prio;
+
+	if (!xc)
+		return -ENOENT;
+
+	for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
+		struct xive_q *q = &xc->queues[prio];
+
+		if (!q->qpage)
+			continue;
+
+		/* Mark EQ page dirty for migration */
+		mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
+	}
+	return 0;
+}
+
+static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
+{
+	struct kvm *kvm = xive->kvm;
+	struct kvm_vcpu *vcpu;
+	unsigned int i;
+
+	pr_devel("%s\n", __func__);
+
+	mutex_lock(&kvm->lock);
+	for (i = 0; i <= xive->max_sbid; i++) {
+		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
+
+		if (sb) {
+			arch_spin_lock(&sb->lock);
+			kvmppc_xive_native_sync_sources(sb);
+			arch_spin_unlock(&sb->lock);
+		}
+	}
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		kvmppc_xive_native_vcpu_eq_sync(vcpu);
+	}
+	mutex_unlock(&kvm->lock);
+
+	return 0;
+}
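+
+/*
+ * All of the controls above are reached through the generic KVM device
+ * attribute ioctls dispatched just below. An illustrative call from
+ * userspace (kvm_cfg being the 64-bit configuration word):
+ *
+ *	struct kvm_device_attr attr = {
+ *		.group = KVM_DEV_XIVE_GRP_SOURCE_CONFIG,
+ *		.attr  = girq,
+ *		.addr  = (u64)(uintptr_t)&kvm_cfg,
+ *	};
+ *	ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
+ */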
+
+static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
+				       struct kvm_device_attr *attr)
+{
+	struct kvmppc_xive *xive = dev->private;
+
+	switch (attr->group) {
+	case KVM_DEV_XIVE_GRP_CTRL:
+		switch (attr->attr) {
+		case KVM_DEV_XIVE_RESET:
+			return kvmppc_xive_reset(xive);
+		case KVM_DEV_XIVE_EQ_SYNC:
+			return kvmppc_xive_native_eq_sync(xive);
+		}
+		break;
+	case KVM_DEV_XIVE_GRP_SOURCE:
+		return kvmppc_xive_native_set_source(xive, attr->attr,
+						     attr->addr);
+	case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
+		return kvmppc_xive_native_set_source_config(xive, attr->attr,
+							    attr->addr);
+	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+		return kvmppc_xive_native_set_queue_config(xive, attr->attr,
+							   attr->addr);
+	case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
+		return kvmppc_xive_native_sync_source(xive, attr->attr,
+						      attr->addr);
+	}
+	return -ENXIO;
+}
+
+static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
+				       struct kvm_device_attr *attr)
+{
+	struct kvmppc_xive *xive = dev->private;
+
+	switch (attr->group) {
+	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+		return kvmppc_xive_native_get_queue_config(xive, attr->attr,
+							   attr->addr);
+	}
+	return -ENXIO;
+}
+
+static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
+				       struct kvm_device_attr *attr)
+{
+	switch (attr->group) {
+	case KVM_DEV_XIVE_GRP_CTRL:
+		switch (attr->attr) {
+		case KVM_DEV_XIVE_RESET:
+		case KVM_DEV_XIVE_EQ_SYNC:
+			return 0;
+		}
+		break;
+	case KVM_DEV_XIVE_GRP_SOURCE:
+	case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
+	case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
+		if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
+		    attr->attr < KVMPPC_XIVE_NR_IRQS)
+			return 0;
+		break;
+	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
+		return 0;
+	}
+	return -ENXIO;
+}
+
+/*
+ * Called when device fd is closed
+ */
+static void kvmppc_xive_native_release(struct kvm_device *dev)
+{
+	struct kvmppc_xive *xive = dev->private;
+	struct kvm *kvm = xive->kvm;
+	struct kvm_vcpu *vcpu;
+	int i;
+	int was_ready;
+
+	debugfs_remove(xive->dentry);
+
+	pr_devel("Releasing xive native device\n");
+
+	/*
+	 * Clearing mmu_ready temporarily while holding kvm->lock
+	 * is a way of ensuring that no vcpus can enter the guest
+	 * until we drop kvm->lock. Doing kick_all_cpus_sync()
+	 * ensures that any vcpu executing inside the guest has
+	 * exited the guest. Once kick_all_cpus_sync() has finished,
+	 * we know that no vcpu can be executing the XIVE push or
+	 * pull code or accessing the XIVE MMIO regions.
+	 *
+	 * Since this is the device release function, we know that
+	 * userspace does not have any open fd or mmap referring to
+	 * the device. Therefore there cannot be any of the
+	 * device attribute set/get, mmap, or page fault functions
+	 * being executed concurrently, and similarly, the
+	 * connect_vcpu and set/clr_mapped functions also cannot
+	 * be executing.
+	 */
+	was_ready = kvm->arch.mmu_ready;
+	kvm->arch.mmu_ready = 0;
+	kick_all_cpus_sync();
+
+	/*
+	 * We should clean up the vCPU interrupt presenters first.
+	 */
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		/*
+		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
+		 * (i.e. kvmppc_xive_native_[gs]et_vp) can be in progress.
+		 */
+		mutex_lock(&vcpu->mutex);
+		kvmppc_xive_native_cleanup_vcpu(vcpu);
+		mutex_unlock(&vcpu->mutex);
+	}
+
+	kvm->arch.xive = NULL;
+
+	for (i = 0; i <= xive->max_sbid; i++) {
+		if (xive->src_blocks[i])
+			kvmppc_xive_free_sources(xive->src_blocks[i]);
+		kfree(xive->src_blocks[i]);
+		xive->src_blocks[i] = NULL;
+	}
+
+	if (xive->vp_base != XIVE_INVALID_VP)
+		xive_native_free_vp_block(xive->vp_base);
+
+	kvm->arch.mmu_ready = was_ready;
+
+	/*
+	 * A reference to the kvmppc_xive pointer is now kept under
+	 * the xive_devices struct of the machine for reuse. For now
+	 * it is freed when the VM is destroyed, until all the
+	 * execution paths are fixed.
+	 */
+
+	kfree(dev);
+}
+
+/*
+ * Create a XIVE device. kvm->lock is held.
+ */
+static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
+{
+	struct kvmppc_xive *xive;
+	struct kvm *kvm = dev->kvm;
+	int ret = 0;
+
+	pr_devel("Creating xive native device\n");
+
+	if (kvm->arch.xive)
+		return -EEXIST;
+
+	xive = kvmppc_xive_get_device(kvm, type);
+	if (!xive)
+		return -ENOMEM;
+
+	dev->private = xive;
+	xive->dev = dev;
+	xive->kvm = kvm;
+	kvm->arch.xive = xive;
+	mutex_init(&xive->mapping_lock);
+
+	/*
+	 * Allocate a bunch of VPs. KVM_MAX_VCPUS is a large value for
+	 * a default. Getting the max number of CPUs the VM was
+	 * configured with would improve our usage of the XIVE VP space.
+	 */
+	xive->vp_base = xive_native_alloc_vp_block(KVM_MAX_VCPUS);
+	pr_devel("VP_Base=%x\n", xive->vp_base);
+
+	if (xive->vp_base == XIVE_INVALID_VP)
+		ret = -ENXIO;
+
+	xive->single_escalation = xive_native_has_single_escalation();
+	xive->ops = &kvmppc_xive_native_ops;
+
+	if (ret)
+		kfree(xive);
+
+	return ret;
+}
+
+/*
+ * Interrupt Pending Buffer (IPB) offset
+ */
+#define TM_IPB_SHIFT 40
+#define TM_IPB_MASK  (((u64) 0xFF) << TM_IPB_SHIFT)
+
+int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
+{
+	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+	u64 opal_state;
+	int rc;
+
+	if (!kvmppc_xive_enabled(vcpu))
+		return -EPERM;
+
+	if (!xc)
+		return -ENOENT;
+
+	/* Thread context registers. We only care about IPB and CPPR */
+	val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;
+
+	/* Get the VP state from OPAL */
+	rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
+	if (rc)
+		return rc;
+
+	/*
+	 * Capture the backup of the IPB register in the NVT structure
+	 * and merge it in our KVM VP state.
+	 */
+	val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);
+
+	pr_devel("%s NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
+		 __func__,
+		 vcpu->arch.xive_saved_state.nsr,
+		 vcpu->arch.xive_saved_state.cppr,
+		 vcpu->arch.xive_saved_state.ipb,
+		 vcpu->arch.xive_saved_state.pipr,
+		 vcpu->arch.xive_saved_state.w01,
+		 (u32) vcpu->arch.xive_cam_word, opal_state);
+
+	return 0;
+}
+
+int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
+{
+	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
+
+	pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
+		 val->xive_timaval[0], val->xive_timaval[1]);
+
+	if (!kvmppc_xive_enabled(vcpu))
+		return -EPERM;
+
+	if (!xc || !xive)
+		return -ENOENT;
+
+	/* We can't update the state of a "pushed" VCPU */
+	if (WARN_ON(vcpu->arch.xive_pushed))
+		return -EBUSY;
+
+	/*
+	 * Restore the thread context registers. IPB and CPPR should
+	 * be the only ones that matter.
+	 */
+	vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];
+
+	/*
+	 * There is no need to restore the XIVE internal state (IPB
+	 * stored in the NVT) as the IPB register was merged in the
+	 * KVM VP state when captured.
+	 */
+	return 0;
+}
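+
+/*
+ * Note (sketch): the two accessors above back the vcpu ONE_REG interface
+ * for the XIVE VP state, a pair of 64-bit words (xive_timaval[2]). Word 0
+ * carries the thread interrupt context (w01: NSR, CPPR, IPB, PIPR) with
+ * the IPB backup from the NVT merged in at capture time.
+ */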
+
+static int xive_native_debug_show(struct seq_file *m, void *private)
+{
+	struct kvmppc_xive *xive = m->private;
+	struct kvm *kvm = xive->kvm;
+	struct kvm_vcpu *vcpu;
+	unsigned int i;
+
+	if (!kvm)
+		return 0;
+
+	seq_puts(m, "=========\nVCPU state\n=========\n");
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+
+		if (!xc)
+			continue;
+
+		seq_printf(m, "cpu server %#x NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x\n",
+			   xc->server_num,
+			   vcpu->arch.xive_saved_state.nsr,
+			   vcpu->arch.xive_saved_state.cppr,
+			   vcpu->arch.xive_saved_state.ipb,
+			   vcpu->arch.xive_saved_state.pipr,
+			   vcpu->arch.xive_saved_state.w01,
+			   (u32) vcpu->arch.xive_cam_word);
+
+		kvmppc_xive_debug_show_queues(m, vcpu);
+	}
+
+	return 0;
+}
+
+static int xive_native_debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, xive_native_debug_show, inode->i_private);
+}
+
+static const struct file_operations xive_native_debug_fops = {
+	.open = xive_native_debug_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static void xive_native_debugfs_init(struct kvmppc_xive *xive)
+{
+	char *name;
+
+	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
+	if (!name) {
+		pr_err("%s: no memory for name\n", __func__);
+		return;
+	}
+
+	xive->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
+					   xive, &xive_native_debug_fops);
+
+	pr_debug("%s: created %s\n", __func__, name);
+	kfree(name);
+}
+
+static void kvmppc_xive_native_init(struct kvm_device *dev)
+{
+	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
+
+	/* Register some debug interfaces */
+	xive_native_debugfs_init(xive);
+}
+
+struct kvm_device_ops kvm_xive_native_ops = {
+	.name = "kvm-xive-native",
+	.create = kvmppc_xive_native_create,
+	.init = kvmppc_xive_native_init,
+	.release = kvmppc_xive_native_release,
+	.set_attr = kvmppc_xive_native_set_attr,
+	.get_attr = kvmppc_xive_native_get_attr,
+	.has_attr = kvmppc_xive_native_has_attr,
+	.mmap = kvmppc_xive_native_mmap,
+};
+
+void kvmppc_xive_native_init_module(void)
+{
+	;
+}
+
+void kvmppc_xive_native_exit_module(void)
+{
+	;
+}
diff --git a/arch/powerpc/kvm/book3s_xive_template.c b/arch/powerpc/kvm/book3s_xive_template.c
index 033363d6e764..0737acfd17f1 100644
--- a/arch/powerpc/kvm/book3s_xive_template.c
+++ b/arch/powerpc/kvm/book3s_xive_template.c
@@ -130,24 +130,14 @@ static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
 		 */
 		prio = ffs(pending) - 1;
 
-		/*
-		 * If the most favoured prio we found pending is less
-		 * favored (or equal) than a pending IPI, we return
-		 * the IPI instead.
-		 *
-		 * Note: If pending was 0 and mfrr is 0xff, we will
-		 * not spurriously take an IPI because mfrr cannot
-		 * then be smaller than cppr.
-		 */
-		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
-			prio = xc->mfrr;
-			hirq = XICS_IPI;
-			break;
-		}
-
 		/* Don't scan past the guest cppr */
-		if (prio >= xc->cppr || prio > 7)
+		if (prio >= xc->cppr || prio > 7) {
+			if (xc->mfrr < xc->cppr) {
+				prio = xc->mfrr;
+				hirq = XICS_IPI;
+			}
 			break;
+		}
 
 		/* Grab queue and pointers */
 		q = &xc->queues[prio];
@@ -184,9 +174,12 @@ skip_ipi:
 		 * been set and another occurrence of the IPI will trigger.
 		 */
 		if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
-			if (scan_type == scan_fetch)
+			if (scan_type == scan_fetch) {
 				GLUE(X_PFX,source_eoi)(xc->vp_ipi,
 						       &xc->vp_ipi_data);
+				q->idx = idx;
+				q->toggle = toggle;
+			}
 			/* Loop back on same queue with updated idx/toggle */
 #ifdef XIVE_RUNTIME_CHECKS
 			WARN_ON(hirq && hirq != XICS_IPI);
@@ -199,32 +192,41 @@ skip_ipi:
 		if (hirq == XICS_DUMMY)
 			goto skip_ipi;
 
-		/* If fetching, update queue pointers */
-		if (scan_type == scan_fetch) {
-			q->idx = idx;
-			q->toggle = toggle;
-		}
-
-		/* Something found, stop searching */
-		if (hirq)
-			break;
-
-		/* Clear the pending bit on the now empty queue */
-		pending &= ~(1 << prio);
+		/* Clear the pending bit if the queue is now empty */
+		if (!hirq) {
+			pending &= ~(1 << prio);
 
-		/*
-		 * Check if the queue count needs adjusting due to
-		 * interrupts being moved away.
-		 */
-		if (atomic_read(&q->pending_count)) {
-			int p = atomic_xchg(&q->pending_count, 0);
-			if (p) {
+			/*
+			 * Check if the queue count needs adjusting due to
+			 * interrupts being moved away.
+			 */
+			if (atomic_read(&q->pending_count)) {
+				int p = atomic_xchg(&q->pending_count, 0);
+				if (p) {
 #ifdef XIVE_RUNTIME_CHECKS
-				WARN_ON(p > atomic_read(&q->count));
+					WARN_ON(p > atomic_read(&q->count));
 #endif
-				atomic_sub(p, &q->count);
+					atomic_sub(p, &q->count);
+				}
 			}
 		}
+
+		/*
+		 * If the most favoured prio we found pending is less
+		 * favoured than (or equal to) a pending IPI, we return
+		 * the IPI instead.
+		 */
+		if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
+			prio = xc->mfrr;
+			hirq = XICS_IPI;
+			break;
+		}
+
+		/* If fetching, update queue pointers */
+		if (scan_type == scan_fetch) {
+			q->idx = idx;
+			q->toggle = toggle;
+		}
 	}
 
 	/* If we are just taking a "peek", do nothing else */
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 8885377ec3e0..3393b166817a 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -570,6 +570,16 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_PPC_GET_CPU_CHAR:
 		r = 1;
 		break;
+#ifdef CONFIG_KVM_XIVE
+	case KVM_CAP_PPC_IRQ_XIVE:
+		/*
+		 * We need XIVE to be enabled on the platform (which
+		 * implies a POWER9 processor) and to be running on the
+		 * PowerNV platform, as nested is not yet supported.
+		 */
+		r = xive_enabled() && !!cpu_has_feature(CPU_FTR_HVMODE);
+		break;
+#endif
 
 	case KVM_CAP_PPC_ALLOC_HTAB:
 		r = hv_enabled;
@@ -644,9 +654,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		else
 			r = num_online_cpus();
 		break;
-	case KVM_CAP_NR_MEMSLOTS:
-		r = KVM_USER_MEM_SLOTS;
-		break;
 	case KVM_CAP_MAX_VCPUS:
 		r = KVM_MAX_VCPUS;
 		break;
@@ -753,6 +760,9 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 		else
 			kvmppc_xics_free_icp(vcpu);
 		break;
+	case KVMPPC_IRQ_XIVE:
+		kvmppc_xive_native_cleanup_vcpu(vcpu);
+		break;
 	}
 
 	kvmppc_core_vcpu_free(vcpu);
@@ -1941,6 +1951,30 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 		break;
 	}
 #endif /* CONFIG_KVM_XICS */
+#ifdef CONFIG_KVM_XIVE
+	case KVM_CAP_PPC_IRQ_XIVE: {
+		struct fd f;
+		struct kvm_device *dev;
+
+		r = -EBADF;
+		f = fdget(cap->args[0]);
+		if (!f.file)
+			break;
+
+		r = -ENXIO;
+		if (!xive_enabled())
+			break;
+
+		r = -EPERM;
+		dev = kvm_device_from_filp(f.file);
+		if (dev)
+			r = kvmppc_xive_native_connect_vcpu(dev, vcpu,
+							    cap->args[1]);
+
+		fdput(f);
+		break;
+	}
+#endif /* CONFIG_KVM_XIVE */
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	case KVM_CAP_PPC_FWNMI:
 		r = -EINVAL;