Diffstat (limited to 'virt/kvm')
-rw-r--r--   virt/kvm/assigned-dev.c   1026
-rw-r--r--   virt/kvm/eventfd.c           7
-rw-r--r--   virt/kvm/ioapic.c          687
-rw-r--r--   virt/kvm/ioapic.h          104
-rw-r--r--   virt/kvm/iommu.c           358
-rw-r--r--   virt/kvm/irq_comm.c        369
-rw-r--r--   virt/kvm/kvm_main.c        133
7 files changed, 58 insertions, 2626 deletions
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
deleted file mode 100644
index e05000e200d2..000000000000
--- a/virt/kvm/assigned-dev.c
+++ /dev/null
@@ -1,1026 +0,0 @@
-/*
- * Kernel-based Virtual Machine - device assignment support
- *
- * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
- *
- * This work is licensed under the terms of the GNU GPL, version 2. See
- * the COPYING file in the top-level directory.
- *
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/namei.h>
-#include <linux/fs.h>
-#include "irq.h"
-
-static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
- int assigned_dev_id)
-{
- struct list_head *ptr;
- struct kvm_assigned_dev_kernel *match;
-
- list_for_each(ptr, head) {
- match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
- if (match->assigned_dev_id == assigned_dev_id)
- return match;
- }
- return NULL;
-}
-
-static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
- *assigned_dev, int irq)
-{
- int i, index;
- struct msix_entry *host_msix_entries;
-
- host_msix_entries = assigned_dev->host_msix_entries;
-
- index = -1;
- for (i = 0; i < assigned_dev->entries_nr; i++)
- if (irq == host_msix_entries[i].vector) {
- index = i;
- break;
- }
- if (index < 0)
- printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
-
- return index;
-}
-
-static irqreturn_t kvm_assigned_dev_intx(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- int ret;
-
- spin_lock(&assigned_dev->intx_lock);
- if (pci_check_and_mask_intx(assigned_dev->dev)) {
- assigned_dev->host_irq_disabled = true;
- ret = IRQ_WAKE_THREAD;
- } else
- ret = IRQ_NONE;
- spin_unlock(&assigned_dev->intx_lock);
-
- return ret;
-}
-
-static void
-kvm_assigned_dev_raise_guest_irq(struct kvm_assigned_dev_kernel *assigned_dev,
- int vector)
-{
- if (unlikely(assigned_dev->irq_requested_type &
- KVM_DEV_IRQ_GUEST_INTX)) {
- spin_lock(&assigned_dev->intx_mask_lock);
- if (!(assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX))
- kvm_set_irq(assigned_dev->kvm,
- assigned_dev->irq_source_id, vector, 1,
- false);
- spin_unlock(&assigned_dev->intx_mask_lock);
- } else
- kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
- vector, 1, false);
-}
-
-static irqreturn_t kvm_assigned_dev_thread_intx(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
-
- if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
- spin_lock_irq(&assigned_dev->intx_lock);
- disable_irq_nosync(irq);
- assigned_dev->host_irq_disabled = true;
- spin_unlock_irq(&assigned_dev->intx_lock);
- }
-
- kvm_assigned_dev_raise_guest_irq(assigned_dev,
- assigned_dev->guest_irq);
-
- return IRQ_HANDLED;
-}
-
-#ifdef __KVM_HAVE_MSI
-static irqreturn_t kvm_assigned_dev_msi(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- int ret = kvm_set_irq_inatomic(assigned_dev->kvm,
- assigned_dev->irq_source_id,
- assigned_dev->guest_irq, 1);
- return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
-}
-
-static irqreturn_t kvm_assigned_dev_thread_msi(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
-
- kvm_assigned_dev_raise_guest_irq(assigned_dev,
- assigned_dev->guest_irq);
-
- return IRQ_HANDLED;
-}
-#endif
-
-#ifdef __KVM_HAVE_MSIX
-static irqreturn_t kvm_assigned_dev_msix(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- int index = find_index_from_host_irq(assigned_dev, irq);
- u32 vector;
- int ret = 0;
-
- if (index >= 0) {
- vector = assigned_dev->guest_msix_entries[index].vector;
- ret = kvm_set_irq_inatomic(assigned_dev->kvm,
- assigned_dev->irq_source_id,
- vector, 1);
- }
-
- return unlikely(ret == -EWOULDBLOCK) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
-}
-
-static irqreturn_t kvm_assigned_dev_thread_msix(int irq, void *dev_id)
-{
- struct kvm_assigned_dev_kernel *assigned_dev = dev_id;
- int index = find_index_from_host_irq(assigned_dev, irq);
- u32 vector;
-
- if (index >= 0) {
- vector = assigned_dev->guest_msix_entries[index].vector;
- kvm_assigned_dev_raise_guest_irq(assigned_dev, vector);
- }
-
- return IRQ_HANDLED;
-}
-#endif
-
-/* Ack the irq line for an assigned device */
-static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
-{
- struct kvm_assigned_dev_kernel *dev =
- container_of(kian, struct kvm_assigned_dev_kernel,
- ack_notifier);
-
- kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0, false);
-
- spin_lock(&dev->intx_mask_lock);
-
- if (!(dev->flags & KVM_DEV_ASSIGN_MASK_INTX)) {
- bool reassert = false;
-
- spin_lock_irq(&dev->intx_lock);
- /*
- * The guest IRQ may be shared so this ack can come from an
- * IRQ for another guest device.
- */
- if (dev->host_irq_disabled) {
- if (!(dev->flags & KVM_DEV_ASSIGN_PCI_2_3))
- enable_irq(dev->host_irq);
- else if (!pci_check_and_unmask_intx(dev->dev))
- reassert = true;
- dev->host_irq_disabled = reassert;
- }
- spin_unlock_irq(&dev->intx_lock);
-
- if (reassert)
- kvm_set_irq(dev->kvm, dev->irq_source_id,
- dev->guest_irq, 1, false);
- }
-
- spin_unlock(&dev->intx_mask_lock);
-}
-
-static void deassign_guest_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
-{
- if (assigned_dev->ack_notifier.gsi != -1)
- kvm_unregister_irq_ack_notifier(kvm,
- &assigned_dev->ack_notifier);
-
- kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
- assigned_dev->guest_irq, 0, false);
-
- if (assigned_dev->irq_source_id != -1)
- kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
- assigned_dev->irq_source_id = -1;
- assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
-}
-
-/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
-static void deassign_host_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
-{
- /*
- * We disable irq here to prevent further events.
- *
- * Note that this may result in a nested disable if the interrupt type
- * is INTx, but that is fine since we are about to free it.
- *
- * If this function is called as part of VM destruction, make sure the
- * kvm state is still valid at this point, since we may also have to
- * wait for a currently running IRQ handler to finish.
- */
- if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
- int i;
- for (i = 0; i < assigned_dev->entries_nr; i++)
- disable_irq(assigned_dev->host_msix_entries[i].vector);
-
- for (i = 0; i < assigned_dev->entries_nr; i++)
- free_irq(assigned_dev->host_msix_entries[i].vector,
- assigned_dev);
-
- assigned_dev->entries_nr = 0;
- kfree(assigned_dev->host_msix_entries);
- kfree(assigned_dev->guest_msix_entries);
- pci_disable_msix(assigned_dev->dev);
- } else {
- /* Deal with MSI and INTx */
- if ((assigned_dev->irq_requested_type &
- KVM_DEV_IRQ_HOST_INTX) &&
- (assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
- spin_lock_irq(&assigned_dev->intx_lock);
- pci_intx(assigned_dev->dev, false);
- spin_unlock_irq(&assigned_dev->intx_lock);
- synchronize_irq(assigned_dev->host_irq);
- } else
- disable_irq(assigned_dev->host_irq);
-
- free_irq(assigned_dev->host_irq, assigned_dev);
-
- if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
- pci_disable_msi(assigned_dev->dev);
- }
-
- assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
-}
-
-static int kvm_deassign_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev,
- unsigned long irq_requested_type)
-{
- unsigned long guest_irq_type, host_irq_type;
-
- if (!irqchip_in_kernel(kvm))
- return -EINVAL;
- /* no irq assignment to deassign */
- if (!assigned_dev->irq_requested_type)
- return -ENXIO;
-
- host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
- guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;
-
- if (host_irq_type)
- deassign_host_irq(kvm, assigned_dev);
- if (guest_irq_type)
- deassign_guest_irq(kvm, assigned_dev);
-
- return 0;
-}
-
-static void kvm_free_assigned_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
-{
- kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
-}
-
-static void kvm_free_assigned_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel
- *assigned_dev)
-{
- kvm_free_assigned_irq(kvm, assigned_dev);
-
- pci_reset_function(assigned_dev->dev);
- if (pci_load_and_free_saved_state(assigned_dev->dev,
- &assigned_dev->pci_saved_state))
- printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&assigned_dev->dev->dev));
- else
- pci_restore_state(assigned_dev->dev);
-
- pci_clear_dev_assigned(assigned_dev->dev);
-
- pci_release_regions(assigned_dev->dev);
- pci_disable_device(assigned_dev->dev);
- pci_dev_put(assigned_dev->dev);
-
- list_del(&assigned_dev->list);
- kfree(assigned_dev);
-}
-
-void kvm_free_all_assigned_devices(struct kvm *kvm)
-{
- struct list_head *ptr, *ptr2;
- struct kvm_assigned_dev_kernel *assigned_dev;
-
- list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
- assigned_dev = list_entry(ptr,
- struct kvm_assigned_dev_kernel,
- list);
-
- kvm_free_assigned_device(kvm, assigned_dev);
- }
-}
-
-static int assigned_device_enable_host_intx(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev)
-{
- irq_handler_t irq_handler;
- unsigned long flags;
-
- dev->host_irq = dev->dev->irq;
-
- /*
- * We can only share the IRQ line with other host devices if we are
- * able to disable the IRQ source at device-level - independently of
- * the guest driver. Otherwise host devices may suffer from unbounded
- * IRQ latencies when the guest keeps the line asserted.
- */
- if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
- irq_handler = kvm_assigned_dev_intx;
- flags = IRQF_SHARED;
- } else {
- irq_handler = NULL;
- flags = IRQF_ONESHOT;
- }
- if (request_threaded_irq(dev->host_irq, irq_handler,
- kvm_assigned_dev_thread_intx, flags,
- dev->irq_name, dev))
- return -EIO;
-
- if (dev->flags & KVM_DEV_ASSIGN_PCI_2_3) {
- spin_lock_irq(&dev->intx_lock);
- pci_intx(dev->dev, true);
- spin_unlock_irq(&dev->intx_lock);
- }
- return 0;
-}
-
-#ifdef __KVM_HAVE_MSI
-static int assigned_device_enable_host_msi(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev)
-{
- int r;
-
- if (!dev->dev->msi_enabled) {
- r = pci_enable_msi(dev->dev);
- if (r)
- return r;
- }
-
- dev->host_irq = dev->dev->irq;
- if (request_threaded_irq(dev->host_irq, kvm_assigned_dev_msi,
- kvm_assigned_dev_thread_msi, 0,
- dev->irq_name, dev)) {
- pci_disable_msi(dev->dev);
- return -EIO;
- }
-
- return 0;
-}
-#endif
-
-#ifdef __KVM_HAVE_MSIX
-static int assigned_device_enable_host_msix(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev)
-{
- int i, r = -EINVAL;
-
- /* host_msix_entries and guest_msix_entries should have been
- * initialized */
- if (dev->entries_nr == 0)
- return r;
-
- r = pci_enable_msix_exact(dev->dev,
- dev->host_msix_entries, dev->entries_nr);
- if (r)
- return r;
-
- for (i = 0; i < dev->entries_nr; i++) {
- r = request_threaded_irq(dev->host_msix_entries[i].vector,
- kvm_assigned_dev_msix,
- kvm_assigned_dev_thread_msix,
- 0, dev->irq_name, dev);
- if (r)
- goto err;
- }
-
- return 0;
-err:
- for (i -= 1; i >= 0; i--)
- free_irq(dev->host_msix_entries[i].vector, dev);
- pci_disable_msix(dev->dev);
- return r;
-}
-
-#endif
-
-static int assigned_device_enable_guest_intx(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- struct kvm_assigned_irq *irq)
-{
- dev->guest_irq = irq->guest_irq;
- dev->ack_notifier.gsi = irq->guest_irq;
- return 0;
-}
-
-#ifdef __KVM_HAVE_MSI
-static int assigned_device_enable_guest_msi(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- struct kvm_assigned_irq *irq)
-{
- dev->guest_irq = irq->guest_irq;
- dev->ack_notifier.gsi = -1;
- return 0;
-}
-#endif
-
-#ifdef __KVM_HAVE_MSIX
-static int assigned_device_enable_guest_msix(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- struct kvm_assigned_irq *irq)
-{
- dev->guest_irq = irq->guest_irq;
- dev->ack_notifier.gsi = -1;
- return 0;
-}
-#endif
-
-static int assign_host_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- __u32 host_irq_type)
-{
- int r = -EEXIST;
-
- if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
- return r;
-
- snprintf(dev->irq_name, sizeof(dev->irq_name), "kvm:%s",
- pci_name(dev->dev));
-
- switch (host_irq_type) {
- case KVM_DEV_IRQ_HOST_INTX:
- r = assigned_device_enable_host_intx(kvm, dev);
- break;
-#ifdef __KVM_HAVE_MSI
- case KVM_DEV_IRQ_HOST_MSI:
- r = assigned_device_enable_host_msi(kvm, dev);
- break;
-#endif
-#ifdef __KVM_HAVE_MSIX
- case KVM_DEV_IRQ_HOST_MSIX:
- r = assigned_device_enable_host_msix(kvm, dev);
- break;
-#endif
- default:
- r = -EINVAL;
- }
- dev->host_irq_disabled = false;
-
- if (!r)
- dev->irq_requested_type |= host_irq_type;
-
- return r;
-}
-
-static int assign_guest_irq(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *dev,
- struct kvm_assigned_irq *irq,
- unsigned long guest_irq_type)
-{
- int id;
- int r = -EEXIST;
-
- if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
- return r;
-
- id = kvm_request_irq_source_id(kvm);
- if (id < 0)
- return id;
-
- dev->irq_source_id = id;
-
- switch (guest_irq_type) {
- case KVM_DEV_IRQ_GUEST_INTX:
- r = assigned_device_enable_guest_intx(kvm, dev, irq);
- break;
-#ifdef __KVM_HAVE_MSI
- case KVM_DEV_IRQ_GUEST_MSI:
- r = assigned_device_enable_guest_msi(kvm, dev, irq);
- break;
-#endif
-#ifdef __KVM_HAVE_MSIX
- case KVM_DEV_IRQ_GUEST_MSIX:
- r = assigned_device_enable_guest_msix(kvm, dev, irq);
- break;
-#endif
- default:
- r = -EINVAL;
- }
-
- if (!r) {
- dev->irq_requested_type |= guest_irq_type;
- if (dev->ack_notifier.gsi != -1)
- kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
- } else {
- kvm_free_irq_source_id(kvm, dev->irq_source_id);
- dev->irq_source_id = -1;
- }
-
- return r;
-}
-
-/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
-static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
- struct kvm_assigned_irq *assigned_irq)
-{
- int r = -EINVAL;
- struct kvm_assigned_dev_kernel *match;
- unsigned long host_irq_type, guest_irq_type;
-
- if (!irqchip_in_kernel(kvm))
- return r;
-
- mutex_lock(&kvm->lock);
- r = -ENODEV;
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_irq->assigned_dev_id);
- if (!match)
- goto out;
-
- host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
- guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);
-
- r = -EINVAL;
- /* can only assign one type at a time */
- if (hweight_long(host_irq_type) > 1)
- goto out;
- if (hweight_long(guest_irq_type) > 1)
- goto out;
- if (host_irq_type == 0 && guest_irq_type == 0)
- goto out;
-
- r = 0;
- if (host_irq_type)
- r = assign_host_irq(kvm, match, host_irq_type);
- if (r)
- goto out;
-
- if (guest_irq_type)
- r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
- struct kvm_assigned_irq
- *assigned_irq)
-{
- int r = -ENODEV;
- struct kvm_assigned_dev_kernel *match;
- unsigned long irq_type;
-
- mutex_lock(&kvm->lock);
-
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_irq->assigned_dev_id);
- if (!match)
- goto out;
-
- irq_type = assigned_irq->flags & (KVM_DEV_IRQ_HOST_MASK |
- KVM_DEV_IRQ_GUEST_MASK);
- r = kvm_deassign_irq(kvm, match, irq_type);
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-/*
- * We want to test whether the caller has been granted permissions to
- * use this device. To be able to configure and control the device,
- * the user needs access to PCI configuration space and BAR resources.
- * These are accessed through PCI sysfs. PCI config space is often
- * passed to the process calling this ioctl via file descriptor, so we
- * can't rely on access to that file. We can check for permissions
- * on each of the BAR resource files, which is a pretty clear
- * indicator that the user has been granted access to the device.
- */
-static int probe_sysfs_permissions(struct pci_dev *dev)
-{
-#ifdef CONFIG_SYSFS
- int i;
- bool bar_found = false;
-
- for (i = PCI_STD_RESOURCES; i <= PCI_STD_RESOURCE_END; i++) {
- char *kpath, *syspath;
- struct path path;
- struct inode *inode;
- int r;
-
- if (!pci_resource_len(dev, i))
- continue;
-
- kpath = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
- if (!kpath)
- return -ENOMEM;
-
- /* Per sysfs-rules, sysfs is always at /sys */
- syspath = kasprintf(GFP_KERNEL, "/sys%s/resource%d", kpath, i);
- kfree(kpath);
- if (!syspath)
- return -ENOMEM;
-
- r = kern_path(syspath, LOOKUP_FOLLOW, &path);
- kfree(syspath);
- if (r)
- return r;
-
- inode = path.dentry->d_inode;
-
- r = inode_permission(inode, MAY_READ | MAY_WRITE | MAY_ACCESS);
- path_put(&path);
- if (r)
- return r;
-
- bar_found = true;
- }
-
- /* If no resources, probably something special */
- if (!bar_found)
- return -EPERM;
-
- return 0;
-#else
- return -EINVAL; /* No way to control the device without sysfs */
-#endif
-}
-
-static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
- struct kvm_assigned_pci_dev *assigned_dev)
-{
- int r = 0, idx;
- struct kvm_assigned_dev_kernel *match;
- struct pci_dev *dev;
-
- if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
- return -EINVAL;
-
- mutex_lock(&kvm->lock);
- idx = srcu_read_lock(&kvm->srcu);
-
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_dev->assigned_dev_id);
- if (match) {
- /* device already assigned */
- r = -EEXIST;
- goto out;
- }
-
- match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
- if (match == NULL) {
- printk(KERN_INFO "%s: Couldn't allocate memory\n",
- __func__);
- r = -ENOMEM;
- goto out;
- }
- dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
- assigned_dev->busnr,
- assigned_dev->devfn);
- if (!dev) {
- printk(KERN_INFO "%s: host device not found\n", __func__);
- r = -EINVAL;
- goto out_free;
- }
-
- /* Don't allow bridges to be assigned */
- if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
- r = -EPERM;
- goto out_put;
- }
-
- r = probe_sysfs_permissions(dev);
- if (r)
- goto out_put;
-
- if (pci_enable_device(dev)) {
- printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
- r = -EBUSY;
- goto out_put;
- }
- r = pci_request_regions(dev, "kvm_assigned_device");
- if (r) {
- printk(KERN_INFO "%s: Could not get access to device regions\n",
- __func__);
- goto out_disable;
- }
-
- pci_reset_function(dev);
- pci_save_state(dev);
- match->pci_saved_state = pci_store_saved_state(dev);
- if (!match->pci_saved_state)
- printk(KERN_DEBUG "%s: Couldn't store %s saved state\n",
- __func__, dev_name(&dev->dev));
-
- if (!pci_intx_mask_supported(dev))
- assigned_dev->flags &= ~KVM_DEV_ASSIGN_PCI_2_3;
-
- match->assigned_dev_id = assigned_dev->assigned_dev_id;
- match->host_segnr = assigned_dev->segnr;
- match->host_busnr = assigned_dev->busnr;
- match->host_devfn = assigned_dev->devfn;
- match->flags = assigned_dev->flags;
- match->dev = dev;
- spin_lock_init(&match->intx_lock);
- spin_lock_init(&match->intx_mask_lock);
- match->irq_source_id = -1;
- match->kvm = kvm;
- match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
-
- list_add(&match->list, &kvm->arch.assigned_dev_head);
-
- if (!kvm->arch.iommu_domain) {
- r = kvm_iommu_map_guest(kvm);
- if (r)
- goto out_list_del;
- }
- r = kvm_assign_device(kvm, match);
- if (r)
- goto out_list_del;
-
-out:
- srcu_read_unlock(&kvm->srcu, idx);
- mutex_unlock(&kvm->lock);
- return r;
-out_list_del:
- if (pci_load_and_free_saved_state(dev, &match->pci_saved_state))
- printk(KERN_INFO "%s: Couldn't reload %s saved state\n",
- __func__, dev_name(&dev->dev));
- list_del(&match->list);
- pci_release_regions(dev);
-out_disable:
- pci_disable_device(dev);
-out_put:
- pci_dev_put(dev);
-out_free:
- kfree(match);
- srcu_read_unlock(&kvm->srcu, idx);
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
- struct kvm_assigned_pci_dev *assigned_dev)
-{
- int r = 0;
- struct kvm_assigned_dev_kernel *match;
-
- mutex_lock(&kvm->lock);
-
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_dev->assigned_dev_id);
- if (!match) {
- printk(KERN_INFO "%s: device hasn't been assigned before, "
- "so cannot be deassigned\n", __func__);
- r = -EINVAL;
- goto out;
- }
-
- kvm_deassign_device(kvm, match);
-
- kvm_free_assigned_device(kvm, match);
-
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-
-#ifdef __KVM_HAVE_MSIX
-static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
- struct kvm_assigned_msix_nr *entry_nr)
-{
- int r = 0;
- struct kvm_assigned_dev_kernel *adev;
-
- mutex_lock(&kvm->lock);
-
- adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- entry_nr->assigned_dev_id);
- if (!adev) {
- r = -EINVAL;
- goto msix_nr_out;
- }
-
- if (adev->entries_nr == 0) {
- adev->entries_nr = entry_nr->entry_nr;
- if (adev->entries_nr == 0 ||
- adev->entries_nr > KVM_MAX_MSIX_PER_DEV) {
- r = -EINVAL;
- goto msix_nr_out;
- }
-
- adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
- entry_nr->entry_nr,
- GFP_KERNEL);
- if (!adev->host_msix_entries) {
- r = -ENOMEM;
- goto msix_nr_out;
- }
- adev->guest_msix_entries =
- kzalloc(sizeof(struct msix_entry) * entry_nr->entry_nr,
- GFP_KERNEL);
- if (!adev->guest_msix_entries) {
- kfree(adev->host_msix_entries);
- r = -ENOMEM;
- goto msix_nr_out;
- }
- } else /* Not allowed to set the MSI-X entry count twice */
- r = -EINVAL;
-msix_nr_out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
- struct kvm_assigned_msix_entry *entry)
-{
- int r = 0, i;
- struct kvm_assigned_dev_kernel *adev;
-
- mutex_lock(&kvm->lock);
-
- adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- entry->assigned_dev_id);
-
- if (!adev) {
- r = -EINVAL;
- goto msix_entry_out;
- }
-
- for (i = 0; i < adev->entries_nr; i++)
- if (adev->guest_msix_entries[i].vector == 0 ||
- adev->guest_msix_entries[i].entry == entry->entry) {
- adev->guest_msix_entries[i].entry = entry->entry;
- adev->guest_msix_entries[i].vector = entry->gsi;
- adev->host_msix_entries[i].entry = entry->entry;
- break;
- }
- if (i == adev->entries_nr) {
- r = -ENOSPC;
- goto msix_entry_out;
- }
-
-msix_entry_out:
- mutex_unlock(&kvm->lock);
-
- return r;
-}
-#endif
-
-static int kvm_vm_ioctl_set_pci_irq_mask(struct kvm *kvm,
- struct kvm_assigned_pci_dev *assigned_dev)
-{
- int r = 0;
- struct kvm_assigned_dev_kernel *match;
-
- mutex_lock(&kvm->lock);
-
- match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
- assigned_dev->assigned_dev_id);
- if (!match) {
- r = -ENODEV;
- goto out;
- }
-
- spin_lock(&match->intx_mask_lock);
-
- match->flags &= ~KVM_DEV_ASSIGN_MASK_INTX;
- match->flags |= assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX;
-
- if (match->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
- if (assigned_dev->flags & KVM_DEV_ASSIGN_MASK_INTX) {
- kvm_set_irq(match->kvm, match->irq_source_id,
- match->guest_irq, 0, false);
- /*
- * Masking at hardware-level is performed on demand,
- * i.e. when an IRQ actually arrives at the host.
- */
- } else if (!(assigned_dev->flags & KVM_DEV_ASSIGN_PCI_2_3)) {
- /*
- * Unmask the IRQ line if required. Unmasking at
- * device level will be performed by user space.
- */
- spin_lock_irq(&match->intx_lock);
- if (match->host_irq_disabled) {
- enable_irq(match->host_irq);
- match->host_irq_disabled = false;
- }
- spin_unlock_irq(&match->intx_lock);
- }
- }
-
- spin_unlock(&match->intx_mask_lock);
-
-out:
- mutex_unlock(&kvm->lock);
- return r;
-}
-
-long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int r;
-
- switch (ioctl) {
- case KVM_ASSIGN_PCI_DEVICE: {
- struct kvm_assigned_pci_dev assigned_dev;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
- goto out;
- r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
- if (r)
- goto out;
- break;
- }
- case KVM_ASSIGN_IRQ: {
- r = -EOPNOTSUPP;
- break;
- }
- case KVM_ASSIGN_DEV_IRQ: {
- struct kvm_assigned_irq assigned_irq;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
- goto out;
- r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
- if (r)
- goto out;
- break;
- }
- case KVM_DEASSIGN_DEV_IRQ: {
- struct kvm_assigned_irq assigned_irq;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
- goto out;
- r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
- if (r)
- goto out;
- break;
- }
- case KVM_DEASSIGN_PCI_DEVICE: {
- struct kvm_assigned_pci_dev assigned_dev;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
- goto out;
- r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
- if (r)
- goto out;
- break;
- }
-#ifdef __KVM_HAVE_MSIX
- case KVM_ASSIGN_SET_MSIX_NR: {
- struct kvm_assigned_msix_nr entry_nr;
- r = -EFAULT;
- if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
- goto out;
- r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
- if (r)
- goto out;
- break;
- }
- case KVM_ASSIGN_SET_MSIX_ENTRY: {
- struct kvm_assigned_msix_entry entry;
- r = -EFAULT;
- if (copy_from_user(&entry, argp, sizeof entry))
- goto out;
- r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
- if (r)
- goto out;
- break;
- }
-#endif
- case KVM_ASSIGN_SET_INTX_MASK: {
- struct kvm_assigned_pci_dev assigned_dev;
-
- r = -EFAULT;
- if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
- goto out;
- r = kvm_vm_ioctl_set_pci_irq_mask(kvm, &assigned_dev);
- break;
- }
- default:
- r = -ENOTTY;
- break;
- }
-out:
- return r;
-}
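
The INTx handling removed above is an instance of the kernel's split between
a hard IRQ handler and a threaded handler: the hard handler runs in interrupt
context, only quiesces the (possibly shared) line and returns IRQ_WAKE_THREAD,
while the threaded handler runs in process context and does the sleepable work
of raising the guest interrupt. A minimal sketch of the pattern, with a
hypothetical "demo" device (not part of this patch):

#include <linux/interrupt.h>

static irqreturn_t demo_hard_handler(int irq, void *dev_id)
{
	/* Hard IRQ context: mask/ack the source, defer the real work. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_thread_handler(int irq, void *dev_id)
{
	/* Process context: may sleep, e.g. while injecting a guest IRQ. */
	return IRQ_HANDLED;
}

static int demo_request(unsigned int irq, void *dev)
{
	/* IRQF_ONESHOT keeps the line masked until the thread returns;
	 * the deleted code above used it when the device cannot mask
	 * INTx at device level, and IRQF_SHARED when it can. */
	return request_threaded_irq(irq, demo_hard_handler,
				    demo_thread_handler, IRQF_ONESHOT,
				    "demo", dev);
}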
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b0fb390943c6..148b2392c762 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -36,9 +36,6 @@
#include <linux/seqlock.h>
#include <trace/events/kvm.h>
-#ifdef __KVM_HAVE_IOAPIC
-#include "ioapic.h"
-#endif
#include "iodev.h"
#ifdef CONFIG_HAVE_KVM_IRQFD
@@ -492,9 +489,7 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
mutex_lock(&kvm->irq_lock);
hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
mutex_unlock(&kvm->irq_lock);
-#ifdef __KVM_HAVE_IOAPIC
kvm_vcpu_request_scan_ioapic(kvm);
-#endif
}
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
@@ -504,9 +499,7 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
hlist_del_init_rcu(&kian->link);
mutex_unlock(&kvm->irq_lock);
synchronize_srcu(&kvm->irq_srcu);
-#ifdef __KVM_HAVE_IOAPIC
kvm_vcpu_request_scan_ioapic(kvm);
-#endif
}
#endif
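
For reference, the ack notifier registered through these functions is how the
(now deleted) assigned-dev.c learned when the guest EOIed a GSI; with the
ioapic always compiled in for this configuration, the scan request no longer
needs an #ifdef. A minimal caller sketch, assuming only the kvm_host.h API
used in this hunk:

#include <linux/kvm_host.h>

static void my_irq_acked(struct kvm_irq_ack_notifier *kian)
{
	/* Invoked after the guest acks (EOIs) kian->gsi. */
}

static void my_track_gsi(struct kvm *kvm,
			 struct kvm_irq_ack_notifier *kian, int gsi)
{
	kian->gsi = gsi;
	kian->irq_acked = my_irq_acked;
	kvm_register_irq_ack_notifier(kvm, kian);
}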
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
deleted file mode 100644
index 0ba4057d271b..000000000000
--- a/virt/kvm/ioapic.c
+++ /dev/null
@@ -1,687 +0,0 @@
-/*
- * Copyright (C) 2001 MandrakeSoft S.A.
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- *
- * MandrakeSoft S.A.
- * 43, rue d'Aboukir
- * 75002 Paris - France
- * http://www.linux-mandrake.com/
- * http://www.mandrakesoft.com/
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Yunhong Jiang <yunhong.jiang@intel.com>
- * Yaozu (Eddie) Dong <eddie.dong@intel.com>
- * Based on Xen 3.1 code.
- */
-
-#include <linux/kvm_host.h>
-#include <linux/kvm.h>
-#include <linux/mm.h>
-#include <linux/highmem.h>
-#include <linux/smp.h>
-#include <linux/hrtimer.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-#include <asm/current.h>
-#include <trace/events/kvm.h>
-
-#include "ioapic.h"
-#include "lapic.h"
-#include "irq.h"
-
-#if 0
-#define ioapic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg)
-#else
-#define ioapic_debug(fmt, arg...)
-#endif
-static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
- bool line_status);
-
-static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
- unsigned long addr,
- unsigned long length)
-{
- unsigned long result = 0;
-
- switch (ioapic->ioregsel) {
- case IOAPIC_REG_VERSION:
- result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
- | (IOAPIC_VERSION_ID & 0xff));
- break;
-
- case IOAPIC_REG_APIC_ID:
- case IOAPIC_REG_ARB_ID:
- result = ((ioapic->id & 0xf) << 24);
- break;
-
- default:
- {
- u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
- u64 redir_content;
-
- if (redir_index < IOAPIC_NUM_PINS)
- redir_content =
- ioapic->redirtbl[redir_index].bits;
- else
- redir_content = ~0ULL;
-
- result = (ioapic->ioregsel & 0x1) ?
- (redir_content >> 32) & 0xffffffff :
- redir_content & 0xffffffff;
- break;
- }
- }
-
- return result;
-}
-
-static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
-{
- ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
-}
-
-static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
-
-static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
-{
- if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
- kvm_rtc_eoi_tracking_restore_all(ioapic);
-}
-
-static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
-{
- bool new_val, old_val;
- struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
- union kvm_ioapic_redirect_entry *e;
-
- e = &ioapic->redirtbl[RTC_GSI];
- if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
- e->fields.dest_mode))
- return;
-
- new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
- old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
-
- if (new_val == old_val)
- return;
-
- if (new_val) {
- __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
- ioapic->rtc_status.pending_eoi++;
- } else {
- __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
- ioapic->rtc_status.pending_eoi--;
- rtc_status_pending_eoi_check_valid(ioapic);
- }
-}
-
-void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
-{
- struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
-
- spin_lock(&ioapic->lock);
- __rtc_irq_eoi_tracking_restore_one(vcpu);
- spin_unlock(&ioapic->lock);
-}
-
-static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
-{
- struct kvm_vcpu *vcpu;
- int i;
-
- if (RTC_GSI >= IOAPIC_NUM_PINS)
- return;
-
- rtc_irq_eoi_tracking_reset(ioapic);
- kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
- __rtc_irq_eoi_tracking_restore_one(vcpu);
-}
-
-static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
-{
- if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
- --ioapic->rtc_status.pending_eoi;
- rtc_status_pending_eoi_check_valid(ioapic);
- }
-}
-
-static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
-{
- if (ioapic->rtc_status.pending_eoi > 0)
- return true; /* coalesced */
-
- return false;
-}
-
-static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
- int irq_level, bool line_status)
-{
- union kvm_ioapic_redirect_entry entry;
- u32 mask = 1 << irq;
- u32 old_irr;
- int edge, ret;
-
- entry = ioapic->redirtbl[irq];
- edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);
-
- if (!irq_level) {
- ioapic->irr &= ~mask;
- ret = 1;
- goto out;
- }
-
- /*
- * Return 0 for coalesced interrupts; for edge-triggered interrupts,
- * this only happens if a previous edge has not been delivered due
- * to masking. For level interrupts, the remote_irr field tells
- * us if the interrupt is waiting for an EOI.
- *
- * RTC is special: it is edge-triggered, but userspace likes to know
- * if it has already been acked via EOI because coalesced RTC
- * interrupts lead to time drift in Windows guests. So we track
- * EOI manually for the RTC interrupt.
- */
- if (irq == RTC_GSI && line_status &&
- rtc_irq_check_coalesced(ioapic)) {
- ret = 0;
- goto out;
- }
-
- old_irr = ioapic->irr;
- ioapic->irr |= mask;
- if ((edge && old_irr == ioapic->irr) ||
- (!edge && entry.fields.remote_irr)) {
- ret = 0;
- goto out;
- }
-
- ret = ioapic_service(ioapic, irq, line_status);
-
-out:
- trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
- return ret;
-}
-
-static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
-{
- u32 idx;
-
- rtc_irq_eoi_tracking_reset(ioapic);
- for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
- ioapic_set_irq(ioapic, idx, 1, true);
-
- kvm_rtc_eoi_tracking_restore_all(ioapic);
-}
-
-
-static void update_handled_vectors(struct kvm_ioapic *ioapic)
-{
- DECLARE_BITMAP(handled_vectors, 256);
- int i;
-
- memset(handled_vectors, 0, sizeof(handled_vectors));
- for (i = 0; i < IOAPIC_NUM_PINS; ++i)
- __set_bit(ioapic->redirtbl[i].fields.vector, handled_vectors);
- memcpy(ioapic->handled_vectors, handled_vectors,
- sizeof(handled_vectors));
- smp_wmb();
-}
-
-void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
- u32 *tmr)
-{
- struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
- union kvm_ioapic_redirect_entry *e;
- int index;
-
- spin_lock(&ioapic->lock);
- for (index = 0; index < IOAPIC_NUM_PINS; index++) {
- e = &ioapic->redirtbl[index];
- if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
- kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
- index == RTC_GSI) {
- if (kvm_apic_match_dest(vcpu, NULL, 0,
- e->fields.dest_id, e->fields.dest_mode)) {
- __set_bit(e->fields.vector,
- (unsigned long *)eoi_exit_bitmap);
- if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG)
- __set_bit(e->fields.vector,
- (unsigned long *)tmr);
- }
- }
- }
- spin_unlock(&ioapic->lock);
-}
-
-#ifdef CONFIG_X86
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
-{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-
- if (!ioapic)
- return;
- kvm_make_scan_ioapic_request(kvm);
-}
-#else
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
-{
- return;
-}
-#endif
-
-static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
-{
- unsigned index;
- bool mask_before, mask_after;
- union kvm_ioapic_redirect_entry *e;
-
- switch (ioapic->ioregsel) {
- case IOAPIC_REG_VERSION:
- /* Writes are ignored. */
- break;
-
- case IOAPIC_REG_APIC_ID:
- ioapic->id = (val >> 24) & 0xf;
- break;
-
- case IOAPIC_REG_ARB_ID:
- break;
-
- default:
- index = (ioapic->ioregsel - 0x10) >> 1;
-
- ioapic_debug("change redir index %x val %x\n", index, val);
- if (index >= IOAPIC_NUM_PINS)
- return;
- e = &ioapic->redirtbl[index];
- mask_before = e->fields.mask;
- if (ioapic->ioregsel & 1) {
- e->bits &= 0xffffffff;
- e->bits |= (u64) val << 32;
- } else {
- e->bits &= ~0xffffffffULL;
- e->bits |= (u32) val;
- e->fields.remote_irr = 0;
- }
- update_handled_vectors(ioapic);
- mask_after = e->fields.mask;
- if (mask_before != mask_after)
- kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
- if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
- && ioapic->irr & (1 << index))
- ioapic_service(ioapic, index, false);
- kvm_vcpu_request_scan_ioapic(ioapic->kvm);
- break;
- }
-}
-
-static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
-{
- union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
- struct kvm_lapic_irq irqe;
- int ret;
-
- if (entry->fields.mask)
- return -1;
-
- ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
- "vector=%x trig_mode=%x\n",
- entry->fields.dest_id, entry->fields.dest_mode,
- entry->fields.delivery_mode, entry->fields.vector,
- entry->fields.trig_mode);
-
- irqe.dest_id = entry->fields.dest_id;
- irqe.vector = entry->fields.vector;
- irqe.dest_mode = entry->fields.dest_mode;
- irqe.trig_mode = entry->fields.trig_mode;
- irqe.delivery_mode = entry->fields.delivery_mode << 8;
- irqe.level = 1;
- irqe.shorthand = 0;
-
- if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
- ioapic->irr &= ~(1 << irq);
-
- if (irq == RTC_GSI && line_status) {
- /*
- * pending_eoi cannot ever become negative (see
- * rtc_status_pending_eoi_check_valid) and the caller
- * ensures that it is only called if it is >= zero, namely
- * when rtc_irq_check_coalesced returns false.
- */
- BUG_ON(ioapic->rtc_status.pending_eoi != 0);
- ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
- ioapic->rtc_status.dest_map);
- ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
- } else
- ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
-
- if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
- entry->fields.remote_irr = 1;
-
- return ret;
-}
-
-int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
- int level, bool line_status)
-{
- int ret, irq_level;
-
- BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);
-
- spin_lock(&ioapic->lock);
- irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
- irq_source_id, level);
- ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);
-
- spin_unlock(&ioapic->lock);
-
- return ret;
-}
-
-void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
-{
- int i;
-
- spin_lock(&ioapic->lock);
- for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
- __clear_bit(irq_source_id, &ioapic->irq_states[i]);
- spin_unlock(&ioapic->lock);
-}
-
-static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
-{
- int i;
- struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
- eoi_inject.work);
- spin_lock(&ioapic->lock);
- for (i = 0; i < IOAPIC_NUM_PINS; i++) {
- union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
-
- if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
- continue;
-
- if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
- ioapic_service(ioapic, i, false);
- }
- spin_unlock(&ioapic->lock);
-}
-
-#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000
-
-static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
- struct kvm_ioapic *ioapic, int vector, int trigger_mode)
-{
- int i;
-
- for (i = 0; i < IOAPIC_NUM_PINS; i++) {
- union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
-
- if (ent->fields.vector != vector)
- continue;
-
- if (i == RTC_GSI)
- rtc_irq_eoi(ioapic, vcpu);
- /*
- * We drop the lock while calling the ack notifiers because ack
- * notifier callbacks for assigned devices call back into the IOAPIC
- * recursively. Since remote_irr is cleared only after the call to
- * the notifiers, if the same vector is delivered while the lock is
- * dropped it will be put into irr and delivered again after the ack
- * notifier returns.
- */
- spin_unlock(&ioapic->lock);
- kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
- spin_lock(&ioapic->lock);
-
- if (trigger_mode != IOAPIC_LEVEL_TRIG)
- continue;
-
- ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
- ent->fields.remote_irr = 0;
- if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
- ++ioapic->irq_eoi[i];
- if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
- /*
- * Real hardware does not deliver the interrupt
- * immediately during eoi broadcast, and this
- * lets a buggy guest make slow progress
- * even if it does not correctly handle a
- * level-triggered interrupt. Emulate this
- * behavior if we detect an interrupt storm.
- */
- schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
- ioapic->irq_eoi[i] = 0;
- trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
- } else {
- ioapic_service(ioapic, i, false);
- }
- } else {
- ioapic->irq_eoi[i] = 0;
- }
- }
-}
-
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
-{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- smp_rmb();
- return test_bit(vector, ioapic->handled_vectors);
-}
-
-void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
-{
- struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
-
- spin_lock(&ioapic->lock);
- __kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
- spin_unlock(&ioapic->lock);
-}
-
-static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
-{
- return container_of(dev, struct kvm_ioapic, dev);
-}
-
-static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
-{
- return ((addr >= ioapic->base_address &&
- (addr < ioapic->base_address + IOAPIC_MEM_LENGTH)));
-}
-
-static int ioapic_mmio_read(struct kvm_io_device *this, gpa_t addr, int len,
- void *val)
-{
- struct kvm_ioapic *ioapic = to_ioapic(this);
- u32 result;
- if (!ioapic_in_range(ioapic, addr))
- return -EOPNOTSUPP;
-
- ioapic_debug("addr %lx\n", (unsigned long)addr);
- ASSERT(!(addr & 0xf)); /* check alignment */
-
- addr &= 0xff;
- spin_lock(&ioapic->lock);
- switch (addr) {
- case IOAPIC_REG_SELECT:
- result = ioapic->ioregsel;
- break;
-
- case IOAPIC_REG_WINDOW:
- result = ioapic_read_indirect(ioapic, addr, len);
- break;
-
- default:
- result = 0;
- break;
- }
- spin_unlock(&ioapic->lock);
-
- switch (len) {
- case 8:
- *(u64 *) val = result;
- break;
- case 1:
- case 2:
- case 4:
- memcpy(val, (char *)&result, len);
- break;
- default:
- printk(KERN_WARNING "ioapic: wrong length %d\n", len);
- }
- return 0;
-}
-
-static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
- const void *val)
-{
- struct kvm_ioapic *ioapic = to_ioapic(this);
- u32 data;
- if (!ioapic_in_range(ioapic, addr))
- return -EOPNOTSUPP;
-
- ioapic_debug("ioapic_mmio_write addr=%p len=%d val=%p\n",
- (void*)addr, len, val);
- ASSERT(!(addr & 0xf)); /* check alignment */
-
- switch (len) {
- case 8:
- case 4:
- data = *(u32 *) val;
- break;
- case 2:
- data = *(u16 *) val;
- break;
- case 1:
- data = *(u8 *) val;
- break;
- default:
- printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
- return 0;
- }
-
- addr &= 0xff;
- spin_lock(&ioapic->lock);
- switch (addr) {
- case IOAPIC_REG_SELECT:
- ioapic->ioregsel = data & 0xFF; /* 8-bit register */
- break;
-
- case IOAPIC_REG_WINDOW:
- ioapic_write_indirect(ioapic, data);
- break;
-#ifdef CONFIG_IA64
- case IOAPIC_REG_EOI:
- __kvm_ioapic_update_eoi(NULL, ioapic, data, IOAPIC_LEVEL_TRIG);
- break;
-#endif
-
- default:
- break;
- }
- spin_unlock(&ioapic->lock);
- return 0;
-}
-
-static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
-{
- int i;
-
- cancel_delayed_work_sync(&ioapic->eoi_inject);
- for (i = 0; i < IOAPIC_NUM_PINS; i++)
- ioapic->redirtbl[i].fields.mask = 1;
- ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
- ioapic->ioregsel = 0;
- ioapic->irr = 0;
- ioapic->id = 0;
- memset(ioapic->irq_eoi, 0x00, IOAPIC_NUM_PINS);
- rtc_irq_eoi_tracking_reset(ioapic);
- update_handled_vectors(ioapic);
-}
-
-static const struct kvm_io_device_ops ioapic_mmio_ops = {
- .read = ioapic_mmio_read,
- .write = ioapic_mmio_write,
-};
-
-int kvm_ioapic_init(struct kvm *kvm)
-{
- struct kvm_ioapic *ioapic;
- int ret;
-
- ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL);
- if (!ioapic)
- return -ENOMEM;
- spin_lock_init(&ioapic->lock);
- INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
- kvm->arch.vioapic = ioapic;
- kvm_ioapic_reset(ioapic);
- kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
- ioapic->kvm = kvm;
- mutex_lock(&kvm->slots_lock);
- ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
- IOAPIC_MEM_LENGTH, &ioapic->dev);
- mutex_unlock(&kvm->slots_lock);
- if (ret < 0) {
- kvm->arch.vioapic = NULL;
- kfree(ioapic);
- }
-
- return ret;
-}
-
-void kvm_ioapic_destroy(struct kvm *kvm)
-{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
-
- if (ioapic) {
- cancel_delayed_work_sync(&ioapic->eoi_inject);
- kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
- kvm->arch.vioapic = NULL;
- kfree(ioapic);
- }
-}
-
-int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
-{
- struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
- if (!ioapic)
- return -EINVAL;
-
- spin_lock(&ioapic->lock);
- memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
- spin_unlock(&ioapic->lock);
- return 0;
-}
-
-int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
-{
- struct kvm_ioapic *ioapic = ioapic_irqchip(kvm);
- if (!ioapic)
- return -EINVAL;
-
- spin_lock(&ioapic->lock);
- memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
- ioapic->irr = 0;
- update_handled_vectors(ioapic);
- kvm_vcpu_request_scan_ioapic(kvm);
- kvm_ioapic_inject_all(ioapic, state->irr);
- spin_unlock(&ioapic->lock);
- return 0;
-}
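
The MMIO handlers above emulate the IOAPIC's two-register indirect access
scheme: software writes a register index to IOREGSEL (offset 0x00), then
reads or writes the selected register through IOWIN (offset 0x10);
redirection entry n lives at indirect registers 0x10 + 2n (low dword) and
0x11 + 2n (high dword), which is what the (ioregsel - 0x10) >> 1 arithmetic
decodes. The same access pattern from the driver side, as a hypothetical
bare-metal helper:

#include <linux/io.h>

static void ioapic_write_redir(void __iomem *base, unsigned int pin, u64 ent)
{
	/* Select and write the high dword first so the mask bit in the
	 * low dword takes effect last. */
	writel(0x11 + pin * 2, base + 0x00);	/* IOREGSEL */
	writel((u32)(ent >> 32), base + 0x10);	/* IOWIN */

	writel(0x10 + pin * 2, base + 0x00);
	writel((u32)ent, base + 0x10);
}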
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
deleted file mode 100644
index e23b70634f1e..000000000000
--- a/virt/kvm/ioapic.h
+++ /dev/null
@@ -1,104 +0,0 @@
-#ifndef __KVM_IO_APIC_H
-#define __KVM_IO_APIC_H
-
-#include <linux/kvm_host.h>
-
-#include "iodev.h"
-
-struct kvm;
-struct kvm_vcpu;
-
-#define IOAPIC_NUM_PINS KVM_IOAPIC_NUM_PINS
-#define IOAPIC_VERSION_ID 0x11 /* IOAPIC version */
-#define IOAPIC_EDGE_TRIG 0
-#define IOAPIC_LEVEL_TRIG 1
-
-#define IOAPIC_DEFAULT_BASE_ADDRESS 0xfec00000
-#define IOAPIC_MEM_LENGTH 0x100
-
-/* Direct registers. */
-#define IOAPIC_REG_SELECT 0x00
-#define IOAPIC_REG_WINDOW 0x10
-#define IOAPIC_REG_EOI 0x40 /* IA64 IOSAPIC only */
-
-/* Indirect registers. */
-#define IOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
-#define IOAPIC_REG_VERSION 0x01
-#define IOAPIC_REG_ARB_ID 0x02 /* x86 IOAPIC only */
-
-/* ioapic delivery mode */
-#define IOAPIC_FIXED 0x0
-#define IOAPIC_LOWEST_PRIORITY 0x1
-#define IOAPIC_PMI 0x2
-#define IOAPIC_NMI 0x4
-#define IOAPIC_INIT 0x5
-#define IOAPIC_EXTINT 0x7
-
-#ifdef CONFIG_X86
-#define RTC_GSI 8
-#else
-#define RTC_GSI -1U
-#endif
-
-struct rtc_status {
- int pending_eoi;
- DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
-};
-
-struct kvm_ioapic {
- u64 base_address;
- u32 ioregsel;
- u32 id;
- u32 irr;
- u32 pad;
- union kvm_ioapic_redirect_entry redirtbl[IOAPIC_NUM_PINS];
- unsigned long irq_states[IOAPIC_NUM_PINS];
- struct kvm_io_device dev;
- struct kvm *kvm;
- void (*ack_notifier)(void *opaque, int irq);
- spinlock_t lock;
- DECLARE_BITMAP(handled_vectors, 256);
- struct rtc_status rtc_status;
- struct delayed_work eoi_inject;
- u32 irq_eoi[IOAPIC_NUM_PINS];
-};
-
-#ifdef DEBUG
-#define ASSERT(x) \
-do { \
- if (!(x)) { \
- printk(KERN_EMERG "assertion failed %s: %d: %s\n", \
- __FILE__, __LINE__, #x); \
- BUG(); \
- } \
-} while (0)
-#else
-#define ASSERT(x) do { } while (0)
-#endif
-
-static inline struct kvm_ioapic *ioapic_irqchip(struct kvm *kvm)
-{
- return kvm->arch.vioapic;
-}
-
-void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu);
-int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
- int short_hand, int dest, int dest_mode);
-int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
-void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector,
- int trigger_mode);
-bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
-int kvm_ioapic_init(struct kvm *kvm);
-void kvm_ioapic_destroy(struct kvm *kvm);
-int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
- int level, bool line_status);
-void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, unsigned long *dest_map);
-int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
-void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
- u32 *tmr);
-
-#endif
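
The redirtbl above is an array of union kvm_ioapic_redirect_entry, declared
in <linux/kvm_host.h>. For reference, its layout follows the 82093AA
redirection-entry format, approximately (field widths per the datasheet):

union kvm_ioapic_redirect_entry {
	u64 bits;
	struct {
		u8 vector;		/* interrupt vector */
		u8 delivery_mode:3;	/* IOAPIC_FIXED etc. above */
		u8 dest_mode:1;		/* 0 = physical, 1 = logical */
		u8 delivery_status:1;
		u8 polarity:1;
		u8 remote_irr:1;	/* level IRQ delivered, awaiting EOI */
		u8 trig_mode:1;		/* IOAPIC_EDGE/LEVEL_TRIG above */
		u8 mask:1;
		u8 reserve:7;
		u8 reserved[4];
		u8 dest_id;		/* APIC ID or logical destination */
	} fields;
};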
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
deleted file mode 100644
index c1e6ae989a43..000000000000
--- a/virt/kvm/iommu.c
+++ /dev/null
@@ -1,358 +0,0 @@
-/*
- * Copyright (c) 2006, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- *
- * Copyright (C) 2006-2008 Intel Corporation
- * Copyright IBM Corporation, 2008
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- *
- * Author: Allen M. Kay <allen.m.kay@intel.com>
- * Author: Weidong Han <weidong.han@intel.com>
- * Author: Ben-Ami Yassour <benami@il.ibm.com>
- */
-
-#include <linux/list.h>
-#include <linux/kvm_host.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/stat.h>
-#include <linux/dmar.h>
-#include <linux/iommu.h>
-#include <linux/intel-iommu.h>
-
-static bool allow_unsafe_assigned_interrupts;
-module_param_named(allow_unsafe_assigned_interrupts,
- allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
- "Enable device assignment on platforms without interrupt remapping support.");
-
-static int kvm_iommu_unmap_memslots(struct kvm *kvm);
-static void kvm_iommu_put_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages);
-
-static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
- unsigned long npages)
-{
- gfn_t end_gfn;
- pfn_t pfn;
-
- pfn = gfn_to_pfn_memslot(slot, gfn);
- end_gfn = gfn + npages;
- gfn += 1;
-
- if (is_error_noslot_pfn(pfn))
- return pfn;
-
- while (gfn < end_gfn)
- gfn_to_pfn_memslot(slot, gfn++);
-
- return pfn;
-}
-
-static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
-{
- unsigned long i;
-
- for (i = 0; i < npages; ++i)
- kvm_release_pfn_clean(pfn + i);
-}
-
-int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
- gfn_t gfn, end_gfn;
- pfn_t pfn;
- int r = 0;
- struct iommu_domain *domain = kvm->arch.iommu_domain;
- int flags;
-
- /* check if an iommu exists and is in use */
- if (!domain)
- return 0;
-
- gfn = slot->base_gfn;
- end_gfn = gfn + slot->npages;
-
- flags = IOMMU_READ;
- if (!(slot->flags & KVM_MEM_READONLY))
- flags |= IOMMU_WRITE;
- if (!kvm->arch.iommu_noncoherent)
- flags |= IOMMU_CACHE;
-
-
- while (gfn < end_gfn) {
- unsigned long page_size;
-
- /* Check if already mapped */
- if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
- gfn += 1;
- continue;
- }
-
- /* Get the page size we could use to map */
- page_size = kvm_host_page_size(kvm, gfn);
-
- /* Make sure the page_size does not exceed the memslot */
- while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
- page_size >>= 1;
-
- /* Make sure gfn is aligned to the page size we want to map */
- while ((gfn << PAGE_SHIFT) & (page_size - 1))
- page_size >>= 1;
-
- /* Make sure hva is aligned to the page size we want to map */
- while (__gfn_to_hva_memslot(slot, gfn) & (page_size - 1))
- page_size >>= 1;
-
- /*
- * Pin all pages we are about to map in memory. This is
- * important because we unmap and unpin in 4kb steps later.
- */
- pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
- if (is_error_noslot_pfn(pfn)) {
- gfn += 1;
- continue;
- }
-
- /* Map into IO address space */
- r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
- page_size, flags);
- if (r) {
- printk(KERN_ERR "kvm_iommu_map_address:"
- "iommu failed to map pfn=%llx\n", pfn);
- kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
- goto unmap_pages;
- }
-
- gfn += page_size >> PAGE_SHIFT;
-
-
- }
-
- return 0;
-
-unmap_pages:
- kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn);
- return r;
-}
-
-static int kvm_iommu_map_memslots(struct kvm *kvm)
-{
- int idx, r = 0;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
-
- if (kvm->arch.iommu_noncoherent)
- kvm_arch_register_noncoherent_dma(kvm);
-
- idx = srcu_read_lock(&kvm->srcu);
- slots = kvm_memslots(kvm);
-
- kvm_for_each_memslot(memslot, slots) {
- r = kvm_iommu_map_pages(kvm, memslot);
- if (r)
- break;
- }
- srcu_read_unlock(&kvm->srcu, idx);
-
- return r;
-}
-
-int kvm_assign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
-{
- struct pci_dev *pdev = NULL;
- struct iommu_domain *domain = kvm->arch.iommu_domain;
- int r;
- bool noncoherent;
-
- /* check if an iommu exists and is in use */
- if (!domain)
- return 0;
-
- pdev = assigned_dev->dev;
- if (pdev == NULL)
- return -ENODEV;
-
- r = iommu_attach_device(domain, &pdev->dev);
- if (r) {
- dev_err(&pdev->dev, "kvm assign device failed ret %d", r);
- return r;
- }
-
- noncoherent = !iommu_capable(&pci_bus_type, IOMMU_CAP_CACHE_COHERENCY);
-
- /* Check if we need to update the IOMMU page table for guest memory */
- if (noncoherent != kvm->arch.iommu_noncoherent) {
- kvm_iommu_unmap_memslots(kvm);
- kvm->arch.iommu_noncoherent = noncoherent;
- r = kvm_iommu_map_memslots(kvm);
- if (r)
- goto out_unmap;
- }
-
- pci_set_dev_assigned(pdev);
-
- dev_info(&pdev->dev, "kvm assign device\n");
-
- return 0;
-out_unmap:
- kvm_iommu_unmap_memslots(kvm);
- return r;
-}
-
-int kvm_deassign_device(struct kvm *kvm,
- struct kvm_assigned_dev_kernel *assigned_dev)
-{
- struct iommu_domain *domain = kvm->arch.iommu_domain;
- struct pci_dev *pdev = NULL;
-
- /* check if an iommu exists and is in use */
- if (!domain)
- return 0;
-
- pdev = assigned_dev->dev;
- if (pdev == NULL)
- return -ENODEV;
-
- iommu_detach_device(domain, &pdev->dev);
-
- pci_clear_dev_assigned(pdev);
-
- dev_info(&pdev->dev, "kvm deassign device\n");
-
- return 0;
-}
-
-int kvm_iommu_map_guest(struct kvm *kvm)
-{
- int r;
-
- if (!iommu_present(&pci_bus_type)) {
- printk(KERN_ERR "%s: iommu not found\n", __func__);
- return -ENODEV;
- }
-
- mutex_lock(&kvm->slots_lock);
-
- kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
- if (!kvm->arch.iommu_domain) {
- r = -ENOMEM;
- goto out_unlock;
- }
-
- if (!allow_unsafe_assigned_interrupts &&
- !iommu_capable(&pci_bus_type, IOMMU_CAP_INTR_REMAP)) {
- printk(KERN_WARNING "%s: No interrupt remapping support,"
- " disallowing device assignment."
- " Re-enble with \"allow_unsafe_assigned_interrupts=1\""
- " module option.\n", __func__);
- iommu_domain_free(kvm->arch.iommu_domain);
- kvm->arch.iommu_domain = NULL;
- r = -EPERM;
- goto out_unlock;
- }
-
- r = kvm_iommu_map_memslots(kvm);
- if (r)
- kvm_iommu_unmap_memslots(kvm);
-
-out_unlock:
- mutex_unlock(&kvm->slots_lock);
- return r;
-}
-
-static void kvm_iommu_put_pages(struct kvm *kvm,
- gfn_t base_gfn, unsigned long npages)
-{
- struct iommu_domain *domain;
- gfn_t end_gfn, gfn;
- pfn_t pfn;
- u64 phys;
-
- domain = kvm->arch.iommu_domain;
- end_gfn = base_gfn + npages;
- gfn = base_gfn;
-
- /* check if an iommu exists and is in use */
- if (!domain)
- return;
-
- while (gfn < end_gfn) {
- unsigned long unmap_pages;
- size_t size;
-
- /* Get physical address */
- phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
-
- if (!phys) {
- gfn++;
- continue;
- }
-
- pfn = phys >> PAGE_SHIFT;
-
- /* Unmap address from IO address space */
- size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
- unmap_pages = 1ULL << get_order(size);
-
- /* Unpin all pages we just unmapped to not leak any memory */
- kvm_unpin_pages(kvm, pfn, unmap_pages);
-
- gfn += unmap_pages;
- }
-}
-
-void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
-{
- kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
-}
-
-static int kvm_iommu_unmap_memslots(struct kvm *kvm)
-{
- int idx;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
-
- idx = srcu_read_lock(&kvm->srcu);
- slots = kvm_memslots(kvm);
-
- kvm_for_each_memslot(memslot, slots)
- kvm_iommu_unmap_pages(kvm, memslot);
-
- srcu_read_unlock(&kvm->srcu, idx);
-
- if (kvm->arch.iommu_noncoherent)
- kvm_arch_unregister_noncoherent_dma(kvm);
-
- return 0;
-}
-
-int kvm_iommu_unmap_guest(struct kvm *kvm)
-{
- struct iommu_domain *domain = kvm->arch.iommu_domain;
-
- /* check if an iommu exists and is in use */
- if (!domain)
- return 0;
-
- mutex_lock(&kvm->slots_lock);
- kvm_iommu_unmap_memslots(kvm);
- kvm->arch.iommu_domain = NULL;
- kvm->arch.iommu_noncoherent = false;
- mutex_unlock(&kvm->slots_lock);
-
- iommu_domain_free(domain);
- return 0;
-}
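
The mapping loop in kvm_iommu_map_pages() above repeatedly halves the
candidate mapping size until it fits within the memslot and both the guest
and host addresses are aligned to it, so larger IOMMU pages are used where
possible. The same logic as a stand-alone sketch (types simplified from the
code above):

static unsigned long usable_map_size(unsigned long page_size, u64 gfn,
				     u64 end_gfn, unsigned long hva)
{
	/* Must not spill past the end of the memslot... */
	while (gfn + (page_size >> PAGE_SHIFT) > end_gfn)
		page_size >>= 1;

	/* ...and both gfn and hva must be aligned to the mapped size. */
	while ((gfn << PAGE_SHIFT) & (page_size - 1))
		page_size >>= 1;
	while (hva & (page_size - 1))
		page_size >>= 1;

	return page_size;
}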
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
deleted file mode 100644
index 963b8995a9e8..000000000000
--- a/virt/kvm/irq_comm.c
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * irq_comm.c: Common API for in-kernel interrupt controllers
- * Copyright (c) 2007, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- * Authors:
- * Yaozu (Eddie) Dong <Eddie.dong@intel.com>
- *
- * Copyright 2010 Red Hat, Inc. and/or its affiliates.
- */
-
-#include <linux/kvm_host.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <trace/events/kvm.h>
-
-#include <asm/msidef.h>
-#ifdef CONFIG_IA64
-#include <asm/iosapic.h>
-#endif
-
-#include "irq.h"
-
-#include "ioapic.h"
-
-static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
-#ifdef CONFIG_X86
- struct kvm_pic *pic = pic_irqchip(kvm);
- return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
-#else
- return -1;
-#endif
-}
-
-static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level,
- bool line_status)
-{
- struct kvm_ioapic *ioapic = kvm->arch.vioapic;
- return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
- line_status);
-}
-
-static inline bool kvm_is_dm_lowest_prio(struct kvm_lapic_irq *irq)
-{
-#ifdef CONFIG_IA64
- return irq->delivery_mode ==
- (IOSAPIC_LOWEST_PRIORITY << IOSAPIC_DELIVERY_SHIFT);
-#else
- return irq->delivery_mode == APIC_DM_LOWEST;
-#endif
-}
-
-int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, unsigned long *dest_map)
-{
- int i, r = -1;
- struct kvm_vcpu *vcpu, *lowest = NULL;
-
- if (irq->dest_mode == 0 && irq->dest_id == 0xff &&
- kvm_is_dm_lowest_prio(irq)) {
- printk(KERN_INFO "kvm: apic: phys broadcast and lowest prio\n");
- irq->delivery_mode = APIC_DM_FIXED;
- }
-
- if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
- return r;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- if (!kvm_apic_present(vcpu))
- continue;
-
- if (!kvm_apic_match_dest(vcpu, src, irq->shorthand,
- irq->dest_id, irq->dest_mode))
- continue;
-
- if (!kvm_is_dm_lowest_prio(irq)) {
- if (r < 0)
- r = 0;
- r += kvm_apic_set_irq(vcpu, irq, dest_map);
- } else if (kvm_lapic_enabled(vcpu)) {
- if (!lowest)
- lowest = vcpu;
- else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
- lowest = vcpu;
- }
- }
-
- if (lowest)
- r = kvm_apic_set_irq(lowest, irq, dest_map);
-
- return r;
-}
-
-static inline void kvm_set_msi_irq(struct kvm_kernel_irq_routing_entry *e,
- struct kvm_lapic_irq *irq)
-{
- trace_kvm_msi_set_irq(e->msi.address_lo, e->msi.data);
-
- irq->dest_id = (e->msi.address_lo &
- MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
- irq->vector = (e->msi.data &
- MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
- irq->dest_mode = (1 << MSI_ADDR_DEST_MODE_SHIFT) & e->msi.address_lo;
- irq->trig_mode = (1 << MSI_DATA_TRIGGER_SHIFT) & e->msi.data;
- irq->delivery_mode = e->msi.data & 0x700;
- irq->level = 1;
- irq->shorthand = 0;
- /* TODO Deal with RH bit of MSI message address */
-}
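As a worked decode of the field extraction above (layout per asm/msidef.h:
destination ID in address bits 19:12, destination mode in address bit 2, vector
in data bits 7:0, delivery mode in data bits 10:8, trigger mode in data bit 15),
take one hypothetical MSI message:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical message, as a guest driver might program it. */
	uint32_t address_lo = 0xfee01000;	/* dest ID 0x01, physical */
	uint32_t data       = 0x0031;		/* vector 0x31, fixed, edge */

	printf("dest_id       = 0x%x\n", (address_lo >> 12) & 0xff);
	printf("dest_mode bit = %u\n",   (address_lo >> 2) & 1);
	printf("vector        = 0x%x\n", data & 0xff);
	printf("delivery_mode = 0x%x\n", data & 0x700);
	printf("trigger bit   = %u\n",   (data >> 15) & 1);
	return 0;
}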
-
-int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm, int irq_source_id, int level, bool line_status)
-{
- struct kvm_lapic_irq irq;
-
- if (!level)
- return -1;
-
- kvm_set_msi_irq(e, &irq);
-
- return kvm_irq_delivery_to_apic(kvm, NULL, &irq, NULL);
-}
-
-static int kvm_set_msi_inatomic(struct kvm_kernel_irq_routing_entry *e,
- struct kvm *kvm)
-{
- struct kvm_lapic_irq irq;
- int r;
-
- kvm_set_msi_irq(e, &irq);
-
- if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
- return r;
- else
- return -EWOULDBLOCK;
-}
-
-/*
- * Deliver an IRQ in an atomic context if we can, or return a failure;
- * the caller can retry in a process context.
- * Return value:
- * -EWOULDBLOCK - Can't deliver in atomic context: retry in a process context.
- * Other values - No need to retry.
- */
-int kvm_set_irq_inatomic(struct kvm *kvm, int irq_source_id, u32 irq, int level)
-{
- struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
- struct kvm_kernel_irq_routing_entry *e;
- int ret = -EINVAL;
- int idx;
-
- trace_kvm_set_irq(irq, level, irq_source_id);
-
- /*
- * Injection into either PIC or IOAPIC might need to scan all CPUs,
-	 * which would need to be retried from thread context; when the same
-	 * GSI is connected to both PIC and IOAPIC, we would have to report a
-	 * partial failure here.
-	 * Since there is no easy way to do this, we only support injecting MSI,
-	 * which is limited to a 1:1 GSI mapping.
- */
- idx = srcu_read_lock(&kvm->irq_srcu);
- if (kvm_irq_map_gsi(kvm, entries, irq) > 0) {
- e = &entries[0];
- if (likely(e->type == KVM_IRQ_ROUTING_MSI))
- ret = kvm_set_msi_inatomic(e, kvm);
- else
- ret = -EWOULDBLOCK;
- }
- srcu_read_unlock(&kvm->irq_srcu, idx);
- return ret;
-}
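The intended calling pattern is the split the assigned-device code used:
attempt the atomic path from the hard IRQ handler and fall back to the IRQ
thread on -EWOULDBLOCK. A sketch with hypothetical names (example_dev and both
handlers are illustrations, not KVM APIs):

/* Hypothetical per-device state. */
struct example_dev {
	struct kvm *kvm;
	int source_id;
	u32 gsi;
};

static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;
	int ret = kvm_set_irq_inatomic(dev->kvm, dev->source_id,
				       dev->gsi, 1);

	/* Not an MSI route: wake the thread and retry sleepably. */
	return ret == -EWOULDBLOCK ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static irqreturn_t example_thread(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	kvm_set_irq(dev->kvm, dev->source_id, dev->gsi, 1, false);
	return IRQ_HANDLED;
}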
-
-int kvm_request_irq_source_id(struct kvm *kvm)
-{
- unsigned long *bitmap = &kvm->arch.irq_sources_bitmap;
- int irq_source_id;
-
- mutex_lock(&kvm->irq_lock);
- irq_source_id = find_first_zero_bit(bitmap, BITS_PER_LONG);
-
- if (irq_source_id >= BITS_PER_LONG) {
- printk(KERN_WARNING "kvm: exhaust allocatable IRQ sources!\n");
- irq_source_id = -EFAULT;
- goto unlock;
- }
-
- ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
-#ifdef CONFIG_X86
- ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-#endif
- set_bit(irq_source_id, bitmap);
-unlock:
- mutex_unlock(&kvm->irq_lock);
-
- return irq_source_id;
-}
-
-void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id)
-{
- ASSERT(irq_source_id != KVM_USERSPACE_IRQ_SOURCE_ID);
-#ifdef CONFIG_X86
- ASSERT(irq_source_id != KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID);
-#endif
-
- mutex_lock(&kvm->irq_lock);
- if (irq_source_id < 0 ||
- irq_source_id >= BITS_PER_LONG) {
- printk(KERN_ERR "kvm: IRQ source ID out of range!\n");
- goto unlock;
- }
- clear_bit(irq_source_id, &kvm->arch.irq_sources_bitmap);
- if (!irqchip_in_kernel(kvm))
- goto unlock;
-
- kvm_ioapic_clear_all(kvm->arch.vioapic, irq_source_id);
-#ifdef CONFIG_X86
- kvm_pic_clear_all(pic_irqchip(kvm), irq_source_id);
-#endif
-unlock:
- mutex_unlock(&kvm->irq_lock);
-}
-
-void kvm_register_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn)
-{
- mutex_lock(&kvm->irq_lock);
- kimn->irq = irq;
- hlist_add_head_rcu(&kimn->link, &kvm->mask_notifier_list);
- mutex_unlock(&kvm->irq_lock);
-}
-
-void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
- struct kvm_irq_mask_notifier *kimn)
-{
- mutex_lock(&kvm->irq_lock);
- hlist_del_rcu(&kimn->link);
- mutex_unlock(&kvm->irq_lock);
- synchronize_srcu(&kvm->irq_srcu);
-}
-
-void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
- bool mask)
-{
- struct kvm_irq_mask_notifier *kimn;
- int idx, gsi;
-
- idx = srcu_read_lock(&kvm->irq_srcu);
- gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
- if (gsi != -1)
- hlist_for_each_entry_rcu(kimn, &kvm->mask_notifier_list, link)
- if (kimn->irq == gsi)
- kimn->func(kimn, mask);
- srcu_read_unlock(&kvm->irq_srcu, idx);
-}
-
-int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
- const struct kvm_irq_routing_entry *ue)
-{
- int r = -EINVAL;
- int delta;
- unsigned max_pin;
-
- switch (ue->type) {
- case KVM_IRQ_ROUTING_IRQCHIP:
- delta = 0;
- switch (ue->u.irqchip.irqchip) {
- case KVM_IRQCHIP_PIC_MASTER:
- e->set = kvm_set_pic_irq;
- max_pin = PIC_NUM_PINS;
- break;
- case KVM_IRQCHIP_PIC_SLAVE:
- e->set = kvm_set_pic_irq;
- max_pin = PIC_NUM_PINS;
- delta = 8;
- break;
- case KVM_IRQCHIP_IOAPIC:
- max_pin = KVM_IOAPIC_NUM_PINS;
- e->set = kvm_set_ioapic_irq;
- break;
- default:
- goto out;
- }
- e->irqchip.irqchip = ue->u.irqchip.irqchip;
- e->irqchip.pin = ue->u.irqchip.pin + delta;
- if (e->irqchip.pin >= max_pin)
- goto out;
- break;
- case KVM_IRQ_ROUTING_MSI:
- e->set = kvm_set_msi;
- e->msi.address_lo = ue->u.msi.address_lo;
- e->msi.address_hi = ue->u.msi.address_hi;
- e->msi.data = ue->u.msi.data;
- break;
- default:
- goto out;
- }
-
- r = 0;
-out:
- return r;
-}
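For example, the delta of 8 folds both PICs into one pin space; a hypothetical
userspace entry targeting pin 2 of the slave PIC is stored internally as pin 10
and still passes the PIC_NUM_PINS (16) bound:

/* Hypothetical userspace routing entry for the slave PIC. */
struct kvm_irq_routing_entry ue = {
	.gsi  = 10,
	.type = KVM_IRQ_ROUTING_IRQCHIP,
	.u.irqchip = { .irqchip = KVM_IRQCHIP_PIC_SLAVE, .pin = 2 },
};
/* kvm_set_routing_entry() stores e->irqchip.pin = 2 + 8 = 10, so
 * master pins occupy 0-7 and slave pins occupy 8-15 internally. */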
-
-#define IOAPIC_ROUTING_ENTRY(irq) \
- { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
- .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
-#define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
-
-#ifdef CONFIG_X86
-# define PIC_ROUTING_ENTRY(irq) \
- { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP, \
- .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
-# define ROUTING_ENTRY2(irq) \
- IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
-#else
-# define ROUTING_ENTRY2(irq) \
- IOAPIC_ROUTING_ENTRY(irq)
-#endif
-
-static const struct kvm_irq_routing_entry default_routing[] = {
- ROUTING_ENTRY2(0), ROUTING_ENTRY2(1),
- ROUTING_ENTRY2(2), ROUTING_ENTRY2(3),
- ROUTING_ENTRY2(4), ROUTING_ENTRY2(5),
- ROUTING_ENTRY2(6), ROUTING_ENTRY2(7),
- ROUTING_ENTRY2(8), ROUTING_ENTRY2(9),
- ROUTING_ENTRY2(10), ROUTING_ENTRY2(11),
- ROUTING_ENTRY2(12), ROUTING_ENTRY2(13),
- ROUTING_ENTRY2(14), ROUTING_ENTRY2(15),
- ROUTING_ENTRY1(16), ROUTING_ENTRY1(17),
- ROUTING_ENTRY1(18), ROUTING_ENTRY1(19),
- ROUTING_ENTRY1(20), ROUTING_ENTRY1(21),
- ROUTING_ENTRY1(22), ROUTING_ENTRY1(23),
-#ifdef CONFIG_IA64
- ROUTING_ENTRY1(24), ROUTING_ENTRY1(25),
- ROUTING_ENTRY1(26), ROUTING_ENTRY1(27),
- ROUTING_ENTRY1(28), ROUTING_ENTRY1(29),
- ROUTING_ENTRY1(30), ROUTING_ENTRY1(31),
- ROUTING_ENTRY1(32), ROUTING_ENTRY1(33),
- ROUTING_ENTRY1(34), ROUTING_ENTRY1(35),
- ROUTING_ENTRY1(36), ROUTING_ENTRY1(37),
- ROUTING_ENTRY1(38), ROUTING_ENTRY1(39),
- ROUTING_ENTRY1(40), ROUTING_ENTRY1(41),
- ROUTING_ENTRY1(42), ROUTING_ENTRY1(43),
- ROUTING_ENTRY1(44), ROUTING_ENTRY1(45),
- ROUTING_ENTRY1(46), ROUTING_ENTRY1(47),
-#endif
-};
-
-int kvm_setup_default_irq_routing(struct kvm *kvm)
-{
- return kvm_set_irq_routing(kvm, default_routing,
- ARRAY_SIZE(default_routing), 0);
-}
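Written out by hand, ROUTING_ENTRY2(3) on x86 contributed two entries for the
same GSI, one per interrupt controller (this expansion assumes SELECT_PIC()
resolves to the master PIC for pins 0-7, per its definition in irq.h):

/* Hand expansion of ROUTING_ENTRY2(3) on x86: */
{ .gsi = 3, .type = KVM_IRQ_ROUTING_IRQCHIP,
  .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = 3 } },
{ .gsi = 3, .type = KVM_IRQ_ROUTING_IRQCHIP,
  .u.irqchip = { .irqchip = SELECT_PIC(3), .pin = 3 % 8 } },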
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3cee7b167052..f5283438ee05 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -124,15 +124,6 @@ int vcpu_load(struct kvm_vcpu *vcpu)
if (mutex_lock_killable(&vcpu->mutex))
return -EINTR;
- if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
- /* The thread running this VCPU changed. */
- struct pid *oldpid = vcpu->pid;
- struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
- rcu_assign_pointer(vcpu->pid, newpid);
- if (oldpid)
- synchronize_rcu();
- put_pid(oldpid);
- }
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
@@ -468,9 +459,6 @@ static struct kvm *kvm_create_vm(unsigned long type)
if (r)
goto out_err_no_disable;
-#ifdef CONFIG_HAVE_KVM_IRQCHIP
- INIT_HLIST_HEAD(&kvm->mask_notifier_list);
-#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
@@ -668,48 +656,46 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
return 0;
}
-static int cmp_memslot(const void *slot1, const void *slot2)
-{
- struct kvm_memory_slot *s1, *s2;
-
- s1 = (struct kvm_memory_slot *)slot1;
- s2 = (struct kvm_memory_slot *)slot2;
-
- if (s1->npages < s2->npages)
- return 1;
- if (s1->npages > s2->npages)
- return -1;
-
- return 0;
-}
-
/*
- * Sort the memslots base on its size, so the larger slots
- * will get better fit.
+ * Insert the memslot and re-sort the memslots based on their GFN,
+ * so that a binary search can be used to look up a GFN.
+ * The sort takes advantage of the initially sorted array and the
+ * known position of the changed memslot.
*/
-static void sort_memslots(struct kvm_memslots *slots)
-{
- int i;
-
- sort(slots->memslots, KVM_MEM_SLOTS_NUM,
- sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
-
- for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
- slots->id_to_index[slots->memslots[i].id] = i;
-}
-
static void update_memslots(struct kvm_memslots *slots,
struct kvm_memory_slot *new)
{
- if (new) {
- int id = new->id;
- struct kvm_memory_slot *old = id_to_memslot(slots, id);
- unsigned long npages = old->npages;
+ int id = new->id;
+ int i = slots->id_to_index[id];
+ struct kvm_memory_slot *mslots = slots->memslots;
- *old = *new;
- if (new->npages != npages)
- sort_memslots(slots);
+ WARN_ON(mslots[i].id != id);
+ if (!new->npages) {
+ new->base_gfn = 0;
+ if (mslots[i].npages)
+ slots->used_slots--;
+ } else {
+ if (!mslots[i].npages)
+ slots->used_slots++;
}
+
+ while (i < KVM_MEM_SLOTS_NUM - 1 &&
+ new->base_gfn <= mslots[i + 1].base_gfn) {
+ if (!mslots[i + 1].npages)
+ break;
+ mslots[i] = mslots[i + 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i++;
+ }
+ while (i > 0 &&
+ new->base_gfn > mslots[i - 1].base_gfn) {
+ mslots[i] = mslots[i - 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i--;
+ }
+
+ mslots[i] = *new;
+ slots->id_to_index[mslots[i].id] = i;
}
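The shifting above moves the changed slot at most the distance between its old
and new positions, keeping the array sorted by base_gfn in descending order. A
standalone sketch of the same single-element re-sort, with a simplified slot
type (the empty-slot handling is omitted):

#include <stdio.h>

struct slot { int id; unsigned long base_gfn; };

/* Re-position s[i] after its base_gfn changed, keeping the array
 * sorted in descending base_gfn order by shifting neighbours. */
static void resort_one(struct slot *s, int n, int i)
{
	struct slot moved = s[i];

	while (i < n - 1 && moved.base_gfn <= s[i + 1].base_gfn) {
		s[i] = s[i + 1];
		i++;
	}
	while (i > 0 && moved.base_gfn > s[i - 1].base_gfn) {
		s[i] = s[i - 1];
		i--;
	}
	s[i] = moved;
}

int main(void)
{
	struct slot s[] = { { 0, 0x800 }, { 1, 0x400 }, { 2, 0x100 } };
	int i;

	s[2].base_gfn = 0x600;	/* slot 2 was given a new base_gfn */
	resort_one(s, 3, 2);
	for (i = 0; i < 3; i++)
		printf("%d: id=%d base_gfn=0x%lx\n", i, s[i].id, s[i].base_gfn);
	return 0;
}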
static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
@@ -727,7 +713,7 @@ static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
}
static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
- struct kvm_memslots *slots, struct kvm_memory_slot *new)
+ struct kvm_memslots *slots)
{
struct kvm_memslots *old_memslots = kvm->memslots;
@@ -738,7 +724,6 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
WARN_ON(old_memslots->generation & 1);
slots->generation = old_memslots->generation + 1;
- update_memslots(slots, new);
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
@@ -760,7 +745,7 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
*
* Discontiguous memory is allowed, mostly for framebuffers.
*
- * Must be called holding mmap_sem for write.
+ * Must be called holding kvm->slots_lock for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem)
@@ -866,15 +851,16 @@ int __kvm_set_memory_region(struct kvm *kvm,
goto out_free;
}
+ slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
+ GFP_KERNEL);
+ if (!slots)
+ goto out_free;
+
if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
- GFP_KERNEL);
- if (!slots)
- goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
- old_memslots = install_new_memslots(kvm, slots, NULL);
+ old_memslots = install_new_memslots(kvm, slots);
/* slot was deleted or moved, clear iommu mapping */
kvm_iommu_unmap_pages(kvm, &old);
@@ -886,6 +872,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
+
+ /*
+		 * We can re-use the old_memslots from above; the only difference
+		 * from the currently installed memslots is the invalid flag, which
+		 * update_memslots() will overwrite anyway.
+ */
slots = old_memslots;
}
@@ -893,26 +885,14 @@ int __kvm_set_memory_region(struct kvm *kvm,
if (r)
goto out_slots;
- r = -ENOMEM;
- /*
- * We can re-use the old_memslots from above, the only difference
- * from the currently installed memslots is the invalid flag. This
- * will get overwritten by update_memslots anyway.
- */
- if (!slots) {
- slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
- GFP_KERNEL);
- if (!slots)
- goto out_free;
- }
-
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (change == KVM_MR_DELETE) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
- old_memslots = install_new_memslots(kvm, slots, &new);
+ update_memslots(slots, &new);
+ old_memslots = install_new_memslots(kvm, slots);
kvm_arch_commit_memory_region(kvm, mem, &old, change);
@@ -1799,10 +1779,6 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target)
rcu_read_unlock();
if (!task)
return ret;
- if (task->flags & PF_VCPU) {
- put_task_struct(task);
- return ret;
- }
ret = yield_to(task, 1);
put_task_struct(task);
@@ -2065,6 +2041,15 @@ static long kvm_vcpu_ioctl(struct file *filp,
r = -EINVAL;
if (arg)
goto out;
+ if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
+ /* The thread running this VCPU changed. */
+ struct pid *oldpid = vcpu->pid;
+ struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
+ rcu_assign_pointer(vcpu->pid, newpid);
+ if (oldpid)
+ synchronize_rcu();
+ put_pid(oldpid);
+ }
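The block moved here is the standard RCU publish-then-reclaim pattern: install
the new pid pointer first, wait for readers of the old pointer to drain, then
drop the reference. A generic sketch of the same pattern outside KVM ('cfg' and
'current_cfg' are hypothetical names for illustration):

/* Generic RCU pointer replacement, mirroring the vcpu->pid update. */
struct cfg { int value; };

static struct cfg __rcu *current_cfg;

static void replace_cfg(struct cfg *newc)
{
	struct cfg *oldc = rcu_dereference_protected(current_cfg, 1);

	rcu_assign_pointer(current_cfg, newc);	/* readers see newc now */
	if (oldc) {
		synchronize_rcu();		/* wait out old readers */
		kfree(oldc);
	}
}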
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
@@ -2599,8 +2584,6 @@ static long kvm_vm_ioctl(struct file *filp,
break;
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
- if (r == -ENOTTY)
- r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
}
out:
return r;