| author | Jason Gunthorpe <jgg@nvidia.com> | 2021-03-30 09:53:05 -0600 |
|---|---|---|
| committer | Alex Williamson <alex.williamson@redhat.com> | 2021-04-06 11:55:10 -0600 |
| commit | 0bfc6a4ea63c2adac71a824397ef48f28dbc5e47 (patch) | |
| tree | 2bc5ba0a34d54f7dece5067b4fa435588507d3e2 /drivers/vfio | |
| parent | 5e42c999445bd0ae86e35affeb3e7c473d74a893 (diff) | |
vfio: Split creation of a vfio_device into init and register ops
This makes the struct vfio_device part of the public interface so it
can be used with container_of and so forth, as is typical for a Linux
subsystem.
This is the first step to bring some type-safety to the vfio interface by
allowing the replacement of 'void *' and 'struct device *' inputs with a
simple and clear 'struct vfio_device *'.
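As an illustration of what this enables (a minimal sketch, not part of the patch; struct my_device, its vdev and bar0 members, and to_my_device() are hypothetical driver-side names), a driver can embed the now-public struct vfio_device in its own structure and recover its private data with container_of():

    #include <linux/kernel.h>
    #include <linux/vfio.h>

    /* Hypothetical driver structure embedding the vfio core object */
    struct my_device {
        struct vfio_device vdev;   /* visible to the driver, so container_of() works */
        void __iomem *bar0;        /* driver-private state */
    };

    static struct my_device *to_my_device(struct vfio_device *vdev)
    {
        /* Go from the embedded vfio_device back to the enclosing driver struct */
        return container_of(vdev, struct my_device, vdev);
    }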
For now the self-allocating vfio_add_group_dev() interface is kept so each
user can be updated as a separate patch.
The expected usage pattern is:

driver core probe() function:
    my_device = kzalloc(sizeof(*my_device), GFP_KERNEL);
    vfio_init_group_dev(&my_device->vdev, dev, ops, my_device);
    /* other driver specific prep */
    vfio_register_group_dev(&my_device->vdev);
    dev_set_drvdata(dev, my_device);

driver core remove() function:
    my_device = dev_get_drvdata(dev);
    vfio_unregister_group_dev(&my_device->vdev);
    /* other driver specific tear down */
    kfree(my_device);
This lets the driver use the drvdata and the vfio_device to go to/from its
own data.
The pattern also makes it clear that vfio_register_group_dev() must be
last in the sequence, as once it is called the core code can immediately
start calling ops. The init/register gap is provided to allow the driver
to do setup before ops can be called and thus avoid races.
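For completeness, the same probe flow with error handling looks like the sketch below (hedged: my_probe(), my_ops, my_device_hw_setup() and my_device_hw_teardown() are hypothetical driver-side names reusing struct my_device from the sketch above; only the vfio_* calls come from this patch). Registration stays the final step so no op can observe half-initialized state:

    #include <linux/device.h>
    #include <linux/slab.h>
    #include <linux/vfio.h>

    /* Hypothetical ops table and HW helpers; their contents are driver specific */
    static const struct vfio_device_ops my_ops;
    static int my_device_hw_setup(struct my_device *my_device);
    static void my_device_hw_teardown(struct my_device *my_device);

    static int my_probe(struct device *dev)
    {
        struct my_device *my_device;
        int ret;

        my_device = kzalloc(sizeof(*my_device), GFP_KERNEL);
        if (!my_device)
            return -ENOMEM;

        vfio_init_group_dev(&my_device->vdev, dev, &my_ops, my_device);

        ret = my_device_hw_setup(my_device);    /* other driver specific prep */
        if (ret)
            goto err_free;

        /* Must be last: the core may start calling ops as soon as this returns */
        ret = vfio_register_group_dev(&my_device->vdev);
        if (ret)
            goto err_teardown;

        dev_set_drvdata(dev, my_device);
        return 0;

    err_teardown:
        my_device_hw_teardown(my_device);
    err_free:
        kfree(my_device);
        return ret;
    }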
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Liu Yi L <yi.l.liu@intel.com>
Reviewed-by: Cornelia Huck <cohuck@redhat.com>
Reviewed-by: Max Gurtovoy <mgurtovoy@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Message-Id: <3-v3-225de1400dfc+4e074-vfio1_jgg@nvidia.com>
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Diffstat (limited to 'drivers/vfio')
-rw-r--r-- | drivers/vfio/vfio.c | 125 |
1 file changed, 65 insertions, 60 deletions
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 32660e8a69ae..2ea430de505b 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -89,16 +89,6 @@ struct vfio_group {
 	struct blocking_notifier_head	notifier;
 };
 
-struct vfio_device {
-	refcount_t			refcount;
-	struct completion		comp;
-	struct device			*dev;
-	const struct vfio_device_ops	*ops;
-	struct vfio_group		*group;
-	struct list_head		group_next;
-	void				*device_data;
-};
-
 #ifdef CONFIG_VFIO_NOIOMMU
 static bool noiommu __read_mostly;
 module_param_named(enable_unsafe_noiommu_mode,
@@ -532,35 +522,6 @@ static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
 /**
  * Device objects - create, release, get, put, search
  */
-static
-struct vfio_device *vfio_group_create_device(struct vfio_group *group,
-					     struct device *dev,
-					     const struct vfio_device_ops *ops,
-					     void *device_data)
-{
-	struct vfio_device *device;
-
-	device = kzalloc(sizeof(*device), GFP_KERNEL);
-	if (!device)
-		return ERR_PTR(-ENOMEM);
-
-	refcount_set(&device->refcount, 1);
-	init_completion(&device->comp);
-	device->dev = dev;
-	/* Our reference on group is moved to the device */
-	device->group = group;
-	device->ops = ops;
-	device->device_data = device_data;
-	dev_set_drvdata(dev, device);
-
-	mutex_lock(&group->device_lock);
-	list_add(&device->group_next, &group->device_list);
-	group->dev_counter++;
-	mutex_unlock(&group->device_lock);
-
-	return device;
-}
-
 /* Device reference always implies a group reference */
 void vfio_device_put(struct vfio_device *device)
 {
@@ -779,14 +740,23 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 
 /**
  * VFIO driver API
  */
-int vfio_add_group_dev(struct device *dev,
-		       const struct vfio_device_ops *ops, void *device_data)
+void vfio_init_group_dev(struct vfio_device *device, struct device *dev,
+			 const struct vfio_device_ops *ops, void *device_data)
+{
+	init_completion(&device->comp);
+	device->dev = dev;
+	device->ops = ops;
+	device->device_data = device_data;
+}
+EXPORT_SYMBOL_GPL(vfio_init_group_dev);
+
+int vfio_register_group_dev(struct vfio_device *device)
 {
+	struct vfio_device *existing_device;
 	struct iommu_group *iommu_group;
 	struct vfio_group *group;
-	struct vfio_device *device;
 
-	iommu_group = iommu_group_get(dev);
+	iommu_group = iommu_group_get(device->dev);
 	if (!iommu_group)
 		return -EINVAL;
@@ -805,21 +775,50 @@ int vfio_add_group_dev(struct device *dev,
 		iommu_group_put(iommu_group);
 	}
 
-	device = vfio_group_get_device(group, dev);
-	if (device) {
-		dev_WARN(dev, "Device already exists on group %d\n",
+	existing_device = vfio_group_get_device(group, device->dev);
+	if (existing_device) {
+		dev_WARN(device->dev, "Device already exists on group %d\n",
 			 iommu_group_id(iommu_group));
-		vfio_device_put(device);
+		vfio_device_put(existing_device);
 		vfio_group_put(group);
 		return -EBUSY;
 	}
 
-	device = vfio_group_create_device(group, dev, ops, device_data);
-	if (IS_ERR(device)) {
-		vfio_group_put(group);
-		return PTR_ERR(device);
-	}
+	/* Our reference on group is moved to the device */
+	device->group = group;
+
+	/* Refcounting can't start until the driver calls register */
+	refcount_set(&device->refcount, 1);
+
+	mutex_lock(&group->device_lock);
+	list_add(&device->group_next, &group->device_list);
+	group->dev_counter++;
+	mutex_unlock(&group->device_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vfio_register_group_dev);
+
+int vfio_add_group_dev(struct device *dev, const struct vfio_device_ops *ops,
+		       void *device_data)
+{
+	struct vfio_device *device;
+	int ret;
+
+	device = kzalloc(sizeof(*device), GFP_KERNEL);
+	if (!device)
+		return -ENOMEM;
+
+	vfio_init_group_dev(device, dev, ops, device_data);
+	ret = vfio_register_group_dev(device);
+	if (ret)
+		goto err_kfree;
+	dev_set_drvdata(dev, device);
 
 	return 0;
+
+err_kfree:
+	kfree(device);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vfio_add_group_dev);
@@ -887,11 +886,9 @@ EXPORT_SYMBOL_GPL(vfio_device_data);
 /*
  * Decrement the device reference count and wait for the device to be
  * removed.  Open file descriptors for the device... */
-void *vfio_del_group_dev(struct device *dev)
+void vfio_unregister_group_dev(struct vfio_device *device)
 {
-	struct vfio_device *device = dev_get_drvdata(dev);
 	struct vfio_group *group = device->group;
-	void *device_data = device->device_data;
 	struct vfio_unbound_dev *unbound;
 	unsigned int i = 0;
 	bool interrupted = false;
@@ -908,7 +905,7 @@ void *vfio_del_group_dev(struct device *dev)
 	 */
 	unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
 	if (unbound) {
-		unbound->dev = dev;
+		unbound->dev = device->dev;
 		mutex_lock(&group->unbound_lock);
 		list_add(&unbound->unbound_next, &group->unbound_list);
 		mutex_unlock(&group->unbound_lock);
@@ -919,7 +916,7 @@ void *vfio_del_group_dev(struct device *dev)
 	rc = try_wait_for_completion(&device->comp);
 	while (rc <= 0) {
 		if (device->ops->request)
-			device->ops->request(device_data, i++);
+			device->ops->request(device->device_data, i++);
 
 		if (interrupted) {
 			rc = wait_for_completion_timeout(&device->comp,
@@ -929,7 +926,7 @@ void *vfio_del_group_dev(struct device *dev)
 						    &device->comp, HZ * 10);
 			if (rc < 0) {
 				interrupted = true;
-				dev_warn(dev,
+				dev_warn(device->dev,
 					 "Device is currently in use, task"
 					 " \"%s\" (%d) "
 					 "blocked until device is released",
@@ -960,11 +957,19 @@ void *vfio_del_group_dev(struct device *dev)
 	if (list_empty(&group->device_list))
 		wait_event(group->container_q, !group->container);
 
-	/* Matches the get in vfio_group_create_device() */
+	/* Matches the get in vfio_register_group_dev() */
 	vfio_group_put(group);
+}
+EXPORT_SYMBOL_GPL(vfio_unregister_group_dev);
+
+void *vfio_del_group_dev(struct device *dev)
+{
+	struct vfio_device *device = dev_get_drvdata(dev);
+	void *device_data = device->device_data;
+
+	vfio_unregister_group_dev(device);
 	dev_set_drvdata(dev, NULL);
 	kfree(device);
-
 	return device_data;
 }
 EXPORT_SYMBOL_GPL(vfio_del_group_dev);