author		Mark Bloch <markb@mellanox.com>	2019-03-28 15:27:35 +0200
committer	Jason Gunthorpe <jgg@mellanox.com>	2019-04-10 15:05:39 -0300
commit		da796ccb3e0eba24b15beedb168178c9b74ce6f2 (patch)
tree		318a90b9ab3e06d417dbc9620afdfa238d560bab
parent		4a6dc8552ab2f670fdff317a5ac1bc42f85a8772 (diff)
RDMA/mlx5: Move ports allocation to outside of INIT stage
In downstream patches we will need access to the ports before doing
any stages, in order to set net device per representor.

Signed-off-by: Mark Bloch <markb@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
-rw-r--r--	drivers/infiniband/hw/mlx5/ib_rep.c	| 12
-rw-r--r--	drivers/infiniband/hw/mlx5/main.c	| 24
2 files changed, 22 insertions(+), 14 deletions(-)
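
Before the full diff, a note on the ownership change it makes: the port array
used to be allocated and freed inside the INIT stage, so nothing before that
stage could touch dev->port; after this patch the caller allocates it before
any profile stage runs, and __mlx5_ib_remove() frees it only after every stage
has been cleaned up. The following is a minimal userspace C sketch of that
pattern; struct dev, dev_add(), run_stages() and dev_remove() are hypothetical
stand-ins for illustration, not the mlx5 code.

/*
 * Sketch of the new allocation ordering (hypothetical names): the
 * port array is allocated by the caller before any stage runs and
 * freed only after all stages are cleaned up, so every stage can
 * rely on dev->port being valid.
 */
#include <stdio.h>
#include <stdlib.h>

struct port { int id; };

struct dev {
	unsigned int num_ports;
	struct port *port;
};

/* Stand-in for __mlx5_ib_add(): stages may touch dev->port freely. */
static int run_stages(struct dev *dev)
{
	for (unsigned int i = 0; i < dev->num_ports; i++)
		dev->port[i].id = (int)i;
	return 0;
}

static struct dev *dev_add(unsigned int num_ports)
{
	struct dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return NULL;

	/* Allocate ports up front, before any stage executes. */
	dev->port = calloc(num_ports, sizeof(*dev->port));
	if (!dev->port) {
		free(dev);
		return NULL;
	}
	dev->num_ports = num_ports;

	if (run_stages(dev)) {
		free(dev->port);
		free(dev);
		return NULL;
	}
	return dev;
}

/* Stand-in for __mlx5_ib_remove(): ports outlive stage cleanup. */
static void dev_remove(struct dev *dev)
{
	/* ... per-stage cleanup would run here ... */
	free(dev->port);
	free(dev);
}

int main(void)
{
	struct dev *dev = dev_add(2);

	if (!dev)
		return 1;
	printf("ports: %u\n", dev->num_ports);
	dev_remove(dev);
	return 0;
}

Pairing the kfree() with ib_dealloc_device() in the remove path, rather than
with the INIT stage, keeps allocation and release at the same ownership level,
which is what lets downstream patches use the ports before any stage runs.
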
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
index 87d553396fb4..14ac728b460c 100644
--- a/drivers/infiniband/hw/mlx5/ib_rep.c
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -51,6 +51,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
 	const struct mlx5_ib_profile *profile;
 	struct mlx5_ib_dev *ibdev;
+	int num_ports = 1;
 
 	if (rep->vport == MLX5_VPORT_UPLINK)
 		profile = &uplink_rep_profile;
@@ -61,10 +62,17 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 	if (!ibdev)
 		return -ENOMEM;
 
+	ibdev->port = kcalloc(num_ports, sizeof(*ibdev->port),
+			      GFP_KERNEL);
+	if (!ibdev->port) {
+		ib_dealloc_device(&ibdev->ib_dev);
+		return -ENOMEM;
+	}
+
 	ibdev->rep = rep;
 	ibdev->mdev = dev;
-	ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
-			       MLX5_CAP_GEN(dev, num_vhca_ports));
+	ibdev->num_ports = num_ports;
+
 	if (!__mlx5_ib_add(ibdev, profile))
 		return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 23f31069ec0a..0d86b5266960 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -5844,7 +5844,6 @@ void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
 		srcu_barrier(&dev->mr_srcu);
 		cleanup_srcu_struct(&dev->mr_srcu);
 	}
-	kfree(dev->port);
 }
 
 int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
@@ -5853,11 +5852,6 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	int err;
 	int i;
 
-	dev->port = kcalloc(dev->num_ports, sizeof(*dev->port),
-			    GFP_KERNEL);
-	if (!dev->port)
-		return -ENOMEM;
-
 	for (i = 0; i < dev->num_ports; i++) {
 		spin_lock_init(&dev->port[i].mp.mpi_lock);
 		rwlock_init(&dev->port[i].roce.netdev_lock);
@@ -5865,7 +5859,7 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 	err = mlx5_ib_init_multiport_master(dev);
 	if (err)
-		goto err_free_port;
+		return err;
 
 	if (!mlx5_core_mp_enabled(mdev)) {
 		for (i = 1; i <= dev->num_ports; i++) {
@@ -5906,9 +5900,6 @@ int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
 err_mp:
 	mlx5_ib_cleanup_multiport_master(dev);
 
-err_free_port:
-	kfree(dev->port);
-
 	return -ENOMEM;
 }
@@ -6418,6 +6409,7 @@ void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
 			profile->stage[stage].cleanup(dev);
 	}
 
+	kfree(dev->port);
 	ib_dealloc_device(&dev->ib_dev);
 }
@@ -6593,6 +6585,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	enum rdma_link_layer ll;
 	struct mlx5_ib_dev *dev;
 	int port_type_cap;
+	int num_ports;
 
 	printk_once(KERN_INFO "%s", mlx5_version);
@@ -6608,13 +6601,20 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	if (mlx5_core_is_mp_slave(mdev) && ll == IB_LINK_LAYER_ETHERNET)
 		return mlx5_ib_add_slave_port(mdev);
 
+	num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+			MLX5_CAP_GEN(mdev, num_vhca_ports));
 	dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
 	if (!dev)
 		return NULL;
+	dev->port = kcalloc(num_ports, sizeof(*dev->port),
+			    GFP_KERNEL);
+	if (!dev->port) {
+		ib_dealloc_device((struct ib_device *)dev);
+		return NULL;
+	}
 
 	dev->mdev = mdev;
-	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
-			     MLX5_CAP_GEN(mdev, num_vhca_ports));
+	dev->num_ports = num_ports;
 
 	return __mlx5_ib_add(dev, &pf_profile);
 }