From 5ab2d89b85a6c5c2d7604c9674f77a68f3d24f91 Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:32 +0300
Subject: IB/cma: Fix erroneous validation of supported default GID type

When rdma_cm is initializing a cma_device, it checks whether the device
supports the preferred default GID type. This check was done in the
wrong way, so rdma_cm sometimes came up with a default GID type that is
not supported by the device. Fix that by checking for a supported GID
type properly.

Fixes: 3c7f67d1880d ("IB/cma: Fix default RoCE type setting")
Signed-off-by: Moni Shoua
Signed-off-by: Leon Romanovsky
Reviewed-by: Selvin Xavier
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/cma.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b76de2e2b209..852c8fec8088 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -72,7 +72,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 #define CMA_MAX_CM_RETRIES 15
 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
 #define CMA_IBOE_PACKET_LIFETIME 18
-#define CMA_PREFERRED_ROCE_GID_TYPE (1 << IB_GID_TYPE_ROCE_UDP_ENCAP)
+#define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP

 static const char * const cma_events[] = {
 	[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
@@ -4282,7 +4282,7 @@ static void cma_add_one(struct ib_device *device)
 	for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
 		supported_gids = roce_gid_type_mask_support(device, i);
 		WARN_ON(!supported_gids);
-		if (supported_gids & CMA_PREFERRED_ROCE_GID_TYPE)
+		if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
 			cma_dev->default_gid_type[i - rdma_start_port(device)] =
 				CMA_PREFERRED_ROCE_GID_TYPE;
 		else
--
cgit v1.2.3


From 89caa0538ecf2114e1badbb1e75120d25bec985e Mon Sep 17 00:00:00 2001
From: Parav Pandit
Date: Thu, 17 Aug 2017 15:50:33 +0300
Subject: IB/uverbs: Introduce and use helper functions to copy ah attributes

This patch introduces two helper functions to copy ah attributes from
uverbs to the internal ib_ah_attr structure and the other way around,
during modify qp and query qp respectively.
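
As a minimal userspace sketch of the refactoring pattern (editor's
illustration; the struct and field names are simplified stand-ins, not
the real uverbs structures, which copy many more fields):

#include <assert.h>

struct uverbs_dest { int dlid, sl, port_num; };
struct ah_attr     { int dlid, sl, port_num; };

/* One helper replaces two identical open-coded copy blocks. */
static void copy_ah_attr_to_dest(struct uverbs_dest *d, const struct ah_attr *a)
{
	d->dlid = a->dlid;
	d->sl = a->sl;
	d->port_num = a->port_num;
}

int main(void)
{
	struct ah_attr primary = {10, 2, 1}, alt = {20, 3, 2};
	struct uverbs_dest dest, alt_dest;

	copy_ah_attr_to_dest(&dest, &primary);   /* was: inline block #1 */
	copy_ah_attr_to_dest(&alt_dest, &alt);   /* was: inline block #2 */
	assert(dest.dlid == 10 && alt_dest.dlid == 20);
	return 0;
}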
Signed-off-by: Parav Pandit
Reviewed-by: Daniel Jurgens
Signed-off-by: Leon Romanovsky
Reviewed-by: Dennis Dalessandro
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/uverbs_cmd.c | 127 ++++++++++++++---------------
 1 file changed, 51 insertions(+), 76 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 59696f568f53..8e9fea03dec4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1820,6 +1820,28 @@ err_put:
 	return ret;
 }

+static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
+				   struct rdma_ah_attr *rdma_attr)
+{
+	const struct ib_global_route *grh;
+
+	uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
+	uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
+	uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
+	uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
+	uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
+				   IB_AH_GRH);
+	if (uverb_attr->is_global) {
+		grh = rdma_ah_read_grh(rdma_attr);
+		memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
+		uverb_attr->flow_label = grh->flow_label;
+		uverb_attr->sgid_index = grh->sgid_index;
+		uverb_attr->hop_limit = grh->hop_limit;
+		uverb_attr->traffic_class = grh->traffic_class;
+	}
+	uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
+}
+
 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   struct ib_device *ib_dev,
			   const char __user *buf, int in_len,
@@ -1830,7 +1852,6 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	struct ib_qp *qp;
 	struct ib_qp_attr *attr;
 	struct ib_qp_init_attr *init_attr;
-	const struct ib_global_route *grh;
 	int ret;

 	if (copy_from_user(&cmd, buf, sizeof cmd))
@@ -1880,39 +1901,8 @@ ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
 	resp.alt_port_num = attr->alt_port_num;
 	resp.alt_timeout = attr->alt_timeout;

-	resp.dest.dlid = rdma_ah_get_dlid(&attr->ah_attr);
-	resp.dest.sl = rdma_ah_get_sl(&attr->ah_attr);
-	resp.dest.src_path_bits = rdma_ah_get_path_bits(&attr->ah_attr);
-	resp.dest.static_rate = rdma_ah_get_static_rate(&attr->ah_attr);
-	resp.dest.is_global = !!(rdma_ah_get_ah_flags(&attr->ah_attr) &
-				 IB_AH_GRH);
-	if (resp.dest.is_global) {
-		grh = rdma_ah_read_grh(&attr->ah_attr);
-		memcpy(resp.dest.dgid, grh->dgid.raw, 16);
-		resp.dest.flow_label = grh->flow_label;
-		resp.dest.sgid_index = grh->sgid_index;
-		resp.dest.hop_limit = grh->hop_limit;
-		resp.dest.traffic_class = grh->traffic_class;
-	}
-	resp.dest.port_num = rdma_ah_get_port_num(&attr->ah_attr);
-
-	resp.alt_dest.dlid = rdma_ah_get_dlid(&attr->alt_ah_attr);
-	resp.alt_dest.sl = rdma_ah_get_sl(&attr->alt_ah_attr);
-	resp.alt_dest.src_path_bits = rdma_ah_get_path_bits(&attr->alt_ah_attr);
-	resp.alt_dest.static_rate
-		= rdma_ah_get_static_rate(&attr->alt_ah_attr);
-	resp.alt_dest.is_global
-		= !!(rdma_ah_get_ah_flags(&attr->alt_ah_attr) &
-		     IB_AH_GRH);
-	if (resp.alt_dest.is_global) {
-		grh = rdma_ah_read_grh(&attr->alt_ah_attr);
-		memcpy(resp.alt_dest.dgid, grh->dgid.raw, 16);
-		resp.alt_dest.flow_label = grh->flow_label;
-		resp.alt_dest.sgid_index = grh->sgid_index;
-		resp.alt_dest.hop_limit = grh->hop_limit;
-		resp.alt_dest.traffic_class = grh->traffic_class;
-	}
-	resp.alt_dest.port_num = rdma_ah_get_port_num(&attr->alt_ah_attr);
+	copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
+	copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);

 	resp.max_send_wr = init_attr->cap.max_send_wr;
 	resp.max_recv_wr = init_attr->cap.max_recv_wr;
@@ -1946,6 +1936,29 @@ static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
 	}
 }

+static void copy_ah_attr_from_uverbs(struct ib_device *dev,
+				     struct rdma_ah_attr *rdma_attr,
+				     struct ib_uverbs_qp_dest *uverb_attr)
+{
+	rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
+	if (uverb_attr->is_global) {
+		rdma_ah_set_grh(rdma_attr, NULL,
+				uverb_attr->flow_label,
+				uverb_attr->sgid_index,
+				uverb_attr->hop_limit,
+				uverb_attr->traffic_class);
+		rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
+	} else {
+		rdma_ah_set_ah_flags(rdma_attr, 0);
+	}
+	rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
+	rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
+	rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
+	rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
+	rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
+	rdma_ah_set_make_grd(rdma_attr, false);
+}
+
 static int modify_qp(struct ib_uverbs_file *file,
		     struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
 {
@@ -1993,50 +2006,12 @@ static int modify_qp(struct ib_uverbs_file *file,
 	attr->rate_limit = cmd->rate_limit;

 	if (cmd->base.attr_mask & IB_QP_AV)
-		attr->ah_attr.type = rdma_ah_find_type(qp->device,
-						       cmd->base.dest.port_num);
-	if (cmd->base.dest.is_global) {
-		rdma_ah_set_grh(&attr->ah_attr, NULL,
-				cmd->base.dest.flow_label,
-				cmd->base.dest.sgid_index,
-				cmd->base.dest.hop_limit,
-				cmd->base.dest.traffic_class);
-		rdma_ah_set_dgid_raw(&attr->ah_attr, cmd->base.dest.dgid);
-	} else {
-		rdma_ah_set_ah_flags(&attr->ah_attr, 0);
-	}
-	rdma_ah_set_dlid(&attr->ah_attr, cmd->base.dest.dlid);
-	rdma_ah_set_sl(&attr->ah_attr, cmd->base.dest.sl);
-	rdma_ah_set_path_bits(&attr->ah_attr, cmd->base.dest.src_path_bits);
-	rdma_ah_set_static_rate(&attr->ah_attr, cmd->base.dest.static_rate);
-	rdma_ah_set_port_num(&attr->ah_attr,
-			     cmd->base.dest.port_num);
-	rdma_ah_set_make_grd(&attr->ah_attr, false);
+		copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
+					 &cmd->base.dest);

 	if (cmd->base.attr_mask & IB_QP_ALT_PATH)
-		attr->alt_ah_attr.type =
-			rdma_ah_find_type(qp->device, cmd->base.dest.port_num);
-	if (cmd->base.alt_dest.is_global) {
-		rdma_ah_set_grh(&attr->alt_ah_attr, NULL,
-				cmd->base.alt_dest.flow_label,
-				cmd->base.alt_dest.sgid_index,
-				cmd->base.alt_dest.hop_limit,
-				cmd->base.alt_dest.traffic_class);
-		rdma_ah_set_dgid_raw(&attr->alt_ah_attr,
-				     cmd->base.alt_dest.dgid);
-	} else {
-		rdma_ah_set_ah_flags(&attr->alt_ah_attr, 0);
-	}
-
-	rdma_ah_set_dlid(&attr->alt_ah_attr, cmd->base.alt_dest.dlid);
-	rdma_ah_set_sl(&attr->alt_ah_attr, cmd->base.alt_dest.sl);
-	rdma_ah_set_path_bits(&attr->alt_ah_attr,
-			      cmd->base.alt_dest.src_path_bits);
-	rdma_ah_set_static_rate(&attr->alt_ah_attr,
-				cmd->base.alt_dest.static_rate);
-	rdma_ah_set_port_num(&attr->alt_ah_attr,
-			     cmd->base.alt_dest.port_num);
-	rdma_ah_set_make_grd(&attr->alt_ah_attr, false);
+		copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
+					 &cmd->base.alt_dest);

 	ret = ib_modify_qp_with_udata(qp, attr,
				      modify_qp_mask(qp->qp_type,
--
cgit v1.2.3


From fba02e6cb775078bc0bdf207197e0a44b1f85aca Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:34 +0300
Subject: RDMA/mlx4: Don't use uninitialized variable

Avoid the use of an uninitialized variable.
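
A standalone sketch of the bug class (editor's illustration; do_step()
is a hypothetical stand-in, not the driver code): if every assignment
to err happens inside the loop or a conditional branch, a path that
skips them returns stack garbage.

#include <stdio.h>

static int do_step(int i) { return i >= 0 ? 0 : -1; }

static int bringup(int n)
{
	int err = 0;	/* the fix: a defined value on every path */
	int i;

	for (i = 0; i < n; i++) {
		err = do_step(i);
		if (err)
			break;
	}
	return err;	/* without the initializer, n == 0 returns garbage */
}

int main(void)
{
	printf("err = %d\n", bringup(0));
	return 0;
}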
Fixes: 3078f5f1bd8b ("IB/mlx4: Add support for RSS QP")
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/qp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b40d50c643e1..f7ee1792ba6d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2027,8 +2027,8 @@ static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
  */
 static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
 {
+	int err = 0;
 	int i;
-	int err;

 	for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
 		struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
--
cgit v1.2.3


From dcc9881e6767559c04faf15804ac145a2ea026cb Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:36 +0300
Subject: RDMA/(core, ulp): Convert register/unregister event handler to be void

The functions ib_register_event_handler() and
ib_unregister_event_handler() always return success and cannot fail.
Let's convert those functions to be void, remove the redundant checks
and clean up the many goto statements.

Signed-off-by: Leon Romanovsky
Reviewed-by: Dennis Dalessandro
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/cache.c                 | 23 ++++++++---------------
 drivers/infiniband/core/device.c                |  8 ++------
 drivers/infiniband/core/sa_query.c              |  3 +--
 drivers/infiniband/core/uverbs_main.c           | 13 +------------
 drivers/infiniband/ulp/ipoib/ipoib_main.c       | 10 +---------
 drivers/infiniband/ulp/iser/iser_verbs.c        |  6 ++----
 drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c |  7 +------
 drivers/infiniband/ulp/srpt/ib_srpt.c           |  5 ++---
 8 files changed, 18 insertions(+), 57 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index efc94304dee3..77515638c55c 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -1199,30 +1199,23 @@ int ib_cache_setup_one(struct ib_device *device)
 	device->cache.ports = kzalloc(sizeof(*device->cache.ports) *
				      (rdma_end_port(device) -
				       rdma_start_port(device) + 1),
				      GFP_KERNEL);
-	if (!device->cache.ports) {
-		err = -ENOMEM;
-		goto out;
-	}
+	if (!device->cache.ports)
+		return -ENOMEM;

 	err = gid_table_setup_one(device);
-	if (err)
-		goto out;
+	if (err) {
+		kfree(device->cache.ports);
+		device->cache.ports = NULL;
+		return err;
+	}

 	for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
 		ib_cache_update(device, p + rdma_start_port(device), true);

 	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
-	err = ib_register_event_handler(&device->cache.event_handler);
-	if (err)
-		goto err;
-
+	ib_register_event_handler(&device->cache.event_handler);
 	return 0;
-
-err:
-	gid_table_cleanup_one(device);
-out:
-	return err;
 }

 void ib_cache_release_one(struct ib_device *device)
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index fc6be1175183..ec4786777447 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -747,7 +747,7 @@ EXPORT_SYMBOL(ib_set_client_data);
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback may occur in interrupt context.
 */
-int ib_register_event_handler (struct ib_event_handler *event_handler)
+void ib_register_event_handler(struct ib_event_handler *event_handler)
 {
 	unsigned long flags;

@@ -755,8 +755,6 @@ int ib_register_event_handler (struct ib_event_handler *event_handler)
 	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
-	return 0;
 }
 EXPORT_SYMBOL(ib_register_event_handler);

@@ -767,15 +765,13 @@ EXPORT_SYMBOL(ib_register_event_handler);
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
-int ib_unregister_event_handler(struct ib_event_handler *event_handler)
+void ib_unregister_event_handler(struct ib_event_handler *event_handler)
 {
 	unsigned long flags;

 	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
 	list_del(&event_handler->list);
 	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
-
-	return 0;
 }
 EXPORT_SYMBOL(ib_unregister_event_handler);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 0179b21bad34..ab5e1024fea9 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -2417,8 +2417,7 @@ static void ib_sa_add_one(struct ib_device *device)
 	 */
 	INIT_IB_EVENT_HANDLER(&sa_dev->event_handler, device, ib_sa_event);
-	if (ib_register_event_handler(&sa_dev->event_handler))
-		goto err;
+	ib_register_event_handler(&sa_dev->event_handler);

 	for (i = 0; i <= e - s; ++i) {
 		if (rdma_cap_ib_sa(device, i + 1))
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5e530d2bee44..defeda33e27f 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -595,7 +595,6 @@ struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file
 	struct ib_uverbs_async_event_file *ev_file;
 	struct file *filp;
-	int ret;

 	ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
 	if (!ev_file)
@@ -621,21 +620,11 @@ struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file
 	INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
			      ib_dev,
			      ib_uverbs_event_handler);
-	ret = ib_register_event_handler(&uverbs_file->event_handler);
-	if (ret)
-		goto err_put_file;
-
+	ib_register_event_handler(&uverbs_file->event_handler);
 	/* At that point async file stuff was fully set */

 	return filp;

-err_put_file:
-	fput(filp);
-	kref_put(&uverbs_file->async_file->ref,
-		 ib_uverbs_release_async_event_file);
-	uverbs_file->async_file = NULL;
-	return ERR_PTR(ret);
-
 err_put_refs:
 	kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
 	kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ee9f5d281b37..344e8d3d47bd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2227,13 +2227,7 @@ static struct net_device *ipoib_add_port(const char *format,
 	INIT_IB_EVENT_HANDLER(&priv->event_handler,
			      priv->ca, ipoib_event);
-	result = ib_register_event_handler(&priv->event_handler);
-	if (result < 0) {
-		printk(KERN_WARNING "%s: ib_register_event_handler failed for "
-		       "port %d (ret = %d)\n",
-		       hca->name, port, result);
-		goto event_failed;
-	}
+	ib_register_event_handler(&priv->event_handler);

 	result = register_netdev(priv->dev);
 	if (result) {
@@ -2266,8 +2260,6 @@ register_failed:
 	set_bit(IPOIB_STOP_NEIGH_GC, &priv->flags);
 	cancel_delayed_work(&priv->neigh_reap_task);
 	flush_workqueue(priv->wq);
-
-event_failed:
 	ipoib_dev_cleanup(priv->dev);

 device_init_failed:
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 26a004e97ae0..55a73b0ed4c6 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -106,9 +106,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
 	INIT_IB_EVENT_HANDLER(&device->event_handler, ib_dev,
			      iser_event_handler);
-	if (ib_register_event_handler(&device->event_handler))
-		goto cq_err;
-
+	ib_register_event_handler(&device->event_handler);
 	return 0;

 cq_err:
@@ -141,7 +139,7 @@ static void iser_free_device_ib_res(struct iser_device *device)
 		comp->cq = NULL;
 	}

-	(void)ib_unregister_event_handler(&device->event_handler);
+	ib_unregister_event_handler(&device->event_handler);
 	ib_dealloc_pd(device->pd);

 	kfree(device->comps);
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index 57b862b94dca..21f0b481edcc 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -954,12 +954,7 @@ static int vema_register(struct opa_vnic_ctrl_port *cport)
 		INIT_IB_EVENT_HANDLER(&port->event_handler,
				      cport->ibdev, opa_vnic_event);
-		ret = ib_register_event_handler(&port->event_handler);
-		if (ret) {
-			c_err("port %d: event handler register failed\n", i);
-			vema_unregister(cport);
-			return ret;
-		}
+		ib_register_event_handler(&port->event_handler);

 		idr_init(&port->vport_idr);
 		mutex_init(&port->lock);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 402275be0931..9e8e9220f816 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2238,7 +2238,7 @@ static int srpt_write_pending(struct se_cmd *se_cmd)
			cqe, first_wr);
 		cqe = NULL;
 	}
-	
+
 	ret = ib_post_send(ch->qp, first_wr, &bad_wr);
 	if (ret) {
 		pr_err("%s: ib_post_send() returned %d for %d (avail: %d)\n",
@@ -2530,8 +2530,7 @@ static void srpt_add_one(struct ib_device *device)
 	INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
			      srpt_event_handler);
-	if (ib_register_event_handler(&sdev->event_handler))
-		goto err_cm;
+	ib_register_event_handler(&sdev->event_handler);

 	sdev->ioctx_ring = (struct srpt_recv_ioctx **)
		srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
--
cgit v1.2.3


From cdc596d89e39521cf412209a19b8baeb8d788cdc Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:38 +0300
Subject: RDMA/core: Delete BUG() from unreachable flow

Remove the call to BUG() for the case of a wrong node_type. This flow
is unreachable, because node types are supplied from a specific enum.
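
The shape of the change, as a self-contained sketch (editor's
illustration with stand-in enums, not the RDMA core types): once the
remaining cases all map to one value, the unreachable default case and
its BUG() can be dropped.

enum node_type { NODE_IB_CA, NODE_IB_SWITCH, NODE_IB_ROUTER,
		 NODE_RNIC, NODE_USNIC, NODE_USNIC_UDP };
enum transport { TRANSPORT_IB, TRANSPORT_IWARP,
		 TRANSPORT_USNIC, TRANSPORT_USNIC_UDP };

/* Every node_type not matched below is an IB node, so no default
 * case is needed and nothing can ever reach a BUG(). */
static enum transport node_get_transport(enum node_type t)
{
	if (t == NODE_USNIC)
		return TRANSPORT_USNIC;
	if (t == NODE_USNIC_UDP)
		return TRANSPORT_USNIC_UDP;
	if (t == NODE_RNIC)
		return TRANSPORT_IWARP;
	return TRANSPORT_IB;
}

int main(void)
{
	return node_get_transport(NODE_IB_CA) == TRANSPORT_IB ? 0 : 1;
}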
Signed-off-by: Leon Romanovsky
Reviewed-by: Dennis Dalessandro
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/verbs.c | 20 +++++++-------------
 1 file changed, 7 insertions(+), 13 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 6a7cbc1540aa..4c1485e01c83 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -180,21 +180,15 @@ EXPORT_SYMBOL(ib_rate_to_mbps);
 __attribute_const__ enum rdma_transport_type
 rdma_node_get_transport(enum rdma_node_type node_type)
 {
-	switch (node_type) {
-	case RDMA_NODE_IB_CA:
-	case RDMA_NODE_IB_SWITCH:
-	case RDMA_NODE_IB_ROUTER:
-		return RDMA_TRANSPORT_IB;
-	case RDMA_NODE_RNIC:
-		return RDMA_TRANSPORT_IWARP;
-	case RDMA_NODE_USNIC:
+
+	if (node_type == RDMA_NODE_USNIC)
 		return RDMA_TRANSPORT_USNIC;
-	case RDMA_NODE_USNIC_UDP:
+	if (node_type == RDMA_NODE_USNIC_UDP)
 		return RDMA_TRANSPORT_USNIC_UDP;
-	default:
-		BUG();
-		return 0;
-	}
+	if (node_type == RDMA_NODE_RNIC)
+		return RDMA_TRANSPORT_IWARP;
+
+	return RDMA_TRANSPORT_IB;
 }
 EXPORT_SYMBOL(rdma_node_get_transport);
--
cgit v1.2.3


From 82901e3eb8f72326b06ac8c7285462d90cd3a56c Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:39 +0300
Subject: RDMA/core: Refactor get link layer wrapper

The return values from rdma_node_get_transport() are strict and
IB_LINK_LAYER_UNSPECIFIED is unreachable in this flow.

Signed-off-by: Leon Romanovsky
Reviewed-by: Dennis Dalessandro
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/verbs.c | 14 +++++---------
 1 file changed, 5 insertions(+), 9 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 4c1485e01c83..b29d0ff94463 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -194,19 +194,15 @@ EXPORT_SYMBOL(rdma_node_get_transport);
 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device,
					      u8 port_num)
 {
+	enum rdma_transport_type lt;
 	if (device->get_link_layer)
 		return device->get_link_layer(device, port_num);

-	switch (rdma_node_get_transport(device->node_type)) {
-	case RDMA_TRANSPORT_IB:
+	lt = rdma_node_get_transport(device->node_type);
+	if (lt == RDMA_TRANSPORT_IB)
 		return IB_LINK_LAYER_INFINIBAND;
-	case RDMA_TRANSPORT_IWARP:
-	case RDMA_TRANSPORT_USNIC:
-	case RDMA_TRANSPORT_USNIC_UDP:
-		return IB_LINK_LAYER_ETHERNET;
-	default:
-		return IB_LINK_LAYER_UNSPECIFIED;
-	}
+
+	return IB_LINK_LAYER_ETHERNET;
 }
 EXPORT_SYMBOL(rdma_port_get_link_layer);
--
cgit v1.2.3


From 31f97bad06446ca2d929a363a4b5cd494f276ba5 Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:40 +0300
Subject: RDMA/mlx4: Remove gfp_mask argument from acquire_group call

All callers of acquire_group() pass the same gfp_mask to it, so it is
safe to remove it.
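
The refactoring pattern in miniature (editor's sketch with invented
names): when every caller passes the same constant argument, fold it
into the callee.

#include <stdlib.h>

struct group { int id; };

/* Before: acquire_group(int id, int flags), with every caller passing
 * the same flags value. After: the constant moves into the callee. */
static struct group *acquire_group(int id)
{
	struct group *g = calloc(1, sizeof(*g));	/* fixed allocation mode */

	if (g)
		g->id = id;
	return g;
}

int main(void)
{
	struct group *g = acquire_group(42);

	free(g);
	return 0;
}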
Signed-off-by: Leon Romanovsky
Reviewed-by: Dennis Dalessandro
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/mcg.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index b73f89700ef9..70eb9f917303 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -808,8 +808,7 @@ static ssize_t sysfs_show_group(struct device *dev,
		struct device_attribute *attr, char *buf);

 static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
-					 union ib_gid *mgid, int create,
-					 gfp_t gfp_mask)
+					 union ib_gid *mgid, int create)
 {
 	struct mcast_group *group, *cur_group;
 	int is_mgid0;
@@ -825,7 +824,7 @@ static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
 	if (!create)
 		return ERR_PTR(-ENOENT);

-	group = kzalloc(sizeof *group, gfp_mask);
+	group = kzalloc(sizeof(*group), GFP_KERNEL);
 	if (!group)
 		return ERR_PTR(-ENOMEM);

@@ -892,7 +891,7 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
 	case IB_MGMT_METHOD_GET_RESP:
 	case IB_SA_METHOD_DELETE_RESP:
 		mutex_lock(&ctx->mcg_table_lock);
-		group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
+		group = acquire_group(ctx, &rec->mgid, 0);
 		mutex_unlock(&ctx->mcg_table_lock);
 		if (IS_ERR(group)) {
 			if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
@@ -954,7 +953,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
 	req->sa_mad = *sa_mad;

 	mutex_lock(&ctx->mcg_table_lock);
-	group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
+	group = acquire_group(ctx, &rec->mgid, may_create);
 	mutex_unlock(&ctx->mcg_table_lock);
 	if (IS_ERR(group)) {
 		kfree(req);
--
cgit v1.2.3


From 5d50f400e56fbc7a14ef3f8d42ba47710e455881 Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:41 +0300
Subject: RDMA/usnic: Fix remove address space warning

The sparse tool complains with the following error:

drivers/infiniband/hw/usnic/usnic_ib_main.c:445:16: warning: cast removes address space of expression

Fix it by casting the correct field, and convert the helper function
that sets the ifaddr to return void, because no caller is interested
in the returned value.
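
A userspace sketch of the resulting "void setter" shape (editor's
illustration; struct and function names are simplified stand-ins, and
pthread locking stands in for the kernel spinlock):

#include <pthread.h>
#include <stdio.h>

struct fwd_dev {
	pthread_mutex_t lock;
	unsigned int inaddr;
};

/* Returns void: the old int status was never consumed by any caller. */
static void fwd_add_ipaddr(struct fwd_dev *d, unsigned int inaddr)
{
	pthread_mutex_lock(&d->lock);
	if (!d->inaddr)
		d->inaddr = inaddr;	/* first setter wins; later calls no-op */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct fwd_dev dev = { PTHREAD_MUTEX_INITIALIZER, 0 };

	fwd_add_ipaddr(&dev, 0x0a000001);
	fwd_add_ipaddr(&dev, 0x0a000002);	/* ignored */
	printf("inaddr = 0x%x\n", dev.inaddr);
	return 0;
}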
Fixes: c7845bcafe4d ("IB/usnic: Add UDP support in u*verbs.c, u*main.c and u*util.h")
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/usnic/usnic_fwd.c     | 12 ++----------
 drivers/infiniband/hw/usnic/usnic_fwd.h     |  2 +-
 drivers/infiniband/hw/usnic/usnic_ib_main.c | 10 ++++++----
 3 files changed, 9 insertions(+), 15 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index 3c37dd59c04e..995a26b65156 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -110,20 +110,12 @@ void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
 	spin_unlock(&ufdev->lock);
 }

-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
 {
-	int status;
-
 	spin_lock(&ufdev->lock);
-	if (ufdev->inaddr == 0) {
+	if (!ufdev->inaddr)
 		ufdev->inaddr = inaddr;
-		status = 0;
-	} else {
-		status = -EFAULT;
-	}
 	spin_unlock(&ufdev->lock);
-
-	return status;
 }

 void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index b2ac22be0731..0b2cc4e79707 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -75,7 +75,7 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
 void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);

 void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
 void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
 void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
 void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index e86700f994cb..f45e99a938e0 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -351,7 +351,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
 {
 	struct usnic_ib_dev *us_ibdev;
 	union ib_gid gid;
-	struct in_ifaddr *in;
+	struct in_device *ind;
 	struct net_device *netdev;

 	usnic_dbg("\n");
@@ -441,9 +441,11 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
 	if (netif_carrier_ok(us_ibdev->netdev))
 		usnic_fwd_carrier_up(us_ibdev->ufdev);

-	in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
-	if (in != NULL)
-		usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
+	ind = in_dev_get(netdev);
+	if (ind->ifa_list)
+		usnic_fwd_add_ipaddr(us_ibdev->ufdev,
+				     ind->ifa_list->ifa_address);
+	in_dev_put(ind);

 	usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
			    us_ibdev->ufdev->inaddr, &gid.raw[0]);
--
cgit v1.2.3


From 623248955d985b86cd778c63efe6a7ad03a018c4 Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:42 +0300
Subject: RDMA/mthca: Make explicit conversion to 64bit value

The "lg" variable is declared as int, so everywhere it is used as a
shift operand the result is an int too. This produces the following
smatch warning:

drivers/infiniband/hw/mthca/mthca_cmd.c:701 mthca_map_cmd() warn: should '1 << lg' be a 64 bit type?

Simply declaring the "1" as "1ULL" fixes the issue.
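
A standalone demonstration of why the width of the shifted constant
matters (editor's sketch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int lg = 33;
	uint64_t virt = 0;

	/* "1 << lg" is evaluated in 32-bit int arithmetic; for lg >= 32
	 * the shift is undefined, so the increment below would be garbage:
	 *
	 *	virt += 1 << lg;
	 *
	 * Promoting the constant makes the whole shift 64 bits wide: */
	virt += 1ULL << lg;
	printf("virt = 0x%llx\n", (unsigned long long)virt);
	return 0;
}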
Signed-off-by: Leon Romanovsky
Reviewed-by: Bart Van Assche
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mthca/mthca_cmd.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index a1900d2a371b..419a2a20c047 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -698,7 +698,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
 		for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
 			if (virt != -1) {
				pages[nent * 2] = cpu_to_be64(virt);
-				virt += 1 << lg;
+				virt += 1ULL << lg;
 			}

			pages[nent * 2 + 1] =
--
cgit v1.2.3


From faa9141c2268ccbd469bd876ba97e98b38f50fae Mon Sep 17 00:00:00 2001
From: Talat Batheesh
Date: Thu, 17 Aug 2017 15:50:43 +0300
Subject: IB/mlx4: Fix some spelling mistakes

Fix spelling mistakes in remarks:
 "retrun" -> "return"
 "cancell" -> "cancel"

Signed-off-by: Talat Batheesh
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/alias_GUID.c | 2 +-
 drivers/infiniband/hw/mlx4/cq.c         | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0e4f60cfc59d..155b4dfc0ae8 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -781,7 +781,7 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
 	spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
 	spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
 	if (!dev->sriov.is_going_down) {
-		/* If there is pending one should cancell then run, otherwise
+		/* If there is pending one should cancel then run, otherwise
		 * won't run till previous one is ended as same work
		 * struct is used.
		 */
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 95382faa7ad1..cab796341697 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -637,7 +637,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
 	struct mlx4_ib_qp *qp;

 	*npolled = 0;
-	/* Find uncompleted WQEs belonging to that cq and retrun
+	/* Find uncompleted WQEs belonging to that cq and return
	 * simulated FLUSH_ERR completions
	 */
 	list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
--
cgit v1.2.3


From 4edf8d5caf52968f3ebb58140835cfa1cfd72a2d Mon Sep 17 00:00:00 2001
From: Talat Batheesh
Date: Thu, 17 Aug 2017 15:50:44 +0300
Subject: IB/mlx5: Fix some spelling mistakes

Fix spelling mistakes in remarks:
 "retrun" -> "return"
 "Decalring" -> "Declaring"

Signed-off-by: Talat Batheesh
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx5/cq.c  | 2 +-
 drivers/infiniband/hw/mlx5/mad.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index c155df465c44..2aa53f427685 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -499,7 +499,7 @@ static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
 	struct mlx5_ib_qp *qp;

 	*npolled = 0;
-	/* Find uncompleted WQEs belonging to that cq and retrun mmics ones */
+	/* Find uncompleted WQEs belonging to that cq and return mmics ones */
 	list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
 		sw_send_comp(qp, num_entries, wc + *npolled, npolled);
 		if (*npolled >= num_entries)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 18cfe5bf0fa3..1003b0133a49 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -204,7 +204,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
 	int err;
 	void *out_cnt;

-	/* Decalring support of extended counters */
+	/* Declaring support of extended counters */
 	if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
 		struct ib_class_port_info cpi = {};

--
cgit v1.2.3


From 4a5fd5d2965cb889f30ad94a3bfd83da70aa2c9c Mon Sep 17 00:00:00 2001
From: Maor Gottlieb
Date: Thu, 17 Aug 2017 15:50:45 +0300
Subject: IB/mlx5: Add necessary delay drop assignment

Assign the statistics and configuration structure pointer on success.

Fixes: fe248c3a5837 ('IB/mlx5: Add delay drop configuration and statistics')
Signed-off-by: Maor Gottlieb
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx5/main.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 55045e204518..1a67668f092f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3793,6 +3793,8 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
 	if (!dbg->timeout_debugfs)
 		goto out_debugfs;

+	dev->delay_drop.dbg = dbg;
+
 	return 0;

 out_debugfs:
--
cgit v1.2.3


From c3f1ee292f23cf49b43c0e9cf8c52bec821b6649 Mon Sep 17 00:00:00 2001
From: Guy Levi
Date: Thu, 17 Aug 2017 15:50:46 +0300
Subject: IB/mlx4: Fix RSS QP type in creation verb

The mlx4 RSS support was designed to use the MLX4_IB_QPT_RAW_PACKET QP
type, so set that type (rather than MLX4_IB_QPT_RAW_ETHERTYPE) when
creating an RSS QP.

Fixes: 3078f5f1bd8b ("IB/mlx4: Add support for RSS QP")
Signed-off-by: Guy Levi
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/qp.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index f7ee1792ba6d..0d2923c2225f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -748,7 +748,7 @@ static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
 	INIT_LIST_HEAD(&qp->gid_list);
 	INIT_LIST_HEAD(&qp->steering_rules);

-	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_ETHERTYPE;
+	qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
 	qp->state = IB_QPS_RESET;

 	/* Set dummy send resources to be compatible with HV and PRM */
--
cgit v1.2.3


From 078b3573030346df0cdc46d798c0f434dc53c2cc Mon Sep 17 00:00:00 2001
From: Guy Levi
Date: Thu, 17 Aug 2017 15:50:47 +0300
Subject: IB/mlx4: Fix struct mlx4_ib_create_wq alignment

The mlx4 ABI defines its structures to have an alignment of 64B.

Fixes: 400b1ebcfe31 ("IB/mlx4: Add support for WQ related verbs")
Signed-off-by: Guy Levi
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/qp.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 0d2923c2225f..c3958fcfed75 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1046,9 +1046,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}

 		if (src == MLX4_IB_RWQ_SRC) {
-			if (ucmd.wq.comp_mask || ucmd.wq.reserved1 ||
-			    ucmd.wq.reserved[0] || ucmd.wq.reserved[1] ||
-			    ucmd.wq.reserved[2]) {
+			if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
+			    ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
				pr_debug("user command isn't supported\n");
				err = -EOPNOTSUPP;
				goto err;
@@ -4146,8 +4145,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
 	if (!(udata && pd->uobject))
 		return ERR_PTR(-EINVAL);

-	required_cmd_sz = offsetof(typeof(ucmd), reserved) +
-			  sizeof(ucmd.reserved);
+	required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
+			  sizeof(ucmd.comp_mask);
 	if (udata->inlen < required_cmd_sz) {
 		pr_debug("invalid inlen\n");
 		return ERR_PTR(-EINVAL);
--
cgit v1.2.3


From f9bfea992e7d7557c40a58e8536ad8c6f177de25 Mon Sep 17 00:00:00 2001
From: Guy Levi
Date: Thu, 17 Aug 2017 15:50:49 +0300
Subject: IB/mlx4: Check that reserved fields in mlx4_ib_create_qp_rss are zero

According to mlx4 convention, the command should fail if there is a
non-zero value in user data that is expected to be zero.
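
A userspace sketch of the validation (editor's illustration;
any_byte_differs() is a stand-in for the kernel's memchr_inv(), and the
struct is reduced to the reserved array):

#include <stdio.h>
#include <stddef.h>

/* Returns nonzero if any byte in buf differs from c, like memchr_inv()
 * detecting a non-zero byte in a reserved field. */
static int any_byte_differs(const void *buf, int c, size_t n)
{
	const unsigned char *p = buf;
	size_t i;

	for (i = 0; i < n; i++)
		if (p[i] != (unsigned char)c)
			return 1;
	return 0;
}

struct create_qp_rss { unsigned char reserved[7]; };

int main(void)
{
	struct create_qp_rss ucmd = { { 0, 0, 1, 0, 0, 0, 0 } };

	if (any_byte_differs(ucmd.reserved, 0, sizeof(ucmd.reserved)))
		fprintf(stderr, "EOPNOTSUPP: reserved fields must be zero\n");
	return 0;
}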
Fixes: 3078f5f1bd8b ("IB/mlx4: Add support for RSS QP")
Signed-off-by: Guy Levi
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/qp.c | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index c3958fcfed75..1ac148ddb250 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -812,6 +812,9 @@ static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
 		return ERR_PTR(-EFAULT);
 	}

+	if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
+		return ERR_PTR(-EOPNOTSUPP);
+
 	if (ucmd.comp_mask || ucmd.reserved1)
 		return ERR_PTR(-EOPNOTSUPP);

--
cgit v1.2.3


From 69956d83267e29e38cda7d506c4085932789fef2 Mon Sep 17 00:00:00 2001
From: Erez Shitrit
Date: Thu, 17 Aug 2017 15:50:50 +0300
Subject: IB/ipoib: Sync between remove_one to sysfs calls that use rtnl_lock

In order to avoid a deadlock between sysfs functions (like create/delete
child) and remove_one (both use the sysfs lock and rtnl_lock), the
driver will use a state mutex for synchronization. That fixes traces
such as the following:

schedule+0x3e/0x90
kernfs_drain+0x75/0xf0
? wait_woken+0x90/0x90
__kernfs_remove+0x12e/0x1c0
kernfs_remove+0x25/0x40
sysfs_remove_dir+0x57/0x90
kobject_del+0x22/0x60
device_del+0x195/0x230
pm_runtime_set_memalloc_noio+0xac/0xf0
netdev_unregister_kobject+0x71/0x80
rollback_registered_many+0x205/0x2f0
rollback_registered+0x31/0x40
unregister_netdevice_queue+0x58/0xb0
unregister_netdev+0x20/0x30
ipoib_remove_one+0xb7/0x240 [ib_ipoib]
ib_unregister_device+0xbc/0x1b0 [ib_core]
ib_unregister_mad_agent+0x29/0x30 [ib_core]
mlx4_ib_remove+0x67/0x280 [mlx4_ib]

INFO: task echo:24082 blocked for more than 120 seconds.
Tainted: G OE 4.1.12-37.5.1.el6uek.x86_64 #2
"echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
Call Trace:
schedule+0x3e/0x90
schedule_preempt_disabled+0xe/0x10
__mutex_lock_slowpath+0x95/0x110
? _rcu_barrier+0x177/0x220
mutex_lock+0x23/0x40
rtnl_lock+0x15/0x20
netdev_run_todo+0x81/0x1f0
rtnl_unlock+0xe/0x10
ipoib_vlan_delete+0x12f/0x1c0 [ib_ipoib]
delete_child+0x69/0x80 [ib_ipoib]
dev_attr_store+0x20/0x30
sysfs_kf_write+0x41/0x50

Signed-off-by: Erez Shitrit
Reviewed-by: Alex Vesker
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/ulp/ipoib/ipoib.h      |  1 +
 drivers/infiniband/ulp/ipoib/ipoib_cm.c   |  8 +++++++-
 drivers/infiniband/ulp/ipoib/ipoib_main.c |  5 +++++
 drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 22 ++++++++++++++++++----
 4 files changed, 31 insertions(+), 5 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 16c1af1c706e..4a5c7a07a631 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -337,6 +337,7 @@ struct ipoib_dev_priv {

 	struct rw_semaphore vlan_rwsem;
 	struct mutex mcast_mutex;
+	struct mutex sysfs_mutex;

 	struct rb_root path_tree;
 	struct list_head path_list;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index d69410c2ed97..14b62f7472b4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1506,9 +1506,14 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
 	if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags))
 		return -EPERM;

-	if (!rtnl_trylock())
+	if (!mutex_trylock(&priv->sysfs_mutex))
 		return restart_syscall();

+	if (!rtnl_trylock()) {
+		mutex_unlock(&priv->sysfs_mutex);
+		return restart_syscall();
+	}
+
 	ret = ipoib_set_mode(dev, buf);

 	/* The assumption is that the function ipoib_set_mode returned
@@ -1517,6 +1522,7 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr,
	 */
 	if (ret != -EBUSY)
 		rtnl_unlock();
+	mutex_unlock(&priv->sysfs_mutex);

 	return (!ret || ret == -EBUSY) ? count : ret;
 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 344e8d3d47bd..5f143445daa9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1878,6 +1878,7 @@ static void ipoib_build_priv(struct net_device *dev)
 	spin_lock_init(&priv->lock);
 	init_rwsem(&priv->vlan_rwsem);
 	mutex_init(&priv->mcast_mutex);
+	mutex_init(&priv->sysfs_mutex);

 	INIT_LIST_HEAD(&priv->path_list);
 	INIT_LIST_HEAD(&priv->child_intfs);
@@ -2329,7 +2330,11 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 		cancel_delayed_work(&priv->neigh_reap_task);
 		flush_workqueue(priv->wq);

+		/* Wrap rtnl_lock/unlock with mutex to protect sysfs calls */
+		mutex_lock(&priv->sysfs_mutex);
 		unregister_netdev(priv->dev);
+		mutex_unlock(&priv->sysfs_mutex);
+
 		rn->free_rdma_netdev(priv->dev);

 		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 081b33deff1b..9927cd6b7082 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,12 +133,20 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
		 ppriv->dev->name, pkey);

-	if (!rtnl_trylock())
+	if (!mutex_trylock(&ppriv->sysfs_mutex))
 		return restart_syscall();

+	if (!rtnl_trylock()) {
+		mutex_unlock(&ppriv->sysfs_mutex);
+		return restart_syscall();
+	}
+
 	priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
-	if (!priv)
+	if (!priv) {
+		rtnl_unlock();
+		mutex_unlock(&ppriv->sysfs_mutex);
 		return -ENOMEM;
+	}

 	down_write(&ppriv->vlan_rwsem);

@@ -164,8 +172,8 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)

 out:
 	up_write(&ppriv->vlan_rwsem);
-
 	rtnl_unlock();
+	mutex_unlock(&ppriv->sysfs_mutex);

 	if (result) {
 		free_netdev(priv->dev);
@@ -188,8 +196,13 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 	if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags))
 		return -EPERM;

-	if (!rtnl_trylock())
+	if (!mutex_trylock(&ppriv->sysfs_mutex))
+		return restart_syscall();
+
+	if (!rtnl_trylock()) {
+		mutex_unlock(&ppriv->sysfs_mutex);
 		return restart_syscall();
+	}

 	down_write(&ppriv->vlan_rwsem);
 	list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
@@ -208,6 +221,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 	}

 	rtnl_unlock();
+	mutex_unlock(&ppriv->sysfs_mutex);

 	if (dev) {
 		free_netdev(dev);
--
cgit v1.2.3


From fab773cb51150ea29ec041d5d001845fac026a3b Mon Sep 17 00:00:00 2001
From: Kamal Heib
Date: Thu, 17 Aug 2017 15:50:52 +0300
Subject: IB/rxe: Make rxe_counter_name static

rxe_counter_name is used in rxe_hw_counters.c only. Make it static.

Fixes: 0b1e5b99a48b ('IB/rxe: Add port protocol stats')
Signed-off-by: Kamal Heib
Reviewed-by: Yonatan Cohen
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/sw/rxe/rxe_hw_counters.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index 7ef90aad7dfd..6aeb7a165e46 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -33,7 +33,7 @@
 #include "rxe.h"
 #include "rxe_hw_counters.h"

-const char * const rxe_counter_name[] = {
+static const char * const rxe_counter_name[] = {
 	[RXE_CNT_SENT_PKTS] = "sent_pkts",
 	[RXE_CNT_RCVD_PKTS] = "rcvd_pkts",
 	[RXE_CNT_DUP_REQ] = "duplicate_request",
--
cgit v1.2.3


From 84305d71d6ca9a2e6a33d9b403639f1cdeeee846 Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:53 +0300
Subject: RDMA/mlx5: Limit scope of get vector affinity local function

mlx5_ib_get_vector_affinity() is local to the main.c file, so there is
no need for it to be declared globally visible.

Fixes: 40b24403f33e ("mlx5: support ->get_vector_affinity")
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx5/main.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 1a67668f092f..762ef6bf219e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3819,8 +3819,8 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
 }

-const struct cpumask *mlx5_ib_get_vector_affinity(struct ib_device *ibdev,
-						  int comp_vector)
+static const struct cpumask *
+mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);

--
cgit v1.2.3


From 17bf1ad2e80db7c9bdafec9c2da72389247194e9 Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:54 +0300
Subject: RDMA/mlx4: Properly annotate link layer variable

rdma_port_get_link_layer() returns an enum rdma_link_layer, so it is
better to store the return value in a properly annotated variable and
not in an int.

Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx4/qp.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 1ac148ddb250..2747abde2ea8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2725,19 +2725,17 @@ enum {
 static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			      int attr_mask, struct ib_udata *udata)
 {
+	enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
 	struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
-	int ll;
 	mutex_lock(&qp->mutex);

 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

-	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
-		ll = IB_LINK_LAYER_UNSPECIFIED;
-	} else {
+	if (cur_state != new_state || cur_state != IB_QPS_RESET) {
 		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
 	}
--
cgit v1.2.3


From 4b7ee6781f6aab3d236021af36aa4c95a089f402 Mon Sep 17 00:00:00 2001
From: Leon Romanovsky
Date: Thu, 17 Aug 2017 15:50:55 +0300
Subject: RDMA/nes: Remove zeroed parameter from port query callback

There is no need to explicitly zero parameters, because the structure
to be filled is already initialized to zeros.

Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/nes/nes_verbs.c | 5 -----
 1 file changed, 5 deletions(-)

(limited to 'drivers/infiniband')

diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index c2943e39d2f9..f0dc5f4aa177 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -481,21 +481,16 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
 	props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 	props->lid = 1;
-	props->lmc = 0;
-	props->sm_lid = 0;
-	props->sm_sl = 0;
 	if (netif_queue_stopped(netdev))
 		props->state = IB_PORT_DOWN;
 	else if (nesvnic->linkup)
 		props->state = IB_PORT_ACTIVE;
 	else
 		props->state = IB_PORT_DOWN;
-	props->phys_state = 0;
 	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
		IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
 	props->gid_tbl_len = 1;
 	props->pkey_tbl_len = 1;
-	props->qkey_viol_cntr = 0;
 	props->active_width = IB_WIDTH_4X;
 	props->active_speed = IB_SPEED_SDR;
 	props->max_msg_sz = 0x80000000;
--
cgit v1.2.3


From 96dc3fc5f1d66b20cdf839d571c7b907e08d5d00 Mon Sep 17 00:00:00 2001
From: Noa Osherovich
Date: Thu, 17 Aug 2017 15:52:28 +0300
Subject: IB/mlx5: Expose software parsing for Raw Ethernet QP

Software parsing (SWP) is a feature that can be used to instruct the
device to stop using its internal parser and to parse packets on the
transmit path according to offsets set for each packet. Through this
feature, the device allows the handling of checksum and LSO by the
hardware according to the location of the IP and TCP/UDP headers.

Enable SW parsing on the Raw Ethernet send queue by default if the
firmware supports it, and report these capabilities to user space.
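
The extensible-response pattern used here, as a compact userspace
sketch (editor's illustration: FIELD_AVAIL mirrors the driver's
field_avail() idea, and the struct names are invented): a new field is
reported, and counted in response_length, only when the caller's output
buffer is large enough to hold it.

#include <stddef.h>
#include <stdio.h>

struct resp_v1 { int base; };
struct resp_v2 { int base; int sw_parsing_caps; };

/* True when the field fits entirely within the user-supplied length. */
#define FIELD_AVAIL(type, fld, len) \
	(offsetof(type, fld) + sizeof(((type *)0)->fld) <= (len))

int main(void)
{
	size_t outlen = sizeof(struct resp_v1);	/* old userspace buffer */
	struct resp_v2 resp = { .base = 42 };
	size_t response_length = sizeof(struct resp_v1);

	if (FIELD_AVAIL(struct resp_v2, sw_parsing_caps, outlen)) {
		resp.sw_parsing_caps = 0x7;	/* skipped for old userspace */
		response_length += sizeof(resp.sw_parsing_caps);
	}
	printf("copy %zu bytes back to user\n", response_length);
	return 0;
}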
Signed-off-by: Noa Osherovich Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 21 +++++++++++++++++++++ drivers/infiniband/hw/mlx5/qp.c | 3 +++ 2 files changed, 24 insertions(+) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 762ef6bf219e..07789450ec42 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -811,6 +811,27 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (field_avail(typeof(resp), reserved, uhw->outlen)) resp.response_length += sizeof(resp.reserved); + if (field_avail(typeof(resp), sw_parsing_caps, + uhw->outlen)) { + resp.response_length += sizeof(resp.sw_parsing_caps); + if (MLX5_CAP_ETH(mdev, swp)) { + resp.sw_parsing_caps.sw_parsing_offloads |= + MLX5_IB_SW_PARSING; + + if (MLX5_CAP_ETH(mdev, swp_csum)) + resp.sw_parsing_caps.sw_parsing_offloads |= + MLX5_IB_SW_PARSING_CSUM; + + if (MLX5_CAP_ETH(mdev, swp_lso)) + resp.sw_parsing_caps.sw_parsing_offloads |= + MLX5_IB_SW_PARSING_LSO; + + if (resp.sw_parsing_caps.sw_parsing_offloads) + resp.sw_parsing_caps.supported_qpts = + BIT(IB_QPT_RAW_PACKET); + } + } + if (uhw->outlen) { err = ib_copy_to_udata(uhw, &resp, resp.response_length); diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index bc49d14e0a00..656773196f27 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1088,6 +1088,9 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd)); MLX5_SET(sqc, sqc, tis_lst_sz, 1); MLX5_SET(sqc, sqc, tis_num_0, sq->tisn); + if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && + MLX5_CAP_ETH(dev->mdev, swp)) + MLX5_SET(sqc, sqc, allow_swp, 1); wq = MLX5_ADDR_OF(sqc, sqc, wq); MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); -- cgit v1.2.3 From 8b7ff7f3b301de52924cb2cf3fed47b181893116 Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Thu, 17 Aug 2017 15:52:29 +0300 Subject: IB/mlx5: Enable UMR for MRs created with reg_create This patch is the first step in decoupling UMR usage and allocation from the MR cache. The only functional change in this patch is to enables UMR for MRs created with reg_create. This change fixes a bug where ODP memory regions that were not allocated from the MR cache did not have UMR enabled. 
Signed-off-by: Ilya Lesokhin Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/mlx5_ib.h | 2 +- drivers/infiniband/hw/mlx5/mr.c | 33 ++++++++++++++------------------- 2 files changed, 15 insertions(+), 20 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 7ac991070020..b3380e8beacf 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -503,7 +503,7 @@ struct mlx5_ib_mr { struct mlx5_shared_mr_info *smr_info; struct list_head list; int order; - int umred; + bool allocated_from_cache; int npages; struct mlx5_ib_dev *dev; u32 out[MLX5_ST_SZ_DW(create_mkey_out)]; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index a0eb2f96179a..bc87016021e3 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -48,8 +48,7 @@ enum { #define MLX5_UMR_ALIGN 2048 static int clean_mr(struct mlx5_ib_mr *mr); -static int max_umr_order(struct mlx5_ib_dev *dev); -static int use_umr(struct mlx5_ib_dev *dev, int order); +static int mr_cache_max_order(struct mlx5_ib_dev *dev); static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) @@ -184,7 +183,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num) break; } mr->order = ent->order; - mr->umred = 1; + mr->allocated_from_cache = 1; mr->dev = dev; MLX5_SET(mkc, mkc, free, 1); @@ -497,7 +496,7 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order) int i; c = order2idx(dev, order); - last_umr_cache_entry = order2idx(dev, max_umr_order(dev)); + last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev)); if (c < 0 || c > last_umr_cache_entry) { mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c); return NULL; @@ -677,12 +676,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func); queue_work(cache->wq, &ent->work); - if (i > MAX_UMR_CACHE_ENTRY) { + if (i > MR_CACHE_LAST_STD_ENTRY) { mlx5_odp_init_mr_cache_entry(ent); continue; } - if (!use_umr(dev, ent->order)) + if (ent->order > mr_cache_max_order(dev)) continue; ent->page = PAGE_SHIFT; @@ -819,18 +818,13 @@ static int get_octo_len(u64 addr, u64 len, int page_size) return (npages + 1) / 2; } -static int max_umr_order(struct mlx5_ib_dev *dev) +static int mr_cache_max_order(struct mlx5_ib_dev *dev) { if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) - return MAX_UMR_CACHE_ENTRY + 2; + return MR_CACHE_LAST_STD_ENTRY + 2; return MLX5_MAX_UMR_SHIFT; } -static int use_umr(struct mlx5_ib_dev *dev, int order) -{ - return order <= max_umr_order(dev); -} - static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, int access_flags, struct ib_umem **umem, int *npages, int *page_shift, int *ncont, @@ -1149,6 +1143,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ)); MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE)); MLX5_SET(mkc, mkc, lr, 1); + MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET64(mkc, mkc, start_addr, virt_addr); MLX5_SET64(mkc, mkc, len, length); @@ -1231,7 +1226,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, if (err < 0) return ERR_PTR(err); - if (use_umr(dev, order)) { + if (order <= mr_cache_max_order(dev)) { mr = reg_umr(pd, umem, virt_addr, length, 
ncont, page_shift, order, access_flags); if (PTR_ERR(mr) == -EAGAIN) { @@ -1355,7 +1350,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, /* * UMR can't be used - MKey needs to be replaced. */ - if (mr->umred) { + if (mr->allocated_from_cache) { err = unreg_umr(dev, mr); if (err) mlx5_ib_warn(dev, "Failed to unregister MR\n"); @@ -1373,7 +1368,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, if (IS_ERR(mr)) return PTR_ERR(mr); - mr->umred = 0; + mr->allocated_from_cache = 0; } else { /* * Send a UMR WQE @@ -1461,7 +1456,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr) static int clean_mr(struct mlx5_ib_mr *mr) { struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); - int umred = mr->umred; + int allocated_from_cache = mr->allocated_from_cache; int err; if (mr->sig) { @@ -1479,7 +1474,7 @@ static int clean_mr(struct mlx5_ib_mr *mr) mlx5_free_priv_descs(mr); - if (!umred) { + if (!allocated_from_cache) { err = destroy_mkey(dev, mr); if (err) { mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", @@ -1490,7 +1485,7 @@ static int clean_mr(struct mlx5_ib_mr *mr) mlx5_mr_cache_free(dev, mr); } - if (!umred) + if (!allocated_from_cache) kfree(mr); return 0; -- cgit v1.2.3 From ff740aefecb98da4605df1cada7904c44eaee161 Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Thu, 17 Aug 2017 15:52:30 +0300 Subject: IB/mlx5: Decouple MR allocation and population flows mlx5 compatible devices have two ways of populating the MTT table of an MKEY: using a FW command and using a UMR WQE. A UMR is much faster, so it should be used whenever possible. Unfortunately the code today uses UMR only if the MKEY was allocated from the MR cache. Fix the code to use UMR even for MKEYs that were allocated using a FW command. Signed-off-by: Ilya Lesokhin Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/mr.c | 75 ++++++++++++++++++++++++----------------- 1 file changed, 45 insertions(+), 30 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index bc87016021e3..aa6f71570b77 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -898,7 +898,8 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, return err; } -static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, +static struct mlx5_ib_mr *alloc_mr_from_cache( + struct ib_pd *pd, struct ib_umem *umem, u64 virt_addr, u64 len, int npages, int page_shift, int order, int access_flags) { @@ -930,16 +931,6 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem, mr->mmkey.size = len; mr->mmkey.pd = to_mpd(pd)->pdn; - err = mlx5_ib_update_xlt(mr, 0, npages, page_shift, - MLX5_IB_UPD_XLT_ENABLE); - - if (err) { - mlx5_mr_cache_free(dev, mr); - return ERR_PTR(err); - } - - mr->live = 1; - return mr; } @@ -1105,7 +1096,8 @@ free_xlt: static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, u64 virt_addr, u64 length, struct ib_umem *umem, int npages, - int page_shift, int access_flags) + int page_shift, int access_flags, + bool populate) { struct mlx5_ib_dev *dev = to_mdev(pd->device); struct mlx5_ib_mr *mr; @@ -1120,15 +1112,19 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, if (!mr) return ERR_PTR(-ENOMEM); - inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + - sizeof(*pas) * ((npages + 1) / 2) * 2; + mr->ibmr.pd = pd; + mr->access_flags = access_flags; + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in); + if 
(populate) + inlen += sizeof(*pas) * roundup(npages, 2); in = kvzalloc(inlen, GFP_KERNEL); if (!in) { err = -ENOMEM; goto err_1; } pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); - if (!(access_flags & IB_ACCESS_ON_DEMAND)) + if (populate && !(access_flags & IB_ACCESS_ON_DEMAND)) mlx5_ib_populate_pas(dev, umem, page_shift, pas, pg_cap ? MLX5_IB_MTT_PRESENT : 0); @@ -1137,6 +1133,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap)); mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, free, !populate); MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT); MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC)); MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE)); @@ -1153,8 +1150,10 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, get_octo_len(virt_addr, length, 1 << page_shift)); MLX5_SET(mkc, mkc, log_page_size, page_shift); MLX5_SET(mkc, mkc, qpn, 0xffffff); - MLX5_SET(create_mkey_in, in, translations_octword_actual_size, - get_octo_len(virt_addr, length, 1 << page_shift)); + if (populate) { + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, + get_octo_len(virt_addr, length, 1 << page_shift)); + } err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); if (err) { @@ -1163,9 +1162,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, } mr->mmkey.type = MLX5_MKEY_MR; mr->desc_size = sizeof(struct mlx5_mtt); - mr->umem = umem; mr->dev = dev; - mr->live = 1; kvfree(in); mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); @@ -1205,6 +1202,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, int ncont; int order; int err; + bool use_umr = true; mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n", start, virt_addr, length, access_flags); @@ -1223,27 +1221,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, err = mr_umem_get(pd, start, length, access_flags, &umem, &npages, &page_shift, &ncont, &order); - if (err < 0) + if (err < 0) return ERR_PTR(err); if (order <= mr_cache_max_order(dev)) { - mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift, - order, access_flags); + mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, + page_shift, order, access_flags); if (PTR_ERR(mr) == -EAGAIN) { mlx5_ib_dbg(dev, "cache empty for order %d", order); mr = NULL; } - } else if (access_flags & IB_ACCESS_ON_DEMAND && - !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { - err = -EINVAL; - pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB"); - goto error; + } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { + if (access_flags & IB_ACCESS_ON_DEMAND) { + err = -EINVAL; + pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB"); + goto error; + } + use_umr = false; } if (!mr) { mutex_lock(&dev->slow_path_mutex); mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, - page_shift, access_flags); + page_shift, access_flags, !use_umr); mutex_unlock(&dev->slow_path_mutex); } @@ -1261,8 +1261,22 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, update_odp_mr(mr); #endif - return &mr->ibmr; + if (use_umr) { + int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE; + + if (access_flags & IB_ACCESS_ON_DEMAND) + update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP; + err = mlx5_ib_update_xlt(mr, 0, ncont, 
page_shift, + update_xlt_flags); + if (err) { + mlx5_ib_dereg_mr(&mr->ibmr); + return ERR_PTR(err); + } + } + + mr->live = 1; + return &mr->ibmr; error: ib_umem_release(umem); return ERR_PTR(err); @@ -1363,12 +1377,13 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, return err; mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont, - page_shift, access_flags); + page_shift, access_flags, true); if (IS_ERR(mr)) return PTR_ERR(mr); mr->allocated_from_cache = 0; + mr->live = 1; } else { /* * Send a UMR WQE -- cgit v1.2.3 From 5942d8ae411775b76e5e1ab0cce57b0666516f2d Mon Sep 17 00:00:00 2001 From: Kamal Heib Date: Thu, 17 Aug 2017 15:52:31 +0300 Subject: IB/mlx5: Fix memory leak in clean_mr error path In the clean_mr() error path, 'mr' should be freed. Fixes: e126ba97dba9 ('mlx5: Add driver for Mellanox Connect-IB adapters') Signed-off-by: Kamal Heib Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/mr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index aa6f71570b77..8cf53eb13148 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1490,19 +1490,19 @@ static int clean_mr(struct mlx5_ib_mr *mr) mlx5_free_priv_descs(mr); if (!allocated_from_cache) { + u32 key = mr->mmkey.key; + err = destroy_mkey(dev, mr); + kfree(mr); if (err) { mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", - mr->mmkey.key, err); + key, err); return err; } } else { mlx5_mr_cache_free(dev, mr); } - if (!allocated_from_cache) - kfree(mr); - return 0; } -- cgit v1.2.3 From 7b4cdaae73ee833975a767cf54a3354d355b3f8d Mon Sep 17 00:00:00 2001 From: Ilya Lesokhin Date: Thu, 17 Aug 2017 15:52:32 +0300 Subject: IB/mlx5: Fix integer overflow when page_shift == 31 Fix a bug where MR registration fails when mlx5_ib_cont_pages indicates that the MR can be mapped using 2GB pages (page_shift == 31).
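To see the overflow concretely before reading the diff below: the old helper took an int page_size, so call sites computed 1 << page_shift in a signed 32-bit int, which becomes negative at page_shift == 31 and corrupts both the offset mask and the ilog2() shift. The following is a minimal standalone sketch (plain C on a 64-bit host, not the kernel source) of the fixed helper, which passes the shift itself and widens to 64 bits before any arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the fixed helper; uint64_t stands in for the kernel's u64. */
static int get_octo_len(uint64_t addr, uint64_t len, int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;	/* 2GB still fits in 64 bits */
	uint64_t offset = addr & (page_size - 1);
	/* Open-coded ALIGN(len + offset, page_size) >> page_shift. */
	uint64_t npages = (len + offset + page_size - 1) >> page_shift;

	return (npages + 1) / 2;	/* one octoword holds two 8-byte MTT entries */
}

int main(void)
{
	/* A 4GB region mapped with 2GB pages: 2 MTT entries -> 1 octoword.
	 * With the old int interface, 1 << 31 was already negative before
	 * the helper could compute anything. */
	printf("%d\n", get_octo_len(0, 4ULL << 30, 31));
	return 0;
}

Passing the shift instead of the size also lets the helper drop the ilog2() round trip, as the patch below shows.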
Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Ilya Lesokhin Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/mr.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 8cf53eb13148..0e2789d9bb4d 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -808,13 +808,14 @@ err_free: return ERR_PTR(err); } -static int get_octo_len(u64 addr, u64 len, int page_size) +static int get_octo_len(u64 addr, u64 len, int page_shift) { + u64 page_size = 1ULL << page_shift; u64 offset; int npages; offset = addr & (page_size - 1); - npages = ALIGN(len + offset, page_size) >> ilog2(page_size); + npages = ALIGN(len + offset, page_size) >> page_shift; return (npages + 1) / 2; } @@ -1147,12 +1148,12 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd, MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn); MLX5_SET(mkc, mkc, bsf_octword_size, 0); MLX5_SET(mkc, mkc, translations_octword_size, - get_octo_len(virt_addr, length, 1 << page_shift)); + get_octo_len(virt_addr, length, page_shift)); MLX5_SET(mkc, mkc, log_page_size, page_shift); MLX5_SET(mkc, mkc, qpn, 0xffffff); if (populate) { MLX5_SET(create_mkey_in, in, translations_octword_actual_size, - get_octo_len(virt_addr, length, 1 << page_shift)); + get_octo_len(virt_addr, length, page_shift)); } err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen); -- cgit v1.2.3 From a550ddfc543e250798048cf4eabe721cd85ac724 Mon Sep 17 00:00:00 2001 From: Yishai Hadas Date: Thu, 17 Aug 2017 15:52:33 +0300 Subject: IB/mlx5: Add support for multi underlay QP Set the underlay QPN as part of the flow rule when applicable. There is one root flow table in the NIC RX namespace, and all the underlay QPs steer their traffic to this flow table. To prevent a QP from receiving traffic that is not targeted at its underlay QP, we need to include the underlay QP number in the steering match. Note: when multicast traffic is sent, QPN filtering is already done by the firmware at an earlier step. Adding a QPN match to the flow table entry would be wrong, since by that time the destination QPN field holds the multicast address (all FFs) and the rule would not match. Signed-off-by: Yishai Hadas Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 49 +++++++++++++++++++++++++++++++++------ 1 file changed, 42 insertions(+), 7 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 07789450ec42..8f7e46090f9a 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -2125,7 +2125,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c, * it won't fall into the multicast flow steering table and this rule * could steal other multicast packets. */ -static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr) +static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr) { union ib_flow_spec *flow_spec; @@ -2337,10 +2337,31 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, return err ?
ERR_PTR(err) : prio; } -static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, - struct mlx5_ib_flow_prio *ft_prio, - const struct ib_flow_attr *flow_attr, - struct mlx5_flow_destination *dst) +static void set_underlay_qp(struct mlx5_ib_dev *dev, + struct mlx5_flow_spec *spec, + u32 underlay_qpn) +{ + void *misc_params_c = MLX5_ADDR_OF(fte_match_param, + spec->match_criteria, + misc_parameters); + void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + + if (underlay_qpn && + MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev, + ft_field_support.bth_dst_qp)) { + MLX5_SET(fte_match_set_misc, + misc_params_v, bth_dst_qp, underlay_qpn); + MLX5_SET(fte_match_set_misc, + misc_params_c, bth_dst_qp, 0xffffff); + } +} + +static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + const struct ib_flow_attr *flow_attr, + struct mlx5_flow_destination *dst, + u32 underlay_qpn) { struct mlx5_flow_table *ft = ft_prio->flow_table; struct mlx5_ib_flow_handler *handler; @@ -2376,6 +2397,9 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, ib_flow += ((union ib_flow_spec *)ib_flow)->size; } + if (!flow_is_multicast_only(flow_attr)) + set_underlay_qp(dev, spec, underlay_qpn); + spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria); if (is_drop) { flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; @@ -2415,6 +2439,14 @@ free: return err ? ERR_PTR(err) : handler; } +static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev, + struct mlx5_ib_flow_prio *ft_prio, + const struct ib_flow_attr *flow_attr, + struct mlx5_flow_destination *dst) +{ + return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0); +} + static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev, struct mlx5_ib_flow_prio *ft_prio, struct ib_flow_attr *flow_attr, @@ -2551,6 +2583,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, struct mlx5_ib_flow_prio *ft_prio_tx = NULL; struct mlx5_ib_flow_prio *ft_prio; int err; + int underlay_qpn; if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) return ERR_PTR(-ENOMEM); @@ -2591,8 +2624,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp, handler = create_dont_trap_rule(dev, ft_prio, flow_attr, dst); } else { - handler = create_flow_rule(dev, ft_prio, flow_attr, - dst); + underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ? + mqp->underlay_qpn : 0; + handler = _create_flow_rule(dev, ft_prio, flow_attr, + dst, underlay_qpn); } } else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT || flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) { -- cgit v1.2.3 From 795b609c8b59f8f20fa9d72bf8b4ae3b8aa5582c Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Thu, 17 Aug 2017 15:52:34 +0300 Subject: IB/mlx5: Allow posting multi packet send WQEs if hardware supports Set the field to allow posting multi packet send WQEs if the hardware supports this feature. This alone doesn't make the posted send WQEs multi packet; a send WQE is handled as multi packet only if it was prepared according to the multi packet send WQE format. User space shall use the MLX5_IB_ALLOW_MPW flag to check whether the hardware supports MPW and allows MPW in the SQ context.
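As a hedged illustration of that last point, a user-space consumer might gate its use of MPW on the reported flag, as in the sketch below; the flag name comes from this patch, but the response struct and the bit value are stand-ins, not the real uapi definitions.

#include <stdbool.h>
#include <stdint.h>

#define MLX5_IB_ALLOW_MPW	(1 << 0)	/* assumed bit value; name from the patch */

struct uhw_resp_sketch {			/* illustrative stand-in for the uapi response */
	uint32_t mlx5_ib_support_multi_pkt_send_wqes;
};

/* MPW WQEs may be posted only when the kernel reported the flag, i.e.
 * the device had the multi_pkt_send_wqe capability set. */
static bool mpw_allowed(const struct uhw_resp_sketch *resp)
{
	return resp->mlx5_ib_support_multi_pkt_send_wqes & MLX5_IB_ALLOW_MPW;
}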
Signed-off-by: Bodong Wang Reviewed-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 5 +++-- drivers/infiniband/hw/mlx5/qp.c | 2 ++ 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8f7e46090f9a..ba0a97d6f677 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -802,8 +802,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes, uhw->outlen)) { - resp.mlx5_ib_support_multi_pkt_send_wqes = - MLX5_CAP_ETH(mdev, multi_pkt_send_wqe); + if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe)) + resp.mlx5_ib_support_multi_pkt_send_wqes = + MLX5_IB_ALLOW_MPW; resp.response_length += sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); } diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 656773196f27..d6df88a78d5e 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1083,6 +1083,8 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev, sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); MLX5_SET(sqc, sqc, flush_in_error_en, 1); + if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe)) + MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1); MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index)); MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd)); -- cgit v1.2.3 From 050da902adde8faf6b1bef15ac4876ae145358f4 Mon Sep 17 00:00:00 2001 From: Bodong Wang Date: Thu, 17 Aug 2017 15:52:35 +0300 Subject: IB/mlx5: Report mlx5 enhanced multi packet WQE capability Expose the enhanced multi packet WQE capability to user space through query_device via uhw. Signed-off-by: Bodong Wang Reviewed-by: Daniel Jurgens Signed-off-by: Leon Romanovsky Signed-off-by: Doug Ledford --- drivers/infiniband/hw/mlx5/main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers/infiniband') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index ba0a97d6f677..62e6298810e7 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -805,6 +805,11 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe)) resp.mlx5_ib_support_multi_pkt_send_wqes = MLX5_IB_ALLOW_MPW; + + if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)) + resp.mlx5_ib_support_multi_pkt_send_wqes |= + MLX5_IB_SUPPORT_EMPW; + resp.response_length += sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes); } -- cgit v1.2.3
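Taken together, the two patches above turn the field into a small capability bitmask. A hedged sketch of how a consumer might decode both levels (flag names from the patches, bit values assumed, not the real uapi header):

#include <stdint.h>

#define MLX5_IB_ALLOW_MPW	(1 << 0)	/* assumed bit values; the names */
#define MLX5_IB_SUPPORT_EMPW	(1 << 1)	/* come from these two patches   */

/* caps is resp.mlx5_ib_support_multi_pkt_send_wqes from the query_device
 * uhw response. */
static const char *mpw_level(uint32_t caps)
{
	if (caps & MLX5_IB_SUPPORT_EMPW)
		return "enhanced MPW";	/* enhanced_multi_pkt_send_wqe cap was set */
	if (caps & MLX5_IB_ALLOW_MPW)
		return "legacy MPW";	/* multi_pkt_send_wqe cap was set */
	return "no MPW";
}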