From 081ea5195a11c9f1eaa8393be603b75982f91b7d Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:47 +0200 Subject: RDMA/cma: Use a helper function to enqueue resolve work items To avoid errors, with attaching ownership of work item and its cm_id refcount which is decremented in work handler, tie them up in single helper function. Also avoid code duplication. Link: https://lore.kernel.org/r/20200126142652.104803-3-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'drivers/infiniband/core/cma.c') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 72f032160c4b..8f16ebb413c2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2687,14 +2687,18 @@ static void cma_init_resolve_route_work(struct cma_work *work, work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; } -static void cma_init_resolve_addr_work(struct cma_work *work, - struct rdma_id_private *id_priv) +static void enqueue_resolve_addr_work(struct cma_work *work, + struct rdma_id_private *id_priv) { + atomic_inc(&id_priv->refcount); + work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); work->old_state = RDMA_CM_ADDR_QUERY; work->new_state = RDMA_CM_ADDR_RESOLVED; work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; + + queue_work(cma_wq, &work->work); } static int cma_resolve_ib_route(struct rdma_id_private *id_priv, @@ -3148,9 +3152,7 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); - atomic_inc(&id_priv->refcount); - cma_init_resolve_addr_work(work, id_priv); - queue_work(cma_wq, &work->work); + enqueue_resolve_addr_work(work, id_priv); return 0; err: kfree(work); @@ -3175,9 +3177,7 @@ static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); - atomic_inc(&id_priv->refcount); - cma_init_resolve_addr_work(work, id_priv); - queue_work(cma_wq, &work->work); + enqueue_resolve_addr_work(work, id_priv); return 0; err: kfree(work); -- cgit v1.2.3 From cc055dd3a71352759a6c7ecaee612eeaef93ef22 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:48 +0200 Subject: RDMA/cma: Use RDMA device port iterator Use RDMA device port iterator to avoid open coding. 
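As a quick illustration of the iterator this patch switches to, here is a hypothetical sketch (demo_walk_ports() is not part of the patch): rdma_for_each_port() from <rdma/ib_verbs.h> replaces the open-coded 1-based loop over phys_port_cnt and requires an unsigned int iterator, which is also why the patch changes the port variables from u8.

#include <linux/printk.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative only: walk every port of an ib_device with the core
 * iterator instead of "for (port = 1; port <= phys_port_cnt; ++port)".
 * The iterator hides the 1-based port numbering and insists on an
 * unsigned int loop variable.
 */
static void demo_walk_ports(struct ib_device *device)
{
	unsigned int port;

	rdma_for_each_port (device, port) {
		if (!rdma_cap_af_ib(device, port))
			continue;
		pr_debug("port %u supports AF_IB\n", port);
	}
}
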
Link: https://lore.kernel.org/r/20200126142652.104803-4-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'drivers/infiniband/core/cma.c') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 8f16ebb413c2..34c62eae08d8 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -728,8 +728,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev; enum ib_gid_type gid_type; int ret = -ENODEV; + unsigned int port; union ib_gid gid; - u8 port; if (dev_addr->dev_type != ARPHRD_INFINIBAND && id_priv->id.ps == RDMA_PS_IPOIB) @@ -753,7 +753,8 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, } list_for_each_entry(cma_dev, &dev_list, list) { - for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) { + rdma_for_each_port (cma_dev->device, port) { + if (listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; @@ -786,8 +787,8 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) struct cma_device *cma_dev, *cur_dev; struct sockaddr_ib *addr; union ib_gid gid, sgid, *dgid; + unsigned int p; u16 pkey, index; - u8 p; enum ib_port_state port_state; int i; @@ -798,7 +799,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { - for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { + rdma_for_each_port (cur_dev->device, p) { if (!rdma_cap_af_ib(cur_dev->device, p)) continue; @@ -3029,9 +3030,9 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) struct cma_device *cma_dev, *cur_dev; union ib_gid gid; enum ib_port_state port_state; + unsigned int p; u16 pkey; int ret; - u8 p; cma_dev = NULL; mutex_lock(&lock); @@ -3043,7 +3044,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) if (!cma_dev) cma_dev = cur_dev; - for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { + rdma_for_each_port (cur_dev->device, p) { if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && port_state == IB_PORT_ACTIVE) { cma_dev = cur_dev; -- cgit v1.2.3 From 5ff8c8fa44c2cb74f3066ec4a531265db69b86c5 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:49 +0200 Subject: RDMA/cma: Rename cma_device ref/deref helpers to to get/put Helper functions which increment/decrement reference count of the structure read better when they are named with the get/put suffix. Hence, rename cma_ref/deref_dev() to cma_dev_get/put(). 
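For readers unfamiliar with the idiom behind these helpers, below is a minimal sketch of the get/put-plus-completion pattern (the demo_dev names are made up for illustration, mirroring struct cma_device): the final put completes a completion that the removal path waits on, so teardown cannot race with outstanding users.

#include <linux/atomic.h>
#include <linux/completion.h>

/* Hypothetical stand-in for struct cma_device. */
struct demo_dev {
	atomic_t refcount;
	struct completion comp;
};

static void demo_dev_get(struct demo_dev *dev)
{
	atomic_inc(&dev->refcount);
}

static void demo_dev_put(struct demo_dev *dev)
{
	/* The last put wakes up whoever waits in demo_dev_remove(). */
	if (atomic_dec_and_test(&dev->refcount))
		complete(&dev->comp);
}

static void demo_dev_remove(struct demo_dev *dev)
{
	demo_dev_put(dev);		 /* drop the initial reference */
	wait_for_completion(&dev->comp); /* block until all users are gone */
}

With get/put names, each demo_dev_get() call site can be matched visually against its balancing demo_dev_put(), which is the readability gain the rename is after.
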
Link: https://lore.kernel.org/r/20200126142652.104803-5-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'drivers/infiniband/core/cma.c') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 34c62eae08d8..7e16d1b001ff 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -247,11 +247,17 @@ enum { CMA_OPTION_AFONLY, }; -void cma_ref_dev(struct cma_device *cma_dev) +void cma_dev_get(struct cma_device *cma_dev) { atomic_inc(&cma_dev->refcount); } +void cma_dev_put(struct cma_device *cma_dev) +{ + if (atomic_dec_and_test(&cma_dev->refcount)) + complete(&cma_dev->comp); +} + struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, void *cookie) { @@ -267,7 +273,7 @@ struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter, } if (found_cma_dev) - cma_ref_dev(found_cma_dev); + cma_dev_get(found_cma_dev); mutex_unlock(&lock); return found_cma_dev; } @@ -463,7 +469,7 @@ static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join) static void _cma_attach_to_dev(struct rdma_id_private *id_priv, struct cma_device *cma_dev) { - cma_ref_dev(cma_dev); + cma_dev_get(cma_dev); id_priv->cma_dev = cma_dev; id_priv->id.device = cma_dev->device; id_priv->id.route.addr.dev_addr.transport = @@ -484,12 +490,6 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv, rdma_start_port(cma_dev->device)]; } -void cma_deref_dev(struct cma_device *cma_dev) -{ - if (atomic_dec_and_test(&cma_dev->refcount)) - complete(&cma_dev->comp); -} - static inline void release_mc(struct kref *kref) { struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref); @@ -502,7 +502,7 @@ static void cma_release_dev(struct rdma_id_private *id_priv) { mutex_lock(&lock); list_del(&id_priv->list); - cma_deref_dev(id_priv->cma_dev); + cma_dev_put(id_priv->cma_dev); id_priv->cma_dev = NULL; mutex_unlock(&lock); } @@ -4728,7 +4728,7 @@ static void cma_process_remove(struct cma_device *cma_dev) } mutex_unlock(&lock); - cma_deref_dev(cma_dev); + cma_dev_put(cma_dev); wait_for_completion(&cma_dev->comp); } -- cgit v1.2.3 From be439912e7c2e3e78ebd087932c165a83bdca6b5 Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:50 +0200 Subject: RDMA/cma: Use refcount API to reflect refcount Use the refcount variant to capture the reference counting of the cma device structure. 
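A minimal sketch of what the conversion amounts to, reusing the hypothetical demo_dev structure from the note above: refcount_t is preferred over a bare atomic_t for reference counting because it saturates and WARNs on overflow and underflow instead of silently wrapping.

#include <linux/completion.h>
#include <linux/refcount.h>

struct demo_dev {
	refcount_t refcount;		/* was atomic_t */
	struct completion comp;
};

static void demo_dev_init(struct demo_dev *dev)
{
	init_completion(&dev->comp);
	refcount_set(&dev->refcount, 1);	/* initial reference */
}

static void demo_dev_get(struct demo_dev *dev)
{
	refcount_inc(&dev->refcount);		/* WARNs if the count was 0 */
}

static void demo_dev_put(struct demo_dev *dev)
{
	if (refcount_dec_and_test(&dev->refcount))
		complete(&dev->comp);
}
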
Link: https://lore.kernel.org/r/20200126142652.104803-6-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'drivers/infiniband/core/cma.c') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 7e16d1b001ff..d43f7ce759f2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -199,7 +199,7 @@ struct cma_device { struct list_head list; struct ib_device *device; struct completion comp; - atomic_t refcount; + refcount_t refcount; struct list_head id_list; enum ib_gid_type *default_gid_type; u8 *default_roce_tos; @@ -249,12 +249,12 @@ enum { void cma_dev_get(struct cma_device *cma_dev) { - atomic_inc(&cma_dev->refcount); + refcount_inc(&cma_dev->refcount); } void cma_dev_put(struct cma_device *cma_dev) { - if (atomic_dec_and_test(&cma_dev->refcount)) + if (refcount_dec_and_test(&cma_dev->refcount)) complete(&cma_dev->comp); } @@ -754,7 +754,6 @@ static int cma_iw_acquire_dev(struct rdma_id_private *id_priv, list_for_each_entry(cma_dev, &dev_list, list) { rdma_for_each_port (cma_dev->device, port) { - if (listen_id_priv->cma_dev == cma_dev && listen_id_priv->id.port_num == port) continue; @@ -4657,7 +4656,7 @@ static void cma_add_one(struct ib_device *device) } init_completion(&cma_dev->comp); - atomic_set(&cma_dev->refcount, 1); + refcount_set(&cma_dev->refcount, 1); INIT_LIST_HEAD(&cma_dev->id_list); ib_set_client_data(device, &cma_client, cma_dev); -- cgit v1.2.3 From e368d23f57f6a08341d35c44255f2d8e7695152b Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:51 +0200 Subject: RDMA/cma: Rename cma_device ref/deref helpers to to get/put Helper functions which increment/decrement reference count of a structure read better when they are named with the get/put suffix. Hence, rename cma_ref/deref_id() to cma_id_get/put(). Also use cma_get_id() wrapper to find the balancing put() calls. Link: https://lore.kernel.org/r/20200126142652.104803-7-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 42 ++++++++++++++++++++++++------------------ 1 file changed, 24 insertions(+), 18 deletions(-) (limited to 'drivers/infiniband/core/cma.c') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index d43f7ce759f2..605afeed122f 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -840,7 +840,12 @@ found: return 0; } -static void cma_deref_id(struct rdma_id_private *id_priv) +static void cma_id_get(struct rdma_id_private *id_priv) +{ + atomic_inc(&id_priv->refcount); +} + +static void cma_id_put(struct rdma_id_private *id_priv) { if (atomic_dec_and_test(&id_priv->refcount)) complete(&id_priv->comp); @@ -1846,11 +1851,11 @@ void rdma_destroy_id(struct rdma_cm_id *id) } cma_release_port(id_priv); - cma_deref_id(id_priv); + cma_id_put(id_priv); wait_for_completion(&id_priv->comp); if (id_priv->internal_id) - cma_deref_id(id_priv->id.context); + cma_id_put(id_priv->id.context); kfree(id_priv->id.route.path_rec); @@ -2187,7 +2192,7 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id, * Protect against the user destroying conn_id from another thread * until we're done accessing it. 
*/ - atomic_inc(&conn_id->refcount); + cma_id_get(conn_id); ret = cma_cm_event_handler(conn_id, &event); if (ret) goto err3; @@ -2204,13 +2209,13 @@ static int cma_ib_req_handler(struct ib_cm_id *cm_id, mutex_unlock(&lock); mutex_unlock(&conn_id->handler_mutex); mutex_unlock(&listen_id->handler_mutex); - cma_deref_id(conn_id); + cma_id_put(conn_id); if (net_dev) dev_put(net_dev); return 0; err3: - cma_deref_id(conn_id); + cma_id_put(conn_id); /* Destroy the CM ID by returning a non-zero value. */ conn_id->cm_id.ib = NULL; err2: @@ -2391,7 +2396,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, * Protect against the user destroying conn_id from another thread * until we're done accessing it. */ - atomic_inc(&conn_id->refcount); + cma_id_get(conn_id); ret = cma_cm_event_handler(conn_id, &event); if (ret) { /* User wants to destroy the CM ID */ @@ -2399,13 +2404,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, cma_exch(conn_id, RDMA_CM_DESTROYING); mutex_unlock(&conn_id->handler_mutex); mutex_unlock(&listen_id->handler_mutex); - cma_deref_id(conn_id); + cma_id_put(conn_id); rdma_destroy_id(&conn_id->id); return ret; } mutex_unlock(&conn_id->handler_mutex); - cma_deref_id(conn_id); + cma_id_put(conn_id); out: mutex_unlock(&listen_id->handler_mutex); @@ -2492,7 +2497,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, _cma_attach_to_dev(dev_id_priv, cma_dev); list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); - atomic_inc(&id_priv->refcount); + cma_id_get(id_priv); dev_id_priv->internal_id = 1; dev_id_priv->afonly = id_priv->afonly; dev_id_priv->tos_set = id_priv->tos_set; @@ -2647,7 +2652,7 @@ static void cma_work_handler(struct work_struct *_work) } out: mutex_unlock(&id_priv->handler_mutex); - cma_deref_id(id_priv); + cma_id_put(id_priv); if (destroy) rdma_destroy_id(&id_priv->id); kfree(work); @@ -2671,7 +2676,7 @@ static void cma_ndev_work_handler(struct work_struct *_work) out: mutex_unlock(&id_priv->handler_mutex); - cma_deref_id(id_priv); + cma_id_put(id_priv); if (destroy) rdma_destroy_id(&id_priv->id); kfree(work); @@ -2690,7 +2695,8 @@ static void cma_init_resolve_route_work(struct cma_work *work, static void enqueue_resolve_addr_work(struct cma_work *work, struct rdma_id_private *id_priv) { - atomic_inc(&id_priv->refcount); + /* Balances with cma_id_put() in cma_work_handler */ + cma_id_get(id_priv); work->id = id_priv; INIT_WORK(&work->work, cma_work_handler); @@ -2986,7 +2992,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) return -EINVAL; - atomic_inc(&id_priv->refcount); + cma_id_get(id_priv); if (rdma_cap_ib_sa(id->device, id->port_num)) ret = cma_resolve_ib_route(id_priv, timeout_ms); else if (rdma_protocol_roce(id->device, id->port_num)) @@ -3002,7 +3008,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms) return 0; err: cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); - cma_deref_id(id_priv); + cma_id_put(id_priv); return ret; } EXPORT_SYMBOL(rdma_resolve_route); @@ -4581,7 +4587,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id INIT_WORK(&work->work, cma_ndev_work_handler); work->id = id_priv; work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; - atomic_inc(&id_priv->refcount); + cma_id_get(id_priv); queue_work(cma_wq, &work->work); } @@ -4715,11 +4721,11 @@ static void cma_process_remove(struct cma_device *cma_dev) list_del(&id_priv->listen_list); 
list_del_init(&id_priv->list); - atomic_inc(&id_priv->refcount); + cma_id_get(id_priv); mutex_unlock(&lock); ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); - cma_deref_id(id_priv); + cma_id_put(id_priv); if (ret) rdma_destroy_id(&id_priv->id); -- cgit v1.2.3 From 43fb5892cdfaa3bbe170aade07d4a38086636cca Mon Sep 17 00:00:00 2001 From: Parav Pandit Date: Sun, 26 Jan 2020 16:26:52 +0200 Subject: RDMA/cma: Use refcount API to reflect refcount Use a refcount_t for atomics being used as a refcount. Link: https://lore.kernel.org/r/20200126142652.104803-8-leon@kernel.org Signed-off-by: Parav Pandit Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/infiniband/core/cma.c') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 605afeed122f..5165158a7aaa 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -842,12 +842,12 @@ found: static void cma_id_get(struct rdma_id_private *id_priv) { - atomic_inc(&id_priv->refcount); + refcount_inc(&id_priv->refcount); } static void cma_id_put(struct rdma_id_private *id_priv) { - if (atomic_dec_and_test(&id_priv->refcount)) + if (refcount_dec_and_test(&id_priv->refcount)) complete(&id_priv->comp); } @@ -875,7 +875,7 @@ struct rdma_cm_id *__rdma_create_id(struct net *net, spin_lock_init(&id_priv->lock); mutex_init(&id_priv->qp_mutex); init_completion(&id_priv->comp); - atomic_set(&id_priv->refcount, 1); + refcount_set(&id_priv->refcount, 1); mutex_init(&id_priv->handler_mutex); INIT_LIST_HEAD(&id_priv->listen_list); INIT_LIST_HEAD(&id_priv->mc_list); -- cgit v1.2.3 From 32ac9e4399b12d3e54d312a0e0e30ed5cd19bd4e Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Thu, 27 Feb 2020 16:36:51 -0400 Subject: RDMA/cma: Teach lockdep about the order of rtnl and lock This lock ordering only happens when bonding is enabled and a certain bonding related event fires. However, since it can happen this is a global restriction on lock ordering. Teach lockdep about the order directly and unconditionally so bugs here are found quickly. See https://syzkaller.appspot.com/bug?extid=55de90ab5f44172b0c90 Link: https://lore.kernel.org/r/20200227203651.GA27185@ziepe.ca Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'drivers/infiniband/core/cma.c') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 468814e8c504..4df75ab4ee9d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -4796,6 +4796,19 @@ static int __init cma_init(void) { int ret; + /* + * There is a rare lock ordering dependency in cma_netdev_callback() + * that only happens when bonding is enabled. Teach lockdep that rtnl + * must never be nested under lock so it can find these without having + * to test with bonding. + */ + if (IS_ENABLED(CONFIG_LOCKDEP)) { + rtnl_lock(); + mutex_lock(&lock); + mutex_unlock(&lock); + rtnl_unlock(); + } + cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM); if (!cma_wq) return -ENOMEM; -- cgit v1.2.3 From 987914ab841e2ec281a35b54348ab109b4c0bb4e Mon Sep 17 00:00:00 2001 From: Avihai Horon Date: Wed, 18 Mar 2020 12:17:41 +0200 Subject: RDMA/cm: Update num_paths in cma_resolve_iboe_route error flow After a successful allocation of path_rec, num_paths is set to 1, but any error after such allocation will leave num_paths uncleared. 
This causes to de-referencing a NULL pointer later on. Hence, num_paths needs to be set back to 0 if such an error occurs. The following crash from syzkaller revealed it. kasan: CONFIG_KASAN_INLINE enabled kasan: GPF could be caused by NULL-ptr deref or user memory access general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC KASAN PTI CPU: 0 PID: 357 Comm: syz-executor060 Not tainted 4.18.0+ #311 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.11.0-0-g63451fca13-prebuilt.qemu-project.org 04/01/2014 RIP: 0010:ib_copy_path_rec_to_user+0x94/0x3e0 Code: f1 f1 f1 f1 c7 40 0c 00 00 f4 f4 65 48 8b 04 25 28 00 00 00 48 89 45 c8 31 c0 e8 d7 60 24 ff 48 8d 7b 4c 48 89 f8 48 c1 e8 03 <42> 0f b6 14 30 48 89 f8 83 e0 07 83 c0 03 38 d0 7c 08 84 d2 0f 85 RSP: 0018:ffff88006586f980 EFLAGS: 00010207 RAX: 0000000000000009 RBX: 0000000000000000 RCX: 1ffff1000d5fe475 RDX: ffff8800621e17c0 RSI: ffffffff820d45f9 RDI: 000000000000004c RBP: ffff88006586fa50 R08: ffffed000cb0df73 R09: ffffed000cb0df72 R10: ffff88006586fa70 R11: ffffed000cb0df73 R12: 1ffff1000cb0df30 R13: ffff88006586fae8 R14: dffffc0000000000 R15: ffff88006aff2200 FS: 00000000016fc880(0000) GS:ffff88006d000000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020000040 CR3: 0000000063fec000 CR4: 00000000000006b0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ? ib_copy_path_rec_from_user+0xcc0/0xcc0 ? __mutex_unlock_slowpath+0xfc/0x670 ? wait_for_completion+0x3b0/0x3b0 ? ucma_query_route+0x818/0xc60 ucma_query_route+0x818/0xc60 ? ucma_listen+0x1b0/0x1b0 ? sched_clock_cpu+0x18/0x1d0 ? sched_clock_cpu+0x18/0x1d0 ? ucma_listen+0x1b0/0x1b0 ? ucma_write+0x292/0x460 ucma_write+0x292/0x460 ? ucma_close_id+0x60/0x60 ? sched_clock_cpu+0x18/0x1d0 ? sched_clock_cpu+0x18/0x1d0 __vfs_write+0xf7/0x620 ? ucma_close_id+0x60/0x60 ? kernel_read+0x110/0x110 ? time_hardirqs_on+0x19/0x580 ? lock_acquire+0x18b/0x3a0 ? finish_task_switch+0xf3/0x5d0 ? _raw_spin_unlock_irq+0x29/0x40 ? _raw_spin_unlock_irq+0x29/0x40 ? finish_task_switch+0x1be/0x5d0 ? __switch_to_asm+0x34/0x70 ? __switch_to_asm+0x40/0x70 ? security_file_permission+0x172/0x1e0 vfs_write+0x192/0x460 ksys_write+0xc6/0x1a0 ? __ia32_sys_read+0xb0/0xb0 ? entry_SYSCALL_64_after_hwframe+0x3e/0xbe ? do_syscall_64+0x1d/0x470 do_syscall_64+0x9e/0x470 entry_SYSCALL_64_after_hwframe+0x49/0xbe Fixes: 3c86aa70bf67 ("RDMA/cm: Add RDMA CM support for IBoE devices") Link: https://lore.kernel.org/r/20200318101741.47211-1-leon@kernel.org Signed-off-by: Avihai Horon Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/infiniband/core/cma.c') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 4df75ab4ee9d..26e6f7df247b 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -2978,6 +2978,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) err2: kfree(route->path_rec); route->path_rec = NULL; + route->num_paths = 0; err1: kfree(work); return ret; -- cgit v1.2.3
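
A hedged reduction of the error flow this last patch corrects (demo_route and demo_resolve_route() are illustrative names, not the driver's): num_paths is only meaningful while path_rec is non-NULL, so the unwind path has to reset both together, otherwise a later ucma_query_route() copies num_paths entries out of a NULL array.

#include <linux/slab.h>
#include <rdma/ib_sa.h>

struct demo_route {
	struct sa_path_rec *path_rec;
	int num_paths;
};

static int demo_resolve_route(struct demo_route *route, bool later_step_fails)
{
	int ret = 0;

	route->path_rec = kzalloc(sizeof(*route->path_rec), GFP_KERNEL);
	if (!route->path_rec)
		return -ENOMEM;
	route->num_paths = 1;

	if (later_step_fails) {	/* e.g. no netdev, or a work allocation fails */
		ret = -ENODEV;
		goto err;
	}
	return 0;

err:
	kfree(route->path_rec);
	route->path_rec = NULL;
	route->num_paths = 0;	/* the fix: keep num_paths consistent with path_rec */
	return ret;
}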