author    Jason Gunthorpe <jgg@nvidia.com>    2021-06-02 13:27:04 +0300
committer Jason Gunthorpe <jgg@nvidia.com>    2021-06-02 15:41:57 -0300
commit    efafae671707524608889d904f59b0f7bad87a0e
tree      1da7a6543fcb39c1d640a8abb2ff93983961cd6c
parent    c1cf6d9f743aad09b231752c12845ba7083b28f7
IB/cm: Tidy remaining cm_msg free paths
Now that all the free paths are explicit, cm_free_msg() will only be called for msgs allocated with cm_alloc_msg(), so we can assume the context is set. Place it after the allocation function it is paired with for clarity.

Also remove a bogus NULL assignment in one place after a cancel. It does nothing other than stop completions from becoming events, but changing the state already did that.

Link: https://lore.kernel.org/r/082fd3552be0d1a2c19b1c4cefb5f3f0e3e68e82.1622629024.git.leonro@nvidia.com
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
 drivers/infiniband/core/cm.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)
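As a reading aid, here is a minimal sketch (not kernel code) of the ownership rule the commit message relies on: cm_alloc_msg() is assumed to take a reference on the cm_id_private and stash it in msg->context[0], which is why the reworked cm_free_msg() in the hunk below can read context[0] without a NULL check. All types and helpers are simplified stand-ins; only the field names mirror the diff.

/*
 * Hedged sketch of the alloc/free pairing.  Not the IB/cm implementation;
 * the real functions live in drivers/infiniband/core/cm.c and use the MAD
 * and RDMA core APIs named in the comments.
 */
#include <stdlib.h>

struct cm_id_private { int refcount; };

struct ib_mad_send_buf {
	void *context[2];   /* context[0] always holds the owning cm_id_private */
	void *ah;           /* address handle, may be NULL */
};

static void cm_deref_id_sketch(struct cm_id_private *p)
{
	p->refcount--;                   /* stands in for cm_deref_id() */
}

static struct ib_mad_send_buf *cm_alloc_msg_sketch(struct cm_id_private *cm_id_priv)
{
	struct ib_mad_send_buf *msg = calloc(1, sizeof(*msg));

	if (!msg)
		return NULL;
	cm_id_priv->refcount++;          /* reference pinned for the msg's lifetime */
	msg->context[0] = cm_id_priv;    /* invariant: set on every successful alloc */
	return msg;
}

static void cm_free_msg_sketch(struct ib_mad_send_buf *msg)
{
	struct cm_id_private *cm_id_priv = msg->context[0];

	/* context[0] is guaranteed non-NULL by the allocator, so no check needed */
	if (msg->ah)
		free(msg->ah);               /* stands in for rdma_destroy_ah() */
	cm_deref_id_sketch(cm_id_priv);  /* drop the reference taken at alloc time */
	free(msg);                       /* stands in for ib_free_send_mad() */
}

The old helper's "if (msg->context[0])" test only made sense while messages without a set context could reach the same free path; with the free paths made explicit in the earlier patches, the invariant presumably holds unconditionally, so the check is dropped.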
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 8dbc39ea4612..1f0bc31ca0e2 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -367,6 +367,16 @@ out:
return ERR_PTR(ret);
}
+static void cm_free_msg(struct ib_mad_send_buf *msg)
+{
+ struct cm_id_private *cm_id_priv = msg->context[0];
+
+ if (msg->ah)
+ rdma_destroy_ah(msg->ah, 0);
+ cm_deref_id(cm_id_priv);
+ ib_free_send_mad(msg);
+}
+
static struct ib_mad_send_buf *
cm_alloc_priv_msg(struct cm_id_private *cm_id_priv)
{
@@ -420,15 +430,6 @@ static int cm_create_response_msg_ah(struct cm_port *port,
return 0;
}
-static void cm_free_msg(struct ib_mad_send_buf *msg)
-{
- if (msg->ah)
- rdma_destroy_ah(msg->ah, 0);
- if (msg->context[0])
- cm_deref_id(msg->context[0]);
- ib_free_send_mad(msg);
-}
-
static int cm_alloc_response_msg(struct cm_port *port,
struct ib_mad_recv_wc *mad_recv_wc,
struct ib_mad_send_buf **msg)
@@ -3455,7 +3456,6 @@ static int cm_apr_handler(struct cm_work *work)
}
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
- cm_id_priv->msg = NULL;
cm_queue_work_unlock(cm_id_priv, work);
return 0;
out: