| author    | Jack Wang <jinpu.wang@ionos.com>          | 2021-06-14 11:03:37 +0200 |
|-----------|-------------------------------------------|---------------------------|
| committer | Jason Gunthorpe <jgg@nvidia.com>          | 2021-06-18 13:47:13 -0300 |
| commit    | a95fbe2abafdad800cc9a1ee6a08501c6835c8ba  |                           |
| tree      | 6fb05ac9ef19b655bd7d92d40f6f204e77f9b136  |                           |
| parent    | 354462eb7f528dadd68e8a0e7e6d69794b801f95  |                           |
RDMA/rtrs: Check device max_qp_wr limit when creating QP
Currently we only check the device max_qp_wr limit for IO connections, but
not for service connections. We should check it for both.

So save the max_qp_wr device limit in wr_limit, and use it for both IO
connections and service connections.

While at it, also remove an outdated comment.
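
The resulting clamp, in rough sketch form (a minimal illustration following the patch, not a verbatim copy of the rtrs code; sess, con and SERVICE_CON_QUEUE_DEPTH are the existing rtrs names):

```c
/*
 * Minimal sketch of the behaviour this patch introduces (not a verbatim
 * copy of the rtrs code): the device limit is read once into wr_limit and
 * both connection types clamp their work-request counts against it.
 */
u32 wr_limit, max_send_wr, max_recv_wr;

wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;

if (con->c.cid == 0) {
	/* service connection: 2 WRs per queue slot + drain + heartbeat */
	max_send_wr = min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
	max_recv_wr = max_send_wr;
} else {
	/* IO connection: QD * (REQ + RSP + FR REG or INV) + drain */
	max_send_wr = min_t(int, wr_limit, sess->queue_depth * 3 + 1);
	max_recv_wr = min_t(int, wr_limit, sess->queue_depth * 3 + 1);
}
```
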
Link: https://lore.kernel.org/r/20210614090337.29557-6-jinpu.wang@ionos.com
Suggested-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jack Wang <jinpu.wang@ionos.com>
Signed-off-by: Gioh Kim <gi-oh.kim@ionos.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
| file                                    | changes |
|-----------------------------------------|---------|
| drivers/infiniband/ulp/rtrs/rtrs-clt.c  | 29      |
| drivers/infiniband/ulp/rtrs/rtrs-srv.c  | 13      |

2 files changed, 19 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 67ff5bf9bfa8..125e0bead262 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -1572,21 +1572,12 @@ static void destroy_con(struct rtrs_clt_con *con)
 static int create_con_cq_qp(struct rtrs_clt_con *con)
 {
 	struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
-	u32 max_send_wr, max_recv_wr, cq_num, max_send_sge;
+	u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
 	int err, cq_vector;
 	struct rtrs_msg_rkey_rsp *rsp;
 
 	lockdep_assert_held(&con->con_mutex);
 	if (con->c.cid == 0) {
-		/*
-		 * Two (request + registration) completion for send
-		 * Two for recv if always_invalidate is set on server
-		 * or one for recv.
-		 * + 2 for drain and heartbeat
-		 * in case qp gets into error state.
-		 */
-		max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
-		max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
 		max_send_sge = 1;
 		/* We must be the first here */
 		if (WARN_ON(sess->s.dev))
@@ -1606,6 +1597,17 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
 		}
 		sess->s.dev_ref = 1;
 		query_fast_reg_mode(sess);
+		wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+		/*
+		 * Two (request + registration) completion for send
+		 * Two for recv if always_invalidate is set on server
+		 * or one for recv.
+		 * + 2 for drain and heartbeat
+		 * in case qp gets into error state.
+		 */
+		max_send_wr =
+			min_t(int, wr_limit, SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+		max_recv_wr = max_send_wr;
 	} else {
 		/*
 		 * Here we assume that session members are correctly set.
@@ -1617,14 +1619,13 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
 		if (WARN_ON(!sess->queue_depth))
 			return -EINVAL;
 
+		wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
 		/* Shared between connections */
 		sess->s.dev_ref++;
-		max_send_wr =
-			min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+		max_send_wr = min_t(int, wr_limit,
 			      /* QD * (REQ + RSP + FR REGS or INVS) + drain */
 			      sess->queue_depth * 3 + 1);
-		max_recv_wr =
-			min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr,
+		max_recv_wr = min_t(int, wr_limit,
 			      sess->queue_depth * 3 + 1);
 		max_send_sge = sess->clt->max_segments + 1;
 	}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index c10dfc296259..1a30fd833792 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -1649,22 +1649,17 @@ static int create_con(struct rtrs_srv_sess *sess,
 	con->c.sess = &sess->s;
 	con->c.cid = cid;
 	atomic_set(&con->wr_cnt, 1);
+	wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
 
 	if (con->c.cid == 0) {
 		/*
 		 * All receive and all send (each requiring invalidate)
 		 * + 2 for drain and heartbeat
 		 */
-		max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
-		max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2;
+		max_send_wr = min_t(int, wr_limit,
+				    SERVICE_CON_QUEUE_DEPTH * 2 + 2);
+		max_recv_wr = max_send_wr;
 	} else {
-		/*
-		 * In theory we might have queue_depth * 32
-		 * outstanding requests if an unsafe global key is used
-		 * and we have queue_depth read requests each consisting
-		 * of 32 different addresses. div 3 for mlx5.
-		 */
-		wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr / 3;
 		/* when always_invlaidate enalbed, we need linv+rinv+mr+imm */
 		if (always_invalidate)
 			max_send_wr =
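
For context on why the clamp matters: the computed counts end up in the cap field of struct ib_qp_init_attr, and QP creation typically fails if they exceed the device's max_qp_wr. Below is a hedged sketch of that pattern; the helper example_create_rc_qp and its parameters are illustrative only, not the actual rtrs code path:

```c
#include <linux/minmax.h>
#include <rdma/ib_verbs.h>

/*
 * Illustrative helper (not part of rtrs): clamp the requested WR counts
 * to the device's max_qp_wr before building the QP attributes, in the
 * spirit of the wr_limit change above.
 */
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq,
					   u32 want_send_wr, u32 want_recv_wr,
					   u32 max_send_sge)
{
	u32 wr_limit = pd->device->attrs.max_qp_wr;
	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.qp_type     = IB_QPT_RC,
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.cap = {
			/* without the clamp, oversized caps make QP creation fail */
			.max_send_wr  = min_t(u32, wr_limit, want_send_wr),
			.max_recv_wr  = min_t(u32, wr_limit, want_recv_wr),
			.max_send_sge = max_send_sge,
			.max_recv_sge = 1,
		},
	};

	return ib_create_qp(pd, &init_attr);
}
```

Reading the limit once into wr_limit and clamping with min_t() keeps both the service connection and the IO connections within what the HCA can actually allocate.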