author    Steve Wise <swise@opengridcomputing.com>  2017-11-09 07:14:43 -0800
committer Doug Ledford <dledford@redhat.com>        2017-11-13 16:59:22 -0500
commit    cbb40fadd31c6bbc59104e58ac95c6ef492d038b
tree      eb5c56c4496ea3b53f2a306c97412ccf263a00a8 /drivers/infiniband
parent    1c8f1da5d851b92aeb81dbbb9ebd516f6e2588f5
iw_cxgb4: only call the cq comp_handler when the cq is armed
The ULP's completion handler should only be called if the CQ is
armed for notification.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
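
The fix gates every completion upcall on atomically consuming the CQ's armed state, so an unarmed CQ never sees a spurious comp_handler call and two racing notifiers deliver at most one upcall per arming. Below is a minimal sketch of that pattern, assuming the armed state is a single flag bit consumed with test_and_clear_bit() (which is how t4_clear_cq_armed() appears to be implemented in the driver's t4.h); the demo_* names are illustrative, not part of the driver:

```c
#include <linux/bitops.h>
#include <linux/spinlock.h>

#define DEMO_CQ_ARMED	0	/* illustrative flag bit */

struct demo_cq {
	unsigned long flags;		/* DEMO_CQ_ARMED lives here */
	spinlock_t comp_handler_lock;
	void (*comp_handler)(struct demo_cq *cq, void *ctx);
	void *cq_context;
};

/*
 * Atomically consume the armed state. Returns nonzero only for the
 * one caller that actually cleared the bit, so concurrent notifiers
 * deliver at most one upcall per arming.
 */
static int demo_clear_cq_armed(struct demo_cq *cq)
{
	return test_and_clear_bit(DEMO_CQ_ARMED, &cq->flags);
}

/* The shape of each notification site after this patch. */
static void demo_notify(struct demo_cq *cq)
{
	unsigned long flag;

	if (demo_clear_cq_armed(cq)) {
		spin_lock_irqsave(&cq->comp_handler_lock, flag);
		(*cq->comp_handler)(cq, cq->cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flag);
	}
}
```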
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- drivers/infiniband/hw/cxgb4/ev.c |  8 +++++---
-rw-r--r-- drivers/infiniband/hw/cxgb4/qp.c | 20 ++++++++++++--------
2 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index b8c7cc938bce..a252d5c40ae3 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -109,9 +109,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
 	if (qhp->ibqp.event_handler)
 		(*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
 
-	spin_lock_irqsave(&chp->comp_handler_lock, flag);
-	(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
-	spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&chp->cq)) {
+		spin_lock_irqsave(&chp->comp_handler_lock, flag);
+		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
+		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
+	}
 }
 
 void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 1374b41201a9..fefc5fed1778 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -813,10 +813,12 @@ static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
 	t4_swcq_produce(cq);
 	spin_unlock_irqrestore(&schp->lock, flag);
 
-	spin_lock_irqsave(&schp->comp_handler_lock, flag);
-	(*schp->ibcq.comp_handler)(&schp->ibcq,
-				   schp->ibcq.cq_context);
-	spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&schp->cq)) {
+		spin_lock_irqsave(&schp->comp_handler_lock, flag);
+		(*schp->ibcq.comp_handler)(&schp->ibcq,
+					   schp->ibcq.cq_context);
+		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+	}
 }
 
 static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
@@ -842,10 +844,12 @@ static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
 	t4_swcq_produce(cq);
 	spin_unlock_irqrestore(&rchp->lock, flag);
 
-	spin_lock_irqsave(&rchp->comp_handler_lock, flag);
-	(*rchp->ibcq.comp_handler)(&rchp->ibcq,
-				   rchp->ibcq.cq_context);
-	spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	if (t4_clear_cq_armed(&rchp->cq)) {
+		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+					   rchp->ibcq.cq_context);
+		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+	}
 }
 
 int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
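
For the ULP side of this contract: a CQ only becomes armed when the consumer requests the next notification, typically with ib_req_notify_cq(), so a comp_handler upcall on an unarmed CQ is one the ULP never asked for. A hedged sketch of a typical consumer using the in-kernel verbs API (the handler name and the empty processing body are illustrative):

```c
#include <rdma/ib_verbs.h>

/*
 * Illustrative ULP completion handler: drain the CQ, then re-arm it.
 * Until ib_req_notify_cq() is called again, the provider should not
 * invoke this handler -- which is exactly what the patch enforces.
 */
static void demo_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		/* process wc.status / wc.opcode here */
	}

	/* Re-arm: request a notification for the next completion. */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}
```

Production handlers usually re-poll after re-arming, or pass IB_CQ_REPORT_MISSED_EVENTS, to close the window where a completion arrives between the last poll and the re-arm.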