author     Israel Rukshin <israelr@mellanox.com>     2019-10-13 19:57:31 +0300
committer  Jens Axboe <axboe@kernel.dk>              2019-11-04 10:56:40 -0700
commit     58a8df67e057e979e76f8dc881766da3f7137f99
tree       cd51a6846725db606e2dca98f56cf18fadb3ac5b  /drivers/nvme/host
parent     bcde5f0fc7d318c98d4234b52bcc1a87fc2162d9
nvme: introduce nvme_is_aen_req function
Introducing this helper improves code readability and removes the duplicated AEN check from the PCIe, RDMA and TCP completion paths; a standalone sketch of the check follows the diff below.
Signed-off-by: Israel Rukshin <israelr@mellanox.com>
Signed-off-by: Max Gurtovoy <maxg@mellanox.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--   drivers/nvme/host/nvme.h | 5 +++++
-rw-r--r--   drivers/nvme/host/pci.c  | 3 +--
-rw-r--r--   drivers/nvme/host/rdma.c | 4 ++--
-rw-r--r--   drivers/nvme/host/tcp.c  | 4 ++--
4 files changed, 10 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 38a83ef5bcd3..912f9500ed11 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -445,6 +445,11 @@ static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
 	put_device(ctrl->device);
 }
 
+static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
+{
+	return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
+}
+
 void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index bb88681f4dc3..7082116e9206 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -967,8 +967,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvmeq->qid == 0 &&
-			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
+	if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
 		nvme_complete_async_event(&nvmeq->dev->ctrl,
 				cqe->status, &cqe->result);
 		return;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 4d280160dd3f..154fa4e32ad8 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1501,8 +1501,8 @@ static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvme_rdma_queue_idx(queue) == 0 &&
-	    cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+	if (unlikely(nvme_is_aen_req(nvme_rdma_queue_idx(queue),
+				     cqe->command_id)))
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	else
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 385a5212c10f..124fda67613a 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -491,8 +491,8 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue,
 	 * aborts. We don't even bother to allocate a struct request
 	 * for them but rather special case them here.
 	 */
-	if (unlikely(nvme_tcp_queue_id(queue) == 0 &&
-	    cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH))
+	if (unlikely(nvme_is_aen_req(nvme_tcp_queue_id(queue),
+				     cqe->command_id)))
 		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
 				&cqe->result);
 	else
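
For reference outside the kernel tree, the sketch below restates the new check in a standalone form. The NVME_AQ_BLK_MQ_DEPTH value of 31 is an assumption taken from the mainline headers of this era (NVME_AQ_DEPTH of 32 minus one slot reserved for AEN commands), and the main() driver with its sample qid/command_id values is purely illustrative.

/*
 * Standalone sketch of the AEN check introduced by this patch.
 * NVME_AQ_BLK_MQ_DEPTH is assumed to be 31 here; the values passed
 * from main() are illustrative only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NVME_AQ_BLK_MQ_DEPTH	31	/* assumed value, see nvme.h */

/* Mirrors the new helper: an AEN completion arrives on the admin queue
 * (qid 0) with a command_id at or above the blk-mq tag range. */
static inline bool nvme_is_aen_req(uint16_t qid, uint16_t command_id)
{
	return !qid && command_id >= NVME_AQ_BLK_MQ_DEPTH;
}

int main(void)
{
	/* Admin-queue completion with a reserved command_id -> AEN. */
	printf("qid=0, cid=31: %d\n", nvme_is_aen_req(0, 31));
	/* Ordinary admin command within the blk-mq tag space -> not AEN. */
	printf("qid=0, cid=5:  %d\n", nvme_is_aen_req(0, 5));
	/* I/O queue completions are never AENs regardless of command_id. */
	printf("qid=3, cid=31: %d\n", nvme_is_aen_req(3, 31));
	return 0;
}

The point of the helper is that AEN submissions never get a struct request and use command IDs at or above the admin queue's blk-mq tag depth, so a single qid/command_id test identifies them in every transport's completion path.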