author		Christoph Hellwig <hch@lst.de>	2019-03-05 05:46:58 -0700
committer	Christoph Hellwig <hch@lst.de>	2019-04-05 08:07:58 +0200
commit		d43f1ccfad053dbefba1d15443cdc36ca60958f0 (patch)
tree		2fe4bce6cf433961841611c0d882d32733e55ada /drivers/nvme/host
parent		4aedb705437f6f98b45f45c394e6803ca67abd33 (diff)
nvme-pci: remove the inline scatterlist optimization
We'll have a better way to optimize for small I/O that doesn't
require it soon, so remove the existing inline_sg case to make that
optimization easier to implement.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--	drivers/nvme/host/pci.c	38
1 file changed, 6 insertions, 32 deletions
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 10e6b5d055e9..bd7e4209ab36 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -223,7 +223,6 @@ struct nvme_iod {
 	dma_addr_t first_dma;
 	dma_addr_t meta_dma;
 	struct scatterlist *sg;
-	struct scatterlist inline_sg[0];
 };
 
 /*
@@ -371,12 +370,6 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
 }
 
 /*
- * Max size of iod being embedded in the request payload
- */
-#define NVME_INT_PAGES		2
-#define NVME_INT_BYTES(dev)	(NVME_INT_PAGES * (dev)->ctrl.page_size)
-
-/*
  * Will slightly overestimate the number of pages needed. This is OK
  * as it only leads to a small amount of wasted memory for the lifetime of
  * the I/O.
@@ -410,15 +403,6 @@ static unsigned int nvme_pci_iod_alloc_size(struct nvme_dev *dev,
 	return alloc_size + sizeof(struct scatterlist) * nseg;
 }
 
-static unsigned int nvme_pci_cmd_size(struct nvme_dev *dev, bool use_sgl)
-{
-	unsigned int alloc_size = nvme_pci_iod_alloc_size(dev,
-			NVME_INT_BYTES(dev), NVME_INT_PAGES,
-			use_sgl);
-
-	return sizeof(struct nvme_iod) + alloc_size;
-}
-
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
@@ -621,8 +605,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 		dma_addr = next_dma_addr;
 	}
 
-	if (iod->sg != iod->inline_sg)
-		mempool_free(iod->sg, dev->iod_mempool);
+	mempool_free(iod->sg, dev->iod_mempool);
 }
 
 static void nvme_print_sgl(struct scatterlist *sgl, int nents)
@@ -822,14 +805,9 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 	blk_status_t ret = BLK_STS_IOERR;
 	int nr_mapped;
 
-	if (blk_rq_payload_bytes(req) > NVME_INT_BYTES(dev) ||
-	    blk_rq_nr_phys_segments(req) > NVME_INT_PAGES) {
-		iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
-		if (!iod->sg)
-			return BLK_STS_RESOURCE;
-	} else {
-		iod->sg = iod->inline_sg;
-	}
+	iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
+	if (!iod->sg)
+		return BLK_STS_RESOURCE;
 
 	iod->use_sgl = nvme_pci_use_sgls(dev, req);
 
@@ -1612,7 +1590,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
 		dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
 		dev->admin_tagset.timeout = ADMIN_TIMEOUT;
 		dev->admin_tagset.numa_node = dev_to_node(dev->dev);
-		dev->admin_tagset.cmd_size = nvme_pci_cmd_size(dev, false);
+		dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
 		dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
 		dev->admin_tagset.driver_data = dev;
 
@@ -2257,11 +2235,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
 		dev->tagset.numa_node = dev_to_node(dev->dev);
 		dev->tagset.queue_depth =
 				min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
-		dev->tagset.cmd_size = nvme_pci_cmd_size(dev, false);
-		if ((dev->ctrl.sgls & ((1 << 0) | (1 << 1))) && sgl_threshold) {
-			dev->tagset.cmd_size = max(dev->tagset.cmd_size,
-					nvme_pci_cmd_size(dev, true));
-		}
+		dev->tagset.cmd_size = sizeof(struct nvme_iod);
 		dev->tagset.flags = BLK_MQ_F_SHOULD_MERGE;
 		dev->tagset.driver_data = dev;
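
For context, here is a reduced sketch of the two mapping strategies the hunks above switch between. The names demo_iod, demo_map_old, demo_map_new and the inline_bytes/inline_segs parameters are illustrative only and do not exist in the driver; the real code uses struct nvme_iod, nvme_map_data, and the NVME_INT_BYTES/NVME_INT_PAGES limits shown in the diff. Only mempool_alloc, GFP_ATOMIC, and the blk_status_t codes are real kernel APIs.

	#include <linux/blk_types.h>
	#include <linux/gfp.h>
	#include <linux/mempool.h>
	#include <linux/scatterlist.h>

	/*
	 * Illustrative sketch, not the driver's code: before this commit the
	 * per-request iod ended in a zero-length array so a small scatterlist
	 * could live inline in the blk-mq command payload; afterwards the
	 * scatterlist is always taken from the iod mempool.
	 */
	struct demo_iod {
		struct scatterlist *sg;			/* active scatterlist */
		struct scatterlist inline_sg[0];	/* member removed by this commit */
	};

	/* Old path: fall back to the mempool only when the request is too big. */
	static blk_status_t demo_map_old(struct demo_iod *iod, mempool_t *pool,
			unsigned int bytes, unsigned int segs,
			unsigned int inline_bytes, unsigned int inline_segs)
	{
		if (bytes > inline_bytes || segs > inline_segs) {
			iod->sg = mempool_alloc(pool, GFP_ATOMIC);
			if (!iod->sg)
				return BLK_STS_RESOURCE;
		} else {
			iod->sg = iod->inline_sg;	/* no allocation for small I/O */
		}
		return BLK_STS_OK;
	}

	/* New path: one unconditional mempool allocation per request. */
	static blk_status_t demo_map_new(struct demo_iod *iod, mempool_t *pool)
	{
		iod->sg = mempool_alloc(pool, GFP_ATOMIC);
		if (!iod->sg)
			return BLK_STS_RESOURCE;
		return BLK_STS_OK;
	}

The trade-off is one mempool allocation per request even for small I/O, which, per the commit message, a follow-up optimization is meant to address. In return, the per-command blk-mq payload no longer reserves worst-case inline scatterlist space, which is why both cmd_size assignments shrink to sizeof(struct nvme_iod).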