| author | Ming Lei <ming.lei@redhat.com> | 2019-06-18 09:37:45 +0800 |
|---|---|---|
| committer | Martin K. Petersen <martin.petersen@oracle.com> | 2019-06-20 15:20:43 -0400 |
| commit | 3c1a30df6d9c21c3235b6af5f25da2765b19d05b (patch) | |
| tree | bab97916f1b56ed9525130fc9d9104ab0c9a7158 /drivers/scsi/mvumi.c | |
| parent | 46e8e475a160be5e31e99171b7c0c8a21eb4d6ad (diff) | |
scsi: mvumi: use sg helper to iterate over scatterlist
Unlike the legacy I/O path, scsi-mq preallocates a large array to hold
the scatterlist for each request. This static allocation can consume
substantial amounts of memory on modern controllers which support a
large number of concurrently outstanding requests.
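For a rough sense of scale (hypothetical numbers, not taken from the commit): assuming a preallocated table of SG_ALL = 128 scatterlist entries per request, a 32-byte struct scatterlist on 64-bit, and a host that keeps 1024 commands outstanding, the static allocation alone works out to roughly

128 entries × 32 bytes ≈ 4 KiB per request
4 KiB × 1024 outstanding commands ≈ 4 MiB per host

which is memory reserved whether or not the requests ever use that many segments.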
To facilitate a switch to a smaller static allocation combined with a
dynamic allocation for requests that need it, we need to make sure all
SCSI drivers handle chained scatterlists correctly.
Convert remaining drivers that directly dereference the scatterlist
array to using the iterator functions.
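As an illustration of the pattern this series converts drivers to (a minimal sketch, not code from mvumi; the function name example_fill_sgl is made up), the loop walks the mapped scatterlist with scsi_for_each_sg() instead of indexing &sg[i], so chained lists are followed correctly:

```c
#include <linux/scatterlist.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical helper: program each mapped SG segment into a HBA's
 * hardware SG descriptors. scsi_for_each_sg() walks the list via
 * sg_next(), so it keeps working when the list is chained, unlike
 * direct array indexing of scsi_sglist(scmd).
 */
static void example_fill_sgl(struct scsi_cmnd *scmd, int sg_count)
{
	struct scatterlist *sg;
	int i;

	scsi_for_each_sg(scmd, sg, sg_count, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* hand addr/len to the controller's descriptor here */
		(void)addr;
		(void)len;
	}
}
```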
[mkp: clarified commit message and folded in build fix reported by zeroday]
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Reviewed-by: Ewan D. Milne <emilne@redhat.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/scsi/mvumi.c')
-rw-r--r-- | drivers/scsi/mvumi.c | 11 |
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index a5410615edac..53f3563aca22 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -211,23 +211,22 @@ static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 	unsigned int sgnum = scsi_sg_count(scmd);
 	dma_addr_t busaddr;
 
-	sg = scsi_sglist(scmd);
-	*sg_count = dma_map_sg(&mhba->pdev->dev, sg, sgnum,
+	*sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
 			       scmd->sc_data_direction);
 	if (*sg_count > mhba->max_sge) {
 		dev_err(&mhba->pdev->dev,
 			"sg count[0x%x] is bigger than max sg[0x%x].\n",
 			*sg_count, mhba->max_sge);
-		dma_unmap_sg(&mhba->pdev->dev, sg, sgnum,
+		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
 			     scmd->sc_data_direction);
 		return -1;
 	}
-	for (i = 0; i < *sg_count; i++) {
-		busaddr = sg_dma_address(&sg[i]);
+	scsi_for_each_sg(scmd, sg, *sg_count, i) {
+		busaddr = sg_dma_address(sg);
 		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 		m_sg->flags = 0;
-		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
+		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
 		if ((i + 1) == *sg_count)
 			m_sg->flags |= 1U << mhba->eot_flag;
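The reason &sg[i] indexing is unsafe is that a chained scatterlist is not one contiguous array: the last slot of each chunk is a chain link pointing at the next chunk, so a plain array walk would step onto the link entry and then past the end of the chunk. Roughly, the iterator's sg_next() helper handles this as follows (a paraphrased sketch of lib/scatterlist.c, not a verbatim copy):

```c
/* Paraphrased sketch of sg_next(): advance within the current chunk,
 * and transparently follow a chain entry into the next chunk.
 */
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))		/* end marker: stop the walk */
		return NULL;

	sg++;				/* next slot in the current chunk */
	if (unlikely(sg_is_chain(sg)))	/* chain entry: jump to next chunk */
		sg = sg_chain_ptr(sg);

	return sg;
}
```

scsi_for_each_sg() is built on this walk, which is why the converted loop keeps working once scsi-mq switches to smaller, possibly chained, scatterlist allocations.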