| author | Adrian Hunter <adrian.hunter@intel.com> | 2016-11-29 12:09:12 +0200 |
|---|---|---|
| committer | Ulf Hansson <ulf.hansson@linaro.org> | 2016-12-05 10:31:05 +0100 |
| commit | f2b8b522cf643baa367b6834a49ff3e12cfa9136 (patch) | |
| tree | b58c75bd94ccd37b9c7ce9f45602d901812c106f /drivers/mmc | |
| parent | c853982ece93da10e508a5dab621478623deb324 (diff) | |
mmc: queue: Factor out mmc_queue_alloc_bounce_sgs()
In preparation for supporting a queue of requests, factor out
mmc_queue_alloc_bounce_sgs().
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Harjani Ritesh <riteshh@codeaurora.org>
[Ulf: Fixed compiler warning]
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
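
For context (the function itself is unchanged by this patch): the new helper leans on queue.c's mmc_alloc_sg(), which allocates a scatterlist and reports failure through an error pointer, which is why mmc_queue_alloc_bounce_sgs() can simply test ret after each call. A minimal sketch of that allocator, reconstructed for illustration (consult the tree for the exact code):

```c
#include <linux/scatterlist.h>
#include <linux/slab.h>

/*
 * Sketch of the mmc_alloc_sg() helper that mmc_queue_alloc_bounce_sgs()
 * builds on (reconstructed, not part of this patch): allocate an sg_len
 * entry scatterlist, initialize it, and report status through *err.
 */
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
	struct scatterlist *sg;

	sg = kmalloc_array(sg_len, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg) {
		*err = -ENOMEM;
	} else {
		*err = 0;
		sg_init_table(sg, sg_len);
	}

	return sg;
}
```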
Diffstat (limited to 'drivers/mmc')
| -rw-r--r-- | drivers/mmc/card/queue.c | 44 |

1 file changed, 28 insertions, 16 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index cca143afd12f..46b7b1f0cade 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -211,6 +211,30 @@ static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
 
 	return true;
 }
+
+static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
+				      unsigned int bouncesz)
+{
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+	int ret;
+
+	mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+	if (ret)
+		return ret;
+
+	mqrq_cur->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+	if (ret)
+		return ret;
+
+	mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+	if (ret)
+		return ret;
+
+	mqrq_prev->bounce_sg = mmc_alloc_sg(bouncesz / 512, &ret);
+
+	return ret;
+}
 #endif
 
 /**
@@ -227,6 +251,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
+	bool bounce = false;
 	int ret;
 	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
 	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
@@ -269,28 +294,15 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 			blk_queue_max_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);
 
-			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_cur->bounce_sg =
-				mmc_alloc_sg(bouncesz / 512, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
-			if (ret)
-				goto cleanup_queue;
-
-			mqrq_prev->bounce_sg =
-				mmc_alloc_sg(bouncesz / 512, &ret);
+			ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
 			if (ret)
 				goto cleanup_queue;
+			bounce = true;
 		}
 	}
 #endif
 
-	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+	if (!bounce) {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
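
A design note on the factoring: mmc_queue_alloc_bounce_sgs() returns on the first failed allocation without unwinding. That is safe because its only caller jumps to the cleanup_queue error path, kfree(NULL) is a no-op, and the mqrq fields start out zeroed, so partially populated scatterlists can be freed unconditionally. A hypothetical caller-side sketch of that contract (mmc_queue_setup_bounce and the cleanup ordering are illustrative, not from the tree):

```c
#include <linux/slab.h>

/*
 * Hypothetical illustration of the error-handling contract: the helper
 * may leave some scatterlists allocated on failure; the caller frees
 * everything, relying on kfree(NULL) being a no-op.
 */
static int mmc_queue_setup_bounce(struct mmc_queue *mq, unsigned int bouncesz)
{
	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
	int ret;

	ret = mmc_queue_alloc_bounce_sgs(mq, bouncesz);
	if (!ret)
		return 0;

	/* Free whatever was allocated before the failure. */
	kfree(mqrq_cur->bounce_sg);
	mqrq_cur->bounce_sg = NULL;
	kfree(mqrq_cur->sg);
	mqrq_cur->sg = NULL;
	kfree(mqrq_prev->bounce_sg);
	mqrq_prev->bounce_sg = NULL;
	kfree(mqrq_prev->sg);
	mqrq_prev->sg = NULL;
	return ret;
}
```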