Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--	drivers/mmc/card/block.c	491
-rw-r--r--	drivers/mmc/card/queue.c	128
-rw-r--r--	drivers/mmc/card/queue.h	25
3 files changed, 612 insertions, 32 deletions
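Note: the block.c changes below build a packed WRITE header as an array of 32-bit words: word 0 carries the entry count, direction, and version, and each packed request then contributes a pair of words (its CMD23 argument and its CMD25 start address), exactly as mmc_blk_packed_hdr_wrq_prep() does in the diff. The following standalone C sketch models only that layout; it is not part of the patch, and the sector counts and addresses are made-up example values.

#include <stdint.h>
#include <stdio.h>

#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

int main(void)
{
	uint32_t hdr[8] = { 0 };
	uint32_t sectors[2] = { 8, 16 };    /* hypothetical request sizes */
	uint32_t addrs[2] = { 2048, 4096 }; /* hypothetical start sectors */
	unsigned int nr = 2, i;

	/* Word 0: entry count, direction (write), header version. */
	hdr[0] = (nr << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;

	/* One (CMD23 arg, CMD25 arg) pair per packed request,
	 * starting at word 2; word 1 stays reserved/zero. */
	for (i = 0; i < nr; i++) {
		hdr[(i + 1) * 2] = sectors[i];
		hdr[(i + 1) * 2 + 1] = addrs[i];
	}

	for (i = 0; i < 2 + nr * 2; i++)
		printf("hdr[%u] = 0x%08x\n", i, hdr[i]);
	return 0;
}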
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9ef0a0..5bab73b91c20 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -59,6 +59,12 @@ MODULE_ALIAS("mmc:block");
 #define INAND_CMD38_ARG_SECTRIM2 0x88
 #define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */
 
+#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
+				  (req->cmd_flags & REQ_META)) && \
+				  (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER	0x01
+#define PACKED_CMD_WR	0x02
+
 static DEFINE_MUTEX(block_mutex);
 
 /*
@@ -89,6 +95,7 @@ struct mmc_blk_data {
 	unsigned int	flags;
 #define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */
 #define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */
 
 	unsigned int	usage;
 	unsigned int	read_only;
@@ -113,15 +120,10 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
-enum mmc_blk_status {
-	MMC_BLK_SUCCESS = 0,
-	MMC_BLK_PARTIAL,
-	MMC_BLK_CMD_ERR,
-	MMC_BLK_RETRY,
-	MMC_BLK_ABORT,
-	MMC_BLK_DATA_ERR,
-	MMC_BLK_ECC_ERR,
-	MMC_BLK_NOMEDIUM,
+enum {
+	MMC_PACKED_NR_IDX = -1,
+	MMC_PACKED_NR_ZERO,
+	MMC_PACKED_NR_SINGLE,
 };
 
 module_param(perdev_minors, int, 0444);
@@ -131,6 +133,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md);
 static int get_card_status(struct mmc_card *card, u32 *status, int retries);
 
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+	struct mmc_packed *packed = mqrq->packed;
+
+	BUG_ON(!packed);
+
+	mqrq->cmd_type = MMC_PACKED_NONE;
+	packed->nr_entries = MMC_PACKED_NR_ZERO;
+	packed->idx_failure = MMC_PACKED_NR_IDX;
+	packed->retries = 0;
+	packed->blocks = 0;
+}
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -1148,12 +1163,78 @@ static int mmc_blk_err_check(struct mmc_card *card,
 	if (!brq->data.bytes_xfered)
 		return MMC_BLK_RETRY;
 
+	if (mmc_packed_cmd(mq_mrq->cmd_type)) {
+		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+			return MMC_BLK_PARTIAL;
+		else
+			return MMC_BLK_SUCCESS;
+	}
+
 	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
 		return MMC_BLK_PARTIAL;
 
 	return MMC_BLK_SUCCESS;
 }
 
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+				    struct mmc_async_req *areq)
+{
+	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+			mmc_active);
+	struct request *req = mq_rq->req;
+	struct mmc_packed *packed = mq_rq->packed;
+	int err, check, status;
+	u8 *ext_csd;
+
+	BUG_ON(!packed);
+
+	packed->retries--;
+	check = mmc_blk_err_check(card, areq);
+	err = get_card_status(card, &status, 0);
+	if (err) {
+		pr_err("%s: error %d sending status command\n",
+		       req->rq_disk->disk_name, err);
+		return MMC_BLK_ABORT;
+	}
+
+	if (status & R1_EXCEPTION_EVENT) {
+		ext_csd = kzalloc(512, GFP_KERNEL);
+		if (!ext_csd) {
+			pr_err("%s: unable to allocate buffer for ext_csd\n",
+			       req->rq_disk->disk_name);
+			return -ENOMEM;
+		}
+
+		err = mmc_send_ext_csd(card, ext_csd);
+		if (err) {
+			pr_err("%s: error %d sending ext_csd\n",
+			       req->rq_disk->disk_name, err);
+			check = MMC_BLK_ABORT;
+			goto free;
+		}
+
+		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+		     EXT_CSD_PACKED_FAILURE) &&
+		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+		     EXT_CSD_PACKED_GENERIC_ERROR)) {
+			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+			    EXT_CSD_PACKED_INDEXED_ERROR) {
+				packed->idx_failure =
+				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+				check = MMC_BLK_PARTIAL;
+			}
+			pr_err("%s: packed cmd failed, nr %u, sectors %u, "
+			       "failure index: %d\n",
+			       req->rq_disk->disk_name, packed->nr_entries,
+			       packed->blocks, packed->idx_failure);
+		}
+free:
+		kfree(ext_csd);
+	}
+
+	return check;
+}
+
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 			       struct mmc_card *card,
 			       int disable_multi,
@@ -1308,10 +1389,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	mmc_queue_bounce_pre(mqrq);
 }
 
+static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
+					  struct mmc_card *card)
+{
+	unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
+	unsigned int max_seg_sz = queue_max_segment_size(q);
+	unsigned int len, nr_segs = 0;
+
+	do {
+		len = min(hdr_sz, max_seg_sz);
+		hdr_sz -= len;
+		nr_segs++;
+	} while (hdr_sz);
+
+	return nr_segs;
+}
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+	struct request_queue *q = mq->queue;
+	struct mmc_card *card = mq->card;
+	struct request *cur = req, *next = NULL;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_queue_req *mqrq = mq->mqrq_cur;
+	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+	unsigned int req_sectors = 0, phys_segments = 0;
+	unsigned int max_blk_count, max_phys_segs;
+	bool put_back = true;
+	u8 max_packed_rw = 0;
+	u8 reqs = 0;
+
+	if (!(md->flags & MMC_BLK_PACKED_CMD))
+		goto no_packed;
+
+	if ((rq_data_dir(cur) == WRITE) &&
+	    mmc_host_packed_wr(card->host))
+		max_packed_rw = card->ext_csd.max_packed_writes;
+
+	if (max_packed_rw == 0)
+		goto no_packed;
+
+	if (mmc_req_rel_wr(cur) &&
+	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+		goto no_packed;
+
+	if (mmc_large_sector(card) &&
+	    !IS_ALIGNED(blk_rq_sectors(cur), 8))
+		goto no_packed;
+
+	mmc_blk_clear_packed(mqrq);
+
+	max_blk_count = min(card->host->max_blk_count,
+			    card->host->max_req_size >> 9);
+	if (unlikely(max_blk_count > 0xffff))
+		max_blk_count = 0xffff;
+
+	max_phys_segs = queue_max_segments(q);
+	req_sectors += blk_rq_sectors(cur);
+	phys_segments += cur->nr_phys_segments;
+
+	if (rq_data_dir(cur) == WRITE) {
+		req_sectors += mmc_large_sector(card) ? 8 : 1;
+		phys_segments += mmc_calc_packed_hdr_segs(q, card);
+	}
+
+	do {
+		if (reqs >= max_packed_rw - 1) {
+			put_back = false;
+			break;
+		}
+
+		spin_lock_irq(q->queue_lock);
+		next = blk_fetch_request(q);
+		spin_unlock_irq(q->queue_lock);
+		if (!next) {
+			put_back = false;
+			break;
+		}
+
+		if (mmc_large_sector(card) &&
+		    !IS_ALIGNED(blk_rq_sectors(next), 8))
+			break;
+
+		if (next->cmd_flags & REQ_DISCARD ||
+		    next->cmd_flags & REQ_FLUSH)
+			break;
+
+		if (rq_data_dir(cur) != rq_data_dir(next))
+			break;
+
+		if (mmc_req_rel_wr(next) &&
+		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+			break;
+
+		req_sectors += blk_rq_sectors(next);
+		if (req_sectors > max_blk_count)
+			break;
+
+		phys_segments += next->nr_phys_segments;
+		if (phys_segments > max_phys_segs)
+			break;
+
+		list_add_tail(&next->queuelist, &mqrq->packed->list);
+		cur = next;
+		reqs++;
+	} while (1);
+
+	if (put_back) {
+		spin_lock_irq(q->queue_lock);
+		blk_requeue_request(q, next);
+		spin_unlock_irq(q->queue_lock);
+	}
+
+	if (reqs > 0) {
+		list_add(&req->queuelist, &mqrq->packed->list);
+		mqrq->packed->nr_entries = ++reqs;
+		mqrq->packed->retries = reqs;
+		return reqs;
+	}
+
+no_packed:
+	mqrq->cmd_type = MMC_PACKED_NONE;
+	return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+					struct mmc_card *card,
+					struct mmc_queue *mq)
+{
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct request *prq;
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_packed *packed = mqrq->packed;
+	bool do_rel_wr, do_data_tag;
+	u32 *packed_cmd_hdr;
+	u8 hdr_blocks;
+	u8 i = 1;
+
+	BUG_ON(!packed);
+
+	mqrq->cmd_type = MMC_PACKED_WRITE;
+	packed->blocks = 0;
+	packed->idx_failure = MMC_PACKED_NR_IDX;
+
+	packed_cmd_hdr = packed->cmd_hdr;
+	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+	packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+		(PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+	hdr_blocks = mmc_large_sector(card) ? 8 : 1;
+
+	/*
+	 * Argument for each entry of packed group
+	 */
+	list_for_each_entry(prq, &packed->list, queuelist) {
+		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+		do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+			(prq->cmd_flags & REQ_META) &&
+			(rq_data_dir(prq) == WRITE) &&
+			((brq->data.blocks * brq->data.blksz) >=
+			 card->ext_csd.data_tag_unit_size);
+		/* Argument of CMD23 */
+		packed_cmd_hdr[(i * 2)] =
+			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+			blk_rq_sectors(prq);
+		/* Argument of CMD18 or CMD25 */
+		packed_cmd_hdr[((i * 2)) + 1] =
+			mmc_card_blockaddr(card) ?
+			blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+		packed->blocks += blk_rq_sectors(prq);
+		i++;
+	}
+
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
+	brq->mrq.sbc = &brq->sbc;
+	brq->mrq.stop = &brq->stop;
+
+	brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
+	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	brq->data.blksz = 512;
+	brq->data.blocks = packed->blocks + hdr_blocks;
+	brq->data.flags |= MMC_DATA_WRITE;
+
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+	mmc_set_data_timeout(&brq->data, card);
+
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+	mmc_queue_bounce_pre(mqrq);
+}
+
 static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			   struct mmc_blk_request *brq, struct request *req,
 			   int ret)
 {
+	struct mmc_queue_req *mq_rq;
+	mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
 	/*
 	 * If this is an SD card and we're writing, we can first
 	 * mark the known good sectors as ok.
@@ -1328,11 +1620,84 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
 			ret = blk_end_request(req, 0, blocks << 9);
 		}
 	} else {
-		ret = blk_end_request(req, 0, brq->data.bytes_xfered);
+		if (!mmc_packed_cmd(mq_rq->cmd_type))
+			ret = blk_end_request(req, 0, brq->data.bytes_xfered);
 	}
 	return ret;
 }
 
+static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct mmc_packed *packed = mq_rq->packed;
+	int idx = packed->idx_failure, i = 0;
+	int ret = 0;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.next);
+		if (idx == i) {
+			/* retry from error index */
+			packed->nr_entries -= idx;
+			mq_rq->req = prq;
+			ret = 1;
+
+			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
+				list_del_init(&prq->queuelist);
+				mmc_blk_clear_packed(mq_rq);
+			}
+			return ret;
+		}
+		list_del_init(&prq->queuelist);
+		blk_end_request(prq, 0, blk_rq_bytes(prq));
+		i++;
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+	return ret;
+}
+
+static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct mmc_packed *packed = mq_rq->packed;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.next);
+		list_del_init(&prq->queuelist);
+		blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+}
+
+static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
+				      struct mmc_queue_req *mq_rq)
+{
+	struct request *prq;
+	struct request_queue *q = mq->queue;
+	struct mmc_packed *packed = mq_rq->packed;
+
+	BUG_ON(!packed);
+
+	while (!list_empty(&packed->list)) {
+		prq = list_entry_rq(packed->list.prev);
+		if (prq->queuelist.prev != &packed->list) {
+			list_del_init(&prq->queuelist);
+			spin_lock_irq(q->queue_lock);
+			blk_requeue_request(mq->queue, prq);
+			spin_unlock_irq(q->queue_lock);
+		} else {
+			list_del_init(&prq->queuelist);
+		}
+	}
+
+	mmc_blk_clear_packed(mq_rq);
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -1343,10 +1708,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 	struct mmc_queue_req *mq_rq;
 	struct request *req = rqc;
 	struct mmc_async_req *areq;
+	const u8 packed_nr = 2;
+	u8 reqs = 0;
 
 	if (!rqc && !mq->mqrq_prev->req)
 		return 0;
 
+	if (rqc)
+		reqs = mmc_blk_prep_packed_list(mq, rqc);
+
 	do {
 		if (rqc) {
 			/*
@@ -1357,15 +1727,24 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			    (card->ext_csd.data_sector_size == 4096)) {
 				pr_err("%s: Transfer size is not 4KB sector size aligned\n",
 					req->rq_disk->disk_name);
+				mq_rq = mq->mqrq_cur;
 				goto cmd_abort;
 			}
-			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+
+			if (reqs >= packed_nr)
+				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+							    card, mq);
+			else
+				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
 			areq = &mq->mqrq_cur->mmc_active;
 		} else
 			areq = NULL;
 		areq = mmc_start_req(card->host, areq, (int *) &status);
-		if (!areq)
+		if (!areq) {
+			if (status == MMC_BLK_NEW_REQUEST)
+				mq->flags |= MMC_QUEUE_NEW_REQUEST;
 			return 0;
+		}
 
 		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
 		brq = &mq_rq->brq;
@@ -1380,8 +1759,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			 * A block was successfully transferred.
 			 */
 			mmc_blk_reset_success(md, type);
-			ret = blk_end_request(req, 0,
+
+			if (mmc_packed_cmd(mq_rq->cmd_type)) {
+				ret = mmc_blk_end_packed_req(mq_rq);
+				break;
+			} else {
+				ret = blk_end_request(req, 0,
 						brq->data.bytes_xfered);
+			}
+
 			/*
 			 * If the blk_end_request function returns non-zero even
 			 * though all data has been transferred and no errors
@@ -1414,7 +1800,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			err = mmc_blk_reset(md, card->host, type);
 			if (!err)
 				break;
-			if (err == -ENODEV)
+			if (err == -ENODEV ||
+			    mmc_packed_cmd(mq_rq->cmd_type))
 				goto cmd_abort;
 			/* Fall through */
 		}
@@ -1438,30 +1825,62 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			break;
 		case MMC_BLK_NOMEDIUM:
 			goto cmd_abort;
+		default:
+			pr_err("%s: Unhandled return value (%d)",
+					req->rq_disk->disk_name, status);
+			goto cmd_abort;
 		}
 
 		if (ret) {
-			/*
-			 * In case of a incomplete request
-			 * prepare it again and resend.
-			 */
-			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
-			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+			if (mmc_packed_cmd(mq_rq->cmd_type)) {
+				if (!mq_rq->packed->retries)
+					goto cmd_abort;
+				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+				mmc_start_req(card->host,
+					      &mq_rq->mmc_active, NULL);
+			} else {
+
+				/*
+				 * In case of a incomplete request
+				 * prepare it again and resend.
+				 */
+				mmc_blk_rw_rq_prep(mq_rq, card,
+						disable_multi, mq);
+				mmc_start_req(card->host,
+						&mq_rq->mmc_active, NULL);
+			}
 		}
 	} while (ret);
 
 	return 1;
 
 cmd_abort:
-	if (mmc_card_removed(card))
-		req->cmd_flags |= REQ_QUIET;
-	while (ret)
-		ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+	if (mmc_packed_cmd(mq_rq->cmd_type)) {
+		mmc_blk_abort_packed_req(mq_rq);
+	} else {
+		if (mmc_card_removed(card))
+			req->cmd_flags |= REQ_QUIET;
+		while (ret)
+			ret = blk_end_request(req, -EIO,
+					blk_rq_cur_bytes(req));
+	}
 
  start_new_req:
 	if (rqc) {
-		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
-		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+		if (mmc_card_removed(card)) {
+			rqc->cmd_flags |= REQ_QUIET;
+			blk_end_request_all(rqc, -EIO);
+		} else {
+			/*
+			 * If current request is packed, it needs to put back.
+			 */
+			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
+				mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
+
+			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			mmc_start_req(card->host,
+				      &mq->mqrq_cur->mmc_active, NULL);
+		}
 	}
 
 	return 0;
@@ -1472,6 +1891,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	int ret;
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
+	struct mmc_host *host = card->host;
+	unsigned long flags;
 
 	if (req && !mq->mqrq_prev->req)
 		/* claim host only for the first request */
@@ -1486,6 +1907,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		goto out;
 	}
 
+	mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
 	if (req && req->cmd_flags & REQ_DISCARD) {
 		/* complete ongoing async transfer before issuing discard */
 		if (card->host->areq)
@@ -1501,11 +1923,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 			mmc_blk_issue_rw_rq(mq, NULL);
 		ret = mmc_blk_issue_flush(mq, req);
 	} else {
+		if (!req && host->areq) {
+			spin_lock_irqsave(&host->context_info.lock, flags);
+			host->context_info.is_waiting_last_req = true;
+			spin_unlock_irqrestore(&host->context_info.lock, flags);
+		}
 		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
 
 out:
-	if (!req)
+	if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
 		/* release host only when there are no more requests */
 		mmc_release_host(card->host);
 	return ret;
@@ -1624,6 +2051,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
 		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
 	}
 
+	if (mmc_card_mmc(card) &&
+	    (area_type == MMC_BLK_DATA_AREA_MAIN) &&
+	    (md->flags & MMC_BLK_CMD23) &&
+	    card->ext_csd.packed_event_en) {
+		if (!mmc_packed_init(&md->queue, card))
+			md->flags |= MMC_BLK_PACKED_CMD;
+	}
+
 	return md;
 
  err_putdisk:
@@ -1732,6 +2167,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
 
 		/* Then flush out any already in there */
 		mmc_cleanup_queue(&md->queue);
+		if (md->flags & MMC_BLK_PACKED_CMD)
+			mmc_packed_clean(&md->queue);
 		mmc_blk_put(md);
 	}
 }
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index fadf52eb5d70..fa4e44ee7961 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,7 +22,8 @@
 
 #define MMC_QUEUE_BOUNCESZ 65536
 
-#define MMC_QUEUE_SUSPENDED	(1 << 0)
+
+#define MMC_REQ_SPECIAL_MASK	(REQ_DISCARD | REQ_FLUSH)
 
 /*
  * Prepare a MMC request. This just filters out odd stuff.
@@ -58,6 +59,7 @@ static int mmc_queue_thread(void *d)
 	do {
 		struct request *req = NULL;
 		struct mmc_queue_req *tmp;
+		unsigned int cmd_flags = 0;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -67,12 +69,23 @@ static int mmc_queue_thread(void *d)
 
 		if (req || mq->mqrq_prev->req) {
 			set_current_state(TASK_RUNNING);
+			cmd_flags = req ? req->cmd_flags : 0;
 			mq->issue_fn(mq, req);
+			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+				continue; /* fetch again */
+			}
 
 			/*
 			 * Current request becomes previous request
 			 * and vice versa.
+			 * In case of special requests, current request
+			 * has been finished. Do not assign it to previous
+			 * request.
 			 */
+			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+				mq->mqrq_cur->req = NULL;
+
 			mq->mqrq_prev->brq.mrq.data = NULL;
 			mq->mqrq_prev->req = NULL;
 			tmp = mq->mqrq_prev;
@@ -103,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
+	unsigned long flags;
+	struct mmc_context_info *cntx;
 
 	if (!mq) {
 		while ((req = blk_fetch_request(q)) != NULL) {
@@ -112,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q)
 		return;
 	}
 
-	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
+	cntx = &mq->card->host->context_info;
+	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
+		/*
+		 * New MMC request arrived when MMC thread may be
+		 * blocked on the previous request to be complete
+		 * with no current request fetched
+		 */
+		spin_lock_irqsave(&cntx->lock, flags);
+		if (cntx->is_waiting_last_req) {
+			cntx->is_new_req = true;
+			wake_up_interruptible(&cntx->wait);
+		}
+		spin_unlock_irqrestore(&cntx->lock, flags);
+	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
 }
 
@@ -334,6 +362,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
 
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+	int ret = 0;
+
+
+	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_cur->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+			mmc_card_name(card));
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_prev->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+			mmc_card_name(card));
+		kfree(mqrq_cur->packed);
+		mqrq_cur->packed = NULL;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&mqrq_cur->packed->list);
+	INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+	return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+	kfree(mqrq_cur->packed);
+	mqrq_cur->packed = NULL;
+	kfree(mqrq_prev->packed);
+	mqrq_prev->packed = NULL;
+}
+
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
@@ -378,6 +449,41 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+					    struct mmc_packed *packed,
+					    struct scatterlist *sg,
+					    enum mmc_packed_type cmd_type)
+{
+	struct scatterlist *__sg = sg;
+	unsigned int sg_len = 0;
+	struct request *req;
+
+	if (mmc_packed_wr(cmd_type)) {
+		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+		unsigned int len, remain, offset = 0;
+		u8 *buf = (u8 *)packed->cmd_hdr;
+
+		remain = hdr_sz;
+		do {
+			len = min(remain, max_seg_sz);
+			sg_set_buf(__sg, buf + offset, len);
+			offset += len;
+			remain -= len;
+			(__sg++)->page_link &= ~0x02;
+			sg_len++;
+		} while (remain);
+	}
+
+	list_for_each_entry(req, &packed->list, queuelist) {
+		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+		__sg = sg + (sg_len - 1);
+		(__sg++)->page_link &= ~0x02;
+	}
+
+	sg_mark_end(sg + (sg_len - 1));
+	return sg_len;
+}
+
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
@@ -386,14 +492,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
+	enum mmc_packed_type cmd_type;
 	int i;
 
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	cmd_type = mqrq->cmd_type;
+
+	if (!mqrq->bounce_buf) {
+		if (mmc_packed_cmd(cmd_type))
+			return mmc_queue_packed_map_sg(mq, mqrq->packed,
+						       mqrq->sg, cmd_type);
+		else
+			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	}
 
 	BUG_ON(!mqrq->bounce_sg);
 
-	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+	if (mmc_packed_cmd(cmd_type))
+		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+						 mqrq->bounce_sg, cmd_type);
+	else
+		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
 	mqrq->bounce_sg_len = sg_len;
 
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4b9f9f..031bf6376c99 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -12,6 +12,23 @@ struct mmc_blk_request {
 	struct mmc_data		data;
 };
 
+enum mmc_packed_type {
+	MMC_PACKED_NONE = 0,
+	MMC_PACKED_WRITE,
+};
+
+#define mmc_packed_cmd(type)	((type) != MMC_PACKED_NONE)
+#define mmc_packed_wr(type)	((type) == MMC_PACKED_WRITE)
+
+struct mmc_packed {
+	struct list_head	list;
+	u32			cmd_hdr[1024];
+	unsigned int		blocks;
+	u8			nr_entries;
+	u8			retries;
+	s16			idx_failure;
+};
+
 struct mmc_queue_req {
 	struct request		*req;
 	struct mmc_blk_request	brq;
@@ -20,6 +37,8 @@ struct mmc_queue_req {
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
 	struct mmc_async_req	mmc_active;
+	enum mmc_packed_type	cmd_type;
+	struct mmc_packed	*packed;
 };
 
 struct mmc_queue {
@@ -27,6 +46,9 @@ struct mmc_queue {
 	struct task_struct	*thread;
 	struct semaphore	thread_sem;
 	unsigned int		flags;
+#define MMC_QUEUE_SUSPENDED	(1 << 0)
+#define MMC_QUEUE_NEW_REQUEST	(1 << 1)
+
 	int			(*issue_fn)(struct mmc_queue *, struct request *);
 	void			*data;
 	struct request_queue	*queue;
@@ -46,4 +68,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
 extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
+extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+extern void mmc_packed_clean(struct mmc_queue *);
+
 #endif