diff options
| author | Subhash Jadavani <subhashj@codeaurora.org> | 2015-11-13 12:15:53 -0800 |
|---|---|---|
| committer | Subhash Jadavani <subhashj@codeaurora.org> | 2016-05-31 15:27:52 -0700 |
| commit | 6c8e3e70c92fdfe2813817c401ec05f2bc6c6a9d (patch) | |
| tree | d81323dd03063d0a6ecd70ae52b94ec29e4e677a /drivers/mmc | |
| parent | f9d5b446e37d67d6f05d8674e0005a6c428c5c2b (diff) | |
mmc: block: workaround for timeout issue with some vendor devices
Commit 66a7393a3ba9685d1eddfbce72e3ef8f4848f19f ("mmc: block: ensure CMDQ
is empty before queuing cache flush") added a workaround for particular
vendor's eMMC devices. The workaround was to wait for all the outstanding
requests to finish up before queuing the flush request. Now detailed
root cause analysis from the vendor shows that the original issue can happen only
if a DCMD command is sent to the device too quickly (within less than 6
microseconds) after completion of previous small sector (less than 8
sectors) read operations. Hence with this change, we are fine-tuning the
previous workaround such that it has almost no impact on the storage
benchmark performance numbers.
Change-Id: I1df1c5d7bbcd7b526236651077b7dade2626cb30
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
[subhashj@codeaurora.org: fixed trivial merge conflicts]
Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org>
Diffstat (limited to 'drivers/mmc')
| -rw-r--r-- | drivers/mmc/card/block.c | 50 |
1 files changed, 36 insertions, 14 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 6dad4673090f..120e4577b678 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -2912,6 +2912,15 @@ static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req) mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq); ret = mmc_blk_cmdq_start_req(card->host, mc_rq); + + if (!ret && (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD)) { + unsigned int sectors = blk_rq_sectors(req); + + if (((sectors > 0) && (sectors < 8)) + && (rq_data_dir(req) == READ)) + host->cmdq_ctx.active_small_sector_read_reqs++; + } + return ret; } @@ -3487,6 +3496,32 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) } if (req) { + struct mmc_host *host = card->host; + struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx; + + if ((cmd_flags & (REQ_FLUSH | REQ_DISCARD)) && + (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) && + ctx->active_small_sector_read_reqs) { + ret = wait_event_interruptible(ctx->queue_empty_wq, + !ctx->active_reqs); + if (ret) { + pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n", + mmc_hostname(host), + __func__, ret); + BUG_ON(1); + } + /* clear the counter now */ + ctx->active_small_sector_read_reqs = 0; + /* + * If there were small sector (less than 8 sectors) read + * operations in progress then we have to wait for the + * outstanding requests to finish and should also have + * atleast 6 microseconds delay before queuing the DCMD + * request. 
+ */ + udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD); + } + if (cmd_flags & REQ_DISCARD) { if (cmd_flags & REQ_SECURE && !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) @@ -3494,19 +3529,6 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) else ret = mmc_blk_cmdq_issue_discard_rq(mq, req); } else if (cmd_flags & REQ_FLUSH) { - if (card->quirks & - MMC_QUIRK_CMDQ_EMPTY_BEFORE_FLUSH) { - ret = wait_event_interruptible( - card->host->cmdq_ctx.queue_empty_wq, - (!card->host->cmdq_ctx.active_reqs)); - if (ret) { - pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n", - mmc_hostname(card->host), - __func__, ret); - BUG_ON(1); - } - } - ret = mmc_blk_cmdq_issue_flush_rq(mq, req); } else { ret = mmc_blk_cmdq_issue_rw_rq(mq, req); @@ -3975,7 +3997,7 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, - add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_FLUSH), + add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD), /* * Some Micron MMC cards needs longer data read timeout than |
