Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--  drivers/mmc/card/block.c  47
1 file changed, 40 insertions, 7 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 7d2ceda7f80e..13e0df67d3b7 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1048,6 +1048,12 @@ static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
goto idata_free;
}
+ /*
+ * Ensure the rpmb_req_pending flag is synchronized, via a lock,
+ * between the multiple entities that may issue RPMB ioctls.
+ */
+ mutex_lock(&card->host->rpmb_req_mutex);
+ atomic_set(&card->host->rpmb_req_pending, 1);
mmc_get_card(card);
if (mmc_card_doing_bkops(card)) {
@@ -1163,6 +1169,9 @@ static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
cmd_rel_host:
mmc_put_card(card);
+ atomic_set(&card->host->rpmb_req_pending, 0);
+ mutex_unlock(&card->host->rpmb_req_mutex);
+
idata_free:
for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
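The two hunks above bracket the RPMB ioctl with a lock-plus-flag pattern: rpmb_req_mutex serializes concurrent RPMB callers, while the atomic rpmb_req_pending flag lets other host code cheaply test whether an RPMB transfer is in flight. A minimal sketch of that bracket (the helper name below is illustrative, not part of the patch):

	/* Illustrative sketch only: serialize RPMB callers and advertise
	 * an in-flight RPMB request while the card/host is claimed. */
	static int rpmb_issue_serialized(struct mmc_card *card)
	{
		int err;

		mutex_lock(&card->host->rpmb_req_mutex);
		atomic_set(&card->host->rpmb_req_pending, 1);
		mmc_get_card(card);

		err = 0;	/* ... issue the RPMB command sequence here ... */

		mmc_put_card(card);
		atomic_set(&card->host->rpmb_req_pending, 0);
		mutex_unlock(&card->host->rpmb_req_mutex);
		return err;
	}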
@@ -1292,9 +1301,26 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt(card->host, true);
+ if (err) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ mmc_put_card(card);
+ goto cmd_done;
+ }
+ }
+
for (i = 0; i < num_of_cmds && !ioc_err; i++)
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]);
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+
mmc_put_card(card);
/* copy to user if data and response */
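The multi-command ioctl hunk above uses the usual halt/unhalt bracket for sending legacy (non-CMDQ) commands while the command queue engine is enabled: halt the queue, issue the commands, then unhalt and log (but do not fail on) an unhalt error. A condensed sketch of that bracket, using the same helpers the patch uses (the function name is illustrative):

	/* Illustrative sketch: halt CMDQ around legacy commands, then unhalt. */
	static int issue_legacy_cmds_halted(struct mmc_card *card)
	{
		int err = 0;

		if (mmc_card_cmdq(card)) {
			err = mmc_cmdq_halt(card->host, true);
			if (err)
				return err;	/* queue still active, give up */
		}

		/* ... issue the legacy commands here ... */

		if (mmc_card_cmdq(card) && mmc_cmdq_halt(card->host, false))
			pr_err("%s: cmdq unhalt failed\n", mmc_hostname(card->host));

		return err;
	}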
@@ -3173,11 +3199,11 @@ static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
static void mmc_blk_cmdq_requeue_rw_rq(struct mmc_queue *mq,
struct request *req)
{
- struct mmc_card *card = mq->card;
- struct mmc_host *host = card->host;
+ struct request_queue *q = req->q;
- blk_requeue_request(req->q, req);
- mmc_put_card(host->card);
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, req);
+ spin_unlock_irq(q->queue_lock);
}
static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
@@ -3605,7 +3631,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
* or disable state so cannot receive any completion of
* other requests.
*/
- BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+ WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
/* clear pending request */
BUG_ON(!test_and_clear_bit(cmdq_req->tag,
@@ -3639,7 +3665,7 @@ void mmc_blk_cmdq_complete_rq(struct request *rq)
out:
mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
- if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+ if (!(err || cmdq_req->resp_err)) {
mmc_host_clk_release(host);
wake_up(&ctx_info->wait);
mmc_put_card(host->card);
@@ -4065,9 +4091,16 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
* If issuing the request fails with either EBUSY or
* EAGAIN error, re-queue the request.
* This case would occur with ICE calls.
+ * For a request that completes successfully or errors
+ * out, the host claim is released in the completion or
+ * error-handling softirq context. Here the request is
+ * neither completed nor errored out, so release the
+ * host claim explicitly.
*/
- if (ret == -EBUSY || ret == -EAGAIN)
+ if (ret == -EBUSY || ret == -EAGAIN) {
mmc_blk_cmdq_requeue_rw_rq(mq, req);
+ mmc_put_card(host->card);
+ }
}
}
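Taken together with the requeue helper change above, the -EBUSY/-EAGAIN leg now puts the request back on the block queue under the queue lock (blk_requeue_request() must be called with queue_lock held) and drops the host claim explicitly, since a request that was never issued will see neither the completion nor the error-handling softirq that normally release it. A condensed sketch of that leg, with the same names the patch uses:

	/* Illustrative sketch: requeue an unissued request and release the host. */
	if (ret == -EBUSY || ret == -EAGAIN) {
		struct request_queue *q = req->q;

		spin_lock_irq(q->queue_lock);
		blk_requeue_request(q, req);	/* caller must hold queue_lock */
		spin_unlock_irq(q->queue_lock);

		mmc_put_card(host->card);	/* no softirq will release it for us */
	}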