author     Ritesh Harjani <riteshh@codeaurora.org>      2015-12-14 10:07:33 +0530
committer  Subhash Jadavani <subhashj@codeaurora.org>   2016-05-31 15:28:07 -0700
commit     f34fd7c3def3e89640d8b4dec01e0d2e6d8bea96 (patch)
tree       9251aaf16d3a61bc36b8d853920415837d4059cb
parent     313d5a3851a40ea408b20f1184ff067aa7a4f386 (diff)
mmc: cmdq_hci: Add atomic context support in certain cmdq APIs
cmdq_halt and cmdq_disable get called from cmdq_irq in case of error. Thus, add cmdq_disable_nosync and unhalt support in cmdq_halt_poll, which can be called from IRQ context.

Change-Id: I172e0e29a5584f02dd96c8af5ea1b97dc8c46083
Signed-off-by: Ritesh Harjani <riteshh@codeaurora.org>
-rw-r--r--  drivers/mmc/host/cmdq_hci.c | 36 +++++++++++++++++++++++++++---------
1 file changed, 27 insertions(+), 9 deletions(-)
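
For context before the hunks: the core of the patch is splitting cmdq_disable() into an atomic-context-safe body, cmdq_disable_nosync(), plus a thin wrapper that keeps the runtime-PM get/put for process-context callers, presumably because those runtime-PM calls are not safe in atomic context. The standalone sketch below only models that split; the struct fields and the pm_get()/pm_put() stubs are simplified placeholders, not the kernel API or this driver's code.

/*
 * Standalone sketch (not kernel code) of the wrapper-plus-_nosync split
 * this patch makes.  The register-touching body moves into a *_nosync
 * helper that an IRQ handler can call directly, while the original entry
 * point stays a wrapper that takes and releases a runtime-PM reference
 * for process-context callers.
 */
#include <stdbool.h>
#include <stdio.h>

struct cmdq_host {
	unsigned int cqcfg;	/* models the CQCFG register */
	bool enabled;
	int pm_refcount;	/* models the runtime-PM usage count */
};

/* Placeholders for cmdq_runtime_pm_get()/_put(); assumed to possibly sleep. */
static void pm_get(struct cmdq_host *cq) { cq->pm_refcount++; }
static void pm_put(struct cmdq_host *cq) { cq->pm_refcount--; }

/* Atomic-context-safe body: no runtime-PM calls, only register updates. */
static void cmdq_disable_nosync(struct cmdq_host *cq, bool soft)
{
	if (soft)
		cq->cqcfg &= ~1u;	/* clear CQ_ENABLE */
	cq->enabled = false;
}

/* Process-context wrapper: keeps the old behaviour of holding a PM vote. */
static void cmdq_disable(struct cmdq_host *cq, bool soft)
{
	pm_get(cq);
	cmdq_disable_nosync(cq, soft);
	pm_put(cq);
}

int main(void)
{
	struct cmdq_host cq = { .cqcfg = 1, .enabled = true };

	/* IRQ-handler path: call the nosync variant directly. */
	cmdq_disable_nosync(&cq, true);

	/* Normal path: the wrapper manages the PM reference as before. */
	cq.cqcfg = 1;
	cq.enabled = true;
	cmdq_disable(&cq, true);

	printf("cqcfg=%u enabled=%d pm_refcount=%d\n",
	       cq.cqcfg, cq.enabled, cq.pm_refcount);
	return 0;
}

The same wrapper-plus-_nosync shape is a common way to make an existing helper reusable from interrupt context without changing its existing process-context callers, and it is what lets the error path in cmdq_irq() below switch to cmdq_disable_nosync().
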
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index dd75efe6cf97..17c5f9d69801 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -37,7 +37,7 @@
/* 1 sec */
#define HALT_TIMEOUT_MS 1000
-static int cmdq_halt_poll(struct mmc_host *mmc);
+static int cmdq_halt_poll(struct mmc_host *mmc, bool halt);
static int cmdq_halt(struct mmc_host *mmc, bool halt);
#ifdef CONFIG_PM_RUNTIME
@@ -427,11 +427,10 @@ out:
return err;
}
-static void cmdq_disable(struct mmc_host *mmc, bool soft)
+static void cmdq_disable_nosync(struct mmc_host *mmc, bool soft)
{
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
- cmdq_runtime_pm_get(cq_host);
if (soft) {
cmdq_writel(cq_host, cmdq_readl(
cq_host, CQCFG) & ~(CQ_ENABLE),
@@ -440,11 +439,19 @@ static void cmdq_disable(struct mmc_host *mmc, bool soft)
if (cq_host->ops->enhanced_strobe_mask)
cq_host->ops->enhanced_strobe_mask(mmc, false);
- cmdq_runtime_pm_put(cq_host);
cq_host->enabled = false;
mmc_host_set_cq_disable(mmc);
}
+static void cmdq_disable(struct mmc_host *mmc, bool soft)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+
+ cmdq_runtime_pm_get(cq_host);
+ cmdq_disable_nosync(mmc, soft);
+ cmdq_runtime_pm_put(cq_host);
+}
+
static void cmdq_reset(struct mmc_host *mmc, bool soft)
{
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
@@ -791,7 +798,7 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
* CMDQ error handling will make sure that it is unhalted after
* handling all the errors.
*/
- ret = cmdq_halt_poll(mmc);
+ ret = cmdq_halt_poll(mmc, true);
if (ret)
pr_err("%s: %s: halt failed ret=%d\n",
mmc_hostname(mmc), __func__, ret);
@@ -812,7 +819,7 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
if (!dbr_set) {
pr_err("%s: spurious/force error interrupt\n",
mmc_hostname(mmc));
- cmdq_halt(mmc, false);
+ cmdq_halt_poll(mmc, false);
mmc_host_clr_halt(mmc);
return IRQ_HANDLED;
}
@@ -857,7 +864,7 @@ skip_cqterri:
* from processing any further requests
*/
if (ret)
- cmdq_disable(mmc, true);
+ cmdq_disable_nosync(mmc, true);
/*
* CQE detected a reponse error from device
@@ -923,14 +930,25 @@ EXPORT_SYMBOL(cmdq_irq);
/* cmdq_halt_poll - Halting CQE using polling method.
* @mmc: struct mmc_host
- * This is used mainly from interrupt context to halt
+ * @halt: bool halt
+ * This is used mainly from interrupt context to halt/unhalt
* CQE engine.
*/
-static int cmdq_halt_poll(struct mmc_host *mmc)
+static int cmdq_halt_poll(struct mmc_host *mmc, bool halt)
{
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
int retries = 100;
+ if (!halt) {
+ if (cq_host->ops->set_data_timeout)
+ cq_host->ops->set_data_timeout(mmc, 0xf);
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
+ CQCTL);
+ return 0;
+ }
+
cmdq_set_halt_irq(cq_host, false);
cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT, CQCTL);
while (retries) {
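
The kernel-doc comment above now describes cmdq_halt_poll() as the halt/unhalt entry point for interrupt context. Since the hunk is cut off before the end of the polling loop, the short standalone model below fills in the loop body with a generic read-back-and-retry pattern as an assumption, not as the driver source; it only illustrates the control flow the new bool halt argument selects: unhalt clears the HALT bit and returns immediately, halt sets the bit and polls until the controller confirms it.

/*
 * Standalone model of the halt/unhalt flow in cmdq_halt_poll(mmc, halt).
 * Register access is modelled with a plain struct; the retry body is an
 * assumed read-back of CQCTL, and a real driver would delay between polls.
 */
#include <stdbool.h>
#include <stdio.h>

#define HALT	(1u << 0)	/* placeholder bit position */

struct cq_regs {
	unsigned int cqctl;	/* models the CQCTL register */
};

static int halt_poll(struct cq_regs *r, bool halt)
{
	int retries = 100;

	if (!halt) {
		/* Unhalt: re-arm interrupts (elided here), clear HALT, done. */
		r->cqctl &= ~HALT;
		return 0;
	}

	/* Halt: request the halt, then poll until the controller confirms. */
	r->cqctl |= HALT;
	while (retries--) {
		if (r->cqctl & HALT)	/* assumed read-back-and-check */
			return 0;
	}
	return -1;	/* halt did not take effect in time */
}

int main(void)
{
	struct cq_regs r = { .cqctl = 0 };

	printf("halt:   %d (cqctl=%#x)\n", halt_poll(&r, true), r.cqctl);
	printf("unhalt: %d (cqctl=%#x)\n", halt_poll(&r, false), r.cqctl);
	return 0;
}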