Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/Kconfig               |    8
-rw-r--r--  drivers/mmc/card/Kconfig          |   11
-rw-r--r--  drivers/mmc/card/Makefile         |    1
-rw-r--r--  drivers/mmc/card/block.c          | 1898
-rw-r--r--  drivers/mmc/card/mmc_block_test.c | 2038
-rw-r--r--  drivers/mmc/card/mmc_test.c       |    3
-rw-r--r--  drivers/mmc/card/queue.c          |  385
-rw-r--r--  drivers/mmc/card/queue.h          |   36
-rw-r--r--  drivers/mmc/core/Kconfig          |   21
-rw-r--r--  drivers/mmc/core/Makefile         |    1
-rw-r--r--  drivers/mmc/core/bus.c            |   49
-rw-r--r--  drivers/mmc/core/bus.h            |    2
-rw-r--r--  drivers/mmc/core/core.c           | 1867
-rw-r--r--  drivers/mmc/core/core.h           |   28
-rw-r--r--  drivers/mmc/core/debugfs.c        |  507
-rw-r--r--  drivers/mmc/core/host.c           |  497
-rw-r--r--  drivers/mmc/core/mmc.c            | 1207
-rw-r--r--  drivers/mmc/core/mmc_ops.c        |   92
-rw-r--r--  drivers/mmc/core/mmc_ops.h        |    4
-rw-r--r--  drivers/mmc/core/quirks.c         |   85
-rw-r--r--  drivers/mmc/core/ring_buffer.c    |  123
-rw-r--r--  drivers/mmc/core/sd.c             |  182
-rw-r--r--  drivers/mmc/core/sdio.c           |  151
-rw-r--r--  drivers/mmc/core/sdio_cis.c       |   14
-rw-r--r--  drivers/mmc/core/sdio_irq.c       |   34
-rw-r--r--  drivers/mmc/host/Kconfig          |   42
-rw-r--r--  drivers/mmc/host/Makefile         |    4
-rw-r--r--  drivers/mmc/host/cmdq_hci.c       | 1362
-rw-r--r--  drivers/mmc/host/cmdq_hci.h       |  251
-rw-r--r--  drivers/mmc/host/sdhci-msm-ice.c  |  565
-rw-r--r--  drivers/mmc/host/sdhci-msm-ice.h  |  174
-rw-r--r--  drivers/mmc/host/sdhci-msm.c      | 4883
-rw-r--r--  drivers/mmc/host/sdhci-msm.h      |  245
-rw-r--r--  drivers/mmc/host/sdhci.c          | 1215
-rw-r--r--  drivers/mmc/host/sdhci.h          |  152
35 files changed, 17342 insertions(+), 795 deletions(-)
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index f2eeb38efa65..91165514156e 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -19,6 +19,14 @@ config MMC_DEBUG
This is an option for use by developers; most people should
say N here. This enables MMC core and driver debugging.
+config MMC_PERF_PROFILING
+ bool "MMC performance profiling"
+ depends on MMC != n
+ default n
+ help
+ If you say Y here, support will be added for collecting
+ performance numbers at the MMC Queue and Host layers.
+
if MMC
source "drivers/mmc/core/Kconfig"
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 6142ec1b9dfb..91f2445b6ac8 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,17 @@ config MMC_BLOCK_BOUNCE
If unsure, say Y here.
+config MMC_BLOCK_DEFERRED_RESUME
+ bool "Defer MMC layer resume until I/O is requested"
+ depends on MMC_BLOCK
+ default n
+ help
+ Say Y here to enable deferred MMC resume until I/O
+ is requested.
+
+ This will reduce overall resume latency and
+ save power when there is an SD card inserted but not being used.
+
config SDIO_UART
tristate "SDIO UART/GPS class support"
depends on TTY
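For context on MMC_BLOCK_DEFERRED_RESUME: the usual shape of this feature is to skip the bus resume during system resume and perform it lazily from the request path. A minimal sketch, assuming hypothetical mmc_bus_needs_resume()/mmc_resume_bus() helpers (they do not appear in this hunk):

	#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
		/* the first I/O after suspend wakes the card */
		if (mmc_bus_needs_resume(card->host))
			mmc_resume_bus(card->host);
	#endif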
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406a06cd..d55107fb4551 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MMC_TEST) += mmc_test.o
obj-$(CONFIG_SDIO_UART) += sdio_uart.o
+obj-$(CONFIG_MMC_BLOCK_TEST) += mmc_block_test.o
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 8d169d5a8b01..d39b4056c169 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -30,16 +30,19 @@
#include <linux/blkdev.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/bitops.h>
#include <linux/string_helpers.h>
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/pm_runtime.h>
+#include <linux/ioprio.h>
#include <trace/events/mmc.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
@@ -60,15 +63,33 @@ MODULE_ALIAS("mmc:block");
#define INAND_CMD38_ARG_SECERASE 0x80
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
-#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_BLK_TIMEOUT_MS (30 * 1000) /* 30 sec timeout */
#define MMC_SANITIZE_REQ_TIMEOUT 240000
#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+#define MMC_CMDQ_STOP_TIMEOUT_MS 100
#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
(rq_data_dir(req) == WRITE))
#define PACKED_CMD_VER 0x01
#define PACKED_CMD_WR 0x02
-
+#define PACKED_TRIGGER_MAX_ELEMENTS 5000
+
+#define MMC_BLK_MAX_RETRIES 5 /* max # of retries before aborting a command */
+#define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \
+ do { \
+ if (stats->enabled) \
+ stats->pack_stop_reason[reason]++; \
+ } while (0)
+
+#define MAX_RETRIES 5
+#define PCKD_TRGR_INIT_MEAN_POTEN 17
+#define PCKD_TRGR_POTEN_LOWER_BOUND 5
+#define PCKD_TRGR_URGENT_PENALTY 2
+#define PCKD_TRGR_LOWER_BOUND 5
+#define PCKD_TRGR_PRECISION_MULTIPLIER 100
+
+static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
+ struct mmc_queue_req *mqrq, struct mmc_queue *mq);
static DEFINE_MUTEX(block_mutex);
/*
@@ -103,6 +124,7 @@ struct mmc_blk_data {
#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
+#define MMC_BLK_CMD_QUEUE (1 << 3) /* MMC command queue support */
unsigned int usage;
unsigned int read_only;
@@ -113,6 +135,8 @@ struct mmc_blk_data {
#define MMC_BLK_WRITE BIT(1)
#define MMC_BLK_DISCARD BIT(2)
#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_FLUSH BIT(4)
+#define MMC_BLK_PARTSWITCH BIT(5)
/*
* Only set in main mmc_blk_data associated
@@ -122,6 +146,8 @@ struct mmc_blk_data {
unsigned int part_curr;
struct device_attribute force_ro;
struct device_attribute power_ro_lock;
+ struct device_attribute num_wr_reqs_to_start_packing;
+ struct device_attribute no_pack_for_random;
int area_type;
};
@@ -139,6 +165,8 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+static int mmc_blk_cmdq_switch(struct mmc_card *card,
+ struct mmc_blk_data *md, bool enable);
static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
{
@@ -195,9 +223,13 @@ static ssize_t power_ro_lock_show(struct device *dev,
{
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
- struct mmc_card *card = md->queue.card;
+ struct mmc_card *card;
int locked = 0;
+ if (!md)
+ return -EINVAL;
+
+ card = md->queue.card;
if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
locked = 2;
else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
@@ -225,6 +257,8 @@ static ssize_t power_ro_lock_store(struct device *dev,
return count;
md = mmc_blk_get(dev_to_disk(dev));
+ if (!md)
+ return -EINVAL;
card = md->queue.card;
mmc_get_card(card);
@@ -262,6 +296,9 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
int ret;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ if (!md)
+ return -EINVAL;
+
ret = snprintf(buf, PAGE_SIZE, "%d\n",
get_disk_ro(dev_to_disk(dev)) ^
md->read_only);
@@ -276,6 +313,10 @@ static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
char *end;
struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
unsigned long set = simple_strtoul(buf, &end, 0);
+
+ if (!md)
+ return -EINVAL;
+
if (end == buf) {
ret = -EINVAL;
goto out;
@@ -531,6 +572,118 @@ static void mmc_blk_simulate_delay(
#define mmc_blk_simulate_delay(mq, req, waitfor)
#endif
+static ssize_t
+num_wr_reqs_to_start_packing_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int num_wr_reqs_to_start_packing;
+ int ret;
+
+ if (!md)
+ return -EINVAL;
+ num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing);
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t
+num_wr_reqs_to_start_packing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card;
+ int ret = count;
+
+ if (!md)
+ return -EINVAL;
+
+ card = md->queue.card;
+ if (!card) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ sscanf(buf, "%d", &value);
+
+ if (value >= 0) {
+ md->queue.num_wr_reqs_to_start_packing =
+ min_t(int, value, (int)card->ext_csd.max_packed_writes);
+
+ pr_debug("%s: trigger to pack: new value = %d",
+ mmc_hostname(card->host),
+ md->queue.num_wr_reqs_to_start_packing);
+ } else {
+ pr_err("%s: value %d is not valid. old value remains = %d",
+ mmc_hostname(card->host), value,
+ md->queue.num_wr_reqs_to_start_packing);
+ ret = -EINVAL;
+ }
+
+exit:
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t
+no_pack_for_random_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ int ret;
+
+ if (!md)
+ return -EINVAL;
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random);
+
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t
+no_pack_for_random_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int value;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card;
+ int ret = count;
+
+ if (!md)
+ return -EINVAL;
+
+ card = md->queue.card;
+ if (!card) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ sscanf(buf, "%d", &value);
+
+ if (value < 0) {
+ pr_err("%s: value %d is not valid. old value remains = %d",
+ mmc_hostname(card->host), value,
+ md->queue.no_pack_for_random);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ md->queue.no_pack_for_random = (value > 0) ? true : false;
+
+ pr_debug("%s: no_pack_for_random: new value = %d",
+ mmc_hostname(card->host),
+ md->queue.no_pack_for_random);
+
+exit:
+ mmc_blk_put(md);
+ return ret;
+}
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
@@ -679,11 +832,12 @@ static int ioctl_do_sanitize(struct mmc_card *card)
{
int err;
- if (!mmc_can_sanitize(card)) {
- pr_warn("%s: %s - SANITIZE is not supported\n",
+ if (!mmc_can_sanitize(card) &&
+ (card->host->caps2 & MMC_CAP2_SANITIZE)) {
+ pr_warn("%s: %s - SANITIZE is not supported\n",
mmc_hostname(card->host), __func__);
- err = -EOPNOTSUPP;
- goto out;
+ err = -EOPNOTSUPP;
+ goto out;
}
pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
@@ -713,19 +867,22 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
struct mmc_request mrq = {NULL};
struct scatterlist sg;
int err;
- int is_rpmb = false;
- u32 status = 0;
if (!card || !md || !idata)
return -EINVAL;
- if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
- is_rpmb = true;
-
cmd.opcode = idata->ic.opcode;
cmd.arg = idata->ic.arg;
cmd.flags = idata->ic.flags;
+ if (idata->ic.postsleep_max_us < idata->ic.postsleep_min_us) {
+ pr_err("%s: min value: %u must not be greater than max value: %u\n",
+ __func__, idata->ic.postsleep_min_us,
+ idata->ic.postsleep_max_us);
+ WARN_ON(1);
+ return -EPERM;
+ }
+
if (idata->buf_bytes) {
data.sg = &sg;
data.sg_len = 1;
@@ -764,6 +921,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
mrq.cmd = &cmd;
+ if (mmc_card_doing_bkops(card)) {
+ err = mmc_stop_bkops(card);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "%s: stop_bkops failed %d\n", __func__, err);
+ return err;
+ }
+ }
+
err = mmc_blk_part_switch(card, md);
if (err)
return err;
@@ -774,13 +940,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
return err;
}
- if (is_rpmb) {
- err = mmc_set_blockcount(card, data.blocks,
- idata->ic.write_flag & (1 << 31));
- if (err)
- return err;
- }
-
if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
(cmd.opcode == MMC_SWITCH)) {
err = ioctl_do_sanitize(card);
@@ -814,7 +973,183 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp));
- if (is_rpmb) {
+ return err;
+}
+
+struct mmc_blk_ioc_rpmb_data {
+ struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD];
+};
+
+static struct mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user(
+ struct mmc_ioc_rpmb __user *user)
+{
+ struct mmc_blk_ioc_rpmb_data *idata;
+ int err, i;
+
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+ idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i]));
+ if (IS_ERR(idata->data[i])) {
+ err = PTR_ERR(idata->data[i]);
+ goto copy_err;
+ }
+ }
+
+ return idata;
+
+copy_err:
+ while (--i >= 0) {
+ kfree(idata->data[i]->buf);
+ kfree(idata->data[i]);
+ }
+ kfree(idata);
+out:
+ return ERR_PTR(err);
+}
+
+static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev,
+ struct mmc_ioc_rpmb __user *ic_ptr)
+{
+ struct mmc_blk_ioc_rpmb_data *idata;
+ struct mmc_blk_data *md;
+ struct mmc_card *card = NULL;
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct mmc_request mrq = {NULL};
+ struct scatterlist sg;
+ int err = 0, i = 0;
+ u32 status = 0;
+
+ /* The caller must have CAP_SYS_RAWIO */
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
+
+ md = mmc_blk_get(bdev->bd_disk);
+ /* make sure this is a rpmb partition */
+ if ((!md) || (!(md->area_type & MMC_BLK_DATA_AREA_RPMB))) {
+ err = -EINVAL;
+ return err;
+ }
+
+ idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr);
+ if (IS_ERR(idata)) {
+ err = PTR_ERR(idata);
+ goto cmd_done;
+ }
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto idata_free;
+ }
+
+ mmc_get_card(card);
+
+ if (mmc_card_doing_bkops(card)) {
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt(card->host, true);
+ if (err)
+ goto cmd_rel_host;
+ }
+ err = mmc_stop_bkops(card);
+ if (err) {
+ dev_err(mmc_dev(card->host),
+ "%s: stop_bkops failed %d\n", __func__, err);
+ goto cmd_rel_host;
+ }
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt(card->host, false);
+ if (err)
+ goto cmd_rel_host;
+ }
+ }
+
+ err = mmc_blk_part_switch(card, md);
+ if (err)
+ goto cmd_rel_host;
+
+ for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+ struct mmc_blk_ioc_data *curr_data;
+ struct mmc_ioc_cmd *curr_cmd;
+
+ curr_data = idata->data[i];
+ curr_cmd = &curr_data->ic;
+ if (!curr_cmd->opcode)
+ break;
+
+ cmd.opcode = curr_cmd->opcode;
+ cmd.arg = curr_cmd->arg;
+ cmd.flags = curr_cmd->flags;
+
+ if (curr_data->buf_bytes) {
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = curr_cmd->blksz;
+ data.blocks = curr_cmd->blocks;
+
+ sg_init_one(data.sg, curr_data->buf,
+ curr_data->buf_bytes);
+
+ if (curr_cmd->write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+
+ /*
+ * Allow overriding the timeout_ns for empirical tuning.
+ */
+ if (curr_cmd->data_timeout_ns)
+ data.timeout_ns = curr_cmd->data_timeout_ns;
+
+ mrq.data = &data;
+ }
+
+ mrq.cmd = &cmd;
+
+ err = mmc_set_blockcount(card, data.blocks,
+ curr_cmd->write_flag & (1 << 31));
+ if (err)
+ goto cmd_rel_host;
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+ err = cmd.error;
+ goto cmd_rel_host;
+ }
+ if (data.error) {
+ dev_err(mmc_dev(card->host), "%s: data error %d\n",
+ __func__, data.error);
+ err = data.error;
+ goto cmd_rel_host;
+ }
+
+ if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp,
+ sizeof(cmd.resp))) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+
+ if (!curr_cmd->write_flag) {
+ if (copy_to_user((void __user *)(unsigned long)
+ curr_cmd->data_ptr,
+ curr_data->buf,
+ curr_data->buf_bytes)) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+ }
+
/*
* Ensure RPMB command has completed by polling CMD13
* "Send Status".
@@ -826,6 +1161,20 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
__func__, status, err);
}
+cmd_rel_host:
+ mmc_put_card(card);
+
+idata_free:
+ for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) {
+ kfree(idata->data[i]->buf);
+ kfree(idata->data[i]);
+ }
+ kfree(idata);
+
+cmd_done:
+ mmc_blk_put(md);
+ if (card && card->cmdq_init)
+ wake_up(&card->host->cmdq_ctx.wait);
return err;
}
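The handler above iterates over up to MMC_IOC_MAX_RPMB_CMD entries, stops at the first zero opcode, and issues CMD23 before each command with bit 31 of write_flag selecting reliable write. A hedged user-space sketch of a single-frame RPMB write follows; the opcode value 25 (MMC_WRITE_MULTIPLE_BLOCK) and the way fd is obtained are assumptions, not taken from this diff:

	struct mmc_ioc_rpmb rpmb = {0};

	rpmb.cmds[0].opcode     = 25;              /* MMC_WRITE_MULTIPLE_BLOCK (assumed) */
	rpmb.cmds[0].write_flag = (1U << 31) | 1;  /* bit 31 = reliable write, as checked above */
	rpmb.cmds[0].blksz      = 512;
	rpmb.cmds[0].blocks     = 1;
	rpmb.cmds[0].data_ptr   = (uintptr_t)frame;	/* 512-byte RPMB frame */

	if (ioctl(fd, MMC_IOC_RPMB_CMD, &rpmb) < 0)	/* fd: the rpmb block device */
		perror("MMC_IOC_RPMB_CMD");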
@@ -846,9 +1195,8 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
return -EPERM;
idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
- if (IS_ERR(idata))
+ if (IS_ERR_OR_NULL(idata))
return PTR_ERR(idata);
-
md = mmc_blk_get(bdev->bd_disk);
if (!md) {
err = -EINVAL;
@@ -856,19 +1204,36 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
}
card = md->queue.card;
- if (IS_ERR(card)) {
+ if (IS_ERR_OR_NULL(card)) {
err = PTR_ERR(card);
goto cmd_done;
}
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt_on_empty_queue(card->host);
+ if (err) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ mmc_put_card(card);
+ goto cmd_done;
+ }
+ }
+
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
mmc_put_card(card);
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+
cmd_done:
mmc_blk_put(md);
cmd_err:
@@ -954,6 +1319,9 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
case MMC_IOC_CMD:
return mmc_blk_ioctl_cmd(bdev,
(struct mmc_ioc_cmd __user *)arg);
+ case MMC_IOC_RPMB_CMD:
+ return mmc_blk_ioctl_rpmb_cmd(bdev,
+ (struct mmc_ioc_rpmb __user *)arg);
case MMC_IOC_MULTI_CMD:
return mmc_blk_ioctl_multi_cmd(bdev,
(struct mmc_ioc_multi_cmd __user *)arg);
@@ -981,28 +1349,92 @@ static const struct block_device_operations mmc_bdops = {
#endif
};
+static int mmc_blk_cmdq_switch(struct mmc_card *card,
+ struct mmc_blk_data *md, bool enable)
+{
+ int ret = 0;
+ bool cmdq_mode = !!mmc_card_cmdq(card);
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+ if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE) ||
+ !card->ext_csd.cmdq_support ||
+ (enable && !(md->flags & MMC_BLK_CMD_QUEUE)) ||
+ (cmdq_mode == enable))
+ return 0;
+
+ if (enable) {
+ ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
+ if (ret) {
+ pr_err("%s: failed (%d) to set block-size to %d\n",
+ __func__, ret, MMC_CARD_CMDQ_BLK_SIZE);
+ goto out;
+ }
+
+ } else {
+ if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) {
+ ret = mmc_cmdq_halt(host, true);
+ if (ret) {
+ pr_err("%s: halt: failed: %d\n",
+ mmc_hostname(host), ret);
+ goto out;
+ }
+ }
+ }
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CMDQ, enable,
+ card->ext_csd.generic_cmd6_time);
+ if (ret) {
+ pr_err("%s: cmdq mode %sable failed %d\n",
+ md->disk->disk_name, enable ? "en" : "dis", ret);
+ goto out;
+ }
+
+ if (enable)
+ mmc_card_set_cmdq(card);
+ else
+ mmc_card_clr_cmdq(card);
+out:
+ return ret;
+}
+
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md)
{
int ret;
struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
- if (main_md->part_curr == md->part_type)
+ if ((main_md->part_curr == md->part_type) &&
+ (card->part_curr == md->part_type))
return 0;
if (mmc_card_mmc(card)) {
u8 part_config = card->ext_csd.part_config;
+ if (md->part_type) {
+ /* disable CQ mode for non-user data partitions */
+ ret = mmc_blk_cmdq_switch(card, md, false);
+ if (ret)
+ return ret;
+ }
+
part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
part_config |= md->part_type;
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_PART_CONFIG, part_config,
card->ext_csd.part_time);
- if (ret)
+
+ if (ret) {
+ pr_err("%s: mmc_blk_part_switch failure, %d -> %d\n",
+ mmc_hostname(card->host), main_md->part_curr,
+ md->part_type);
return ret;
+ }
card->ext_csd.part_config = part_config;
+ card->part_curr = md->part_type;
}
main_md->part_curr = md->part_type;
@@ -1183,18 +1615,21 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
switch (error) {
case -EILSEQ:
/* response crc error, retry the r/w cmd */
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "response CRC error",
+ pr_err_ratelimited(
+ "%s: response CRC error sending %s command, card status %#x\n",
+ req->rq_disk->disk_name,
name, status);
return ERR_RETRY;
case -ETIMEDOUT:
- pr_err("%s: %s sending %s command, card status %#x\n",
- req->rq_disk->disk_name, "timed out", name, status);
+ pr_err_ratelimited(
+ "%s: timed out sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, name, status);
/* If the status cmd initially failed, retry the r/w cmd */
if (!status_valid) {
- pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
+ pr_err_ratelimited("%s: status not valid, retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_RETRY;
}
/*
@@ -1203,17 +1638,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* have corrected the state problem above.
*/
if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
- pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
+ pr_err_ratelimited(
+ "%s: command error, retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_RETRY;
}
/* Otherwise abort the command */
- pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
+ pr_err_ratelimited(
+ "%s: not retrying timeout\n",
+ req->rq_disk->disk_name);
return ERR_ABORT;
default:
/* We don't understand the error code the driver gave us */
- pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+ pr_err_ratelimited(
+ "%s: unknown error %d sending read/write command, card status %#x\n",
req->rq_disk->disk_name, error, status);
return ERR_ABORT;
}
@@ -1261,12 +1701,14 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
mmc_retune_recheck(card->host);
prev_cmd_status_valid = false;
- pr_err("%s: error %d sending status command, %sing\n",
+ pr_err_ratelimited("%s: error %d sending status command, %sing\n",
req->rq_disk->disk_name, err, retry ? "retry" : "abort");
}
/* We couldn't get a response from the card. Give up. */
if (err) {
+ if (card->err_in_sdr104)
+ return ERR_RETRY;
/* Check if the card is removed */
if (mmc_detect_card_removed(card->host))
return ERR_NOMEDIUM;
@@ -1352,8 +1794,15 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
md->reset_done |= type;
err = mmc_hw_reset(host);
+ if (err && err != -EOPNOTSUPP) {
+ /* We failed to reset so we need to abort the request */
+ pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host),
+ __func__, err);
+ return -ENODEV;
+ }
+
/* Ensure we switch back to the correct partition */
- if (err != -EOPNOTSUPP) {
+ if (host->card) {
struct mmc_blk_data *main_md =
dev_get_drvdata(&host->card->dev);
int part_err;
@@ -1388,6 +1837,77 @@ int mmc_access_rpmb(struct mmc_queue *mq)
return false;
}
+static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+ struct mmc_cmdq_req *cmdq_req;
+ struct mmc_queue_req *active_mqrq;
+
+ BUG_ON(req->tag > card->ext_csd.cmdq_depth);
+ BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+ set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+
+ active_mqrq = &mq->mqrq_cmdq[req->tag];
+ active_mqrq->req = req;
+
+ cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
+ cmdq_req->cmdq_req_flags |= QBR;
+ cmdq_req->mrq.cmd = &cmdq_req->cmd;
+ cmdq_req->tag = req->tag;
+ return cmdq_req;
+}
+
+static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_cmdq_req *cmdq_req = NULL;
+ unsigned int from, nr, arg;
+ int err = 0;
+
+ if (!mmc_can_erase(card)) {
+ err = -EOPNOTSUPP;
+ blk_end_request(req, err, blk_rq_bytes(req));
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
+ if (mmc_can_discard(card))
+ arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
+ arg = MMC_TRIM_ARG;
+ else
+ arg = MMC_ERASE_ARG;
+
+ cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+ EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ 0, true, false);
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err)
+ goto clear_dcmd;
+ }
+ err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
+clear_dcmd:
+ mmc_host_clk_hold(card->host);
+ blk_complete_request(req);
+out:
+ return err ? 1 : 0;
+}
+
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
@@ -1431,6 +1951,69 @@ out:
return err ? 0 : 1;
}
+static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_cmdq_req *cmdq_req = NULL;
+ unsigned int from, nr, arg;
+ int err = 0;
+
+ if (!(mmc_can_secure_erase_trim(card))) {
+ err = -EOPNOTSUPP;
+ blk_end_request(req, err, blk_rq_bytes(req));
+ goto out;
+ }
+
+ from = blk_rq_pos(req);
+ nr = blk_rq_sectors(req);
+
+ if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ arg = MMC_SECURE_TRIM1_ARG;
+ else
+ arg = MMC_SECURE_ERASE_ARG;
+
+ cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req);
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+ EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_SECURE_TRIM1_ARG ?
+ INAND_CMD38_ARG_SECTRIM1 :
+ INAND_CMD38_ARG_SECERASE,
+ 0, true, false);
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err)
+ goto clear_dcmd;
+ }
+
+ err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg);
+ if (err)
+ goto clear_dcmd;
+
+ if (arg == MMC_SECURE_TRIM1_ARG) {
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd,
+ EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ INAND_CMD38_ARG_SECTRIM2,
+ 0, true, false);
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err)
+ goto clear_dcmd;
+ }
+
+ err = mmc_cmdq_erase(cmdq_req, card, from, nr,
+ MMC_SECURE_TRIM2_ARG);
+ }
+clear_dcmd:
+ mmc_host_clk_hold(card->host);
+ blk_complete_request(req);
+out:
+ return err ? 1 : 0;
+}
+
static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct request *req)
{
@@ -1504,10 +2087,47 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
int ret = 0;
- ret = mmc_flush_cache(card);
- if (ret)
+ if (!req)
+ return 0;
+
+ if (req->cmd_flags & REQ_BARRIER) {
+ /*
+ * If eMMC cache flush policy is set to 1, then the device
+ * shall flush the requests in First-In-First-Out (FIFO) order.
+ * In this case, as per spec, the host must not send any cache
+ * barrier requests as they are redundant and add unnecessary
+ * overhead to both device and host.
+ */
+ if (card->ext_csd.cache_flush_policy & 1)
+ goto end_req;
+
+ /*
+ * In case barrier is not supported or enabled in the device,
+ * use flush as a fallback option.
+ */
+ ret = mmc_cache_barrier(card);
+ if (ret)
+ ret = mmc_flush_cache(card);
+ } else if (req->cmd_flags & REQ_FLUSH) {
+ ret = mmc_flush_cache(card);
+ }
+ if (ret == -ENODEV) {
+ pr_err("%s: %s: restart mmc card",
+ req->rq_disk->disk_name, __func__);
+ if (mmc_blk_reset(md, card->host, MMC_BLK_FLUSH))
+ pr_err("%s: %s: fail to restart mmc",
+ req->rq_disk->disk_name, __func__);
+ else
+ mmc_blk_reset_success(md, MMC_BLK_FLUSH);
+ }
+
+ if (ret) {
+ pr_err("%s: %s: notify flush error to upper layers",
+ req->rq_disk->disk_name, __func__);
ret = -EIO;
+ }
+end_req:
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
else if (atomic_read(&mq->cache_size)) {
long used = mmc_blk_cache_used(mq, jiffies);
@@ -1571,6 +2191,18 @@ static int mmc_blk_err_check(struct mmc_card *card,
int need_retune = card->host->need_retune;
int ecc_err = 0, gen_err = 0;
+ if (card->host->sdr104_wa && mmc_card_sd(card) &&
+ (card->host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !card->sdr104_blocked &&
+ (brq->data.error == -EILSEQ ||
+ brq->data.error == -EIO ||
+ brq->data.error == -ETIMEDOUT ||
+ brq->cmd.error == -EILSEQ ||
+ brq->cmd.error == -EIO ||
+ brq->cmd.error == -ETIMEDOUT ||
+ brq->sbc.error))
+ card->err_in_sdr104 = true;
+
/*
* sbc.error indicates a problem with the set block count
* command. No data will have been transferred.
@@ -1755,6 +2387,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->stop.arg = 0;
brq->data.blocks = blk_rq_sectors(req);
+ brq->data.fault_injected = false;
/*
* The block layer doesn't support all sector count
* restrictions, so we need to be prepared for too big
@@ -1878,6 +2511,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
}
mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.mrq->req = mqrq->req;
mqrq->mmc_active.err_check = mmc_blk_err_check;
mmc_queue_bounce_pre(mqrq);
@@ -1899,6 +2533,178 @@ static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
return nr_segs;
}
+/**
+ * mmc_blk_disable_wr_packing() - disables packing mode
+ * @mq: MMC queue.
+ *
+ */
+void mmc_blk_disable_wr_packing(struct mmc_queue *mq)
+{
+ if (mq) {
+ mq->wr_packing_enabled = false;
+ mq->num_of_potential_packed_wr_reqs = 0;
+ }
+}
+EXPORT_SYMBOL(mmc_blk_disable_wr_packing);
+
+static int get_packed_trigger(int potential, struct mmc_card *card,
+ struct request *req, int curr_trigger)
+{
+ static int num_mean_elements = 1;
+ static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+ unsigned int trigger = curr_trigger;
+ unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes;
+
+ /* scale down the upper bound to 75% */
+ pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4;
+
+ /*
+ * since the most common calls for this function are with small
+ * potential write values and since we don't want these calls to affect
+ * the packed trigger, set a lower bound and ignore calls with
+ * potential lower than that bound
+ */
+ if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND)
+ return trigger;
+
+ /*
+ * this is to prevent integer overflow in the following calculation:
+ * once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm
+ */
+ if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) {
+ num_mean_elements = 1;
+ mean_potential = PCKD_TRGR_INIT_MEAN_POTEN;
+ }
+
+ /*
+ * get next mean value based on previous mean value and current
+ * potential packed writes. Calculation is as follows:
+ * mean_pot[i+1] =
+ * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1)
+ */
+ mean_potential *= num_mean_elements;
+ /*
+ * add num_mean_elements so that the division of two integers doesn't
+ * lower mean_potential too much
+ */
+ if (potential > mean_potential)
+ mean_potential += num_mean_elements;
+ mean_potential += potential;
+ /* this is for gaining more precision when dividing two integers */
+ mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER;
+ /* this completes the mean calculation */
+ mean_potential /= ++num_mean_elements;
+ mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER;
+
+ /*
+ * if the current potential packed writes value is greater than the
+ * mean, the heuristic is that the following workload will contain
+ * many write requests; therefore we lower the packed trigger. In the
+ * opposite case we want to increase the trigger in order to get fewer
+ * packing events.
+ */
+ if (potential >= mean_potential)
+ trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ?
+ PCKD_TRGR_LOWER_BOUND : trigger - 1;
+ else
+ trigger = (trigger >= pckd_trgr_upper_bound) ?
+ pckd_trgr_upper_bound : trigger + 1;
+
+ /*
+ * an urgent read request indicates a packed list being interrupted
+ * by this read, therefore we aim for less packing, hence the trigger
+ * gets increased
+ */
+ if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ))
+ trigger += PCKD_TRGR_URGENT_PENALTY;
+
+ return trigger;
+}
+
+static void mmc_blk_write_packing_control(struct mmc_queue *mq,
+ struct request *req)
+{
+ struct mmc_host *host = mq->card->host;
+ int data_dir;
+
+ if (!(host->caps2 & MMC_CAP2_PACKED_WR))
+ return;
+
+ /* Support for the write packing on eMMC 4.5 or later */
+ if (mq->card->ext_csd.rev <= 5)
+ return;
+
+ /*
+ * In case the packing control is not supported by the host, it should
+ * not have an effect on the write packing. Therefore we have to enable
+ * the write packing
+ */
+ if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) {
+ mq->wr_packing_enabled = true;
+ return;
+ }
+
+ if (!req || (req && (req->cmd_flags & REQ_FLUSH))) {
+ if (mq->num_of_potential_packed_wr_reqs >
+ mq->num_wr_reqs_to_start_packing)
+ mq->wr_packing_enabled = true;
+ mq->num_wr_reqs_to_start_packing =
+ get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
+ mq->card, req,
+ mq->num_wr_reqs_to_start_packing);
+ mq->num_of_potential_packed_wr_reqs = 0;
+ return;
+ }
+
+ data_dir = rq_data_dir(req);
+
+ if (data_dir == READ) {
+ mmc_blk_disable_wr_packing(mq);
+ mq->num_wr_reqs_to_start_packing =
+ get_packed_trigger(mq->num_of_potential_packed_wr_reqs,
+ mq->card, req,
+ mq->num_wr_reqs_to_start_packing);
+ mq->num_of_potential_packed_wr_reqs = 0;
+ mq->wr_packing_enabled = false;
+ return;
+ } else if (data_dir == WRITE) {
+ mq->num_of_potential_packed_wr_reqs++;
+ }
+
+ if (mq->num_of_potential_packed_wr_reqs >
+ mq->num_wr_reqs_to_start_packing)
+ mq->wr_packing_enabled = true;
+}
+
+struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card)
+{
+ if (!card)
+ return NULL;
+
+ return &card->wr_pack_stats;
+}
+EXPORT_SYMBOL(mmc_blk_get_packed_statistics);
+
+void mmc_blk_init_packed_statistics(struct mmc_card *card)
+{
+ int max_num_of_packed_reqs = 0;
+
+ if (!card || !card->wr_pack_stats.packing_events)
+ return;
+
+ max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+ spin_lock(&card->wr_pack_stats.lock);
+ memset(card->wr_pack_stats.packing_events, 0,
+ (max_num_of_packed_reqs + 1) *
+ sizeof(*card->wr_pack_stats.packing_events));
+ memset(&card->wr_pack_stats.pack_stop_reason, 0,
+ sizeof(card->wr_pack_stats.pack_stop_reason));
+ card->wr_pack_stats.enabled = true;
+ spin_unlock(&card->wr_pack_stats.lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_packed_statistics);
+
static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
{
struct request_queue *q = mq->queue;
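A worked example of the fixed-point mean in get_packed_trigger() above: starting from mean_potential = 17 (PCKD_TRGR_INIT_MEAN_POTEN) with num_mean_elements = 1, a call with potential = 25 computes ((17 * 1) + 1 + 25) * 100 / 2 / 100 = 21, where the extra +1 is the num_mean_elements rounding compensation and the *100 / 100 pair is PCKD_TRGR_PRECISION_MULTIPLIER. Since 25 >= 21, the trigger is decremented (bounded below by PCKD_TRGR_LOWER_BOUND), so packing kicks in sooner for write-heavy workloads.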
@@ -1912,10 +2718,14 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
bool put_back = true;
u8 max_packed_rw = 0;
u8 reqs = 0;
+ struct mmc_wr_pack_stats *stats = &card->wr_pack_stats;
if (!(md->flags & MMC_BLK_PACKED_CMD))
goto no_packed;
+ if (!mq->wr_packing_enabled)
+ goto no_packed;
+
if ((rq_data_dir(cur) == WRITE) &&
mmc_host_packed_wr(card->host))
max_packed_rw = card->ext_csd.max_packed_writes;
@@ -1931,6 +2741,9 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
!IS_ALIGNED(blk_rq_sectors(cur), 8))
goto no_packed;
+ if (cur->cmd_flags & REQ_FUA)
+ goto no_packed;
+
mmc_blk_clear_packed(mqrq);
max_blk_count = min(card->host->max_blk_count,
@@ -1947,6 +2760,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
phys_segments += mmc_calc_packed_hdr_segs(q, card);
}
+ spin_lock(&stats->lock);
do {
if (reqs >= max_packed_rw - 1) {
put_back = false;
@@ -1957,33 +2771,63 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
next = blk_fetch_request(q);
spin_unlock_irq(q->queue_lock);
if (!next) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE);
put_back = false;
break;
}
if (mmc_large_sector(card) &&
- !IS_ALIGNED(blk_rq_sectors(next), 8))
+ !IS_ALIGNED(blk_rq_sectors(next), 8)) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN);
break;
+ }
if (next->cmd_flags & REQ_DISCARD ||
- next->cmd_flags & REQ_FLUSH)
+ next->cmd_flags & REQ_FLUSH) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD);
break;
+ }
- if (rq_data_dir(cur) != rq_data_dir(next))
+ if (next->cmd_flags & REQ_FUA) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, FUA);
break;
+ }
+
+ if (rq_data_dir(cur) != rq_data_dir(next)) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR);
+ break;
+ }
if (mmc_req_rel_wr(next) &&
- (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE);
break;
+ }
req_sectors += blk_rq_sectors(next);
- if (req_sectors > max_blk_count)
+ if (req_sectors > max_blk_count) {
+ if (stats->enabled)
+ stats->pack_stop_reason[EXCEEDS_SECTORS]++;
break;
+ }
phys_segments += next->nr_phys_segments;
- if (phys_segments > max_phys_segs)
+ if (phys_segments > max_phys_segs) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS);
break;
+ }
+
+ if (mq->no_pack_for_random) {
+ if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) !=
+ blk_rq_pos(next)) {
+ MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM);
+ put_back = true;
+ break;
+ }
+ }
+ if (rq_data_dir(next) == WRITE)
+ mq->num_of_potential_packed_wr_reqs++;
list_add_tail(&next->queuelist, &mqrq->packed->list);
cur = next;
reqs++;
@@ -1995,6 +2839,15 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
spin_unlock_irq(q->queue_lock);
}
+ if (stats->enabled) {
+ if (reqs + 1 <= card->ext_csd.max_packed_writes)
+ stats->packing_events[reqs + 1]++;
+ if (reqs + 1 == max_packed_rw)
+ MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD);
+ }
+
+ spin_unlock(&stats->lock);
+
if (reqs > 0) {
list_add(&req->queuelist, &mqrq->packed->list);
mqrq->packed->nr_entries = ++reqs;
@@ -2075,6 +2928,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->data.blocks = packed->blocks + hdr_blocks;
brq->data.flags |= MMC_DATA_WRITE;
+ brq->data.fault_injected = false;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
@@ -2086,7 +2940,18 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
mqrq->mmc_active.mrq = &brq->mrq;
- mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ /*
+ * This is intended for packed commands tests usage - in case these
+ * functions are not in use the respective pointers are NULL
+ */
+ if (mq->err_check_fn)
+ mqrq->mmc_active.err_check = mq->err_check_fn;
+ else
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ if (mq->packed_test_fn)
+ mq->packed_test_fn(mq->queue, mqrq);
mmc_queue_bounce_pre(mqrq);
}
@@ -2108,11 +2973,12 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
*/
if (mmc_card_sd(card)) {
u32 blocks;
-
- blocks = mmc_sd_num_wr_blocks(card);
- if (blocks != (u32)-1) {
- ret = blk_end_request(req, 0, blocks << 9);
- }
+ if (!brq->data.fault_injected) {
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1)
+ ret = blk_end_request(req, 0, blocks << 9);
+ } else
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
} else {
if (!mmc_packed_cmd(mq_rq->cmd_type))
ret = blk_end_request(req, 0, brq->data.bytes_xfered);
@@ -2192,6 +3058,595 @@ static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
mmc_blk_clear_packed(mq_rq);
}
+static int mmc_blk_cmdq_start_req(struct mmc_host *host,
+ struct mmc_cmdq_req *cmdq_req)
+{
+ struct mmc_request *mrq = &cmdq_req->mrq;
+
+ mrq->done = mmc_blk_cmdq_req_done;
+ return mmc_cmdq_start_req(host, cmdq_req);
+}
+
+/* prepare for non-data commands */
+static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd(
+ struct mmc_queue_req *mqrq, struct mmc_queue *mq)
+{
+ struct request *req = mqrq->req;
+ struct mmc_cmdq_req *cmdq_req = &mqrq->cmdq_req;
+
+ memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
+
+ cmdq_req->mrq.data = NULL;
+ cmdq_req->cmd_flags = req->cmd_flags;
+ cmdq_req->mrq.req = mqrq->req;
+ req->special = mqrq;
+ cmdq_req->cmdq_req_flags |= DCMD;
+ cmdq_req->mrq.cmdq_req = cmdq_req;
+
+ return &mqrq->cmdq_req;
+}
+
+
+#define IS_RT_CLASS_REQ(x) \
+ (IOPRIO_PRIO_CLASS(req_get_ioprio(x)) == IOPRIO_CLASS_RT)
+
+static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep(
+ struct mmc_queue_req *mqrq, struct mmc_queue *mq)
+{
+ struct mmc_card *card = mq->card;
+ struct request *req = mqrq->req;
+ struct mmc_blk_data *md = mq->data;
+ bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR);
+ bool do_data_tag;
+ bool read_dir = (rq_data_dir(req) == READ);
+ bool prio = IS_RT_CLASS_REQ(req);
+ struct mmc_cmdq_req *cmdq_rq = &mqrq->cmdq_req;
+
+ memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req));
+
+ cmdq_rq->tag = req->tag;
+ if (read_dir) {
+ cmdq_rq->cmdq_req_flags |= DIR;
+ cmdq_rq->data.flags = MMC_DATA_READ;
+ } else {
+ cmdq_rq->data.flags = MMC_DATA_WRITE;
+ }
+ if (prio)
+ cmdq_rq->cmdq_req_flags |= PRIO;
+
+ if (do_rel_wr)
+ cmdq_rq->cmdq_req_flags |= REL_WR;
+
+ cmdq_rq->data.blocks = blk_rq_sectors(req);
+ cmdq_rq->blk_addr = blk_rq_pos(req);
+ cmdq_rq->data.blksz = MMC_CARD_CMDQ_BLK_SIZE;
+
+ mmc_set_data_timeout(&cmdq_rq->data, card);
+
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((cmdq_rq->data.blocks * cmdq_rq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+ if (do_data_tag)
+ cmdq_rq->cmdq_req_flags |= DAT_TAG;
+ cmdq_rq->data.sg = mqrq->sg;
+ cmdq_rq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (cmdq_rq->data.blocks > card->host->max_blk_count)
+ cmdq_rq->data.blocks = card->host->max_blk_count;
+
+ if (cmdq_rq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = cmdq_rq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(cmdq_rq->data.sg, sg, cmdq_rq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
+ }
+ }
+ cmdq_rq->data.sg_len = i;
+ }
+
+ mqrq->cmdq_req.cmd_flags = req->cmd_flags;
+ mqrq->cmdq_req.mrq.req = mqrq->req;
+ mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req;
+ mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data;
+ mqrq->req->special = mqrq;
+
+ pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n",
+ mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq,
+ mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz),
+ cmdq_rq, cmdq_rq->blk_addr,
+ (cmdq_rq->cmdq_req_flags & DIR) ? 1 : 0);
+
+ return &mqrq->cmdq_req;
+}
+
+static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+ struct mmc_queue_req *active_mqrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+ struct mmc_cmdq_req *mc_rq;
+ u8 active_small_sector_read = 0;
+ int ret = 0;
+
+ mmc_deferred_scaling(host);
+ mmc_cmdq_clk_scaling_start_busy(host, true);
+
+ BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth));
+ BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.data_active_reqs));
+ BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+ active_mqrq = &mq->mqrq_cmdq[req->tag];
+ active_mqrq->req = req;
+
+ mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq);
+
+ if (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) {
+ unsigned int sectors = blk_rq_sectors(req);
+
+ if (((sectors > 0) && (sectors < 8))
+ && (rq_data_dir(req) == READ))
+ active_small_sector_read = 1;
+ }
+ ret = mmc_blk_cmdq_start_req(card->host, mc_rq);
+ if (!ret && active_small_sector_read)
+ host->cmdq_ctx.active_small_sector_read_reqs++;
+ /*
+ * When in SVS2 on low load scenario and there are lots of requests
+ * queued for CMDQ we need to wait till the queue is empty to scale
+ * back up to Nominal even if there is a sudden increase in load.
+ * This impacts performance where lots of IO get executed in SVS2
+ * frequency since the queue is full. As SVS2 is a low load use case
+ * we can serialize the requests and not queue them in parallel
+ * without impacting other use cases. This makes sure the queue gets
+ * empty faster and we will be able to scale up to Nominal frequency
+ * when needed.
+ */
+ if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW))
+ wait_event_interruptible(ctx->queue_empty_wq,
+ (!ctx->active_reqs));
+
+ return ret;
+}
+
+/*
+ * Issues a flush (dcmd) request
+ */
+int mmc_blk_cmdq_issue_flush_rq(struct mmc_queue *mq, struct request *req)
+{
+ int err;
+ struct mmc_queue_req *active_mqrq;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host;
+ struct mmc_cmdq_req *cmdq_req;
+ struct mmc_cmdq_context_info *ctx_info;
+
+ BUG_ON(!card);
+ host = card->host;
+ BUG_ON(!host);
+ BUG_ON(req->tag > card->ext_csd.cmdq_depth);
+ BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs));
+
+ ctx_info = &host->cmdq_ctx;
+
+ set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+
+ active_mqrq = &mq->mqrq_cmdq[req->tag];
+ active_mqrq->req = req;
+
+ cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq);
+ cmdq_req->cmdq_req_flags |= QBR;
+ cmdq_req->mrq.cmd = &cmdq_req->cmd;
+ cmdq_req->tag = req->tag;
+
+ err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
+ if (err) {
+ pr_err("%s: failed (%d) preparing flush req\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+ err = mmc_blk_cmdq_start_req(card->host, cmdq_req);
+ return err;
+}
+EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq);
+
+static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all)
+{
+ int err = 0;
+
+ if (mmc_cmdq_halt(host, true)) {
+ pr_err("%s: halt failed\n", mmc_hostname(host));
+ goto reset;
+ }
+
+ if (clear_all)
+ mmc_cmdq_discard_queue(host, 0);
+reset:
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->disable(host, true);
+ mmc_host_clk_release(host);
+ err = mmc_cmdq_hw_reset(host);
+ if (err && err != -EOPNOTSUPP) {
+ pr_err("%s: failed to cmdq_hw_reset err = %d\n",
+ mmc_hostname(host), err);
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->enable(host);
+ mmc_host_clk_release(host);
+ mmc_cmdq_halt(host, false);
+ goto out;
+ }
+ /*
+ * CMDQ HW reset would have already made CQE
+ * in unhalted state, but reflect the same
+ * in software state of cmdq_ctx.
+ */
+ mmc_host_clr_halt(host);
+out:
+ return;
+}
+
+/**
+ * is_cmdq_dcmd_req - Checks if tag belongs to DCMD request.
+ * @q: request_queue pointer.
+ * @tag: tag number of request to check.
+ *
+ * This function checks if the request with tag number "tag"
+ * is a DCMD request or not based on cmdq_req_flags set.
+ *
+ * Returns 1 for a DCMD request, 0 otherwise, -ENOENT if the tag cannot be resolved.
+ */
+static int is_cmdq_dcmd_req(struct request_queue *q, int tag)
+{
+ struct request *req;
+ struct mmc_queue_req *mq_rq;
+ struct mmc_cmdq_req *cmdq_req;
+
+ req = blk_queue_find_tag(q, tag);
+ if (WARN_ON(!req))
+ goto out;
+ mq_rq = req->special;
+ if (WARN_ON(!mq_rq))
+ goto out;
+ cmdq_req = &(mq_rq->cmdq_req);
+ return (cmdq_req->cmdq_req_flags & DCMD);
+out:
+ return -ENOENT;
+}
+
+/**
+ * mmc_blk_cmdq_reset_all - Reset everything for CMDQ block request.
+ * @host: mmc_host pointer.
+ * @err: error for which reset is performed.
+ *
+ * This function implements reset_all functionality for
+ * cmdq. It resets the controller, power cycle the card,
+ * and invalidate all busy tags(requeue all request back to
+ * elevator).
+ */
+static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err)
+{
+ struct mmc_request *mrq = host->err_mrq;
+ struct mmc_card *card = host->card;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+ struct request_queue *q;
+ int itag = 0;
+ int ret = 0;
+
+ if (WARN_ON(!mrq))
+ return;
+
+ q = mrq->req->q;
+ WARN_ON(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+ #ifdef CONFIG_MMC_CLKGATE
+ pr_debug("%s: %s: active_reqs = %lu, clk_requests = %d\n",
+ mmc_hostname(host), __func__,
+ ctx_info->active_reqs, host->clk_requests);
+ #endif
+
+ mmc_blk_cmdq_reset(host, false);
+
+ for_each_set_bit(itag, &ctx_info->active_reqs,
+ host->num_cq_slots) {
+ ret = is_cmdq_dcmd_req(q, itag);
+ if (WARN_ON(ret == -ENOENT))
+ continue;
+ if (!ret) {
+ WARN_ON(!test_and_clear_bit(itag,
+ &ctx_info->data_active_reqs));
+ mmc_cmdq_post_req(host, itag, err);
+ } else {
+ clear_bit(CMDQ_STATE_DCMD_ACTIVE,
+ &ctx_info->curr_state);
+ }
+ WARN_ON(!test_and_clear_bit(itag,
+ &ctx_info->active_reqs));
+ mmc_host_clk_release(host);
+ mmc_put_card(card);
+ }
+
+ spin_lock_irq(q->queue_lock);
+ blk_queue_invalidate_tags(q);
+ spin_unlock_irq(q->queue_lock);
+}
+
+static void mmc_blk_cmdq_shutdown(struct mmc_queue *mq)
+{
+ int err;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+
+ mmc_get_card(card);
+ mmc_host_clk_hold(host);
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: halt: failed: %d\n", __func__, err);
+ goto out;
+ }
+
+ /* disable CQ mode in card */
+ if (mmc_card_cmdq(card)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CMDQ, 0,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: failed to switch card to legacy mode: %d\n",
+ __func__, err);
+ goto out;
+ }
+ mmc_card_clr_cmdq(card);
+ }
+ host->cmdq_ops->disable(host, false);
+ host->card->cmdq_init = false;
+out:
+ mmc_host_clk_release(host);
+ mmc_put_card(card);
+}
+
+static enum blk_eh_timer_return mmc_blk_cmdq_req_timed_out(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+ struct mmc_host *host = mq->card->host;
+ struct mmc_queue_req *mq_rq = req->special;
+ struct mmc_request *mrq;
+ struct mmc_cmdq_req *cmdq_req;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+
+ BUG_ON(!host);
+
+ /*
+ * The mmc_queue_req will be present only if the request
+ * is issued to the LLD. The request could be fetched from
+ * block layer queue but could be waiting to be issued
+ * (e.g. clock scaling is waiting for an empty cmdq queue).
+ * Reset the timer in such cases to give LLD more time
+ */
+ if (!mq_rq) {
+ pr_warn("%s: restart timer for tag: %d\n", __func__, req->tag);
+ return BLK_EH_RESET_TIMER;
+ }
+
+ mrq = &mq_rq->cmdq_req.mrq;
+ cmdq_req = &mq_rq->cmdq_req;
+
+ BUG_ON(!mrq || !cmdq_req);
+
+ if (cmdq_req->cmdq_req_flags & DCMD)
+ mrq->cmd->error = -ETIMEDOUT;
+ else
+ mrq->data->error = -ETIMEDOUT;
+
+ if (mrq->cmd && mrq->cmd->error) {
+ if (!(mrq->req->cmd_flags & REQ_FLUSH)) {
+ /*
+ * Notify completion for non flush commands like
+ * discard that wait for DCMD finish.
+ */
+ set_bit(CMDQ_STATE_REQ_TIMED_OUT,
+ &ctx_info->curr_state);
+ complete(&mrq->completion);
+ return BLK_EH_NOT_HANDLED;
+ }
+ }
+
+ if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state) ||
+ test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state))
+ return BLK_EH_NOT_HANDLED;
+
+ set_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
+ return BLK_EH_HANDLED;
+}
+
+/*
+ * mmc_blk_cmdq_err: error handling of cmdq error requests.
+ * Function should be called in context of error out request
+ * which has claim_host and rpm acquired.
+ * This may be called with CQ engine halted. Make sure to
+ * unhalt it after error recovery.
+ *
+ * TODO: Currently cmdq error handler does reset_all in case
+ * of any error. Need to optimize error handling.
+ */
+static void mmc_blk_cmdq_err(struct mmc_queue *mq)
+{
+ struct mmc_host *host = mq->card->host;
+ struct mmc_request *mrq = host->err_mrq;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+ struct request_queue *q;
+ int err, ret;
+ u32 status = 0;
+
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->dumpstate(host);
+ mmc_host_clk_release(host);
+
+ if (WARN_ON(!mrq))
+ return;
+
+ q = mrq->req->q;
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("halt: failed: %d\n", err);
+ goto reset;
+ }
+
+ /* RED error - Fatal: requires reset */
+ if (mrq->cmdq_req->resp_err) {
+ err = mrq->cmdq_req->resp_err;
+ goto reset;
+ }
+
+ /*
+ * TIMEOUT errors can happen because of execution error
+ * in the last command. So send cmd 13 to get device status
+ */
+ if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) ||
+ (mrq->data && (mrq->data->error == -ETIMEDOUT))) {
+ if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
+ ret = get_card_status(host->card, &status, 0);
+ if (ret)
+ pr_err("%s: CMD13 failed with err %d\n",
+ mmc_hostname(host), ret);
+ }
+ pr_err("%s: Timeout error detected with device status 0x%08x\n",
+ mmc_hostname(host), status);
+ }
+
+ /*
+ * In case of a software request time-out, we schedule err work only for
+ * the first errored-out request and handle all other requests in flight
+ * here.
+ */
+ if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state)) {
+ err = -ETIMEDOUT;
+ } else if (mrq->data && mrq->data->error) {
+ err = mrq->data->error;
+ } else if (mrq->cmd && mrq->cmd->error) {
+ /* DCMD commands */
+ err = mrq->cmd->error;
+ }
+
+reset:
+ mmc_blk_cmdq_reset_all(host, err);
+ if (mrq->cmdq_req->resp_err)
+ mrq->cmdq_req->resp_err = false;
+ mmc_cmdq_halt(host, false);
+
+ host->err_mrq = NULL;
+ clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state);
+ WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+ wake_up(&ctx_info->wait);
+}
+
+/* invoked by block layer in softirq context */
+void mmc_blk_cmdq_complete_rq(struct request *rq)
+{
+ struct mmc_queue_req *mq_rq = rq->special;
+ struct mmc_request *mrq = &mq_rq->cmdq_req.mrq;
+ struct mmc_host *host = mrq->host;
+ struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx;
+ struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req;
+ struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata;
+ int err = 0;
+ bool is_dcmd = false;
+
+ if (mrq->cmd && mrq->cmd->error)
+ err = mrq->cmd->error;
+ else if (mrq->data && mrq->data->error)
+ err = mrq->data->error;
+
+ if ((err || cmdq_req->resp_err) && !cmdq_req->skip_err_handling) {
+ pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n",
+ mmc_hostname(mrq->host), __func__, err,
+ cmdq_req->resp_err);
+ if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+ pr_err("%s: CQ in error state, ending current req: %d\n",
+ __func__, err);
+ } else {
+ set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state);
+ BUG_ON(host->err_mrq != NULL);
+ host->err_mrq = mrq;
+ schedule_work(&mq->cmdq_err_work);
+ }
+ goto out;
+ }
+ /*
+ * In case of error CMDQ is expected to be either in halted
+ * or disabled state, so it cannot receive any completion of
+ * other requests.
+ */
+ BUG_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state));
+
+ /* clear pending request */
+ BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+ &ctx_info->active_reqs));
+ if (cmdq_req->cmdq_req_flags & DCMD)
+ is_dcmd = true;
+ else
+ BUG_ON(!test_and_clear_bit(cmdq_req->tag,
+ &ctx_info->data_active_reqs));
+ if (!is_dcmd)
+ mmc_cmdq_post_req(host, cmdq_req->tag, err);
+ if (cmdq_req->cmdq_req_flags & DCMD) {
+ clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state);
+ blk_end_request_all(rq, err);
+ goto out;
+ }
+ /*
+ * In case of error, cmdq_req->data.bytes_xfered is set to 0.
+ * If we call blk_end_request() with nr_bytes as 0 then the request
+ * never gets completed. So in case of error, to complete a request
+ * with error we should use blk_end_request_all().
+ */
+ if (err && cmdq_req->skip_err_handling) {
+ cmdq_req->skip_err_handling = false;
+ blk_end_request_all(rq, err);
+ goto out;
+ }
+
+ blk_end_request(rq, err, cmdq_req->data.bytes_xfered);
+
+out:
+
+ mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd);
+ if (!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) {
+ mmc_host_clk_release(host);
+ wake_up(&ctx_info->wait);
+ mmc_put_card(host->card);
+ }
+
+ if (!ctx_info->active_reqs)
+ wake_up_interruptible(&host->cmdq_ctx.queue_empty_wq);
+
+ if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs)
+ complete(&mq->cmdq_shutdown_complete);
+
+ return;
+}
+
+/*
+ * Complete reqs from block layer softirq context
+ * Invoked in irq context
+ */
+void mmc_blk_cmdq_req_done(struct mmc_request *mrq)
+{
+ struct request *req = mrq->req;
+
+ blk_complete_request(req);
+}
+EXPORT_SYMBOL(mmc_blk_cmdq_req_done);
+
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
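A note on the CMDQ tag bookkeeping introduced above: every issued request sets per-tag bits in the shared mmc_cmdq_context_info, and the completion path must clear them before waking waiters. A compact summary of the lifecycle (condensed from the functions above, not new code):

	/* issue (mmc_blk_cmdq_issue_rw_rq): */
	test_and_set_bit(req->tag, &ctx->active_reqs);
	test_and_set_bit(req->tag, &ctx->data_active_reqs);	/* data requests only */

	/* completion (mmc_blk_cmdq_complete_rq): */
	test_and_clear_bit(tag, &ctx->active_reqs);
	test_and_clear_bit(tag, &ctx->data_active_reqs);
	mmc_cmdq_post_req(host, tag, err);			/* host post-processing */

	/* DCMDs (flush/discard) track state via the
	 * CMDQ_STATE_DCMD_ACTIVE bit instead of data_active_reqs. */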
@@ -2204,6 +3659,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
struct mmc_async_req *areq;
const u8 packed_nr = 2;
u8 reqs = 0;
+ bool reset = false;
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
unsigned long waitfor = jiffies;
#endif
@@ -2239,7 +3695,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
areq = mmc_start_req(card->host, areq, (int *) &status);
if (!areq) {
if (status == MMC_BLK_NEW_REQUEST)
- mq->flags |= MMC_QUEUE_NEW_REQUEST;
+ set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
return 0;
}
@@ -2249,6 +3705,26 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
+ if (card->err_in_sdr104) {
+ /*
+ * Data CRC/timeout errors will manifest as CMD/DATA
+ * ERR, but we'd like to retry these too. No harm is
+ * done if this fails multiple times; we reduce the
+ * bus speed and retry the same request anyway. If
+ * that fails as well, we don't override this status.
+ */
+ if (status == MMC_BLK_ABORT ||
+ status == MMC_BLK_CMD_ERR ||
+ status == MMC_BLK_DATA_ERR ||
+ status == MMC_BLK_RETRY)
+ /* reset on all of these errors and retry */
+ reset = true;
+
+ status = MMC_BLK_RETRY;
+ card->err_in_sdr104 = false;
+ }
+
switch (status) {
case MMC_BLK_SUCCESS:
case MMC_BLK_PARTIAL:
@@ -2289,11 +3765,36 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
break;
case MMC_BLK_RETRY:
retune_retry_done = brq->retune_retry_done;
- if (retry++ < 5)
+ if (retry++ < MMC_BLK_MAX_RETRIES) {
+ break;
+ } else if (reset) {
+ reset = false;
+ /*
+ * If we exhaust all the retries due to
+ * CRC/timeout errors in SDR104 mode with UHS SD
+ * cards, re-configure the card in SDR50
+ * bus-speed mode.
+ * All subsequent re-inits of this card will be
+ * in SDR50 mode, unless it is removed and
+ * re-inserted. A newly inserted UHS SD card may
+ * again start in SDR104 mode if it supports it.
+ */
+ pr_err("%s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ req->rq_disk->disk_name);
+ mmc_host_clear_sdr104(card->host);
+ mmc_suspend_clk_scaling(card->host);
+ mmc_blk_reset(md, card->host, type);
+ /* SDR104 mode is blocked from now on */
+ card->sdr104_blocked = true;
+ /* restart the retry counter */
+ retry = 0;
break;
+ }
/* Fall through */
case MMC_BLK_ABORT:
- if (!mmc_blk_reset(md, card->host, type))
+ if (!mmc_blk_reset(md, card->host, type) &&
+ (retry++ < (MMC_BLK_MAX_RETRIES + 1)))
break;
goto cmd_abort;
case MMC_BLK_DATA_ERR: {
@@ -2302,10 +3803,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
err = mmc_blk_reset(md, card->host, type);
if (!err)
break;
- if (err == -ENODEV ||
- mmc_packed_cmd(mq_rq->cmd_type))
- goto cmd_abort;
- /* Fall through */
+ goto cmd_abort;
}
case MMC_BLK_ECC_ERR:
if (brq->data.blocks > 1) {
@@ -2389,6 +3887,132 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 0;
}
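+
+/*
+ * Switch the active eMMC partition while the card is in command queue
+ * (CMDQ) mode: CMDQ is halted on the host and disabled on the card
+ * around the EXT_CSD PART_CONFIG switch, then re-enabled and unhalted
+ * afterwards.
+ */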
+static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+ u8 part_config = card->ext_csd.part_config;
+
+ if ((main_md->part_curr == md->part_type) &&
+ (card->part_curr == md->part_type))
+ return 0;
+
+ WARN_ON(!((card->host->caps2 & MMC_CAP2_CMD_QUEUE) &&
+ card->ext_csd.cmdq_support &&
+ (md->flags & MMC_BLK_CMD_QUEUE)));
+
+ if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state))
+ WARN_ON(mmc_cmdq_halt(host, true));
+
+ /* disable CQ mode in card */
+ if (mmc_card_cmdq(card)) {
+ WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CMDQ, 0,
+ card->ext_csd.generic_cmd6_time));
+ mmc_card_clr_cmdq(card);
+ }
+
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ part_config |= md->part_type;
+
+ WARN_ON(mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG, part_config,
+ card->ext_csd.part_time));
+
+ card->ext_csd.part_config = part_config;
+ card->part_curr = md->part_type;
+
+ main_md->part_curr = md->part_type;
+
+ WARN_ON(mmc_blk_cmdq_switch(card, md, true));
+ WARN_ON(mmc_cmdq_halt(host, false));
+
+ return 0;
+}
+
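+/*
+ * Top-level CMDQ issue path: claim the card, stop any ongoing background
+ * operations, switch to the target partition, then route the request to
+ * the discard, flush or read/write helper according to its command flags.
+ */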
+static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
+{
+ int ret;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ unsigned int cmd_flags = req ? req->cmd_flags : 0;
+
+ mmc_get_card(card);
+
+ if (!card->host->cmdq_ctx.active_reqs && mmc_card_doing_bkops(card)) {
+ ret = mmc_cmdq_halt(card->host, true);
+ if (ret)
+ goto out;
+ ret = mmc_stop_bkops(card);
+ if (ret) {
+ pr_err("%s: %s: mmc_stop_bkops failed %d\n",
+ md->disk->disk_name, __func__, ret);
+ goto out;
+ }
+ ret = mmc_cmdq_halt(card->host, false);
+ if (ret)
+ goto out;
+ }
+
+ ret = mmc_blk_cmdq_part_switch(card, md);
+ if (ret) {
+ pr_err("%s: %s: partition switch failed %d\n",
+ md->disk->disk_name, __func__, ret);
+ goto out;
+ }
+
+ if (req) {
+ struct mmc_host *host = card->host;
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+
+ if ((cmd_flags & (REQ_FLUSH | REQ_DISCARD)) &&
+ (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) &&
+ ctx->active_small_sector_read_reqs) {
+ ret = wait_event_interruptible(ctx->queue_empty_wq,
+ !ctx->active_reqs);
+ if (ret) {
+ pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n",
+ mmc_hostname(host),
+ __func__, ret);
+ BUG_ON(1);
+ }
+ /* clear the counter now */
+ ctx->active_small_sector_read_reqs = 0;
+ /*
+ * If there were small sector (less than 8 sectors) read
+ * operations in progress then we have to wait for the
+ * outstanding requests to finish and should also have
+ * at least 6 microseconds of delay before queuing the DCMD
+ * request.
+ */
+ udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD);
+ }
+
+ if (cmd_flags & REQ_DISCARD) {
+ if (cmd_flags & REQ_SECURE &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ ret = mmc_blk_cmdq_issue_secdiscard_rq(mq, req);
+ else
+ ret = mmc_blk_cmdq_issue_discard_rq(mq, req);
+ } else if (cmd_flags & REQ_FLUSH) {
+ ret = mmc_blk_cmdq_issue_flush_rq(mq, req);
+ } else {
+ ret = mmc_blk_cmdq_issue_rw_rq(mq, req);
+ }
+ }
+
+ return ret;
+
+out:
+ if (req)
+ blk_end_request_all(req, ret);
+ mmc_put_card(card);
+
+ return ret;
+}
+
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
@@ -2397,13 +4021,31 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_host *host = card->host;
unsigned long flags;
unsigned int cmd_flags = req ? req->cmd_flags : 0;
+ int err;
- if (req && !mq->mqrq_prev->req)
+ if (req && !mq->mqrq_prev->req) {
/* claim host only for the first request */
mmc_get_card(card);
+ if (mmc_card_doing_bkops(host->card)) {
+ ret = mmc_stop_bkops(host->card);
+ if (ret)
+ goto out;
+ }
+ }
+
ret = mmc_blk_part_switch(card, md);
+
if (ret) {
+ err = mmc_blk_reset(md, card->host, MMC_BLK_PARTSWITCH);
+ if (!err) {
+ pr_err("%s: mmc_blk_reset(MMC_BLK_PARTSWITCH) succeeded.\n",
+ mmc_hostname(host));
+ mmc_blk_reset_success(md, MMC_BLK_PARTSWITCH);
+ } else {
+ pr_err("%s: mmc_blk_reset(MMC_BLK_PARTSWITCH) failed.\n",
+ mmc_hostname(host));
+ }
+
if (req) {
blk_end_request_all(req, -EIO);
}
@@ -2411,16 +4053,19 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
goto out;
}
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ mmc_blk_write_packing_control(mq, req);
+
+ clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
if (cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- if (req->cmd_flags & REQ_SECURE)
+ if (cmd_flags & REQ_SECURE &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (cmd_flags & REQ_FLUSH) {
+ } else if (cmd_flags & (REQ_FLUSH | REQ_BARRIER)) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -2435,7 +4080,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
out:
- if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
+ if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
(cmd_flags & MMC_REQ_SPECIAL_MASK))
/*
* Release host when there are no more requests
@@ -2505,7 +4150,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
+ ret = mmc_init_queue(&md->queue, card, NULL, subname, area_type);
if (ret)
goto err_putdisk;
@@ -2562,7 +4207,16 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
}
- if (mmc_card_mmc(card) &&
+ if (card->cmdq_init) {
+ md->flags |= MMC_BLK_CMD_QUEUE;
+ md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq;
+ md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq;
+ md->queue.cmdq_error_fn = mmc_blk_cmdq_err;
+ md->queue.cmdq_req_timed_out = mmc_blk_cmdq_req_timed_out;
+ md->queue.cmdq_shutdown = mmc_blk_cmdq_shutdown;
+ }
+
+ if (mmc_card_mmc(card) && !card->cmdq_init &&
(area_type == MMC_BLK_DATA_AREA_MAIN) &&
(md->flags & MMC_BLK_CMD23) &&
card->ext_csd.packed_event_en) {
@@ -2575,8 +4229,11 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
err_putdisk:
put_disk(md->disk);
err_kfree:
+ if (!subname)
+ __clear_bit(md->name_idx, name_use);
kfree(md);
out:
+ __clear_bit(devidx, dev_use);
return ERR_PTR(ret);
}
@@ -2672,6 +4329,10 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
mmc_cleanup_queue(&md->queue);
if (md->flags & MMC_BLK_PACKED_CMD)
mmc_packed_clean(&md->queue);
+ if (md->flags & MMC_BLK_CMD_QUEUE)
+ mmc_cmdq_clean(&md->queue, card);
+ device_remove_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2760,8 +4421,37 @@ static int mmc_add_disk(struct mmc_blk_data *md)
if (ret)
goto power_ro_lock_fail;
}
+
+ md->num_wr_reqs_to_start_packing.show =
+ num_wr_reqs_to_start_packing_show;
+ md->num_wr_reqs_to_start_packing.store =
+ num_wr_reqs_to_start_packing_store;
+ sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr);
+ md->num_wr_reqs_to_start_packing.attr.name =
+ "num_wr_reqs_to_start_packing";
+ md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
+ if (ret)
+ goto num_wr_reqs_to_start_packing_fail;
+
+ md->no_pack_for_random.show = no_pack_for_random_show;
+ md->no_pack_for_random.store = no_pack_for_random_store;
+ sysfs_attr_init(&md->no_pack_for_random.attr);
+ md->no_pack_for_random.attr.name = "no_pack_for_random";
+ md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->no_pack_for_random);
+ if (ret)
+ goto no_pack_for_random_fails;
+
return ret;
+no_pack_for_random_fails:
+ device_remove_file(disk_to_dev(md->disk),
+ &md->num_wr_reqs_to_start_packing);
+num_wr_reqs_to_start_packing_fail:
+ device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock);
power_ro_lock_fail:
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
@@ -2784,6 +4474,11 @@ force_ro_fail:
#define CID_MANFID_SAMSUNG 0x15
#define CID_MANFID_KINGSTON 0x70
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+
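+/*
+ * Each MMC_FIXUP() entry matches cards by CID product name, manufacturer
+ * id and OEM id (CID_NAME_ANY/CID_OEMID_ANY act as wildcards) and applies
+ * the listed quirk to matching cards at probe time.
+ */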
static const struct mmc_fixup blk_fixups[] =
{
MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
@@ -2815,6 +4510,8 @@ static const struct mmc_fixup blk_fixups[] =
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY,
+ add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD),
/*
* Some MMC cards need longer data read timeout than indicated in CSD.
@@ -2825,6 +4522,20 @@ static const struct mmc_fixup blk_fixups[] =
MMC_QUIRK_LONG_READ_TIME),
/*
+ * Some Samsung MMC cards need longer data read timeout than
+ * indicated in CSD.
+ */
+ MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
+ * Hynix eMMC cards need longer data read timeout than
+ * indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
* On these Samsung MoviNAND parts, performing secure erase or
* secure trim can result in unrecoverable corruption due to a
* firmware bug.
@@ -2855,6 +4566,32 @@ static const struct mmc_fixup blk_fixups[] =
MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_TRIM_BROKEN),
+ /* Some INAND MCP devices advertise incorrect timeout values */
+ MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_INAND_DATA_TIMEOUT),
+
+ /*
+ * On these Samsung MoviNAND parts, performing secure erase or
+ * secure trim can result in unrecoverable corruption due to a
+ * firmware bug.
+ */
+ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
END_FIXUP
};
@@ -2886,6 +4623,10 @@ static int mmc_blk_probe(struct mmc_card *card)
dev_set_drvdata(&card->dev, md);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
+
if (mmc_add_disk(md))
goto out;
@@ -2894,7 +4635,8 @@ static int mmc_blk_probe(struct mmc_card *card)
goto out;
}
- pm_runtime_set_autosuspend_delay(&card->dev, 3000);
+ pm_runtime_set_autosuspend_delay(&card->dev, MMC_AUTOSUSPEND_DELAY_MS);
pm_runtime_use_autosuspend(&card->dev);
/*
@@ -2928,25 +4670,41 @@ static void mmc_blk_remove(struct mmc_card *card)
pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
dev_set_drvdata(&card->dev, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
-static int _mmc_blk_suspend(struct mmc_card *card)
+static int _mmc_blk_suspend(struct mmc_card *card, bool wait)
{
struct mmc_blk_data *part_md;
struct mmc_blk_data *md = dev_get_drvdata(&card->dev);
+ int rc = 0;
if (md) {
- mmc_queue_suspend(&md->queue);
+ rc = mmc_queue_suspend(&md->queue, wait);
+ if (rc)
+ goto out;
list_for_each_entry(part_md, &md->part, part) {
- mmc_queue_suspend(&part_md->queue);
+ rc = mmc_queue_suspend(&part_md->queue, wait);
+ if (rc)
+ goto out_resume;
}
}
- return 0;
+ goto out;
+
+ out_resume:
+ mmc_queue_resume(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_resume(&part_md->queue);
+ }
+ out:
+ return rc;
}
static void mmc_blk_shutdown(struct mmc_card *card)
{
- _mmc_blk_suspend(card);
+ _mmc_blk_suspend(card, true);
}
#ifdef CONFIG_PM_SLEEP
@@ -2954,7 +4712,7 @@ static int mmc_blk_suspend(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
- return _mmc_blk_suspend(card);
+ return _mmc_blk_suspend(card, false);
}
static int mmc_blk_resume(struct device *dev)
diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c
new file mode 100644
index 000000000000..967affa11d9e
--- /dev/null
+++ b/drivers/mmc/card/mmc_block_test.c
@@ -0,0 +1,2038 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* MMC block test */
+
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/delay.h>
+#include <linux/test-iosched.h>
+#include "queue.h"
+
+#define MODULE_NAME "mmc_block_test"
+#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */
+#define TEST_MAX_BIOS_PER_REQ 120
+#define CMD23_PACKED_BIT (1 << 30)
+#define LARGE_PRIME_1 1103515367
+#define LARGE_PRIME_2 35757
+#define PACKED_HDR_VER_MASK 0x000000FF
+#define PACKED_HDR_RW_MASK 0x0000FF00
+#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000
+#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000
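+
+/*
+ * Layout of the first word of the packed write header, as manipulated by
+ * test_invalid_packed_cmd() below:
+ *	bits [7:0]   - header version
+ *	bits [15:8]  - R/W code
+ *	bits [23:16] - number of packed requests
+ */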
+
+#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args)
+#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args)
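+
+/*
+ * Example: test_pr_info("%s: sending %d requests", __func__, n) prints
+ * "mmc_block_test: <function name>: sending <n> requests".
+ */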
+
+enum is_random {
+ NON_RANDOM_TEST,
+ RANDOM_TEST,
+};
+
+enum mmc_block_test_testcases {
+ /* Start of send write packing test group */
+ SEND_WRITE_PACKING_MIN_TESTCASE,
+ TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE,
+ TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS,
+ TEST_STOP_DUE_TO_FLUSH,
+ TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS,
+ TEST_STOP_DUE_TO_EMPTY_QUEUE,
+ TEST_STOP_DUE_TO_MAX_REQ_NUM,
+ TEST_STOP_DUE_TO_THRESHOLD,
+ SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD,
+
+ /* Start of err check test group */
+ ERR_CHECK_MIN_TESTCASE,
+ TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE,
+ TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS,
+ TEST_RET_PARTIAL_FOLLOWED_BY_ABORT,
+ TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS,
+ TEST_RET_PARTIAL_MAX_FAIL_IDX,
+ TEST_RET_RETRY,
+ TEST_RET_CMD_ERR,
+ TEST_RET_DATA_ERR,
+ ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR,
+
+ /* Start of send invalid test group */
+ INVALID_CMD_MIN_TESTCASE,
+ TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE,
+ TEST_HDR_WRONG_WRITE_CODE,
+ TEST_HDR_INVALID_RW_CODE,
+ TEST_HDR_DIFFERENT_ADDRESSES,
+ TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL,
+ TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL,
+ TEST_HDR_CMD23_PACKED_BIT_SET,
+ TEST_CMD23_MAX_PACKED_WRITES,
+ TEST_CMD23_ZERO_PACKED_WRITES,
+ TEST_CMD23_PACKED_BIT_UNSET,
+ TEST_CMD23_REL_WR_BIT_SET,
+ TEST_CMD23_BITS_16TO29_SET,
+ TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+ INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT,
+
+ /*
+ * Start of packing control test group.
+ * In the following testcase names the abbreviation FB means
+ * "followed by".
+ */
+ PACKING_CONTROL_MIN_TESTCASE,
+ TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ =
+ PACKING_CONTROL_MIN_TESTCASE,
+ TEST_PACKING_EXP_N_OVER_TRIGGER,
+ TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ,
+ TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N,
+ TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER,
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS,
+ TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS,
+ TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER,
+ TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER,
+ TEST_PACK_MIX_PACKED_NO_PACKED_PACKED,
+ TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+ PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED,
+};
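+
+/*
+ * Each test group above is delimited by a *_MIN_TESTCASE/*_MAX_TESTCASE
+ * pair, so code can tell which group a given testcase number belongs to.
+ */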
+
+enum mmc_block_test_group {
+ TEST_NO_GROUP,
+ TEST_GENERAL_GROUP,
+ TEST_SEND_WRITE_PACKING_GROUP,
+ TEST_ERR_CHECK_GROUP,
+ TEST_SEND_INVALID_GROUP,
+ TEST_PACKING_CONTROL_GROUP,
+};
+
+struct mmc_block_test_debug {
+ struct dentry *send_write_packing_test;
+ struct dentry *err_check_test;
+ struct dentry *send_invalid_packed_test;
+ struct dentry *random_test_seed;
+ struct dentry *packing_control_test;
+};
+
+struct mmc_block_test_data {
+ /* The number of write requests that the test will issue */
+ int num_requests;
+ /* The expected write packing statistics for the current test */
+ struct mmc_wr_pack_stats exp_packed_stats;
+ /*
+ * A user-defined seed for the random choice of the number of bios
+ * written in a request and of the number of requests issued in a
+ * test. This field is pseudo-randomly updated after each use.
+ */
+ unsigned int random_test_seed;
+ /* A retry counter used in err_check tests */
+ int err_check_counter;
+ /* Can be one of the values of enum test_group */
+ enum mmc_block_test_group test_group;
+ /*
+ * Indicates if the current testcase is running with random values of
+ * num_requests and num_bios (in each request)
+ */
+ int is_random;
+ /* Data structure for debugfs dentries */
+ struct mmc_block_test_debug debug;
+ /*
+ * Data structure containing individual test information, including
+ * self-defined specific data
+ */
+ struct test_info test_info;
+ /* mmc block device test */
+ struct blk_dev_test_type bdt;
+};
+
+static struct mmc_block_test_data *mbtd;
+
+void print_mmc_packing_stats(struct mmc_card *card)
+{
+ int i;
+ int max_num_of_packed_reqs = 0;
+
+ if ((!card) || (!card->wr_pack_stats.packing_events))
+ return;
+
+ max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+ spin_lock(&card->wr_pack_stats.lock);
+
+ pr_info("%s: write packing statistics:\n",
+ mmc_hostname(card->host));
+
+ for (i = 1; i <= max_num_of_packed_reqs; ++i) {
+ if (card->wr_pack_stats.packing_events[i] != 0)
+ pr_info("%s: Packed %d reqs - %d times\n",
+ mmc_hostname(card->host), i,
+ card->wr_pack_stats.packing_events[i]);
+ }
+
+ pr_info("%s: stopped packing due to the following reasons:\n",
+ mmc_hostname(card->host));
+
+ if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS])
+ pr_info("%s: %d times: exceeding the max num of segments\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+ if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS])
+ pr_info("%s: %d times: exceeding the max num of sectors\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+ if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR])
+ pr_info("%s: %d times: wrong data direction\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]);
+ if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD])
+ pr_info("%s: %d times: flush or discard\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+ if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE])
+ pr_info("%s: %d times: empty queue\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]);
+ if (card->wr_pack_stats.pack_stop_reason[REL_WRITE])
+ pr_info("%s: %d times: rel write\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[REL_WRITE]);
+ if (card->wr_pack_stats.pack_stop_reason[THRESHOLD])
+ pr_info("%s: %d times: threshold\n",
+ mmc_hostname(card->host),
+ card->wr_pack_stats.pack_stop_reason[THRESHOLD]);
+
+ spin_unlock(&card->wr_pack_stats.lock);
+}
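+
+/*
+ * Example output (hypothetical counts):
+ *	mmc0: write packing statistics:
+ *	mmc0: Packed 2 reqs - 5 times
+ *	mmc0: stopped packing due to the following reasons:
+ *	mmc0: 5 times: empty queue
+ */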
+
+/*
+ * A callback assigned to the packed_test_fn field.
+ * Called from block layer in mmc_blk_packed_hdr_wrq_prep.
+ * Here we alter the packed header or CMD23 in order to send an invalid
+ * packed command to the card.
+ */
+static void test_invalid_packed_cmd(struct request_queue *q,
+ struct mmc_queue_req *mqrq)
+{
+ struct mmc_queue *mq = q->queuedata;
+ u32 *packed_cmd_hdr = mqrq->packed->cmd_hdr;
+ struct request *req = mqrq->req;
+ struct request *second_rq;
+ struct test_request *test_rq;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ int num_requests;
+ int max_packed_reqs;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return;
+ }
+
+ test_rq = (struct test_request *)req->elv.priv[0];
+ if (!test_rq) {
+ test_pr_err("%s: NULL test_rq", __func__);
+ return;
+ }
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ switch (mbtd->test_info.testcase) {
+ case TEST_HDR_INVALID_VERSION:
+ test_pr_info("%s: set invalid header version", __func__);
+ /* Put 0 in header version field (1 byte, offset 0 in header) */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK;
+ break;
+ case TEST_HDR_WRONG_WRITE_CODE:
+ test_pr_info("%s: wrong write code", __func__);
+ /* Set R/W field with R value (1 byte, offset 1 in header) */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100;
+ break;
+ case TEST_HDR_INVALID_RW_CODE:
+ test_pr_info("%s: invalid r/w code", __func__);
+ /* Set R/W field with invalid value */
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK;
+ packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400;
+ break;
+ case TEST_HDR_DIFFERENT_ADDRESSES:
+ test_pr_info("%s: different addresses", __func__);
+ second_rq = list_entry(req->queuelist.next, struct request,
+ queuelist);
+ test_pr_info("%s: req->sector=%ld, second_rq->sector=%ld",
+ __func__, (long)req->__sector,
+ (long)second_rq->__sector);
+ /*
+ * Put start sector of second write request in the first write
+ * request's cmd25 argument in the packed header
+ */
+ packed_cmd_hdr[3] = second_rq->__sector;
+ break;
+ case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+ test_pr_info("%s: request num smaller than actual", __func__);
+ num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+ >> 16;
+ /* num of entries is decremented by 1 */
+ num_requests = (num_requests - 1) << 16;
+ /*
+ * Set number of requests field in packed write header to be
+ * smaller than the actual number (1 byte, offset 2 in header)
+ */
+ packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+ ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+ break;
+ case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+ test_pr_info("%s: request num larger than actual", __func__);
+ num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK)
+ >> 16;
+ /* num of entries is incremented by 1 */
+ num_requests = (num_requests + 1) << 16;
+ /*
+ * Set number of requests field in packed write header to be
+ * larger than the actual number (1 byte, offset 2 in header).
+ */
+ packed_cmd_hdr[0] = (packed_cmd_hdr[0] &
+ ~PACKED_HDR_NUM_REQS_MASK) + num_requests;
+ break;
+ case TEST_HDR_CMD23_PACKED_BIT_SET:
+ test_pr_info("%s: header CMD23 packed bit set", __func__);
+ /*
+ * Set packed bit (bit 30) in cmd23 argument of first and second
+ * write requests in packed write header.
+ * These are located at bytes 2 and 4 in packed write header
+ */
+ packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT;
+ packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT;
+ break;
+ case TEST_CMD23_MAX_PACKED_WRITES:
+ test_pr_info("%s: CMD23 request num > max_packed_reqs",
+ __func__);
+ /*
+ * Set the individual packed cmd23 request num to
+ * max_packed_reqs + 1
+ */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1);
+ break;
+ case TEST_CMD23_ZERO_PACKED_WRITES:
+ test_pr_info("%s: CMD23 request num = 0", __func__);
+ /* Set the individual packed cmd23 request num to zero */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED;
+ break;
+ case TEST_CMD23_PACKED_BIT_UNSET:
+ test_pr_info("%s: CMD23 packed bit unset", __func__);
+ /*
+ * Set the individual packed cmd23 packed bit to 0,
+ * although there is a packed write request
+ */
+ brq->sbc.arg &= ~CMD23_PACKED_BIT;
+ break;
+ case TEST_CMD23_REL_WR_BIT_SET:
+ test_pr_info("%s: CMD23 REL WR bit set", __func__);
+ /* Set the individual packed cmd23 reliable write bit */
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR;
+ break;
+ case TEST_CMD23_BITS_16TO29_SET:
+ test_pr_info("%s: CMD23 bits [16-29] set", __func__);
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+ PACKED_HDR_BITS_16_TO_29_SET;
+ break;
+ case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+ test_pr_info("%s: CMD23 hdr not in block count", __func__);
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+ ((rq_data_dir(req) == READ) ? 0 : mqrq->packed->blocks);
+ break;
+ default:
+ test_pr_err("%s: unexpected testcase %d",
+ __func__, mbtd->test_info.testcase);
+ break;
+ }
+}
+
+/*
+ * A callback assigned to the err_check_fn field of the mmc_request by the
+ * MMC/card/block layer.
+ * Called upon request completion by the MMC/core layer.
+ * Here we emulate an error return value from the card.
+ */
+static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request_queue *req_q = test_iosched_get_req_queue();
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+ int ret = 0;
+
+ if (req_q)
+ mq = req_q->queuedata;
+ else {
+ test_pr_err("%s: NULL request_queue", __func__);
+ return 0;
+ }
+
+ if (!mq) {
+ test_pr_err("%s: %s: NULL mq", __func__,
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ if (!mq_rq) {
+ test_pr_err("%s: %s: NULL mq_rq", __func__,
+ mmc_hostname(card->host));
+ return 0;
+ }
+
+ switch (mbtd->test_info.testcase) {
+ case TEST_RET_ABORT:
+ test_pr_info("%s: return abort", __func__);
+ ret = MMC_BLK_ABORT;
+ break;
+ case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+ test_pr_info("%s: return partial followed by success",
+ __func__);
+ /*
+ * Since in this testcase num_requests is always >= 2,
+ * we can be sure that packed_fail_idx is always >= 1
+ */
+ mq_rq->packed->idx_failure = (mbtd->num_requests / 2);
+ test_pr_info("%s: packed_fail_idx = %d"
+ , __func__, mq_rq->packed->idx_failure);
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+ if (!mbtd->err_check_counter) {
+ test_pr_info("%s: return partial followed by abort",
+ __func__);
+ mbtd->err_check_counter++;
+ /*
+ * Since in this testcase num_requests is always >= 3,
+ * we have that packed_fail_idx is always >= 1
+ */
+ mq_rq->packed->idx_failure = (mbtd->num_requests / 2);
+ test_pr_info("%s: packed_fail_idx = %d"
+ , __func__, mq_rq->packed->idx_failure);
+ ret = MMC_BLK_PARTIAL;
+ break;
+ }
+ mbtd->err_check_counter = 0;
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_ABORT;
+ break;
+ case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+ test_pr_info("%s: return partial multiple until success",
+ __func__);
+ if (++mbtd->err_check_counter >= (mbtd->num_requests)) {
+ mq->err_check_fn = NULL;
+ mbtd->err_check_counter = 0;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ }
+ mq_rq->packed->idx_failure = 1;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ test_pr_info("%s: return partial max fail_idx", __func__);
+ mq_rq->packed->idx_failure = max_packed_reqs - 1;
+ mq->err_check_fn = NULL;
+ ret = MMC_BLK_PARTIAL;
+ break;
+ case TEST_RET_RETRY:
+ test_pr_info("%s: return retry", __func__);
+ ret = MMC_BLK_RETRY;
+ break;
+ case TEST_RET_CMD_ERR:
+ test_pr_info("%s: return cmd err", __func__);
+ ret = MMC_BLK_CMD_ERR;
+ break;
+ case TEST_RET_DATA_ERR:
+ test_pr_info("%s: return data err", __func__);
+ ret = MMC_BLK_DATA_ERR;
+ break;
+ default:
+ test_pr_err("%s: unexpected testcase %d",
+ __func__, mbtd->test_info.testcase);
+ }
+
+ return ret;
+}
+
+/*
+ * This is a specific implementation for the get_test_case_str_fn function
+ * pointer in the test_info data structure. Given a valid test_data
+ * instance, it returns a string describing the test, based on the testcase.
+ */
+static char *get_test_case_str(struct test_data *td)
+{
+ if (!td) {
+ test_pr_err("%s: NULL td", __func__);
+ return NULL;
+ }
+
+ switch (td->test_info.testcase) {
+ case TEST_STOP_DUE_TO_FLUSH:
+ return "Test stop due to flush";
+ case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+ return "Test stop due to flush after max-1 reqs";
+ case TEST_STOP_DUE_TO_READ:
+ return "Test stop due to read";
+ case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+ return "Test stop due to read after max-1 reqs";
+ case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+ return "Test stop due to empty queue";
+ case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+ return "Test stop due to max req num";
+ case TEST_STOP_DUE_TO_THRESHOLD:
+ return "Test stop due to exceeding threshold";
+ case TEST_RET_ABORT:
+ return "Test err_check return abort";
+ case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+ return "Test err_check return partial followed by success";
+ case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+ return "Test err_check return partial followed by abort";
+ case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+ return "Test err_check return partial multiple until success";
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ return "Test err_check return partial max fail index";
+ case TEST_RET_RETRY:
+ return "Test err_check return retry";
+ case TEST_RET_CMD_ERR:
+ return "Test err_check return cmd error";
+ case TEST_RET_DATA_ERR:
+ return "Test err_check return data error";
+ case TEST_HDR_INVALID_VERSION:
+ return "Test invalid - wrong header version";
+ case TEST_HDR_WRONG_WRITE_CODE:
+ return "Test invalid - wrong write code";
+ case TEST_HDR_INVALID_RW_CODE:
+ return "Test invalid - wrong R/W code";
+ case TEST_HDR_DIFFERENT_ADDRESSES:
+ return "Test invalid - header different addresses";
+ case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+ return "Test invalid - header req num smaller than actual";
+ case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+ return "Test invalid - header req num larger than actual";
+ case TEST_HDR_CMD23_PACKED_BIT_SET:
+ return "Test invalid - header cmd23 packed bit set";
+ case TEST_CMD23_MAX_PACKED_WRITES:
+ return "Test invalid - cmd23 max packed writes";
+ case TEST_CMD23_ZERO_PACKED_WRITES:
+ return "Test invalid - cmd23 zero packed writes";
+ case TEST_CMD23_PACKED_BIT_UNSET:
+ return "Test invalid - cmd23 packed bit unset";
+ case TEST_CMD23_REL_WR_BIT_SET:
+ return "Test invalid - cmd23 rel wr bit set";
+ case TEST_CMD23_BITS_16TO29_SET:
+ return "Test invalid - cmd23 bits [16-29] set";
+ case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+ return "Test invalid - cmd23 header block not in count";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER:
+ return "\nTest packing control - pack n";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ return "\nTest packing control - pack n followed by read";
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ return "\nTest packing control - pack n followed by flush";
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ return "\nTest packing control - pack one followed by read";
+ case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
+ return "\nTest packing control - pack threshold";
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ return "\nTest packing control - no packing";
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ return "\nTest packing control - no packing, trigger requests";
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ return "\nTest packing control - no pack, trigger-read-trigger";
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ return "\nTest packing control- no pack, trigger-flush-trigger";
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ return "\nTest packing control - mix: pack -> no pack -> pack";
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ return "\nTest packing control - mix: no pack->pack->no pack";
+ default:
+ return "Unknown testcase";
+ }
+
+ return NULL;
+}
+
+/*
+ * Compare individual testcase's statistics to the expected statistics:
+ * Compare stop reason and number of packing events
+ */
+static int check_wr_packing_statistics(struct test_data *td)
+{
+ struct mmc_wr_pack_stats *mmc_packed_stats;
+ struct mmc_queue *mq = td->req_q->queuedata;
+ int max_packed_reqs;
+ int i;
+ struct mmc_card *card;
+ struct mmc_wr_pack_stats expected_stats;
+ int *stop_reason;
+ int ret = 0;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ /* Dereference mq only after the NULL check above */
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+ card = mq->card;
+ expected_stats = mbtd->exp_packed_stats;
+
+ mmc_packed_stats = mmc_blk_get_packed_statistics(card);
+ if (!mmc_packed_stats) {
+ test_pr_err("%s: NULL mmc_packed_stats", __func__);
+ return -EINVAL;
+ }
+
+ if (!mmc_packed_stats->packing_events) {
+ test_pr_err("%s: NULL packing_events", __func__);
+ return -EINVAL;
+ }
+
+ spin_lock(&mmc_packed_stats->lock);
+
+ if (!mmc_packed_stats->enabled) {
+ test_pr_err("%s write packing statistics are not enabled",
+ __func__);
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ stop_reason = mmc_packed_stats->pack_stop_reason;
+
+ for (i = 1; i <= max_packed_reqs; ++i) {
+ if (mmc_packed_stats->packing_events[i] !=
+ expected_stats.packing_events[i]) {
+ test_pr_err(
+ "%s: Wrong pack stats in index %d, got %d, expected %d",
+ __func__, i, mmc_packed_stats->packing_events[i],
+ expected_stats.packing_events[i]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] !=
+ expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d",
+ __func__, stop_reason[EXCEEDS_SEGMENTS],
+ expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] !=
+ expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d",
+ __func__, stop_reason[EXCEEDS_SECTORS],
+ expected_stats.pack_stop_reason[EXCEEDS_SECTORS]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] !=
+ expected_stats.pack_stop_reason[WRONG_DATA_DIR]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d",
+ __func__, stop_reason[WRONG_DATA_DIR],
+ expected_stats.pack_stop_reason[WRONG_DATA_DIR]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] !=
+ expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d",
+ __func__, stop_reason[FLUSH_OR_DISCARD],
+ expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] !=
+ expected_stats.pack_stop_reason[EMPTY_QUEUE]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d",
+ __func__, stop_reason[EMPTY_QUEUE],
+ expected_stats.pack_stop_reason[EMPTY_QUEUE]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ if (mmc_packed_stats->pack_stop_reason[REL_WRITE] !=
+ expected_stats.pack_stop_reason[REL_WRITE]) {
+ test_pr_err(
+ "%s: Wrong pack stop reason REL_WRITE %d, expected %d",
+ __func__, stop_reason[REL_WRITE],
+ expected_stats.pack_stop_reason[REL_WRITE]);
+ if (td->fs_wr_reqs_during_test)
+ goto cancel_round;
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+exit_err:
+ spin_unlock(&mmc_packed_stats->lock);
+ if (ret && mmc_packed_stats->enabled)
+ print_mmc_packing_stats(card);
+ return ret;
+cancel_round:
+ spin_unlock(&mmc_packed_stats->lock);
+ test_iosched_set_ignore_round(true);
+ return 0;
+}
+
+/*
+ * Pseudo-randomly derive a new seed from the last one and store it back
+ * in *seed_number. Return the new seed modulo max_val, bounded below by
+ * min_val.
+ */
+static unsigned int pseudo_random_seed(unsigned int *seed_number,
+ unsigned int min_val,
+ unsigned int max_val)
+{
+ int ret = 0;
+
+ if (!seed_number)
+ return 0;
+
+ *seed_number = ((unsigned int)(((unsigned long)*seed_number *
+ (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2));
+ ret = (unsigned int)((*seed_number) % max_val);
+
+ return (ret > min_val ? ret : min_val);
+}
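+
+/*
+ * The update above is a 32-bit linear congruential step:
+ *	seed' = seed * LARGE_PRIME_1 + LARGE_PRIME_2 (mod 2^32)
+ * The returned value is seed' % max_val, clamped up to min_val when the
+ * remainder is not greater than min_val.
+ */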
+
+/*
+ * Given a pseudo-random seed, find a pseudo-random num_of_bios.
+ * Make sure the resulting request size stays within TEST_MAX_SECTOR_RANGE.
+ */
+static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed,
+ unsigned int *num_of_bios)
+{
+ do {
+ *num_of_bios = pseudo_random_seed(num_bios_seed, 1,
+ TEST_MAX_BIOS_PER_REQ);
+ if (!(*num_of_bios))
+ *num_of_bios = 1;
+ } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE);
+}
+
+/* Add a single read request to the given td's request queue */
+static int prepare_request_add_read(struct test_data *td)
+{
+ int ret;
+ int start_sec;
+
+ if (td)
+ start_sec = td->start_sector;
+ else {
+ test_pr_err("%s: NULL td", __func__);
+ return 0;
+ }
+
+ test_pr_info("%s: Adding a read request, first req_id=%d", __func__,
+ td->wr_rd_next_req_id);
+
+ ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2,
+ TEST_PATTERN_5A, NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a read request", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Add a single flush request to the given td's request queue */
+static int prepare_request_add_flush(struct test_data *td)
+{
+ int ret;
+
+ if (!td) {
+ test_pr_err("%s: NULL td", __func__);
+ return 0;
+ }
+
+ test_pr_info("%s: Adding a flush request, first req_id=%d", __func__,
+ td->unique_next_req_id);
+ ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH,
+ 0, 0, NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a flush request", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Add num_requests write requests to the given td's request queue.
+ * If random test mode is chosen we pseudo-randomly choose the number of bios
+ * for each write request, otherwise we add between 1 and 5 bios per request.
+ */
+static int prepare_request_add_write_reqs(struct test_data *td,
+ int num_requests, int is_err_expected,
+ int is_random)
+{
+ int i;
+ unsigned int start_sec;
+ int num_bios;
+ int ret = 0;
+ unsigned int *bio_seed = &mbtd->random_test_seed;
+
+ if (td)
+ start_sec = td->start_sector;
+ else {
+ test_pr_err("%s: NULL td", __func__);
+ return ret;
+ }
+
+ test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
+ num_requests, td->wr_rd_next_req_id);
+
+ for (i = 1; i <= num_requests; i++) {
+ start_sec = td->start_sector + 4096 * td->num_of_write_bios;
+ if (is_random)
+ pseudo_rnd_num_of_bios(bio_seed, &num_bios);
+ else
+ /*
+ * For the non-random case, give num_bios a value
+ * between 1 and 5, to keep a small number of BIOs
+ */
+ num_bios = (i % 5) + 1;
+
+ ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+ start_sec, num_bios, TEST_PATTERN_5A, NULL);
+
+ if (ret) {
+ test_pr_err("%s: failed to add a write request",
+ __func__);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Prepare the write, read and flush requests for a generic packed commands
+ * testcase
+ */
+static int prepare_packed_requests(struct test_data *td, int is_err_expected,
+ int num_requests, int is_random)
+{
+ int ret = 0;
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+ struct request_queue *req_q;
+
+ if (!td) {
+ pr_err("%s: NULL td", __func__);
+ return -EINVAL;
+ }
+
+ req_q = td->req_q;
+
+ if (!req_q) {
+ pr_err("%s: NULL request queue", __func__);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ if (mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+
+ ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
+ is_random);
+ if (ret)
+ return ret;
+
+ /* Avoid memory corruption in upcoming stats set */
+ if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD)
+ num_requests--;
+
+ memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
+ sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+ memset(mbtd->exp_packed_stats.packing_events, 0,
+ (max_packed_reqs + 1) * sizeof(u32));
+ if (num_requests <= max_packed_reqs)
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+
+ switch (td->test_info.testcase) {
+ case TEST_STOP_DUE_TO_FLUSH:
+ case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+ ret = prepare_request_add_flush(td);
+ if (ret)
+ return ret;
+
+ mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
+ break;
+ case TEST_STOP_DUE_TO_READ:
+ case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ return ret;
+
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ break;
+ case TEST_STOP_DUE_TO_THRESHOLD:
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+ mbtd->exp_packed_stats.packing_events[1] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ break;
+ case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1;
+ break;
+ default:
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ }
+ mbtd->num_requests = num_requests;
+
+ return 0;
+}
+
+/*
+ * Prepare the write, read and flush requests for the packing control
+ * testcases
+ */
+static int prepare_packed_control_tests_requests(struct test_data *td,
+ int is_err_expected, int num_requests, int is_random)
+{
+ int ret = 0;
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+ int temp_num_req = num_requests;
+ struct request_queue *req_q;
+ int test_packed_trigger;
+ int num_packed_reqs;
+
+ if (!td) {
+ test_pr_err("%s: NULL td\n", __func__);
+ return -EINVAL;
+ }
+
+ req_q = td->req_q;
+
+ if (!req_q) {
+ test_pr_err("%s: NULL request queue\n", __func__);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+ test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+ num_packed_reqs = num_requests - test_packed_trigger;
+
+ if (mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+
+ if (td->test_info.testcase ==
+ TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) {
+ temp_num_req = num_requests;
+ num_requests = test_packed_trigger - 1;
+ }
+
+ /* Verify that the packing is disabled before starting the test */
+ mq->wr_packing_enabled = false;
+ mq->num_of_potential_packed_wr_reqs = 0;
+
+ if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
+ mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1;
+ mq->wr_packing_enabled = true;
+ num_requests = test_packed_trigger + 2;
+ }
+
+ ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected,
+ is_random);
+ if (ret)
+ goto exit;
+
+ if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED)
+ num_requests = temp_num_req;
+
+ memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0,
+ sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+ memset(mbtd->exp_packed_stats.packing_events, 0,
+ (max_packed_reqs + 1) * sizeof(u32));
+
+ switch (td->test_info.testcase) {
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ ret = prepare_request_add_flush(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_packed_reqs,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2;
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ ret = prepare_request_add_flush(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_requests,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+ mbtd->exp_packed_stats.packing_events[num_requests-1] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ break;
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, num_requests,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_read(td);
+ if (ret)
+ goto exit;
+
+ ret = prepare_request_add_write_reqs(td, test_packed_trigger-1,
+ is_err_expected, is_random);
+ if (ret)
+ goto exit;
+
+ mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ break;
+ default:
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+ mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1;
+ }
+ mbtd->num_requests = num_requests;
+
+exit:
+ return ret;
+}
+
+/*
+ * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase.
+ * In this testcase we have mixed error expectations from different
+ * write requests, hence the special prepare function.
+ */
+static int prepare_partial_followed_by_abort(struct test_data *td,
+ int num_requests)
+{
+ int i, start_address;
+ int is_err_expected = 0;
+ int ret = 0;
+ struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+ int max_packed_reqs;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+
+ for (i = 1; i <= num_requests; i++) {
+ if (i > (num_requests / 2))
+ is_err_expected = 1;
+
+ start_address = td->start_sector + 4096 * td->num_of_write_bios;
+ ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE,
+ start_address, (i % 5) + 1, TEST_PATTERN_5A,
+ NULL);
+ if (ret) {
+ test_pr_err("%s: failed to add a write request",
+ __func__);
+ return ret;
+ }
+ }
+
+ memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0,
+ sizeof(mbtd->exp_packed_stats.pack_stop_reason));
+ memset(mbtd->exp_packed_stats.packing_events, 0,
+ (max_packed_reqs + 1) * sizeof(u32));
+ mbtd->exp_packed_stats.packing_events[num_requests] = 1;
+ mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1;
+
+ mbtd->num_requests = num_requests;
+
+ return ret;
+}
+
+/*
+ * Get the number of write requests for the current testcase. If random test
+ * mode was chosen, pseudo-randomly choose the number of requests; otherwise
+ * set it to two less than the maximum number of packed writes.
+ */
+static int get_num_requests(struct test_data *td)
+{
+ unsigned int *seed = &mbtd->random_test_seed;
+ struct request_queue *req_q;
+ struct mmc_queue *mq;
+ int max_num_requests;
+ int num_requests;
+ int min_num_requests = 2;
+ int is_random = mbtd->is_random;
+ int max_for_double;
+ int test_packed_trigger;
+
+ req_q = test_iosched_get_req_queue();
+ if (req_q)
+ mq = req_q->queuedata;
+ else {
+ test_pr_err("%s: NULL request queue", __func__);
+ return 0;
+ }
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_num_requests = mq->card->ext_csd.max_packed_writes;
+ num_requests = max_num_requests - 2;
+ test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+
+ /*
+ * max_for_double is intended for packed control testcases in which
+ * we issue many write requests. Its purpose is to prevent exceeding
+ * the maximum number of req_queue requests.
+ */
+ max_for_double = max_num_requests - 10;
+
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ /* Don't expect packing, so issue up to trigger-1 reqs */
+ num_requests = test_packed_trigger - 1;
+
+ if (is_random) {
+ if (td->test_info.testcase ==
+ TEST_RET_PARTIAL_FOLLOWED_BY_ABORT)
+ /*
+ * Here we don't want num_requests to be less than 1
+ * as a consequence of division by 2.
+ */
+ min_num_requests = 3;
+
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ /* Don't expect packing, so issue up to trigger reqs */
+ max_num_requests = test_packed_trigger;
+
+ num_requests = pseudo_random_seed(seed, min_num_requests,
+ max_num_requests - 1);
+ }
+
+ if (td->test_info.testcase ==
+ TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS)
+ num_requests -= test_packed_trigger;
+
+ if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N)
+ num_requests =
+ num_requests > max_for_double ? max_for_double : num_requests;
+
+ if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP)
+ num_requests += test_packed_trigger;
+
+ if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS)
+ num_requests = test_packed_trigger;
+
+ return num_requests;
+}
+
+/*
+ * An implementation for the prepare_test_fn pointer in the test_info data
+ * structure. According to the testcase, we add the appropriate number of
+ * requests and decide whether an error is expected.
+ */
+static int prepare_test(struct test_data *td)
+{
+ struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+ int max_num_requests;
+ int num_requests = 0;
+ int ret = 0;
+ int is_random = mbtd->is_random;
+ int test_packed_trigger;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ /* Dereference mq only after the NULL check above */
+ test_packed_trigger = mq->num_wr_reqs_to_start_packing;
+ max_num_requests = mq->card->ext_csd.max_packed_writes;
+
+ if (is_random && mbtd->random_test_seed == 0) {
+ mbtd->random_test_seed =
+ (unsigned int)(get_jiffies_64() & 0xFFFF);
+ test_pr_info("%s: got seed from jiffies %d",
+ __func__, mbtd->random_test_seed);
+ }
+
+ num_requests = get_num_requests(td);
+
+ if (mbtd->test_group == TEST_SEND_INVALID_GROUP)
+ mq->packed_test_fn =
+ test_invalid_packed_cmd;
+
+ if (mbtd->test_group == TEST_ERR_CHECK_GROUP)
+ mq->err_check_fn = test_err_check;
+
+ switch (td->test_info.testcase) {
+ case TEST_STOP_DUE_TO_FLUSH:
+ case TEST_STOP_DUE_TO_READ:
+ case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS:
+ case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS:
+ case TEST_STOP_DUE_TO_EMPTY_QUEUE:
+ case TEST_CMD23_PACKED_BIT_UNSET:
+ ret = prepare_packed_requests(td, 0, num_requests, is_random);
+ break;
+ case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS:
+ case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS:
+ ret = prepare_packed_requests(td, 0, max_num_requests - 1,
+ is_random);
+ break;
+ case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT:
+ ret = prepare_partial_followed_by_abort(td, num_requests);
+ break;
+ case TEST_STOP_DUE_TO_MAX_REQ_NUM:
+ case TEST_RET_PARTIAL_MAX_FAIL_IDX:
+ ret = prepare_packed_requests(td, 0, max_num_requests,
+ is_random);
+ break;
+ case TEST_STOP_DUE_TO_THRESHOLD:
+ ret = prepare_packed_requests(td, 0, max_num_requests + 1,
+ is_random);
+ break;
+ case TEST_RET_ABORT:
+ case TEST_RET_RETRY:
+ case TEST_RET_CMD_ERR:
+ case TEST_RET_DATA_ERR:
+ case TEST_HDR_INVALID_VERSION:
+ case TEST_HDR_WRONG_WRITE_CODE:
+ case TEST_HDR_INVALID_RW_CODE:
+ case TEST_HDR_DIFFERENT_ADDRESSES:
+ case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL:
+ case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL:
+ case TEST_CMD23_MAX_PACKED_WRITES:
+ case TEST_CMD23_ZERO_PACKED_WRITES:
+ case TEST_CMD23_REL_WR_BIT_SET:
+ case TEST_CMD23_BITS_16TO29_SET:
+ case TEST_CMD23_HDR_BLK_NOT_IN_COUNT:
+ case TEST_HDR_CMD23_PACKED_BIT_SET:
+ ret = prepare_packed_requests(td, 1, num_requests, is_random);
+ break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER:
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ:
+ case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS:
+ case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS:
+ case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED:
+ case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED:
+ ret = prepare_packed_control_tests_requests(td, 0, num_requests,
+ is_random);
+ break;
+ case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ max_num_requests, is_random);
+ break;
+ case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ test_packed_trigger + 1,
+ is_random);
+ break;
+ case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N:
+ ret = prepare_packed_control_tests_requests(td, 0, num_requests,
+ is_random);
+ break;
+ case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER:
+ case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER:
+ ret = prepare_packed_control_tests_requests(td, 0,
+ test_packed_trigger, is_random);
+ break;
+ default:
+ test_pr_info("%s: Invalid test case...", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
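+/*
+ * Reset the packing statistics and dispatch the prepared requests by
+ * running the request queue. Packing is forced off before the run,
+ * except in the mixed packed/non-packed testcase, which pre-arms it in
+ * prepare_packed_control_tests_requests().
+ */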
+static int run_packed_test(struct test_data *td)
+{
+ struct mmc_queue *mq;
+ struct request_queue *req_q;
+
+ if (!td) {
+ pr_err("%s: NULL td", __func__);
+ return -EINVAL;
+ }
+
+ req_q = td->req_q;
+
+ if (!req_q) {
+ pr_err("%s: NULL request queue", __func__);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+ mmc_blk_init_packed_statistics(mq->card);
+
+ if (td->test_info.testcase != TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
+ /*
+ * Verify that the packing is disabled before starting the
+ * test
+ */
+ mq->wr_packing_enabled = false;
+ mq->num_of_potential_packed_wr_reqs = 0;
+ }
+
+ __blk_run_queue(td->req_q);
+
+ return 0;
+}
+
+/*
+ * An implementation of the post_test_fn in the test_info data structure.
+ * Here we just reset the function pointers in the mmc_queue so that the FS
+ * can dispatch its requests correctly once the test has finished.
+ */
+static int post_test(struct test_data *td)
+{
+ struct mmc_queue *mq;
+
+ if (!td)
+ return -EINVAL;
+
+ mq = td->req_q->queuedata;
+
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ mq->packed_test_fn = NULL;
+ mq->err_check_fn = NULL;
+
+ return 0;
+}
+
+/*
+ * This function checks, based on the current test's test_group, that the
+ * packed commands capability and control are set right. In addition, we check
+ * if the card supports the packed command feature.
+ */
+static int validate_packed_commands_settings(void)
+{
+ struct request_queue *req_q;
+ struct mmc_queue *mq;
+ int max_num_requests;
+ struct mmc_host *host;
+
+ req_q = test_iosched_get_req_queue();
+ if (!req_q) {
+ test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
+ test_iosched_set_test_result(TEST_FAILED);
+ return -EINVAL;
+ }
+
+ mq = req_q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return -EINVAL;
+ }
+
+ max_num_requests = mq->card->ext_csd.max_packed_writes;
+ host = mq->card->host;
+
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR)) {
+ test_pr_err("%s: Packed Write capability disabled, exit test",
+ __func__);
+ test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+ return -EINVAL;
+ }
+
+ if (max_num_requests == 0) {
+ test_pr_err(
+ "%s: no write packing support, ext_csd.max_packed_writes=%d",
+ __func__, mq->card->ext_csd.max_packed_writes);
+ test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+ return -EINVAL;
+ }
+
+	test_pr_info("%s: max number of packed requests supported is %d",
+ __func__, max_num_requests);
+
+ switch (mbtd->test_group) {
+ case TEST_SEND_WRITE_PACKING_GROUP:
+ case TEST_ERR_CHECK_GROUP:
+ case TEST_SEND_INVALID_GROUP:
+ /* disable the packing control */
+ host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
+ break;
+ case TEST_PACKING_CONTROL_GROUP:
+ host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
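
The capability test fixed above must use a bitwise AND: with the logical `&&` that was here before, any non-zero caps2 would pass the check even when the packed-write bit is clear. A minimal userspace illustration of the difference (the bit value is assumed for the example, not taken from the driver headers):

#include <stdio.h>

/* hypothetical bit position, for illustration only */
#define MMC_CAP2_PACKED_WR_EXAMPLE (1U << 10)

int main(void)
{
	unsigned int caps2 = 1U << 2;	/* some unrelated capability set */

	/* logical AND: non-zero && non-zero is true; the bit is never tested */
	printf("&& -> %d\n", !!(caps2 && MMC_CAP2_PACKED_WR_EXAMPLE)); /* 1 */

	/* bitwise AND: true only when the packed-write bit itself is set */
	printf("&  -> %d\n", !!(caps2 & MMC_CAP2_PACKED_WR_EXAMPLE));  /* 0 */

	return 0;
}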
+
+static bool message_repeat;
+static int test_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ message_repeat = 1;
+ return 0;
+}
+
+/* send_packing TEST */
+static ssize_t send_write_packing_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+
+ test_pr_info("%s: -- send_write_packing TEST --", __func__);
+
+ sscanf(buf, "%d", &number);
+
+ if (number <= 0)
+ number = 1;
+
+
+ mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ if (mbtd->random_test_seed > 0)
+ test_pr_info("%s: Test seed: %d", __func__,
+ mbtd->random_test_seed);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.run_test_fn = run_packed_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.post_test_fn = post_test;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = SEND_WRITE_PACKING_MIN_TESTCASE;
+ j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) {
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ break;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ break;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ return count;
+}
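
Note that this handler, like the other debugfs write handlers in this file, passes the `__user` buffer straight to sscanf(). A hedged sketch of the idiomatic parse, using kstrtoint_from_user() to copy and convert in one step (handler name illustrative, body trimmed to the parsing step):

static ssize_t example_test_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	int number;
	int ret;

	/* copy from user space and parse a base-10 integer */
	ret = kstrtoint_from_user(buf, count, 10, &number);
	if (ret)
		return ret;
	if (number <= 0)
		number = 1;

	/* ... run the test cycles exactly as above ... */
	return count;
}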
+
+static ssize_t send_write_packing_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nsend_write_packing_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Pack due to FLUSH message\n"
+ "- Pack due to FLUSH after threshold writes\n"
+ "- Pack due to READ message\n"
+ "- Pack due to READ after threshold writes\n"
+ "- Pack due to empty queue\n"
+ "- Pack due to threshold writes\n"
+ "- Pack due to one over threshold writes\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
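
The read handlers in this file memset() and snprintf() directly into the `__user` buffer and emulate one-shot reads with the message_repeat flag. A hedged sketch of the conventional alternative, where simple_read_from_buffer() performs the copy and the offset bookkeeping makes the flag unnecessary (buffer size is an assumption):

static ssize_t example_test_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	char msg[256];	/* kernel-side buffer; size chosen for illustration */
	int len;

	len = scnprintf(msg, sizeof(msg),
			"\nsend_write_packing_test\n"
			"=========\n"
			"Checks the write packing trigger scenarios\n");

	/* copies at most count bytes, advances *offset, returns 0 at EOF */
	return simple_read_from_buffer(buffer, count, offset, msg, len);
}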
+
+const struct file_operations send_write_packing_test_ops = {
+ .open = test_open,
+ .write = send_write_packing_test_write,
+ .read = send_write_packing_test_read,
+};
+
+/* err_check TEST */
+static ssize_t err_check_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+
+ test_pr_info("%s: -- err_check TEST --", __func__);
+
+ sscanf(buf, "%d", &number);
+
+ if (number <= 0)
+ number = 1;
+
+ mbtd->test_group = TEST_ERR_CHECK_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ if (mbtd->random_test_seed > 0)
+ test_pr_info("%s: Test seed: %d", __func__,
+ mbtd->random_test_seed);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.run_test_fn = run_packed_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.post_test_fn = post_test;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = ERR_CHECK_MIN_TESTCASE;
+ j <= ERR_CHECK_MAX_TESTCASE ; j++) {
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ break;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ break;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ return count;
+}
+
+static ssize_t err_check_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nerr_check_TEST\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Return ABORT\n"
+ "- Return PARTIAL followed by success\n"
+ "- Return PARTIAL followed by abort\n"
+ "- Return PARTIAL multiple times until success\n"
+ "- Return PARTIAL with fail index = threshold\n"
+ "- Return RETRY\n"
+ "- Return CMD_ERR\n"
+ "- Return DATA_ERR\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
+const struct file_operations err_check_test_ops = {
+ .open = test_open,
+ .write = err_check_test_write,
+ .read = err_check_test_read,
+};
+
+/* send_invalid_packed TEST */
+static ssize_t send_invalid_packed_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+ int num_of_failures = 0;
+
+ test_pr_info("%s: -- send_invalid_packed TEST --", __func__);
+
+ sscanf(buf, "%d", &number);
+
+ if (number <= 0)
+ number = 1;
+
+ mbtd->test_group = TEST_SEND_INVALID_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ if (mbtd->random_test_seed > 0)
+ test_pr_info("%s: Test seed: %d", __func__,
+ mbtd->random_test_seed);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.run_test_fn = run_packed_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+ mbtd->test_info.post_test_fn = post_test;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = INVALID_CMD_MIN_TESTCASE;
+ j <= INVALID_CMD_MAX_TESTCASE ; j++) {
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ num_of_failures++;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret)
+ num_of_failures++;
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ if (num_of_failures > 0) {
+ test_iosched_set_test_result(TEST_FAILED);
+ test_pr_err(
+ "There were %d failures during the test, TEST FAILED",
+ num_of_failures);
+ }
+ return count;
+}
+
+static ssize_t send_invalid_packed_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nsend_invalid_packed_TEST\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Send an invalid header version\n"
+ "- Send the wrong write code\n"
+ "- Send an invalid R/W code\n"
+ "- Send wrong start address in header\n"
+ "- Send header with block_count smaller than actual\n"
+ "- Send header with block_count larger than actual\n"
+ "- Send header CMD23 packed bit set\n"
+ "- Send CMD23 with block count over threshold\n"
+ "- Send CMD23 with block_count equals zero\n"
+ "- Send CMD23 packed bit unset\n"
+ "- Send CMD23 reliable write bit set\n"
+ "- Send CMD23 bits [16-29] set\n"
+ "- Send CMD23 header block not in block_count\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
+const struct file_operations send_invalid_packed_test_ops = {
+ .open = test_open,
+ .write = send_invalid_packed_test_write,
+ .read = send_invalid_packed_test_read,
+};
+
+/* packing_control TEST */
+static ssize_t write_packing_control_test_write(struct file *file,
+ const char __user *buf,
+ size_t count,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int i = 0;
+ int number = -1;
+ int j = 0;
+	struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata;
+	int max_num_requests;
+	int test_successful = 1;
+
+	test_pr_info("%s: -- write_packing_control TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return count;
+	}
+
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+	test_pr_info("%s: max_num_requests = %d", __func__,
+		     max_num_requests);
+
+ memset(&mbtd->test_info, 0, sizeof(struct test_info));
+ mbtd->test_group = TEST_PACKING_CONTROL_GROUP;
+
+ if (validate_packed_commands_settings())
+ return count;
+
+ mbtd->test_info.data = mbtd;
+ mbtd->test_info.prepare_test_fn = prepare_test;
+ mbtd->test_info.run_test_fn = run_packed_test;
+ mbtd->test_info.check_test_result_fn = check_wr_packing_statistics;
+ mbtd->test_info.get_test_case_str_fn = get_test_case_str;
+
+ for (i = 0; i < number; ++i) {
+ test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ test_pr_info("%s: ====================", __func__);
+
+ for (j = PACKING_CONTROL_MIN_TESTCASE;
+ j <= PACKING_CONTROL_MAX_TESTCASE; j++) {
+
+ test_successful = 1;
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret) {
+ test_successful = 0;
+ break;
+ }
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+
+ mbtd->test_info.testcase = j;
+ mbtd->is_random = NON_RANDOM_TEST;
+ ret = test_iosched_start_test(&mbtd->test_info);
+ if (ret) {
+ test_successful = 0;
+ break;
+ }
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+
+ if (!test_successful)
+ break;
+ }
+
+ test_pr_info("%s: Completed all the test cases.", __func__);
+
+ return count;
+}
+
+static ssize_t write_packing_control_test_read(struct file *file,
+ char __user *buffer,
+ size_t count,
+ loff_t *offset)
+{
+ memset((void *)buffer, 0, count);
+
+ snprintf(buffer, count,
+ "\nwrite_packing_control_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test checks the following scenarios\n"
+ "- Packing expected - one over trigger\n"
+ "- Packing expected - N over trigger\n"
+ "- Packing expected - N over trigger followed by read\n"
+ "- Packing expected - N over trigger followed by flush\n"
+ "- Packing expected - threshold over trigger FB by flush\n"
+ "- Packing not expected - less than trigger\n"
+ "- Packing not expected - trigger requests\n"
+ "- Packing not expected - trigger, read, trigger\n"
+ "- Mixed state - packing -> no packing -> packing\n"
+ "- Mixed state - no packing -> packing -> no packing\n");
+
+ if (message_repeat == 1) {
+ message_repeat = 0;
+ return strnlen(buffer, count);
+ } else {
+ return 0;
+ }
+}
+
+const struct file_operations write_packing_control_test_ops = {
+ .open = test_open,
+ .write = write_packing_control_test_write,
+ .read = write_packing_control_test_read,
+};
+
+static void mmc_block_test_debugfs_cleanup(void)
+{
+ debugfs_remove(mbtd->debug.random_test_seed);
+ debugfs_remove(mbtd->debug.send_write_packing_test);
+ debugfs_remove(mbtd->debug.err_check_test);
+ debugfs_remove(mbtd->debug.send_invalid_packed_test);
+ debugfs_remove(mbtd->debug.packing_control_test);
+}
+
+static int mmc_block_test_debugfs_init(void)
+{
+ struct dentry *utils_root, *tests_root;
+
+ utils_root = test_iosched_get_debugfs_utils_root();
+ tests_root = test_iosched_get_debugfs_tests_root();
+
+ if (!utils_root || !tests_root)
+ return -EINVAL;
+
+ mbtd->debug.random_test_seed = debugfs_create_u32(
+ "random_test_seed",
+ S_IRUGO | S_IWUGO,
+ utils_root,
+ &mbtd->random_test_seed);
+
+ if (!mbtd->debug.random_test_seed)
+ goto err_nomem;
+
+ mbtd->debug.send_write_packing_test =
+ debugfs_create_file("send_write_packing_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &send_write_packing_test_ops);
+
+ if (!mbtd->debug.send_write_packing_test)
+ goto err_nomem;
+
+ mbtd->debug.err_check_test =
+ debugfs_create_file("err_check_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &err_check_test_ops);
+
+ if (!mbtd->debug.err_check_test)
+ goto err_nomem;
+
+ mbtd->debug.send_invalid_packed_test =
+ debugfs_create_file("send_invalid_packed_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &send_invalid_packed_test_ops);
+
+ if (!mbtd->debug.send_invalid_packed_test)
+ goto err_nomem;
+
+ mbtd->debug.packing_control_test = debugfs_create_file(
+ "packing_control_test",
+ S_IRUGO | S_IWUGO,
+ tests_root,
+ NULL,
+ &write_packing_control_test_ops);
+
+ if (!mbtd->debug.packing_control_test)
+ goto err_nomem;
+
+ return 0;
+
+err_nomem:
+ mmc_block_test_debugfs_cleanup();
+ return -ENOMEM;
+}
+
+static void mmc_block_test_probe(void)
+{
+ struct request_queue *q = test_iosched_get_req_queue();
+ struct mmc_queue *mq;
+ int max_packed_reqs;
+
+ if (!q) {
+ test_pr_err("%s: NULL request queue", __func__);
+ return;
+ }
+
+ mq = q->queuedata;
+ if (!mq) {
+ test_pr_err("%s: NULL mq", __func__);
+ return;
+ }
+
+ max_packed_reqs = mq->card->ext_csd.max_packed_writes;
+	mbtd->exp_packed_stats.packing_events =
+		kcalloc(max_packed_reqs + 1,
+			sizeof(*mbtd->exp_packed_stats.packing_events),
+			GFP_KERNEL);
+	if (!mbtd->exp_packed_stats.packing_events)
+		return;
+
+ mmc_block_test_debugfs_init();
+}
+
+static void mmc_block_test_remove(void)
+{
+ mmc_block_test_debugfs_cleanup();
+}
+
+static int __init mmc_block_test_init(void)
+{
+ mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL);
+ if (!mbtd) {
+ test_pr_err("%s: failed to allocate mmc_block_test_data",
+ __func__);
+		return -ENOMEM;
+ }
+
+ mbtd->bdt.init_fn = mmc_block_test_probe;
+ mbtd->bdt.exit_fn = mmc_block_test_remove;
+ INIT_LIST_HEAD(&mbtd->bdt.list);
+ test_iosched_register(&mbtd->bdt);
+
+ return 0;
+}
+
+static void __exit mmc_block_test_exit(void)
+{
+ test_iosched_unregister(&mbtd->bdt);
+ kfree(mbtd);
+}
+
+module_init(mmc_block_test_init);
+module_exit(mmc_block_test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MMC block test");
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 9a11aaa6e985..12c66919f06f 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2807,7 +2807,8 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
}
#ifdef CONFIG_HIGHMEM
- __free_pages(test->highmem, BUFFER_ORDER);
+ if (test->highmem)
+ __free_pages(test->highmem, BUFFER_ORDER);
#endif
kfree(test->buffer);
kfree(test);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6a4cd2bb4629..ccf22eb5bdc0 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -16,6 +16,8 @@
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -25,6 +27,13 @@
#define MMC_QUEUE_BOUNCESZ 65536
/*
+ * The default number of requests that triggers write packing was chosen
+ * based on benchmark tests: it keeps read latency as low as possible
+ * while maintaining high write throughput.
+ */
+#define DEFAULT_NUM_REQS_TO_START_PACK 17
+
+/*
* Prepare a MMC request. This just filters out odd stuff.
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
@@ -47,10 +56,93 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_OK;
}
+static struct request *mmc_peek_request(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+
+	mq->cmdq_req_peeked = NULL;
+
+ spin_lock_irq(q->queue_lock);
+ if (!blk_queue_stopped(q))
+ mq->cmdq_req_peeked = blk_peek_request(q);
+ spin_unlock_irq(q->queue_lock);
+
+ return mq->cmdq_req_peeked;
+}
+
+static bool mmc_check_blk_queue_start_tag(struct request_queue *q,
+ struct request *req)
+{
+ int ret;
+
+ spin_lock_irq(q->queue_lock);
+ ret = blk_queue_start_tag(q, req);
+ spin_unlock_irq(q->queue_lock);
+
+ return !!ret;
+}
+
+static inline void mmc_cmdq_ready_wait(struct mmc_host *host,
+ struct mmc_queue *mq)
+{
+ struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+ struct request_queue *q = mq->queue;
+
+ /*
+ * Wait until all of the following conditions are true:
+ * 1. There is a request pending in the block layer queue
+ * to be processed.
+ * 2. If the peeked request is flush/discard then there shouldn't
+ * be any other direct command active.
+ * 3. cmdq state should be unhalted.
+ * 4. cmdq state shouldn't be in error state.
+ * 5. free tag available to process the new request.
+ */
+ wait_event(ctx->wait, kthread_should_stop()
+ || (mmc_peek_request(mq) &&
+ !((mq->cmdq_req_peeked->cmd_flags & (REQ_FLUSH | REQ_DISCARD))
+ && test_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx->curr_state))
+ && !(!host->card->part_curr && !mmc_card_suspended(host->card)
+ && mmc_host_halt(host))
+ && !(!host->card->part_curr && mmc_host_cq_disable(host) &&
+ !mmc_card_suspended(host->card))
+ && !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)
+ && !mmc_check_blk_queue_start_tag(q, mq->cmdq_req_peeked)));
+}
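
All five conditions are folded into a single wait_event() predicate above. Purely as a readability sketch (the helper is hypothetical, not part of this patch), the core of that predicate could be expressed as:

static bool mmc_cmdq_can_issue(struct mmc_host *host, struct mmc_queue *mq)
{
	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;

	/* 1: a request must be pending in the block layer queue */
	if (!mmc_peek_request(mq))
		return false;

	/* 2: flush/discard must not overlap an active direct command */
	if ((mq->cmdq_req_peeked->cmd_flags & (REQ_FLUSH | REQ_DISCARD)) &&
	    test_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx->curr_state))
		return false;

	/* 4: the controller must not be in the error state */
	if (test_bit(CMDQ_STATE_ERR, &ctx->curr_state))
		return false;

	/* 3 and 5 (halt/cq-disable checks, free tag) follow the same shape */
	return true;
}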
+
+static int mmc_cmdq_thread(void *d)
+{
+ struct mmc_queue *mq = d;
+ struct mmc_card *card = mq->card;
+ struct mmc_host *host = card->host;
+
+ current->flags |= PF_MEMALLOC;
+ if (card->host->wakeup_on_idle)
+ set_wake_up_idle(true);
+
+ while (1) {
+ int ret = 0;
+
+ mmc_cmdq_ready_wait(host, mq);
+ if (kthread_should_stop())
+ break;
+
+ ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
+		/*
+		 * Don't requeue if issue_fn fails: recovery is handled by
+		 * the completion softirq. The request is also ended on a
+		 * partition switch error, so it must not be requeued here.
+		 */
+ } /* loop */
+
+ return 0;
+}
+
static int mmc_queue_thread(void *d)
{
struct mmc_queue *mq = d;
struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
struct sched_param scheduler_params = {0};
scheduler_params.sched_priority = 1;
@@ -58,6 +150,8 @@ static int mmc_queue_thread(void *d)
sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
current->flags |= PF_MEMALLOC;
+ if (card->host->wakeup_on_idle)
+ set_wake_up_idle(true);
down(&mq->thread_sem);
do {
@@ -75,8 +169,8 @@ static int mmc_queue_thread(void *d)
cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req);
cond_resched();
- if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
- mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ if (test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags)) {
+ clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags);
continue; /* fetch again */
}
@@ -108,6 +202,13 @@ static int mmc_queue_thread(void *d)
return 0;
}
+static void mmc_cmdq_dispatch_req(struct request_queue *q)
+{
+ struct mmc_queue *mq = q->queuedata;
+
+ wake_up(&mq->card->host->cmdq_ctx.wait);
+}
+
/*
* Generic MMC request handler. This is called for any queue on a
* particular host. When the host is not busy, we look for a request
@@ -183,6 +284,32 @@ static void mmc_queue_setup_discard(struct request_queue *q,
}
/**
+ * mmc_cmdq_setup_queue
+ * @mq: mmc queue
+ * @card: card to attach to this queue
+ *
+ * Set up the queue for a CMDQ-capable MMC card
+ */
+void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
+{
+ u64 limit = BLK_BOUNCE_HIGH;
+ struct mmc_host *host = card->host;
+
+ if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
+ limit = *mmc_dev(host)->dma_mask;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
+
+ blk_queue_bounce_limit(mq->queue, limit);
+ blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count,
+ host->max_req_size / 512));
+ blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+ blk_queue_max_segments(mq->queue, host->max_segs);
+}
+
+/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
@@ -192,7 +319,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
* Initialise a MMC card request queue.
*/
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
- spinlock_t *lock, const char *subname)
+ spinlock_t *lock, const char *subname, int area_type)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
@@ -204,6 +331,37 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
+ if (card->ext_csd.cmdq_support &&
+ (area_type == MMC_BLK_DATA_AREA_MAIN)) {
+ mq->queue = blk_init_queue(mmc_cmdq_dispatch_req, lock);
+ if (!mq->queue)
+ return -ENOMEM;
+ mmc_cmdq_setup_queue(mq, card);
+ ret = mmc_cmdq_init(mq, card);
+ if (ret) {
+ pr_err("%s: %d: cmdq: unable to set-up\n",
+ mmc_hostname(card->host), ret);
+ blk_cleanup_queue(mq->queue);
+ } else {
+ sema_init(&mq->thread_sem, 1);
+ /* hook for pm qos cmdq init */
+ if (card->host->cmdq_ops->init)
+ card->host->cmdq_ops->init(card->host);
+ mq->queue->queuedata = mq;
+ mq->thread = kthread_run(mmc_cmdq_thread, mq,
+ "mmc-cmdqd/%d%s",
+ host->index,
+ subname ? subname : "");
+			if (IS_ERR(mq->thread)) {
+				ret = PTR_ERR(mq->thread);
+				pr_err("%s: %d: cmdq: failed to start mmc-cmdqd thread\n",
+					mmc_hostname(card->host), ret);
+			}
+
+ return ret;
+ }
+ }
+
mq->queue = blk_init_queue(mmc_request_fn, lock);
if (!mq->queue)
return -ENOMEM;
@@ -211,6 +369,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
mq->mqrq_cur = mqrq_cur;
mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
+ mq->num_wr_reqs_to_start_packing =
+ min_t(int, (int)card->ext_csd.max_packed_writes,
+ DEFAULT_NUM_REQS_TO_START_PACK);
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -276,24 +437,49 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
#endif
if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
+ unsigned int max_segs = host->max_segs;
+
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
- blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
+retry:
+ blk_queue_max_segments(mq->queue, host->max_segs);
mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
- if (ret)
+ if (ret == -ENOMEM)
+ goto cur_sg_alloc_failed;
+ else if (ret)
goto cleanup_queue;
-
mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
- if (ret)
+ if (ret == -ENOMEM)
+ goto prev_sg_alloc_failed;
+ else if (ret)
+ goto cleanup_queue;
+
+ goto success;
+
+prev_sg_alloc_failed:
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+cur_sg_alloc_failed:
+ host->max_segs /= 2;
+ if (host->max_segs) {
+ goto retry;
+ } else {
+ host->max_segs = max_segs;
goto cleanup_queue;
+ }
}
+success:
sema_init(&mq->thread_sem, 1);
+ /* hook for pm qos legacy init */
+ if (card->host->ops->init)
+ card->host->ops->init(card->host);
+
mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
host->index, subname ? subname : "");
@@ -408,28 +594,192 @@ void mmc_packed_clean(struct mmc_queue *mq)
mqrq_prev->packed = NULL;
}
+static void mmc_cmdq_softirq_done(struct request *rq)
+{
+	struct mmc_queue *mq = rq->q->queuedata;
+
+	mq->cmdq_complete_fn(rq);
+}
+
+static void mmc_cmdq_error_work(struct work_struct *work)
+{
+ struct mmc_queue *mq = container_of(work, struct mmc_queue,
+ cmdq_err_work);
+
+ mq->cmdq_error_fn(mq);
+}
+
+enum blk_eh_timer_return mmc_cmdq_rq_timed_out(struct request *req)
+{
+ struct mmc_queue *mq = req->q->queuedata;
+
+ pr_err("%s: request with tag: %d flags: 0x%llx timed out\n",
+ mmc_hostname(mq->card->host), req->tag, req->cmd_flags);
+
+ return mq->cmdq_req_timed_out(req);
+}
+
+int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+ int i, ret = 0;
+ /* one slot is reserved for dcmd requests */
+ int q_depth = card->ext_csd.cmdq_depth - 1;
+
+ card->cmdq_init = false;
+ if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE)) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq);
+ init_waitqueue_head(&card->host->cmdq_ctx.wait);
+
+ mq->mqrq_cmdq = kzalloc(
+ sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL);
+ if (!mq->mqrq_cmdq) {
+ pr_warn("%s: unable to allocate mqrq's for q_depth %d\n",
+ mmc_card_name(card), q_depth);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* sg is allocated for data request slots only */
+ for (i = 0; i < q_depth; i++) {
+ mq->mqrq_cmdq[i].sg = mmc_alloc_sg(card->host->max_segs, &ret);
+ if (ret) {
+ pr_warn("%s: unable to allocate cmdq sg of size %d\n",
+ mmc_card_name(card),
+ card->host->max_segs);
+ goto free_mqrq_sg;
+ }
+ }
+
+ ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO);
+ if (ret) {
+ pr_warn("%s: unable to allocate cmdq tags %d\n",
+ mmc_card_name(card), q_depth);
+ goto free_mqrq_sg;
+ }
+
+ blk_queue_softirq_done(mq->queue, mmc_cmdq_softirq_done);
+ INIT_WORK(&mq->cmdq_err_work, mmc_cmdq_error_work);
+ init_completion(&mq->cmdq_shutdown_complete);
+ init_completion(&mq->cmdq_pending_req_done);
+
+ blk_queue_rq_timed_out(mq->queue, mmc_cmdq_rq_timed_out);
+ blk_queue_rq_timeout(mq->queue, 120 * HZ);
+ card->cmdq_init = true;
+
+ goto out;
+
+free_mqrq_sg:
+ for (i = 0; i < q_depth; i++)
+ kfree(mq->mqrq_cmdq[i].sg);
+ kfree(mq->mqrq_cmdq);
+ mq->mqrq_cmdq = NULL;
+out:
+ return ret;
+}
+
+void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card)
+{
+ int i;
+ int q_depth = card->ext_csd.cmdq_depth - 1;
+
+ blk_free_tags(mq->queue->queue_tags);
+ mq->queue->queue_tags = NULL;
+ blk_queue_free_tags(mq->queue);
+
+ for (i = 0; i < q_depth; i++)
+ kfree(mq->mqrq_cmdq[i].sg);
+ kfree(mq->mqrq_cmdq);
+ mq->mqrq_cmdq = NULL;
+}
+
/**
* mmc_queue_suspend - suspend a MMC request queue
* @mq: MMC queue to suspend
+ * @wait: Wait till MMC request queue is empty
*
* Stop the block request queue, and wait for our thread to
* complete any outstanding requests. This ensures that we
* won't suspend while a request is being processed.
*/
-void mmc_queue_suspend(struct mmc_queue *mq)
+int mmc_queue_suspend(struct mmc_queue *mq, int wait)
{
struct request_queue *q = mq->queue;
unsigned long flags;
+ int rc = 0;
+ struct mmc_card *card = mq->card;
+ struct request *req;
- if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
- mq->flags |= MMC_QUEUE_SUSPENDED;
+ if (card->cmdq_init && blk_queue_tagged(q)) {
+ struct mmc_host *host = card->host;
- spin_lock_irqsave(q->queue_lock, flags);
- blk_stop_queue(q);
- spin_unlock_irqrestore(q->queue_lock, flags);
+ if (test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))
+ goto out;
+
+		if (wait) {
+ /*
+ * After blk_cleanup_queue is called, wait for all
+ * active_reqs to complete.
+ * Then wait for cmdq thread to exit before calling
+ * cmdq shutdown to avoid race between issuing
+ * requests and shutdown of cmdq.
+ */
+ blk_cleanup_queue(q);
+
+ if (host->cmdq_ctx.active_reqs)
+ wait_for_completion(
+ &mq->cmdq_shutdown_complete);
+ kthread_stop(mq->thread);
+ mq->cmdq_shutdown(mq);
+ } else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ wake_up(&host->cmdq_ctx.wait);
+ req = blk_peek_request(q);
+ if (req || mq->cmdq_req_peeked ||
+ host->cmdq_ctx.active_reqs) {
+ clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags);
+ blk_start_queue(q);
+ rc = -EBUSY;
+ }
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
- down(&mq->thread_sem);
+ goto out;
}
+
+ if (!(test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))) {
+ if (!wait) {
+ /* suspend/stop the queue in case of suspend */
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_stop_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ } else {
+ /* shutdown the queue in case of shutdown/reboot */
+ blk_cleanup_queue(q);
+ }
+
+ rc = down_trylock(&mq->thread_sem);
+ if (rc && !wait) {
+ /*
+ * Failed to take the lock so better to abort the
+ * suspend because mmcqd thread is processing requests.
+ */
+ clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags);
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ rc = -EBUSY;
+ } else if (rc && wait) {
+ down(&mq->thread_sem);
+ rc = 0;
+ }
+ }
+out:
+ return rc;
}
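
After this change mmc_queue_suspend() has two distinct modes: wait == 0 is the non-blocking suspend path that returns -EBUSY if requests are still in flight, while wait != 0 is the shutdown path that drains and tears down the queue. A hedged caller sketch (function name illustrative):

static int example_suspend_or_shutdown(struct mmc_queue *mq, bool shutdown)
{
	if (shutdown) {
		/* reboot/shutdown: drain outstanding requests and tear down */
		return mmc_queue_suspend(mq, 1);
	}

	/*
	 * Runtime/system suspend: must not block; -EBUSY tells the caller
	 * to abort the suspend attempt and retry later.
	 */
	return mmc_queue_suspend(mq, 0);
}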
/**
@@ -439,12 +789,13 @@ void mmc_queue_suspend(struct mmc_queue *mq)
void mmc_queue_resume(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
unsigned long flags;
- if (mq->flags & MMC_QUEUE_SUSPENDED) {
- mq->flags &= ~MMC_QUEUE_SUSPENDED;
+ if (test_and_clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags)) {
- up(&mq->thread_sem);
+ if (!(card->cmdq_init && blk_queue_tagged(q)))
+ up(&mq->thread_sem);
spin_lock_irqsave(q->queue_lock, flags);
blk_start_queue(q);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 1dc4c99f52a1..505712f0e1b0 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -42,22 +42,41 @@ struct mmc_queue_req {
struct mmc_async_req mmc_active;
enum mmc_packed_type cmd_type;
struct mmc_packed *packed;
+ struct mmc_cmdq_req cmdq_req;
};
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
- unsigned int flags;
-#define MMC_QUEUE_SUSPENDED (1 << 0)
-#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+ unsigned long flags;
+#define MMC_QUEUE_SUSPENDED 0
+#define MMC_QUEUE_NEW_REQUEST 1
- int (*issue_fn)(struct mmc_queue *, struct request *);
+ int (*issue_fn)(struct mmc_queue *, struct request *);
+ int (*cmdq_issue_fn)(struct mmc_queue *,
+ struct request *);
+ void (*cmdq_complete_fn)(struct request *);
+ void (*cmdq_error_fn)(struct mmc_queue *);
+ enum blk_eh_timer_return (*cmdq_req_timed_out)(struct request *);
void *data;
struct request_queue *queue;
struct mmc_queue_req mqrq[2];
struct mmc_queue_req *mqrq_cur;
struct mmc_queue_req *mqrq_prev;
+ struct mmc_queue_req *mqrq_cmdq;
+ bool wr_packing_enabled;
+ int num_of_potential_packed_wr_reqs;
+ int num_wr_reqs_to_start_packing;
+ bool no_pack_for_random;
+ struct work_struct cmdq_err_work;
+
+ struct completion cmdq_pending_req_done;
+ struct completion cmdq_shutdown_complete;
+ struct request *cmdq_req_peeked;
+ int (*err_check_fn) (struct mmc_card *, struct mmc_async_req *);
+ void (*packed_test_fn) (struct request_queue *, struct mmc_queue_req *);
+ void (*cmdq_shutdown)(struct mmc_queue *);
#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
atomic_t max_write_speed;
atomic_t max_read_speed;
@@ -69,9 +88,9 @@ struct mmc_queue {
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
- const char *);
+ const char *, int);
extern void mmc_cleanup_queue(struct mmc_queue *);
-extern void mmc_queue_suspend(struct mmc_queue *);
+extern int mmc_queue_suspend(struct mmc_queue *, int);
extern void mmc_queue_resume(struct mmc_queue *);
extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
@@ -84,4 +103,9 @@ extern void mmc_packed_clean(struct mmc_queue *);
extern int mmc_access_rpmb(struct mmc_queue *);
+extern void print_mmc_packing_stats(struct mmc_card *card);
+
+extern int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card);
+extern void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card);
+
#endif
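
For orientation, a hedged sketch of how a block driver would wire the new cmdq hooks during queue setup (every handler name below is an illustrative placeholder, not the actual symbol in block.c):

static void example_wire_cmdq_hooks(struct mmc_queue *mq)
{
	mq->cmdq_issue_fn = my_blk_cmdq_issue_rq;
	mq->cmdq_complete_fn = my_blk_cmdq_complete_rq;
	mq->cmdq_error_fn = my_blk_cmdq_err;
	mq->cmdq_req_timed_out = my_blk_cmdq_req_timed_out;
	mq->cmdq_shutdown = my_blk_cmdq_shutdown;
}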
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 87cc07dedd9f..9e0ccdc44d6b 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -2,6 +2,17 @@
# MMC core configuration
#
+config MMC_RING_BUFFER
+ bool "MMC_RING_BUFFER"
+ depends on MMC
+ default n
+ help
+	  This enables ring buffer tracing of significant MMC driver
+	  events, providing a command history for debugging purposes.
+
+ If unsure, say N.
+
config MMC_EMBEDDED_SDIO
boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
help
@@ -16,3 +27,13 @@ config MMC_PARANOID_SD_INIT
about re-trying SD init requests. This can be a useful
work-around for buggy controllers and hardware. Enable
if you are experiencing issues with SD detection.
+
+config MMC_CLKGATE
+ bool "MMC host clock gating"
+ help
+ This will attempt to aggressively gate the clock to the MMC card.
+ This is done to save power due to gating off the logic and bus
+ noise when the MMC card is not in use. Your host driver has to
+ support handling this in order for it to be of any use.
+
+ If unsure, say N.
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138f28b7..60781dd192ab 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -10,3 +10,4 @@ mmc_core-y := core.o bus.o host.o \
quirks.o slot-gpio.o
mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MMC_RING_BUFFER) += ring_buffer.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 972ff844cf5a..311f6d639d06 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -132,6 +132,16 @@ static void mmc_bus_shutdown(struct device *dev)
struct mmc_host *host = card->host;
int ret;
+ if (!drv) {
+ pr_debug("%s: %s: drv is NULL\n", dev_name(dev), __func__);
+ return;
+ }
+
+ if (!card) {
+ pr_debug("%s: %s: card is NULL\n", dev_name(dev), __func__);
+ return;
+ }
+
if (dev->driver && drv->shutdown)
drv->shutdown(card);
@@ -154,7 +164,22 @@ static int mmc_bus_suspend(struct device *dev)
if (ret)
return ret;
+ if (mmc_bus_needs_resume(host))
+ return 0;
ret = host->bus_ops->suspend(host);
+
+	/*
+	 * bus_ops->suspend may fail. If we returned the error to the
+	 * PM framework without calling pm_generic_resume(), MMC requests
+	 * could get stuck: the PM framework would assume the mmc bus is
+	 * not suspended (because of the error) and would never call
+	 * resume again.
+	 *
+	 * So in case of error, call pm_generic_resume().
+	 */
+ if (ret)
+ pm_generic_resume(dev);
return ret;
}
@@ -164,11 +189,17 @@ static int mmc_bus_resume(struct device *dev)
struct mmc_host *host = card->host;
int ret;
+ if (mmc_bus_manual_resume(host)) {
+ host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+ goto skip_full_resume;
+ }
+
ret = host->bus_ops->resume(host);
if (ret)
pr_warn("%s: error %d during resume (card was removed?)\n",
mmc_hostname(host), ret);
+skip_full_resume:
ret = pm_generic_resume(dev);
return ret;
}
@@ -180,6 +211,9 @@ static int mmc_runtime_suspend(struct device *dev)
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
+ if (mmc_bus_needs_resume(host))
+ return 0;
+
return host->bus_ops->runtime_suspend(host);
}
@@ -188,8 +222,12 @@ static int mmc_runtime_resume(struct device *dev)
struct mmc_card *card = mmc_dev_to_card(dev);
struct mmc_host *host = card->host;
+ if (mmc_bus_needs_resume(host))
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+
return host->bus_ops->runtime_resume(host);
}
#endif /* !CONFIG_PM */
static const struct dev_pm_ops mmc_bus_pm_ops = {
@@ -273,6 +311,9 @@ struct mmc_card *mmc_alloc_card(struct mmc_host *host, struct device_type *type)
card->dev.release = mmc_release_card;
card->dev.type = type;
+ spin_lock_init(&card->wr_pack_stats.lock);
+ spin_lock_init(&card->bkops.stats.lock);
+
return card;
}
@@ -349,6 +390,12 @@ int mmc_add_card(struct mmc_card *card)
card->dev.of_node = mmc_of_find_child_device(card->host, 0);
+ if (mmc_card_sdio(card)) {
+ ret = device_init_wakeup(&card->dev, true);
+ if (ret)
+ pr_err("%s: %s: failed to init wakeup: %d\n",
+ mmc_hostname(card->host), __func__, ret);
+ }
ret = device_add(&card->dev);
if (ret)
return ret;
@@ -380,6 +427,8 @@ void mmc_remove_card(struct mmc_card *card)
of_node_put(card->dev.of_node);
}
+ kfree(card->wr_pack_stats.packing_events);
+
put_device(&card->dev);
}
diff --git a/drivers/mmc/core/bus.h b/drivers/mmc/core/bus.h
index 00a19710b6b4..3f3f24b2a757 100644
--- a/drivers/mmc/core/bus.h
+++ b/drivers/mmc/core/bus.h
@@ -15,7 +15,7 @@
static ssize_t mmc_##name##_show (struct device *dev, struct device_attribute *attr, char *buf) \
{ \
struct mmc_card *card = mmc_dev_to_card(dev); \
- return sprintf(buf, fmt, args); \
+ return snprintf(buf, PAGE_SIZE, fmt, args); \
} \
static DEVICE_ATTR(name, S_IRUGO, mmc_##name##_show, NULL)
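
For reference, a hedged expansion of the macro after this change, for a hypothetical serial attribute (the cid.serial field is assumed for illustration):

/* MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial) would expand to: */
static ssize_t mmc_serial_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct mmc_card *card = mmc_dev_to_card(dev);

	/* sysfs show buffers are one page, so the output is now bounded */
	return snprintf(buf, PAGE_SIZE, "0x%08x\n", card->cid.serial);
}
static DEVICE_ATTR(serial, S_IRUGO, mmc_serial_show, NULL);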
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2986e270d19a..0da9c5caea13 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/devfreq.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/pagemap.h>
@@ -29,6 +30,8 @@
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/pm.h>
+#include <linux/jiffies.h>
#define CREATE_TRACE_POINTS
#include <trace/events/mmc.h>
@@ -117,6 +120,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)];
data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9;
+ data->fault_injected = true;
}
#else /* CONFIG_FAIL_MMC_REQUEST */
@@ -128,6 +132,807 @@ static inline void mmc_should_fail_request(struct mmc_host *host,
#endif /* CONFIG_FAIL_MMC_REQUEST */
+static bool mmc_is_data_request(struct mmc_request *mmc_request)
+{
+ switch (mmc_request->cmd->opcode) {
+ case MMC_READ_SINGLE_BLOCK:
+ case MMC_READ_MULTIPLE_BLOCK:
+ case MMC_WRITE_BLOCK:
+ case MMC_WRITE_MULTIPLE_BLOCK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void mmc_clk_scaling_start_busy(struct mmc_host *host, bool lock_needed)
+{
+ struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+ if (!clk_scaling->enable)
+ return;
+
+ if (lock_needed)
+ spin_lock_bh(&clk_scaling->lock);
+
+ clk_scaling->start_busy = ktime_get();
+ clk_scaling->is_busy_started = true;
+
+ if (lock_needed)
+ spin_unlock_bh(&clk_scaling->lock);
+}
+
+static void mmc_clk_scaling_stop_busy(struct mmc_host *host, bool lock_needed)
+{
+ struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+ if (!clk_scaling->enable)
+ return;
+
+ if (lock_needed)
+ spin_lock_bh(&clk_scaling->lock);
+
+ if (!clk_scaling->is_busy_started) {
+ WARN_ON(1);
+ goto out;
+ }
+
+ clk_scaling->total_busy_time_us +=
+ ktime_to_us(ktime_sub(ktime_get(),
+ clk_scaling->start_busy));
+ pr_debug("%s: accumulated busy time is %lu usec\n",
+ mmc_hostname(host), clk_scaling->total_busy_time_us);
+ clk_scaling->is_busy_started = false;
+
+out:
+ if (lock_needed)
+ spin_unlock_bh(&clk_scaling->lock);
+}
+
+/**
+ * mmc_cmdq_clk_scaling_start_busy() - start busy timer for data requests
+ * @host: pointer to mmc host structure
+ * @lock_needed: flag indication if locking is needed
+ *
+ * This function starts the busy timer in case it was not already started.
+ */
+void mmc_cmdq_clk_scaling_start_busy(struct mmc_host *host,
+ bool lock_needed)
+{
+ if (!host->clk_scaling.enable)
+ return;
+
+ if (lock_needed)
+ spin_lock_bh(&host->clk_scaling.lock);
+
+ if (!host->clk_scaling.is_busy_started &&
+ !test_bit(CMDQ_STATE_DCMD_ACTIVE,
+ &host->cmdq_ctx.curr_state)) {
+ host->clk_scaling.start_busy = ktime_get();
+ host->clk_scaling.is_busy_started = true;
+ }
+
+ if (lock_needed)
+ spin_unlock_bh(&host->clk_scaling.lock);
+}
+EXPORT_SYMBOL(mmc_cmdq_clk_scaling_start_busy);
+
+/**
+ * mmc_cmdq_clk_scaling_stop_busy() - stop busy timer for last data requests
+ * @host: pointer to mmc host structure
+ * @lock_needed: flag indication if locking is needed
+ *
+ * This function stops the busy timer in case it is the last data request.
+ * In case the current request is not the last one, the busy time till
+ * now will be accumulated and the counter will be restarted.
+ */
+void mmc_cmdq_clk_scaling_stop_busy(struct mmc_host *host,
+ bool lock_needed, bool is_cmdq_dcmd)
+{
+ if (!host->clk_scaling.enable)
+ return;
+
+ if (lock_needed)
+ spin_lock_bh(&host->clk_scaling.lock);
+
+ /*
+ * For CQ mode: In completion of DCMD request, start busy time in
+ * case of pending data requests
+ */
+ if (is_cmdq_dcmd) {
+ if (host->cmdq_ctx.data_active_reqs) {
+ host->clk_scaling.is_busy_started = true;
+ host->clk_scaling.start_busy = ktime_get();
+ }
+ goto out;
+ }
+
+ host->clk_scaling.total_busy_time_us +=
+ ktime_to_us(ktime_sub(ktime_get(),
+ host->clk_scaling.start_busy));
+
+ if (host->cmdq_ctx.data_active_reqs) {
+ host->clk_scaling.is_busy_started = true;
+ host->clk_scaling.start_busy = ktime_get();
+ } else {
+ host->clk_scaling.is_busy_started = false;
+ }
+out:
+ if (lock_needed)
+ spin_unlock_bh(&host->clk_scaling.lock);
+}
+EXPORT_SYMBOL(mmc_cmdq_clk_scaling_stop_busy);
+
+/**
+ * mmc_can_scale_clk() - Check clock scaling capability
+ * @host: pointer to mmc host structure
+ */
+bool mmc_can_scale_clk(struct mmc_host *host)
+{
+ if (!host) {
+ pr_err("bad host parameter\n");
+ WARN_ON(1);
+ return false;
+ }
+
+ return host->caps2 & MMC_CAP2_CLK_SCALE;
+}
+EXPORT_SYMBOL(mmc_can_scale_clk);
+
+static int mmc_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
+ struct mmc_devfeq_clk_scaling *clk_scaling;
+
+ if (!host) {
+ pr_err("bad host parameter\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ clk_scaling = &host->clk_scaling;
+
+ if (!clk_scaling->enable)
+ return 0;
+
+ spin_lock_bh(&clk_scaling->lock);
+
+ /* accumulate the busy time of ongoing work */
+ memset(status, 0, sizeof(*status));
+ if (clk_scaling->is_busy_started) {
+ if (mmc_card_cmdq(host->card)) {
+			/*
+			 * The busy timer will be restarted in case
+			 * there are pending data requests.
+			 */
+ mmc_cmdq_clk_scaling_stop_busy(host, false, false);
+ } else {
+ mmc_clk_scaling_stop_busy(host, false);
+ mmc_clk_scaling_start_busy(host, false);
+ }
+ }
+
+ status->busy_time = clk_scaling->total_busy_time_us;
+ status->total_time = ktime_to_us(ktime_sub(ktime_get(),
+ clk_scaling->measure_interval_start));
+ clk_scaling->total_busy_time_us = 0;
+ status->current_frequency = clk_scaling->curr_freq;
+ clk_scaling->measure_interval_start = ktime_get();
+
+ pr_debug("%s: status: load = %lu%% - total_time=%lu busy_time = %lu, clk=%lu\n",
+ mmc_hostname(host),
+ (status->busy_time*100)/status->total_time,
+ status->total_time, status->busy_time,
+ status->current_frequency);
+
+ spin_unlock_bh(&clk_scaling->lock);
+
+ return 0;
+}
+
+static bool mmc_is_valid_state_for_clk_scaling(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+ u32 status;
+
+ /*
+ * If the current partition type is RPMB, clock switching may not
+ * work properly as sending tuning command (CMD21) is illegal in
+ * this mode.
+ */
+ if (!card || (mmc_card_mmc(card) &&
+ (card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB ||
+ mmc_card_doing_bkops(card))))
+ return false;
+
+ if (mmc_send_status(card, &status)) {
+ pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+ return false;
+ }
+
+ return R1_CURRENT_STATE(status) == R1_STATE_TRAN;
+}
+
+int mmc_cmdq_halt_on_empty_queue(struct mmc_host *host)
+{
+ int err = 0;
+
+ err = wait_event_interruptible(host->cmdq_ctx.queue_empty_wq,
+ (!host->cmdq_ctx.active_reqs));
+ if (host->cmdq_ctx.active_reqs) {
+ pr_err("%s: %s: unexpected active requests (%lu)\n",
+ mmc_hostname(host), __func__,
+ host->cmdq_ctx.active_reqs);
+ return -EPERM;
+ }
+
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: %s: mmc_cmdq_halt failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto out;
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_halt_on_empty_queue);
+
+int mmc_clk_update_freq(struct mmc_host *host,
+ unsigned long freq, enum mmc_load state)
+{
+ int err = 0;
+ bool cmdq_mode;
+
+ if (!host) {
+ pr_err("bad host parameter\n");
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ mmc_host_clk_hold(host);
+ cmdq_mode = mmc_card_cmdq(host->card);
+
+ /* make sure the card supports the frequency we want */
+ if (unlikely(freq > host->card->clk_scaling_highest)) {
+ freq = host->card->clk_scaling_highest;
+ pr_warn("%s: %s: frequency was overridden to %lu\n",
+ mmc_hostname(host), __func__,
+ host->card->clk_scaling_highest);
+ }
+
+ if (unlikely(freq < host->card->clk_scaling_lowest)) {
+ freq = host->card->clk_scaling_lowest;
+ pr_warn("%s: %s: frequency was overridden to %lu\n",
+ mmc_hostname(host), __func__,
+ host->card->clk_scaling_lowest);
+ }
+
+ if (freq == host->clk_scaling.curr_freq)
+ goto out;
+
+ if (host->ops->notify_load) {
+ err = host->ops->notify_load(host, state);
+ if (err) {
+ pr_err("%s: %s: fail on notify_load\n",
+ mmc_hostname(host), __func__);
+ goto out;
+ }
+ }
+
+ if (cmdq_mode) {
+ err = mmc_cmdq_halt_on_empty_queue(host);
+ if (err) {
+ pr_err("%s: %s: failed halting queue (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto halt_failed;
+ }
+ }
+
+ if (!mmc_is_valid_state_for_clk_scaling(host)) {
+		pr_debug("%s: invalid state for clock scaling - skipping\n",
+ mmc_hostname(host));
+ goto invalid_state;
+ }
+
+ err = host->bus_ops->change_bus_speed(host, &freq);
+ if (!err)
+ host->clk_scaling.curr_freq = freq;
+ else
+ pr_err("%s: %s: failed (%d) at freq=%lu\n",
+ mmc_hostname(host), __func__, err, freq);
+
+invalid_state:
+ if (cmdq_mode) {
+ if (mmc_cmdq_halt(host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(host), __func__);
+ }
+
+halt_failed:
+ if (err) {
+ /* restore previous state */
+ if (host->ops->notify_load)
+ if (host->ops->notify_load(host,
+ host->clk_scaling.state))
+ pr_err("%s: %s: fail on notify_load restore\n",
+ mmc_hostname(host), __func__);
+ }
+out:
+ mmc_host_clk_release(host);
+ return err;
+}
+EXPORT_SYMBOL(mmc_clk_update_freq);
+
+void mmc_recovery_fallback_lower_speed(struct mmc_host *host)
+{
+ if (!host->card)
+ return;
+
+ if (host->sdr104_wa && mmc_card_sd(host->card) &&
+ (host->ios.timing == MMC_TIMING_UHS_SDR104) &&
+ !host->card->sdr104_blocked) {
+ pr_err("%s: %s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n",
+ mmc_hostname(host), __func__);
+ mmc_host_clear_sdr104(host);
+ mmc_hw_reset(host);
+ host->card->sdr104_blocked = true;
+ }
+}
+
+static int mmc_devfreq_set_target(struct device *dev,
+ unsigned long *freq, u32 devfreq_flags)
+{
+ struct mmc_host *host = container_of(dev, struct mmc_host, class_dev);
+ struct mmc_devfeq_clk_scaling *clk_scaling;
+ int err = 0;
+ int abort;
+ unsigned long pflags = current->flags;
+
+ /* Ensure scaling would happen even in memory pressure conditions */
+ current->flags |= PF_MEMALLOC;
+
+ if (!(host && freq)) {
+ pr_err("%s: unexpected host/freq parameter\n", __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ clk_scaling = &host->clk_scaling;
+
+ if (!clk_scaling->enable)
+ goto out;
+
+ pr_debug("%s: target freq = %lu (%s)\n", mmc_hostname(host),
+ *freq, current->comm);
+
+ if ((clk_scaling->curr_freq == *freq) ||
+ clk_scaling->skip_clk_scale_freq_update)
+ goto out;
+
+ /* No need to scale the clocks if they are gated */
+ if (!host->ios.clock)
+ goto out;
+
+ spin_lock_bh(&clk_scaling->lock);
+ if (clk_scaling->clk_scaling_in_progress) {
+ pr_debug("%s: clocks scaling is already in-progress by mmc thread\n",
+ mmc_hostname(host));
+ spin_unlock_bh(&clk_scaling->lock);
+ goto out;
+ }
+ clk_scaling->need_freq_change = true;
+ clk_scaling->target_freq = *freq;
+ clk_scaling->state = *freq < clk_scaling->curr_freq ?
+ MMC_LOAD_LOW : MMC_LOAD_HIGH;
+ spin_unlock_bh(&clk_scaling->lock);
+
+ abort = __mmc_claim_host(host, &clk_scaling->devfreq_abort);
+ if (abort)
+ goto out;
+
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ goto rel_host;
+
+ /*
+ * In case we were able to claim host there is no need to
+ * defer the frequency change. It will be done now
+ */
+ clk_scaling->need_freq_change = false;
+
+ mmc_host_clk_hold(host);
+ err = mmc_clk_update_freq(host, *freq, clk_scaling->state);
+ if (err && err != -EAGAIN) {
+ pr_err("%s: clock scale to %lu failed with error %d\n",
+ mmc_hostname(host), *freq, err);
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
+ pr_debug("%s: clock change to %lu finished successfully (%s)\n",
+ mmc_hostname(host), *freq, current->comm);
+ }
+
+ mmc_host_clk_release(host);
+rel_host:
+ mmc_release_host(host);
+out:
+ tsk_restore_flags(current, pflags, PF_MEMALLOC);
+ return err;
+}
+
+/**
+ * mmc_deferred_scaling() - scale clocks from data path (mmc thread context)
+ * @host: pointer to mmc host structure
+ *
+ * This function does clock scaling in case "need_freq_change" flag was set
+ * by the clock scaling logic.
+ */
+void mmc_deferred_scaling(struct mmc_host *host)
+{
+ unsigned long target_freq;
+ int err;
+
+ if (!host->clk_scaling.enable)
+ return;
+
+ if (mmc_card_sd(host->card) && host->card->sdr104_blocked)
+ return;
+
+ spin_lock_bh(&host->clk_scaling.lock);
+
+ if (host->clk_scaling.clk_scaling_in_progress ||
+ !(host->clk_scaling.need_freq_change)) {
+ spin_unlock_bh(&host->clk_scaling.lock);
+ return;
+ }
+
+ atomic_inc(&host->clk_scaling.devfreq_abort);
+ target_freq = host->clk_scaling.target_freq;
+ host->clk_scaling.clk_scaling_in_progress = true;
+ host->clk_scaling.need_freq_change = false;
+ spin_unlock_bh(&host->clk_scaling.lock);
+ pr_debug("%s: doing deferred frequency change (%lu) (%s)\n",
+ mmc_hostname(host),
+ target_freq, current->comm);
+
+ err = mmc_clk_update_freq(host, target_freq,
+ host->clk_scaling.state);
+ if (err && err != -EAGAIN) {
+ pr_err("%s: failed on deferred scale clocks (%d)\n",
+ mmc_hostname(host), err);
+ mmc_recovery_fallback_lower_speed(host);
+ } else {
+ pr_debug("%s: clocks were successfully scaled to %lu (%s)\n",
+ mmc_hostname(host),
+ target_freq, current->comm);
+ }
+ host->clk_scaling.clk_scaling_in_progress = false;
+ atomic_dec(&host->clk_scaling.devfreq_abort);
+}
+EXPORT_SYMBOL(mmc_deferred_scaling);
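
mmc_deferred_scaling() is the second half of a handshake: mmc_devfreq_set_target() records the requested frequency in need_freq_change when it cannot perform the change itself, and the data path applies it later. A hedged sketch of an intended call site (placement illustrative):

/* sketch: in the request issue path, with the host claimed */
static void example_apply_deferred_scaling(struct mmc_host *host)
{
	if (!host->clk_scaling.enable)
		return;

	/* apply any frequency change devfreq had to defer */
	mmc_deferred_scaling(host);

	/* then account the upcoming transfer as bus-busy time */
	mmc_cmdq_clk_scaling_start_busy(host, true);
}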
+
+static int mmc_devfreq_create_freq_table(struct mmc_host *host)
+{
+ int i;
+ struct mmc_devfeq_clk_scaling *clk_scaling = &host->clk_scaling;
+
+ pr_debug("%s: supported: lowest=%lu, highest=%lu\n",
+ mmc_hostname(host),
+ host->card->clk_scaling_lowest,
+ host->card->clk_scaling_highest);
+
+ /*
+ * Create the frequency table and initialize it with default values.
+ * Initialize it with platform specific frequencies if the frequency
+ * table supplied by platform driver is present, otherwise initialize
+ * it with min and max frequencies supported by the card.
+ */
+ if (!clk_scaling->freq_table) {
+ if (clk_scaling->pltfm_freq_table_sz)
+ clk_scaling->freq_table_sz =
+ clk_scaling->pltfm_freq_table_sz;
+ else
+ clk_scaling->freq_table_sz = 2;
+
+ clk_scaling->freq_table = kzalloc(
+ (clk_scaling->freq_table_sz *
+ sizeof(*(clk_scaling->freq_table))), GFP_KERNEL);
+ if (!clk_scaling->freq_table)
+ return -ENOMEM;
+
+ if (clk_scaling->pltfm_freq_table) {
+ memcpy(clk_scaling->freq_table,
+ clk_scaling->pltfm_freq_table,
+ (clk_scaling->pltfm_freq_table_sz *
+ sizeof(*(clk_scaling->pltfm_freq_table))));
+ } else {
+ pr_debug("%s: no frequency table defined - setting default\n",
+ mmc_hostname(host));
+ clk_scaling->freq_table[0] =
+ host->card->clk_scaling_lowest;
+ clk_scaling->freq_table[1] =
+ host->card->clk_scaling_highest;
+ goto out;
+ }
+ }
+
+ if (host->card->clk_scaling_lowest >
+ clk_scaling->freq_table[0])
+ pr_debug("%s: frequency table undershot possible freq\n",
+ mmc_hostname(host));
+
+ for (i = 0; i < clk_scaling->freq_table_sz; i++) {
+ if (clk_scaling->freq_table[i] <=
+ host->card->clk_scaling_highest)
+ continue;
+ clk_scaling->freq_table[i] =
+ host->card->clk_scaling_highest;
+ clk_scaling->freq_table_sz = i + 1;
+ pr_debug("%s: frequency table overshot possible freq (%d)\n",
+ mmc_hostname(host), clk_scaling->freq_table[i]);
+ break;
+ }
+
+out:
+ clk_scaling->devfreq_profile.freq_table = clk_scaling->freq_table;
+ clk_scaling->devfreq_profile.max_state = clk_scaling->freq_table_sz;
+
+ for (i = 0; i < clk_scaling->freq_table_sz; i++)
+ pr_debug("%s: freq[%d] = %u\n",
+ mmc_hostname(host), i, clk_scaling->freq_table[i]);
+
+ return 0;
+}
+
+/**
+ * mmc_init_clk_scaling() - Initialize clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * Initialize clock scaling for supported hosts. It is assumed that the caller
+ * ensures the clock is running at the maximum possible frequency before
+ * calling this function. struct devfreq_simple_ondemand_data is used to
+ * configure the governor.
+ */
+int mmc_init_clk_scaling(struct mmc_host *host)
+{
+ int err;
+
+ if (!host || !host->card) {
+ pr_err("%s: unexpected host/card parameters\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!mmc_can_scale_clk(host) ||
+ !host->bus_ops->change_bus_speed) {
+ pr_debug("%s: clock scaling is not supported\n",
+ mmc_hostname(host));
+ return 0;
+ }
+
+	pr_debug("registering %s dev (%p) to devfreq\n",
+ mmc_hostname(host),
+ mmc_classdev(host));
+
+ if (host->clk_scaling.devfreq) {
+ pr_err("%s: dev is already registered for dev %p\n",
+ mmc_hostname(host),
+ mmc_dev(host));
+ return -EPERM;
+ }
+ spin_lock_init(&host->clk_scaling.lock);
+ atomic_set(&host->clk_scaling.devfreq_abort, 0);
+ host->clk_scaling.curr_freq = host->ios.clock;
+ host->clk_scaling.clk_scaling_in_progress = false;
+ host->clk_scaling.need_freq_change = false;
+ host->clk_scaling.is_busy_started = false;
+
+ host->clk_scaling.devfreq_profile.polling_ms =
+ host->clk_scaling.polling_delay_ms;
+ host->clk_scaling.devfreq_profile.get_dev_status =
+ mmc_devfreq_get_dev_status;
+ host->clk_scaling.devfreq_profile.target = mmc_devfreq_set_target;
+ host->clk_scaling.devfreq_profile.initial_freq = host->ios.clock;
+
+ host->clk_scaling.ondemand_gov_data.simple_scaling = true;
+ host->clk_scaling.ondemand_gov_data.upthreshold =
+ host->clk_scaling.upthreshold;
+ host->clk_scaling.ondemand_gov_data.downdifferential =
+ host->clk_scaling.upthreshold - host->clk_scaling.downthreshold;
+
+ err = mmc_devfreq_create_freq_table(host);
+ if (err) {
+ pr_err("%s: fail to create devfreq frequency table\n",
+ mmc_hostname(host));
+ return err;
+ }
+
+ pr_debug("%s: adding devfreq with: upthreshold=%u downthreshold=%u polling=%u\n",
+ mmc_hostname(host),
+ host->clk_scaling.ondemand_gov_data.upthreshold,
+ host->clk_scaling.ondemand_gov_data.downdifferential,
+ host->clk_scaling.devfreq_profile.polling_ms);
+ host->clk_scaling.devfreq = devfreq_add_device(
+ mmc_classdev(host),
+ &host->clk_scaling.devfreq_profile,
+ "simple_ondemand",
+ &host->clk_scaling.ondemand_gov_data);
+ if (!host->clk_scaling.devfreq) {
+ pr_err("%s: unable to register with devfreq\n",
+ mmc_hostname(host));
+ return -EPERM;
+ }
+
+ pr_debug("%s: clk scaling is enabled for device %s (%p) with devfreq %p (clock = %uHz)\n",
+ mmc_hostname(host),
+ dev_name(mmc_classdev(host)),
+ mmc_classdev(host),
+ host->clk_scaling.devfreq,
+ host->ios.clock);
+
+ host->clk_scaling.enable = true;
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_init_clk_scaling);
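+
+/*
+ * Usage sketch (illustrative only; the example_* name is a placeholder,
+ * not part of this patch): a bus driver would register clock scaling
+ * right after card initialization, while the bus clock is still at its
+ * maximum frequency, as mmc_init_clk_scaling() assumes.
+ */
+static __maybe_unused void example_post_card_init(struct mmc_host *host)
+{
+ int err = mmc_init_clk_scaling(host);
+
+ if (err)
+ pr_warn("%s: clock scaling not started (%d)\n",
+ mmc_hostname(host), err);
+}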
+
+/**
+ * mmc_suspend_clk_scaling() - suspend clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * This API will suspend the devfreq feature for the specific host.
+ * The statistics collected by mmc will be cleared.
+ * This function is intended to be called by the pm callbacks
+ * (e.g. runtime_suspend, suspend) of the mmc device
+ */
+int mmc_suspend_clk_scaling(struct mmc_host *host)
+{
+ int err;
+
+ if (!host) {
+ WARN(1, "bad host parameter\n");
+ return -EINVAL;
+ }
+
+ if (!mmc_can_scale_clk(host) || !host->clk_scaling.enable)
+ return 0;
+
+ if (!host->clk_scaling.devfreq) {
+ pr_err("%s: %s: no devfreq is assosiated with this device\n",
+ mmc_hostname(host), __func__);
+ return -EPERM;
+ }
+
+ atomic_inc(&host->clk_scaling.devfreq_abort);
+ wake_up(&host->wq);
+ err = devfreq_suspend_device(host->clk_scaling.devfreq);
+ if (err) {
+ pr_err("%s: %s: failed to suspend devfreq\n",
+ mmc_hostname(host), __func__);
+ return err;
+ }
+ host->clk_scaling.enable = false;
+
+ host->clk_scaling.total_busy_time_us = 0;
+
+ pr_debug("%s: devfreq suspended\n", mmc_hostname(host));
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_suspend_clk_scaling);
+
+/**
+ * mmc_resume_clk_scaling() - resume clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * This API will resume the devfreq feature for the specific host.
+ * This API is intended to be called by the pm callbacks
+ * (e.g. runtime_resume, resume) of the mmc device
+ */
+int mmc_resume_clk_scaling(struct mmc_host *host)
+{
+ int err = 0;
+ u32 max_clk_idx = 0;
+ u32 devfreq_max_clk = 0;
+ u32 devfreq_min_clk = 0;
+
+ if (!host) {
+ WARN(1, "bad host parameter\n");
+ return -EINVAL;
+ }
+
+ if (!mmc_can_scale_clk(host))
+ return 0;
+
+ /*
+ * If clock scaling is already exited when resume is called, like
+ * during mmc shutdown, it is not an error and should not fail the
+ * API calling this.
+ */
+ if (!host->clk_scaling.devfreq) {
+ pr_warn("%s: %s: no devfreq is assosiated with this device\n",
+ mmc_hostname(host), __func__);
+ return 0;
+ }
+
+ atomic_set(&host->clk_scaling.devfreq_abort, 0);
+
+ max_clk_idx = host->clk_scaling.freq_table_sz - 1;
+ devfreq_max_clk = host->clk_scaling.freq_table[max_clk_idx];
+ devfreq_min_clk = host->clk_scaling.freq_table[0];
+
+ host->clk_scaling.curr_freq = devfreq_max_clk;
+ if (host->ios.clock < host->clk_scaling.freq_table[max_clk_idx])
+ host->clk_scaling.curr_freq = devfreq_min_clk;
+
+ host->clk_scaling.clk_scaling_in_progress = false;
+ host->clk_scaling.need_freq_change = false;
+
+ err = devfreq_resume_device(host->clk_scaling.devfreq);
+ if (err) {
+ pr_err("%s: %s: failed to resume devfreq (%d)\n",
+ mmc_hostname(host), __func__, err);
+ } else {
+ host->clk_scaling.enable = true;
+ pr_debug("%s: devfreq resumed\n", mmc_hostname(host));
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_resume_clk_scaling);
+
+/**
+ * mmc_exit_clk_scaling() - Disable clock scaling
+ * @host: pointer to mmc host structure
+ *
+ * Disable clock scaling permanently.
+ */
+int mmc_exit_clk_scaling(struct mmc_host *host)
+{
+ int err;
+
+ if (!host) {
+ pr_err("%s: bad host parameter\n", __func__);
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (!mmc_can_scale_clk(host))
+ return 0;
+
+ if (!host->clk_scaling.devfreq) {
+ pr_err("%s: %s: no devfreq is assosiated with this device\n",
+ mmc_hostname(host), __func__);
+ return -EPERM;
+ }
+
+ err = mmc_suspend_clk_scaling(host);
+ if (err) {
+ pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ err = devfreq_remove_device(host->clk_scaling.devfreq);
+ if (err) {
+ pr_err("%s: remove devfreq failed (%d)\n",
+ mmc_hostname(host), err);
+ return err;
+ }
+
+ host->clk_scaling.devfreq = NULL;
+ atomic_set(&host->clk_scaling.devfreq_abort, 1);
+
+ kfree(host->clk_scaling.freq_table);
+ host->clk_scaling.freq_table = NULL;
+
+ pr_debug("%s: devfreq was removed\n", mmc_hostname(host));
+
+ return 0;
+}
+EXPORT_SYMBOL(mmc_exit_clk_scaling);
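+
+/*
+ * Usage sketch (illustrative only; example_* names are placeholders):
+ * how the pm and removal paths are expected to pair the scaling APIs.
+ */
+static __maybe_unused int example_host_suspend(struct mmc_host *host)
+{
+ /* Stop devfreq polling before the card is powered down. */
+ return mmc_suspend_clk_scaling(host);
+}
+
+static __maybe_unused int example_host_resume(struct mmc_host *host)
+{
+ /* Restart polling; curr_freq is re-synced against the freq table. */
+ return mmc_resume_clk_scaling(host);
+}
+
+static __maybe_unused void example_card_removal(struct mmc_host *host)
+{
+ /* Permanently remove the devfreq device and free the freq table. */
+ mmc_exit_clk_scaling(host);
+}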
+
/**
* mmc_request_done - finish processing an MMC request
* @host: MMC host which completed request
@@ -140,6 +945,12 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
int err = cmd->error;
+#ifdef CONFIG_MMC_PERF_PROFILING
+ ktime_t diff;
+#endif
+
+ if (host->clk_scaling.is_busy_started)
+ mmc_clk_scaling_stop_busy(host, true);
/* Flag re-tuning needed on CRC errors */
if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
@@ -180,6 +991,24 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
cmd->resp[2], cmd->resp[3]);
if (mrq->data) {
+#ifdef CONFIG_MMC_PERF_PROFILING
+ if (host->perf_enable) {
+ diff = ktime_sub(ktime_get(), host->perf.start);
+ if (mrq->data->flags & MMC_DATA_READ) {
+ host->perf.rbytes_drv +=
+ mrq->data->bytes_xfered;
+ host->perf.rtime_drv =
+ ktime_add(host->perf.rtime_drv,
+ diff);
+ } else {
+ host->perf.wbytes_drv +=
+ mrq->data->bytes_xfered;
+ host->perf.wtime_drv =
+ ktime_add(host->perf.wtime_drv,
+ diff);
+ }
+ }
+#endif
pr_debug("%s: %d bytes transferred: %d\n",
mmc_hostname(host),
mrq->data->bytes_xfered, mrq->data->error);
@@ -209,6 +1038,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
if (mrq->done)
mrq->done(mrq);
+
+ mmc_host_clk_release(host);
}
}
@@ -311,32 +1142,194 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
mrq->stop->error = 0;
mrq->stop->mrq = mrq;
}
+#ifdef CONFIG_MMC_PERF_PROFILING
+ if (host->perf_enable)
+ host->perf.start = ktime_get();
+#endif
}
+ mmc_host_clk_hold(host);
led_trigger_event(host->led, LED_FULL);
+
+ if (mmc_is_data_request(mrq)) {
+ mmc_deferred_scaling(host);
+ mmc_clk_scaling_start_busy(host, true);
+ }
+
__mmc_start_request(host, mrq);
return 0;
}
+static void mmc_start_cmdq_request(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ if (mrq->data) {
+ pr_debug("%s: blksz %d blocks %d flags %08x tsac %lu ms nsac %d\n",
+ mmc_hostname(host), mrq->data->blksz,
+ mrq->data->blocks, mrq->data->flags,
+ mrq->data->timeout_ns / NSEC_PER_MSEC,
+ mrq->data->timeout_clks);
+
+ BUG_ON(mrq->data->blksz > host->max_blk_size);
+ BUG_ON(mrq->data->blocks > host->max_blk_count);
+ BUG_ON(mrq->data->blocks * mrq->data->blksz >
+ host->max_req_size);
+ mrq->data->error = 0;
+ mrq->data->mrq = mrq;
+ }
+
+ if (mrq->cmd) {
+ mrq->cmd->error = 0;
+ mrq->cmd->mrq = mrq;
+ }
+
+ mmc_host_clk_hold(host);
+ if (likely(host->cmdq_ops->request))
+ host->cmdq_ops->request(host, mrq);
+ else
+ pr_err("%s: %s: issue request failed\n", mmc_hostname(host),
+ __func__);
+}
+
/**
- * mmc_start_bkops - start BKOPS for supported cards
+ * mmc_blk_init_bkops_statistics - initialize bkops statistics
- * @card: MMC card to start BKOPS
+ * @card: MMC card whose BKOPS statistics are initialized
- * @form_exception: A flag to indicate if this function was
- * called due to an exception raised by the card
*
- * Start background operations whenever requested.
- * When the urgent BKOPS bit is set in a R1 command response
- * then background operations should be started immediately.
+ * Initialize and enable the bkops statistics
+ */
+void mmc_blk_init_bkops_statistics(struct mmc_card *card)
+{
+ int i;
+ struct mmc_bkops_stats *stats;
+
+ if (!card)
+ return;
+
+ stats = &card->bkops.stats;
+ spin_lock(&stats->lock);
+
+ stats->manual_start = 0;
+ stats->hpi = 0;
+ stats->auto_start = 0;
+ stats->auto_stop = 0;
+ for (i = 0 ; i < MMC_BKOPS_NUM_SEVERITY_LEVELS ; i++)
+ stats->level[i] = 0;
+ stats->enabled = true;
+
+ spin_unlock(&stats->lock);
+}
+EXPORT_SYMBOL(mmc_blk_init_bkops_statistics);
+
+static void mmc_update_bkops_hpi(struct mmc_bkops_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->hpi++;
+ spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_start(struct mmc_bkops_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->manual_start++;
+ spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_auto_on(struct mmc_bkops_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->auto_start++;
+ spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_auto_off(struct mmc_bkops_stats *stats)
+{
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->auto_stop++;
+ spin_unlock_irq(&stats->lock);
+}
+
+static void mmc_update_bkops_level(struct mmc_bkops_stats *stats,
+ unsigned level)
+{
+ BUG_ON(level >= MMC_BKOPS_NUM_SEVERITY_LEVELS);
+ spin_lock_irq(&stats->lock);
+ if (stats->enabled)
+ stats->level[level]++;
+ spin_unlock_irq(&stats->lock);
+}
+
+/**
+ * mmc_set_auto_bkops - set auto BKOPS for supported cards
+ * @card: MMC card to configure
+ * @enable: enable/disable flag
+ * Configure the card to run automatic BKOPS.
+ *
+ * Should be called with the host claimed.
+ */
+int mmc_set_auto_bkops(struct mmc_card *card, bool enable)
+{
+ int ret = 0;
+ u8 bkops_en;
+
+ BUG_ON(!card);
+ enable = !!enable;
+
+ if (unlikely(!mmc_card_support_auto_bkops(card))) {
+ pr_err("%s: %s: card doesn't support auto bkops\n",
+ mmc_hostname(card->host), __func__);
+ return -EPERM;
+ }
+
+ if (enable) {
+ if (mmc_card_doing_auto_bkops(card))
+ goto out;
+ bkops_en = card->ext_csd.bkops_en | EXT_CSD_BKOPS_AUTO_EN;
+ } else {
+ if (!mmc_card_doing_auto_bkops(card))
+ goto out;
+ bkops_en = card->ext_csd.bkops_en & ~EXT_CSD_BKOPS_AUTO_EN;
+ }
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_EN,
+ bkops_en, 0);
+ if (ret) {
+ pr_err("%s: %s: error in setting auto bkops to %d (%d)\n",
+ mmc_hostname(card->host), __func__, enable, ret);
+ } else {
+ if (enable) {
+ mmc_card_set_auto_bkops(card);
+ mmc_update_bkops_auto_on(&card->bkops.stats);
+ } else {
+ mmc_card_clr_auto_bkops(card);
+ mmc_update_bkops_auto_off(&card->bkops.stats);
+ }
+ card->ext_csd.bkops_en = bkops_en;
+ pr_debug("%s: %s: bkops state %x\n",
+ mmc_hostname(card->host), __func__, bkops_en);
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL(mmc_set_auto_bkops);
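+
+/*
+ * Usage sketch (illustrative only; example_* is a placeholder name):
+ * turning auto BKOPS on for a card that advertises support. The caller
+ * must hold the claimed host.
+ */
+static __maybe_unused void example_enable_auto_bkops(struct mmc_card *card)
+{
+ if (mmc_card_support_auto_bkops(card) &&
+ !mmc_set_auto_bkops(card, true))
+ pr_debug("%s: auto bkops enabled\n",
+ mmc_hostname(card->host));
+}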
+
+/**
+ * mmc_check_bkops - check BKOPS for supported cards
+ * @card: MMC card to check BKOPS
+ *
+ * Read the BKOPS status in order to determine whether the
+ * card requires bkops to be started.
*/
-void mmc_start_bkops(struct mmc_card *card, bool from_exception)
+void mmc_check_bkops(struct mmc_card *card)
{
int err;
- int timeout;
- bool use_busy_signal;
BUG_ON(!card);
- if (!card->ext_csd.man_bkops_en || mmc_card_doing_bkops(card))
+ if (mmc_card_doing_bkops(card))
return;
err = mmc_read_bkops_status(card);
@@ -346,47 +1339,50 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception)
return;
}
- if (!card->ext_csd.raw_bkops_status)
- return;
+ card->bkops.needs_check = false;
+
+ mmc_update_bkops_level(&card->bkops.stats,
+ card->ext_csd.raw_bkops_status);
+
+ card->bkops.needs_bkops = card->ext_csd.raw_bkops_status > 0;
+}
+EXPORT_SYMBOL(mmc_check_bkops);
+
+/**
+ * mmc_start_manual_bkops - start BKOPS for supported cards
+ * @card: MMC card to start BKOPS
+ *
+ * Send START_BKOPS to the card.
+ * The function should be called with the host claimed.
+ */
+void mmc_start_manual_bkops(struct mmc_card *card)
+{
+ int err;
+
+ BUG_ON(!card);
- if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 &&
- from_exception)
+ if (unlikely(!mmc_card_configured_manual_bkops(card)))
return;
- mmc_claim_host(card->host);
- if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) {
- timeout = MMC_BKOPS_MAX_TIMEOUT;
- use_busy_signal = true;
- } else {
- timeout = 0;
- use_busy_signal = false;
- }
+ if (mmc_card_doing_bkops(card))
+ return;
mmc_retune_hold(card->host);
- err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_BKOPS_START, 1, timeout,
- use_busy_signal, true, false);
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BKOPS_START,
+ 1, 0, false, true, false);
if (err) {
- pr_warn("%s: Error %d starting bkops\n",
- mmc_hostname(card->host), err);
- mmc_retune_release(card->host);
- goto out;
+ pr_err("%s: Error %d starting manual bkops\n",
+ mmc_hostname(card->host), err);
+ } else {
+ mmc_card_set_doing_bkops(card);
+ mmc_update_bkops_start(&card->bkops.stats);
+ card->bkops.needs_bkops = false;
}
- /*
- * For urgent bkops status (LEVEL_2 and more)
- * bkops executed synchronously, otherwise
- * the operation is in progress
- */
- if (!use_busy_signal)
- mmc_card_set_doing_bkops(card);
- else
- mmc_retune_release(card->host);
-out:
- mmc_release_host(card->host);
+ mmc_retune_release(card->host);
}
-EXPORT_SYMBOL(mmc_start_bkops);
+EXPORT_SYMBOL(mmc_start_manual_bkops);
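+
+/*
+ * Usage sketch (illustrative only; example_* is a placeholder name):
+ * an idle-time path can refresh the BKOPS level and start manual BKOPS
+ * when the card asked for it. Both calls expect a claimed host.
+ */
+static __maybe_unused void example_idle_bkops(struct mmc_card *card)
+{
+ mmc_check_bkops(card); /* updates card->bkops.needs_bkops */
+ if (card->bkops.needs_bkops)
+ mmc_start_manual_bkops(card);
+}
+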
/*
* mmc_wait_data_done() - done callback for data request
@@ -396,10 +1392,13 @@ EXPORT_SYMBOL(mmc_start_bkops);
*/
static void mmc_wait_data_done(struct mmc_request *mrq)
{
+ unsigned long flags;
struct mmc_context_info *context_info = &mrq->host->context_info;
+ spin_lock_irqsave(&context_info->lock, flags);
context_info->is_done_rcv = true;
wake_up_interruptible(&context_info->wait);
+ spin_unlock_irqrestore(&context_info->lock, flags);
}
static void mmc_wait_done(struct mmc_request *mrq)
@@ -465,6 +1464,7 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host,
struct mmc_command *cmd;
struct mmc_context_info *context_info = &host->context_info;
int err;
+ bool is_done_rcv = false;
unsigned long flags;
while (1) {
@@ -472,9 +1472,10 @@ static int mmc_wait_for_data_req_done(struct mmc_host *host,
(context_info->is_done_rcv ||
context_info->is_new_req));
spin_lock_irqsave(&context_info->lock, flags);
+ is_done_rcv = context_info->is_done_rcv;
context_info->is_waiting_last_req = false;
spin_unlock_irqrestore(&context_info->lock, flags);
- if (context_info->is_done_rcv) {
+ if (is_done_rcv) {
context_info->is_done_rcv = false;
context_info->is_new_req = false;
cmd = mrq->cmd;
@@ -510,20 +1511,20 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
struct mmc_command *cmd;
while (1) {
- wait_for_completion(&mrq->completion);
+ wait_for_completion_io(&mrq->completion);
cmd = mrq->cmd;
/*
- * If host has timed out waiting for the sanitize
+ * If host has timed out waiting for the sanitize/bkops
* to complete, card might be still in programming state
* so let's try to bring the card out of programming
* state.
*/
- if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) {
+ if ((cmd->bkops_busy || cmd->sanitize_busy) && cmd->error == -ETIMEDOUT) {
if (!mmc_interrupt_hpi(host->card)) {
- pr_warn("%s: %s: Interrupted sanitize\n",
- mmc_hostname(host), __func__);
+ pr_warn("%s: %s: Interrupted sanitize/bkops\n",
+ mmc_hostname(host), __func__);
cmd->error = 0;
break;
} else {
@@ -532,8 +1533,13 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
}
}
if (!cmd->error || !cmd->retries ||
- mmc_card_removed(host->card))
+ mmc_card_removed(host->card)) {
+ if (cmd->error && !cmd->retries &&
+ cmd->opcode != MMC_SEND_STATUS &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK)
+ mmc_recovery_fallback_lower_speed(host);
break;
+ }
mmc_retune_recheck(host);
@@ -561,8 +1567,11 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
bool is_first_req)
{
- if (host->ops->pre_req)
+ if (host->ops->pre_req) {
+ mmc_host_clk_hold(host);
host->ops->pre_req(host, mrq, is_first_req);
+ mmc_host_clk_release(host);
+ }
}
/**
@@ -577,10 +1586,141 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
int err)
{
- if (host->ops->post_req)
+ if (host->ops->post_req) {
+ mmc_host_clk_hold(host);
host->ops->post_req(host, mrq, err);
+ mmc_host_clk_release(host);
+ }
+}
+
+/**
+ * mmc_cmdq_discard_queue - discard task(s) queued in the device
+ * @host: host instance
+ * @tasks: mask of tasks to be discarded
+ *         (0 discards all queued tasks)
+ */
+int mmc_cmdq_discard_queue(struct mmc_host *host, u32 tasks)
+{
+ return mmc_discard_queue(host, tasks);
+}
+EXPORT_SYMBOL(mmc_cmdq_discard_queue);
+
+/**
+ * mmc_cmdq_post_req - post process of a completed request
+ * @host: host instance
+ * @tag: the request tag.
+ * @err: non-zero on error, zero on success
+ */
+void mmc_cmdq_post_req(struct mmc_host *host, int tag, int err)
+{
+ if (likely(host->cmdq_ops->post_req))
+ host->cmdq_ops->post_req(host, tag, err);
+}
+EXPORT_SYMBOL(mmc_cmdq_post_req);
+
+/**
+ * mmc_cmdq_halt - halt/un-halt the command queue engine
+ * @host: host instance
+ * @halt: true - halt, un-halt otherwise
+ *
+ * Host halts the command queue engine. It should complete
+ * the ongoing transfer and release the bus.
+ * All legacy commands can be sent upon successful
+ * completion of this function.
+ * Returns 0 on success, negative otherwise
+ */
+int mmc_cmdq_halt(struct mmc_host *host, bool halt)
+{
+ int err = 0;
+
+ if (mmc_host_cq_disable(host)) {
+ pr_debug("%s: %s: CQE is already disabled\n",
+ mmc_hostname(host), __func__);
+ return 0;
+ }
+
+ if ((halt && mmc_host_halt(host)) ||
+ (!halt && !mmc_host_halt(host))) {
+ pr_debug("%s: %s: CQE is already %s\n", mmc_hostname(host),
+ __func__, halt ? "halted" : "un-halted");
+ return 0;
+ }
+
+ mmc_host_clk_hold(host);
+ if (host->cmdq_ops->halt) {
+ err = host->cmdq_ops->halt(host, halt);
+ if (!err && host->ops->notify_halt)
+ host->ops->notify_halt(host, halt);
+ if (!err && halt)
+ mmc_host_set_halt(host);
+ else if (!err && !halt) {
+ mmc_host_clr_halt(host);
+ wake_up(&host->cmdq_ctx.wait);
+ }
+ } else {
+ err = -ENOSYS;
+ }
+ mmc_host_clk_release(host);
+ return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_halt);
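+
+/*
+ * Usage sketch (illustrative only; example_* is a placeholder name):
+ * halting the CQE so a legacy (non-CQ) command can be issued, then
+ * un-halting on a best-effort basis. Full error handling is elided.
+ */
+static __maybe_unused int example_send_legacy_cmd(struct mmc_host *host,
+ struct mmc_command *cmd)
+{
+ int err = mmc_cmdq_halt(host, true);
+
+ if (err)
+ return err;
+ err = mmc_wait_for_cmd(host, cmd, 0);
+ mmc_cmdq_halt(host, false);
+ return err;
+}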
+
+int mmc_cmdq_start_req(struct mmc_host *host, struct mmc_cmdq_req *cmdq_req)
+{
+ struct mmc_request *mrq = &cmdq_req->mrq;
+
+ mrq->host = host;
+ if (mmc_card_removed(host->card)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ return -ENOMEDIUM;
+ }
+ mmc_start_cmdq_request(host, mrq);
+ return 0;
+}
+EXPORT_SYMBOL(mmc_cmdq_start_req);
+
+static void mmc_cmdq_dcmd_req_done(struct mmc_request *mrq)
+{
+ mmc_host_clk_release(mrq->host);
+ complete(&mrq->completion);
}
+int mmc_cmdq_wait_for_dcmd(struct mmc_host *host,
+ struct mmc_cmdq_req *cmdq_req)
+{
+ struct mmc_request *mrq = &cmdq_req->mrq;
+ struct mmc_command *cmd = mrq->cmd;
+ int err = 0;
+
+ init_completion(&mrq->completion);
+ mrq->done = mmc_cmdq_dcmd_req_done;
+ err = mmc_cmdq_start_req(host, cmdq_req);
+ if (err)
+ return err;
+
+ wait_for_completion_io(&mrq->completion);
+ if (cmd->error) {
+ pr_err("%s: DCMD %d failed with err %d\n",
+ mmc_hostname(host), cmd->opcode,
+ cmd->error);
+ err = cmd->error;
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->dumpstate(host);
+ mmc_host_clk_release(host);
+ }
+ return err;
+}
+EXPORT_SYMBOL(mmc_cmdq_wait_for_dcmd);
+
+int mmc_cmdq_prepare_flush(struct mmc_command *cmd)
+{
+ return __mmc_switch_cmdq_mode(cmd, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1,
+ 0, true, true);
+}
+EXPORT_SYMBOL(mmc_cmdq_prepare_flush);
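+
+/*
+ * Usage sketch (illustrative only; example_* is a placeholder name):
+ * issuing a cache flush as a DCMD through the command queue. How the
+ * cmdq_req and its DCMD tag/flags are set up is host-driver specific
+ * and elided here.
+ */
+static __maybe_unused int example_cmdq_flush_cache(struct mmc_host *host,
+ struct mmc_cmdq_req *cmdq_req)
+{
+ int err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd);
+
+ if (err)
+ return err;
+ return mmc_cmdq_wait_for_dcmd(host, cmdq_req);
+}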
+
/**
* mmc_start_req - start a non-blocking request
* @host: MMC host to start command
@@ -601,7 +1741,6 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
struct mmc_async_req *areq, int *error)
{
int err = 0;
- int start_err = 0;
struct mmc_async_req *data = host->areq;
/* Prepare a new request */
@@ -631,7 +1770,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
if (areq)
mmc_post_req(host, areq->mrq, -EINVAL);
- mmc_start_bkops(host->card, true);
+ mmc_check_bkops(host->card);
/* prepare the request again */
if (areq)
@@ -650,14 +1789,13 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
trace_mmc_blk_rw_start(areq->mrq->cmd->opcode,
areq->mrq->cmd->arg,
areq->mrq->data);
- start_err = __mmc_start_data_req(host, areq->mrq);
+ __mmc_start_data_req(host, areq->mrq);
}
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
- /* Cancel a prepared request if it was not started. */
- if ((err || start_err) && areq)
+ if (err && areq)
mmc_post_req(host, areq->mrq, -EINVAL);
if (err)
@@ -682,6 +1820,10 @@ EXPORT_SYMBOL(mmc_start_req);
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(host))
+ mmc_resume_bus(host);
+#endif
__mmc_start_req(host, mrq);
mmc_wait_for_req_done(host, mrq);
}
@@ -735,8 +1877,6 @@ int mmc_interrupt_hpi(struct mmc_card *card)
}
err = mmc_send_hpi_cmd(card, &status);
- if (err)
- goto out;
prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time);
do {
@@ -744,8 +1884,13 @@ int mmc_interrupt_hpi(struct mmc_card *card)
if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN)
break;
- if (time_after(jiffies, prg_wait))
- err = -ETIMEDOUT;
+ if (time_after(jiffies, prg_wait)) {
+ err = mmc_send_status(card, &status);
+ if (!err && R1_CURRENT_STATE(status) != R1_STATE_TRAN)
+ err = -ETIMEDOUT;
+ else
+ break;
+ }
} while (!err);
out:
@@ -797,6 +1942,11 @@ int mmc_stop_bkops(struct mmc_card *card)
int err = 0;
BUG_ON(!card);
+ if (unlikely(!mmc_card_configured_manual_bkops(card)))
+ goto out;
+ if (!mmc_card_doing_bkops(card))
+ goto out;
+
err = mmc_interrupt_hpi(card);
/*
@@ -805,10 +1955,11 @@ int mmc_stop_bkops(struct mmc_card *card)
*/
if (!err || (err == -EINVAL)) {
mmc_card_clr_doing_bkops(card);
+ mmc_update_bkops_hpi(&card->bkops.stats);
mmc_retune_release(card->host);
err = 0;
}
-
+out:
return err;
}
EXPORT_SYMBOL(mmc_stop_bkops);
@@ -824,8 +1975,14 @@ int mmc_read_bkops_status(struct mmc_card *card)
if (err)
return err;
- card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS];
- card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS];
+ card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS] &
+ MMC_BKOPS_URGENCY_MASK;
+ card->ext_csd.raw_exception_status =
+ ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & (EXT_CSD_URGENT_BKOPS |
+ EXT_CSD_DYNCAP_NEEDED |
+ EXT_CSD_SYSPOOL_EXHAUSTED
+ | EXT_CSD_PACKED_FAILURE);
+
kfree(ext_csd);
return 0;
}
@@ -843,6 +2000,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
{
unsigned int mult;
+ if (!card) {
+ WARN_ON(1);
+ return;
+ }
/*
* SDIO cards only define an upper 1 s limit on access.
*/
@@ -874,9 +2035,9 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
unsigned int timeout_us, limit_us;
timeout_us = data->timeout_ns / 1000;
- if (card->host->ios.clock)
+ if (mmc_host_clk_rate(card->host))
timeout_us += data->timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
+ (mmc_host_clk_rate(card->host) / 1000);
if (data->flags & MMC_DATA_WRITE)
/*
@@ -909,9 +2070,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
* Address this by setting the read timeout to a "reasonably high"
* value. For the cards tested, 600ms has proven enough. If necessary,
* this value can be increased if other problematic cards require this.
+ * Certain Hynix 5.x cards give read timeouts even with 300 ms,
+ * so increase the limit further, to the maximum value (4 s).
*/
if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
- data->timeout_ns = 600000000;
+ data->timeout_ns = 4000000000u;
data->timeout_clks = 0;
}
@@ -930,6 +2093,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
data->timeout_ns = 100000000; /* 100ms */
}
}
+ /* Increase the timeout values for some bad INAND MCP devices */
+ if (card->quirks & MMC_QUIRK_INAND_DATA_TIMEOUT) {
+ data->timeout_ns = 4000000000u; /* 4s */
+ data->timeout_clks = 0;
+ }
}
EXPORT_SYMBOL(mmc_set_data_timeout);
@@ -980,6 +2148,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
might_sleep();
add_wait_queue(&host->wq, &wait);
+
spin_lock_irqsave(&host->lock, flags);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1005,11 +2174,53 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
if (pm)
pm_runtime_get_sync(mmc_dev(host));
+ if (host->ops->enable && !stop && host->claim_cnt == 1)
+ host->ops->enable(host);
+
return stop;
}
EXPORT_SYMBOL(__mmc_claim_host);
/**
+ * mmc_try_claim_host - try to claim a host exclusively, retrying
+ * every 10 ms for the given time
+ * @host: mmc host to claim
+ * @delay_ms: total time to keep trying, in ms
+ *
+ * Returns %1 if the host is claimed, %0 otherwise.
+ */
+int mmc_try_claim_host(struct mmc_host *host, unsigned int delay_ms)
+{
+ int claimed_host = 0;
+ unsigned long flags;
+ int retry_cnt = delay_ms/10;
+ bool pm = false;
+
+ do {
+ spin_lock_irqsave(&host->lock, flags);
+ if (!host->claimed || host->claimer == current) {
+ host->claimed = 1;
+ host->claimer = current;
+ host->claim_cnt += 1;
+ claimed_host = 1;
+ if (host->claim_cnt == 1)
+ pm = true;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ if (!claimed_host)
+ mmc_delay(10);
+ } while (!claimed_host && retry_cnt--);
+
+ if (pm)
+ pm_runtime_get_sync(mmc_dev(host));
+
+ if (host->ops->enable && claimed_host && host->claim_cnt == 1)
+ host->ops->enable(host);
+ return claimed_host;
+}
+EXPORT_SYMBOL(mmc_try_claim_host);
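+
+/*
+ * Usage sketch (illustrative only; example_* is a placeholder name):
+ * a low-priority context that would rather back off than block
+ * indefinitely on a busy host.
+ */
+static __maybe_unused int example_try_read_status(struct mmc_card *card,
+ u32 *status)
+{
+ int err;
+
+ if (!mmc_try_claim_host(card->host, 100)) /* ~100 ms budget */
+ return -EBUSY;
+ err = mmc_send_status(card, status);
+ mmc_release_host(card->host);
+ return err;
+}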
+
+/**
* mmc_release_host - release a host
* @host: mmc host to release
*
@@ -1022,6 +2233,9 @@ void mmc_release_host(struct mmc_host *host)
WARN_ON(!host->claimed);
+ if (host->ops->disable && host->claim_cnt == 1)
+ host->ops->disable(host);
+
spin_lock_irqsave(&host->lock, flags);
if (--host->claim_cnt) {
/* Release for nested claim */
@@ -1045,9 +2259,14 @@ void mmc_get_card(struct mmc_card *card)
{
pm_runtime_get_sync(&card->dev);
mmc_claim_host(card->host);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host))
+ mmc_resume_bus(card->host);
+#endif
}
EXPORT_SYMBOL(mmc_get_card);
+
/*
* This is a helper function, which releases the host and drops the runtime
* pm reference for the card device.
@@ -1064,7 +2283,7 @@ EXPORT_SYMBOL(mmc_put_card);
* Internal function that does the actual ios call to the host driver,
* optionally printing some debug output.
*/
-static inline void mmc_set_ios(struct mmc_host *host)
+void mmc_set_ios(struct mmc_host *host)
{
struct mmc_ios *ios = &host->ios;
@@ -1074,23 +2293,41 @@ static inline void mmc_set_ios(struct mmc_host *host)
ios->power_mode, ios->chip_select, ios->vdd,
ios->bus_width, ios->timing);
+ if (ios->clock > 0)
+ mmc_set_ungated(host);
host->ops->set_ios(host, ios);
+ if (ios->old_rate != ios->clock) {
+ if (likely(ios->clk_ts)) {
+ char trace_info[80];
+ snprintf(trace_info, 80,
+ "%s: freq_KHz %d --> %d | t = %d",
+ mmc_hostname(host), ios->old_rate / 1000,
+ ios->clock / 1000, jiffies_to_msecs(
+ (long)jiffies - (long)ios->clk_ts));
+ trace_mmc_clk(trace_info);
+ }
+ ios->old_rate = ios->clock;
+ ios->clk_ts = jiffies;
+ }
}
+EXPORT_SYMBOL(mmc_set_ios);
/*
* Control chip select pin on a host.
*/
void mmc_set_chip_select(struct mmc_host *host, int mode)
{
+ mmc_host_clk_hold(host);
host->ios.chip_select = mode;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
* Sets the host clock to the highest possible frequency that
* is below "hz".
*/
-void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)
{
WARN_ON(hz && hz < host->f_min);
@@ -1101,6 +2338,81 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)
mmc_set_ios(host);
}
+void mmc_set_clock(struct mmc_host *host, unsigned int hz)
+{
+ mmc_host_clk_hold(host);
+ __mmc_set_clock(host, hz);
+ mmc_host_clk_release(host);
+}
+
+#ifdef CONFIG_MMC_CLKGATE
+/*
+ * This gates the clock by setting it to 0 Hz.
+ */
+void mmc_gate_clock(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ WARN_ON(!host->ios.clock);
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_old = host->ios.clock;
+ host->ios.clock = 0;
+ host->clk_gated = true;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_set_ios(host);
+}
+
+/*
+ * This restores the clock from gating by using the cached
+ * clock value.
+ */
+void mmc_ungate_clock(struct mmc_host *host)
+{
+ /*
+ * We should previously have gated the clock, so the clock shall
+ * be 0 here! The cached clock value may however be 0 during
+ * initialization, when some request operations are performed
+ * before setting the frequency. When ungate is requested in
+ * that situation we just ignore the call.
+ */
+ if (host->clk_old) {
+ WARN_ON(host->ios.clock);
+ /* This call will also set host->clk_gated to false */
+ __mmc_set_clock(host, host->clk_old);
+ /*
+ * We have seen that the host controller's clock tuning circuit
+ * may go out of sync if the controller clocks are gated.
+ * To work around this issue, trigger a retune of the tuning
+ * circuit after ungating the controller clocks.
+ */
+ mmc_retune_needed(host);
+ }
+}
+
+void mmc_set_ungated(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ /*
+ * We've been given a new frequency while the clock is gated,
+ * so make sure we regard this as ungating it.
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_gated = false;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
+#else
+void mmc_set_ungated(struct mmc_host *host)
+{
+}
+
+void mmc_gate_clock(struct mmc_host *host)
+{
+}
+#endif
+
int mmc_execute_tuning(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -1115,7 +2427,9 @@ int mmc_execute_tuning(struct mmc_card *card)
else
opcode = MMC_SEND_TUNING_BLOCK;
+ mmc_host_clk_hold(host);
err = host->ops->execute_tuning(host, opcode);
+ mmc_host_clk_release(host);
if (err)
pr_err("%s: tuning execution failed\n", mmc_hostname(host));
@@ -1130,8 +2444,10 @@ int mmc_execute_tuning(struct mmc_card *card)
*/
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
{
+ mmc_host_clk_hold(host);
host->ios.bus_mode = mode;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -1139,8 +2455,10 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode)
*/
void mmc_set_bus_width(struct mmc_host *host, unsigned int width)
{
+ mmc_host_clk_hold(host);
host->ios.bus_width = width;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -1152,9 +2470,10 @@ void mmc_set_initial_state(struct mmc_host *host)
if (mmc_host_is_spi(host))
host->ios.chip_select = MMC_CS_HIGH;
- else
+ else {
host->ios.chip_select = MMC_CS_DONTCARE;
- host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ }
host->ios.bus_width = MMC_BUS_WIDTH_1;
host->ios.timing = MMC_TIMING_LEGACY;
host->ios.drv_type = 0;
@@ -1581,8 +2900,11 @@ int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage)
int old_signal_voltage = host->ios.signal_voltage;
host->ios.signal_voltage = signal_voltage;
- if (host->ops->start_signal_voltage_switch)
+ if (host->ops->start_signal_voltage_switch) {
+ mmc_host_clk_hold(host);
err = host->ops->start_signal_voltage_switch(host, &host->ios);
+ mmc_host_clk_release(host);
+ }
if (err)
host->ios.signal_voltage = old_signal_voltage;
@@ -1620,13 +2942,19 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ /*
+ * Hold the clock reference so clock doesn't get auto gated during this
+ * voltage switch sequence.
+ */
+ mmc_host_clk_hold(host);
err = mmc_wait_for_cmd(host, &cmd, 0);
if (err)
- return err;
-
- if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR))
- return -EIO;
+ goto err_command;
+ if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) {
+ err = -EIO;
+ goto err_command;
+ }
/*
* The card should drive cmd and dat[0:3] low immediately
* after the response of cmd11, but wait 1 ms to be sure
@@ -1640,6 +2968,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
* During a signal voltage level switch, the clock must be gated
* for 5 ms according to the SD spec
*/
+ host->card_clock_off = true;
clock = host->ios.clock;
host->ios.clock = 0;
mmc_set_ios(host);
@@ -1650,6 +2979,9 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
* sent CMD11, so a power cycle is required anyway
*/
err = -EAGAIN;
+ host->ios.clock = clock;
+ mmc_set_ios(host);
+ host->card_clock_off = false;
goto power_cycle;
}
@@ -1658,6 +2990,7 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr)
host->ios.clock = clock;
mmc_set_ios(host);
+ host->card_clock_off = false;
/* Wait for at least 1 ms according to spec */
mmc_delay(1);
@@ -1675,6 +3008,9 @@ power_cycle:
mmc_power_cycle(host, ocr);
}
+err_command:
+ mmc_host_clk_release(host);
+
return err;
}
@@ -1683,8 +3019,10 @@ power_cycle:
*/
void mmc_set_timing(struct mmc_host *host, unsigned int timing)
{
+ mmc_host_clk_hold(host);
host->ios.timing = timing;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
/*
@@ -1692,8 +3030,10 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)
*/
void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
{
+ mmc_host_clk_hold(host);
host->ios.drv_type = drv_type;
mmc_set_ios(host);
+ mmc_host_clk_release(host);
}
int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
@@ -1701,6 +3041,7 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
{
struct mmc_host *host = card->host;
int host_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
*drv_type = 0;
@@ -1723,10 +3064,14 @@ int mmc_select_drive_strength(struct mmc_card *card, unsigned int max_dtr,
* information and let the hardware specific code
* return what is possible given the options
*/
- return host->ops->select_drive_strength(card, max_dtr,
- host_drv_type,
- card_drv_type,
- drv_type);
+ mmc_host_clk_hold(host);
+ drive_strength = host->ops->select_drive_strength(card, max_dtr,
+ host_drv_type,
+ card_drv_type,
+ drv_type);
+ mmc_host_clk_release(host);
+
+ return drive_strength;
}
/*
@@ -1745,6 +3090,8 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
if (host->ios.power_mode == MMC_POWER_ON)
return;
+ mmc_host_clk_hold(host);
+
mmc_pwrseq_pre_power_on(host);
host->ios.vdd = fls(ocr) - 1;
@@ -1778,6 +3125,8 @@ void mmc_power_up(struct mmc_host *host, u32 ocr)
* time required to reach a stable voltage.
*/
mmc_delay(10);
+
+ mmc_host_clk_release(host);
}
void mmc_power_off(struct mmc_host *host)
@@ -1785,6 +3134,8 @@ void mmc_power_off(struct mmc_host *host)
if (host->ios.power_mode == MMC_POWER_OFF)
return;
+ mmc_host_clk_hold(host);
+
mmc_pwrseq_power_off(host);
host->ios.clock = 0;
@@ -1800,6 +3151,8 @@ void mmc_power_off(struct mmc_host *host)
* can be successfully turned on again.
*/
mmc_delay(1);
+
+ mmc_host_clk_release(host);
}
void mmc_power_cycle(struct mmc_host *host, u32 ocr)
@@ -1849,6 +3202,40 @@ static inline void mmc_bus_put(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
+int mmc_resume_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+ int err = 0;
+
+ if (!mmc_bus_needs_resume(host))
+ return -EINVAL;
+
+ pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead && host->card) {
+ mmc_power_up(host, host->card->ocr);
+ BUG_ON(!host->bus_ops->resume);
+ host->bus_ops->resume(host);
+ if (mmc_card_cmdq(host->card)) {
+ err = mmc_cmdq_halt(host, false);
+ if (err)
+ pr_err("%s: %s: unhalt failed: %d\n",
+ mmc_hostname(host), __func__, err);
+ else
+ mmc_card_clr_suspended(host->card);
+ }
+ }
+
+ mmc_bus_put(host);
+ pr_debug("%s: Deferred resume completed\n", mmc_hostname(host));
+ return 0;
+}
+EXPORT_SYMBOL(mmc_resume_bus);
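+
+/*
+ * Usage sketch (illustrative only; example_* is a placeholder name):
+ * with deferred resume configured, every I/O entry point gates on
+ * mmc_bus_needs_resume() before touching the card, as the
+ * mmc_wait_for_req() and mmc_get_card() hunks above do.
+ */
+static __maybe_unused void example_before_io(struct mmc_host *host)
+{
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(host))
+ mmc_resume_bus(host);
+#endif
+}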
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
@@ -2015,7 +3402,7 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
*/
timeout_clks <<= 1;
timeout_us += (timeout_clks * 1000) /
- (card->host->ios.clock / 1000);
+ (mmc_host_clk_rate(card->host) / 1000);
erase_timeout = timeout_us / 1000;
@@ -2082,20 +3469,9 @@ static unsigned int mmc_erase_timeout(struct mmc_card *card,
return mmc_mmc_erase_timeout(card, arg, qty);
}
-static int mmc_do_erase(struct mmc_card *card, unsigned int from,
- unsigned int to, unsigned int arg)
+static u32 mmc_get_erase_qty(struct mmc_card *card, u32 from, u32 to)
{
- struct mmc_command cmd = {0};
- unsigned int qty = 0;
- unsigned long timeout;
- unsigned int fr, nr;
- int err;
-
- fr = from;
- nr = to - from + 1;
- trace_mmc_blk_erase_start(arg, fr, nr);
-
- mmc_retune_hold(card->host);
+ u32 qty = 0;
/*
* qty is used to calculate the erase timeout which depends on how many
@@ -2121,12 +3497,122 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
else
qty += ((to / card->erase_size) -
(from / card->erase_size)) + 1;
+ return qty;
+}
+
+static int mmc_cmdq_send_erase_cmd(struct mmc_cmdq_req *cmdq_req,
+ struct mmc_card *card, u32 opcode, u32 arg, u32 qty)
+{
+ struct mmc_command *cmd = cmdq_req->mrq.cmd;
+ int err;
+
+ memset(cmd, 0, sizeof(struct mmc_command));
+
+ cmd->opcode = opcode;
+ cmd->arg = arg;
+ if (cmd->opcode == MMC_ERASE) {
+ cmd->flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd->busy_timeout = mmc_erase_timeout(card, arg, qty);
+ } else {
+ cmd->flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ }
+
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err) {
+ pr_err("mmc_erase: group start error %d, status %#x\n",
+ err, cmd->resp[0]);
+ return -EIO;
+ }
+ return 0;
+}
+
+static int mmc_cmdq_do_erase(struct mmc_cmdq_req *cmdq_req,
+ struct mmc_card *card, unsigned int from,
+ unsigned int to, unsigned int arg)
+{
+ struct mmc_command *cmd = cmdq_req->mrq.cmd;
+ unsigned int qty = 0;
+ unsigned long timeout;
+ unsigned int fr, nr;
+ int err;
+
+ fr = from;
+ nr = to - from + 1;
+ trace_mmc_blk_erase_start(arg, fr, nr);
+
+ qty = mmc_get_erase_qty(card, from, to);
+
+ if (!mmc_card_blockaddr(card)) {
+ from <<= 9;
+ to <<= 9;
+ }
+
+ err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_START,
+ from, qty);
+ if (err)
+ goto out;
+
+ err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE_GROUP_END,
+ to, qty);
+ if (err)
+ goto out;
+
+ err = mmc_cmdq_send_erase_cmd(cmdq_req, card, MMC_ERASE,
+ arg, qty);
+ if (err)
+ goto out;
+
+ timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);
+ do {
+ memset(cmd, 0, sizeof(struct mmc_command));
+ cmd->opcode = MMC_SEND_STATUS;
+ cmd->arg = card->rca << 16;
+ cmd->flags = MMC_RSP_R1 | MMC_CMD_AC;
+ /* Do not retry else we can't see errors */
+ err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req);
+ if (err || (cmd->resp[0] & 0xFDF92000)) {
+ pr_err("error %d requesting status %#x\n",
+ err, cmd->resp[0]);
+ err = -EIO;
+ goto out;
+ }
+ /*
+ * Timeout if the device never becomes ready for data
+ * and never leaves the program state.
+ */
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: Card stuck in programming state! %s\n",
+ mmc_hostname(card->host), __func__);
+ err = -EIO;
+ goto out;
+ }
+ } while (!(cmd->resp[0] & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG));
+out:
+ trace_mmc_blk_erase_end(arg, fr, nr);
+ return err;
+}
+
+static int mmc_do_erase(struct mmc_card *card, unsigned int from,
+ unsigned int to, unsigned int arg)
+{
+ struct mmc_command cmd = {0};
+ unsigned int qty = 0;
+ unsigned long timeout;
+ unsigned int fr, nr;
+ int err;
+
+ fr = from;
+ nr = to - from + 1;
+ trace_mmc_blk_erase_start(arg, fr, nr);
+
+ qty = mmc_get_erase_qty(card, from, to);
if (!mmc_card_blockaddr(card)) {
from <<= 9;
to <<= 9;
}
+ mmc_retune_hold(card->host);
if (mmc_card_sd(card))
cmd.opcode = SD_ERASE_WR_BLK_START;
else
@@ -2205,21 +3691,9 @@ out:
return err;
}
-/**
- * mmc_erase - erase sectors.
- * @card: card to erase
- * @from: first sector to erase
- * @nr: number of sectors to erase
- * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
- *
- * Caller must claim host before calling this function.
- */
-int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
- unsigned int arg)
+int mmc_erase_sanity_check(struct mmc_card *card, unsigned int from,
+ unsigned int nr, unsigned int arg)
{
- unsigned int rem, to = from + nr;
- int err;
-
if (!(card->host->caps & MMC_CAP_ERASE) ||
!(card->csd.cmdclass & CCC_ERASE))
return -EOPNOTSUPP;
@@ -2242,6 +3716,68 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
if (from % card->erase_size || nr % card->erase_size)
return -EINVAL;
}
+ return 0;
+}
+
+int mmc_cmdq_erase(struct mmc_cmdq_req *cmdq_req,
+ struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg)
+{
+ unsigned int rem, to = from + nr;
+ int ret;
+
+ ret = mmc_erase_sanity_check(card, from, nr, arg);
+ if (ret)
+ return ret;
+
+ if (arg == MMC_ERASE_ARG) {
+ rem = from % card->erase_size;
+ if (rem) {
+ rem = card->erase_size - rem;
+ from += rem;
+ if (nr > rem)
+ nr -= rem;
+ else
+ return 0;
+ }
+ rem = nr % card->erase_size;
+ if (rem)
+ nr -= rem;
+ }
+
+ if (nr == 0)
+ return 0;
+
+ to = from + nr;
+
+ if (to <= from)
+ return -EINVAL;
+
+ /* 'from' and 'to' are inclusive */
+ to -= 1;
+
+ return mmc_cmdq_do_erase(cmdq_req, card, from, to, arg);
+}
+EXPORT_SYMBOL(mmc_cmdq_erase);
+
+/**
+ * mmc_erase - erase sectors.
+ * @card: card to erase
+ * @from: first sector to erase
+ * @nr: number of sectors to erase
+ * @arg: erase command argument (SD supports only %MMC_ERASE_ARG)
+ *
+ * Caller must claim host before calling this function.
+ */
+int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
+ unsigned int arg)
+{
+ unsigned int rem, to = from + nr;
+ int ret;
+
+ ret = mmc_erase_sanity_check(card, from, nr, arg);
+ if (ret)
+ return ret;
if (arg == MMC_ERASE_ARG) {
rem = from % card->erase_size;
@@ -2279,10 +3815,10 @@ int mmc_erase(struct mmc_card *card, unsigned int from, unsigned int nr,
*/
rem = card->erase_size - (from % card->erase_size);
if ((arg & MMC_TRIM_ARGS) && (card->eg_boundary) && (nr > rem)) {
- err = mmc_do_erase(card, from, from + rem - 1, arg);
+ ret = mmc_do_erase(card, from, from + rem - 1, arg);
from += rem;
- if ((err) || (to <= from))
- return err;
+ if ((ret) || (to <= from))
+ return ret;
}
return mmc_do_erase(card, from, to, arg);
@@ -2412,7 +3948,8 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card)
struct mmc_host *host = card->host;
unsigned int max_discard, max_trim;
- if (!host->max_busy_timeout)
+ if (!host->max_busy_timeout ||
+ (host->caps2 & MMC_CAP2_MAX_DISCARD_SIZE))
return UINT_MAX;
/*
@@ -2469,9 +4006,26 @@ static void mmc_hw_reset_for_init(struct mmc_host *host)
{
if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
return;
+ mmc_host_clk_hold(host);
host->ops->hw_reset(host);
+ mmc_host_clk_release(host);
}
+/*
+ * mmc_cmdq_hw_reset: helper API that does a reset_all of the host
+ * and reinitializes the card.
+ * Must be called with the host claimed (mmc_claim_host()).
+ */
+int mmc_cmdq_hw_reset(struct mmc_host *host)
+{
+ if (!host->bus_ops->reset)
+ return -EOPNOTSUPP;
+
+ return host->bus_ops->reset(host);
+}
+EXPORT_SYMBOL(mmc_cmdq_hw_reset);
+
int mmc_hw_reset(struct mmc_host *host)
{
int ret;
@@ -2488,8 +4042,9 @@ int mmc_hw_reset(struct mmc_host *host)
ret = host->bus_ops->reset(host);
mmc_bus_put(host);
- if (ret != -EOPNOTSUPP)
- pr_warn("%s: tried to reset card\n", mmc_hostname(host));
+ if (ret)
+ pr_warn("%s: tried to reset card, got error %d\n",
+ mmc_hostname(host), ret);
return ret;
}
@@ -2558,8 +4113,18 @@ int _mmc_detect_card_removed(struct mmc_host *host)
}
if (ret) {
- mmc_card_set_removed(host->card);
- pr_debug("%s: card remove detected\n", mmc_hostname(host));
+ if (host->ops->get_cd && host->ops->get_cd(host)) {
+ mmc_recovery_fallback_lower_speed(host);
+ ret = 0;
+ } else {
+ mmc_card_set_removed(host->card);
+ if (host->card->sdr104_blocked) {
+ mmc_host_set_sdr104(host);
+ host->card->sdr104_blocked = false;
+ }
+ pr_debug("%s: card remove detected\n",
+ mmc_hostname(host));
+ }
}
return ret;
@@ -2602,17 +4167,21 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
+ unsigned long flags;
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
- int i;
if (host->trigger_card_event && host->ops->card_event) {
host->ops->card_event(host);
host->trigger_card_event = false;
}
- if (host->rescan_disable)
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->rescan_disable) {
+ spin_unlock_irqrestore(&host->lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
/* If there is a non-removable card registered, only scan once */
if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
@@ -2659,12 +4228,7 @@ void mmc_rescan(struct work_struct *work)
}
mmc_claim_host(host);
- for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
- break;
- if (freqs[i] <= host->f_min)
- break;
- }
+ mmc_rescan_try_freq(host, host->f_min);
mmc_release_host(host);
out:
@@ -2674,18 +4238,18 @@ void mmc_rescan(struct work_struct *work)
void mmc_start_host(struct mmc_host *host)
{
+ mmc_claim_host(host);
host->f_init = max(freqs[0], host->f_min);
host->rescan_disable = 0;
host->ios.power_mode = MMC_POWER_UNDEFINED;
- mmc_claim_host(host);
if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP)
mmc_power_off(host);
else
mmc_power_up(host, host->ocr_avail);
- mmc_release_host(host);
mmc_gpiod_request_cd_irq(host);
+ mmc_release_host(host);
_mmc_detect_change(host, 0, false);
}
@@ -2769,7 +4333,9 @@ int mmc_power_restore_host(struct mmc_host *host)
}
mmc_power_up(host, host->card->ocr);
+ mmc_claim_host(host);
ret = host->bus_ops->power_restore(host);
+ mmc_release_host(host);
mmc_bus_put(host);
@@ -2778,6 +4344,40 @@ int mmc_power_restore_host(struct mmc_host *host)
EXPORT_SYMBOL(mmc_power_restore_host);
/*
+ * Issue a barrier to order the requests held in the device cache
+ */
+int mmc_cache_barrier(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+
+ if (!card->ext_csd.cache_ctrl ||
+ (card->quirks & MMC_QUIRK_CACHE_DISABLE))
+ goto out;
+
+ if (!mmc_card_mmc(card))
+ goto out;
+
+ if (!card->ext_csd.barrier_en)
+ return -ENOTSUPP;
+
+ /*
+ * Once a device has received the maximum supported number
+ * of barrier requests, a barrier command is treated as a
+ * flush command. Hence, it is better to use the flush
+ * timeout instead of a generic CMD6 timeout.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 0x2, 0);
+ if (err)
+ pr_err("%s: cache barrier error %d\n",
+ mmc_hostname(host), err);
+out:
+ return err;
+}
+EXPORT_SYMBOL(mmc_cache_barrier);
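+
+/*
+ * Usage sketch (illustrative only; example_* is a placeholder name):
+ * a block-layer caller can fall back to a full cache flush when the
+ * card lacks barrier support.
+ */
+static __maybe_unused int example_order_cached_writes(struct mmc_card *card)
+{
+ int err = mmc_cache_barrier(card);
+
+ if (err == -ENOTSUPP)
+ err = mmc_flush_cache(card);
+ return err;
+}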
+
+/*
* Flush the cache to the non-volatile storage.
*/
int mmc_flush_cache(struct mmc_card *card)
@@ -2786,12 +4386,23 @@ int mmc_flush_cache(struct mmc_card *card)
if (mmc_card_mmc(card) &&
(card->ext_csd.cache_size > 0) &&
- (card->ext_csd.cache_ctrl & 1)) {
+ (card->ext_csd.cache_ctrl & 1) &&
+ (!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_FLUSH_CACHE, 1, 0);
- if (err)
+ if (err == -ETIMEDOUT) {
+ pr_err("%s: cache flush timeout\n",
+ mmc_hostname(card->host));
+ err = mmc_interrupt_hpi(card);
+ if (err) {
+ pr_err("%s: mmc_interrupt_hpi() failed (%d)\n",
+ mmc_hostname(card->host), err);
+ err = -ENODEV;
+ }
+ } else if (err) {
pr_err("%s: cache flush error %d\n",
mmc_hostname(card->host), err);
+ }
}
return err;
@@ -2845,6 +4456,10 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
+ if (mmc_bus_manual_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
spin_unlock_irqrestore(&host->lock, flags);
_mmc_detect_change(host, 0, false);
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 09241e56d628..c66187299598 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -15,21 +15,6 @@
#define MMC_CMD_RETRIES 3
-struct mmc_bus_ops {
- void (*remove)(struct mmc_host *);
- void (*detect)(struct mmc_host *);
- int (*pre_suspend)(struct mmc_host *);
- int (*suspend)(struct mmc_host *);
- int (*resume)(struct mmc_host *);
- int (*runtime_suspend)(struct mmc_host *);
- int (*runtime_resume)(struct mmc_host *);
- int (*power_save)(struct mmc_host *);
- int (*power_restore)(struct mmc_host *);
- int (*alive)(struct mmc_host *);
- int (*shutdown)(struct mmc_host *);
- int (*reset)(struct mmc_host *);
-};
-
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
void mmc_detach_bus(struct mmc_host *host);
@@ -40,6 +25,11 @@ void mmc_init_erase(struct mmc_card *card);
void mmc_set_chip_select(struct mmc_host *host, int mode);
void mmc_set_clock(struct mmc_host *host, unsigned int hz);
+int mmc_clk_update_freq(struct mmc_host *host,
+ unsigned long freq, enum mmc_load state);
+void mmc_gate_clock(struct mmc_host *host);
+void mmc_ungate_clock(struct mmc_host *host);
+void mmc_set_ungated(struct mmc_host *host);
void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);
void mmc_set_bus_width(struct mmc_host *host, unsigned int width);
u32 mmc_select_voltage(struct mmc_host *host, u32 ocr);
@@ -59,6 +49,8 @@ static inline void mmc_delay(unsigned int ms)
if (ms < 1000 / HZ) {
cond_resched();
mdelay(ms);
+ } else if (ms < jiffies_to_msecs(2)) {
+ usleep_range(ms * 1000, (ms + 1) * 1000);
} else {
msleep(ms);
}
@@ -86,6 +78,12 @@ void mmc_remove_card_debugfs(struct mmc_card *card);
void mmc_init_context_info(struct mmc_host *host);
+extern bool mmc_can_scale_clk(struct mmc_host *host);
+extern int mmc_init_clk_scaling(struct mmc_host *host);
+extern int mmc_resume_clk_scaling(struct mmc_host *host);
+extern int mmc_exit_clk_scaling(struct mmc_host *host);
+extern unsigned long mmc_get_max_frequency(struct mmc_host *host);
+
int mmc_execute_tuning(struct mmc_card *card);
int mmc_hs200_to_hs400(struct mmc_card *card);
int mmc_hs400_to_hs200(struct mmc_card *card);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 154aced0b91b..72bfdd835178 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/fault-inject.h>
+#include <linux/uaccess.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -31,6 +32,26 @@ module_param(fail_request, charp, 0);
#endif /* CONFIG_FAIL_MMC_REQUEST */
/* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
+static int mmc_ring_buffer_show(struct seq_file *s, void *data)
+{
+ struct mmc_host *mmc = s->private;
+
+ mmc_dump_trace_buffer(mmc, s);
+ return 0;
+}
+
+static int mmc_ring_buffer_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_ring_buffer_show, inode->i_private);
+}
+
+static const struct file_operations mmc_ring_buffer_fops = {
+ .open = mmc_ring_buffer_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int mmc_ios_show(struct seq_file *s, void *data)
{
static const char *vdd_str[] = {
@@ -233,6 +254,132 @@ static int mmc_clock_opt_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(mmc_clock_fops, mmc_clock_opt_get, mmc_clock_opt_set,
"%llu\n");
+#include <linux/delay.h>
+
+static int mmc_scale_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ *val = host->clk_scaling.curr_freq;
+
+ return 0;
+}
+
+static int mmc_scale_set(void *data, u64 val)
+{
+ int err = 0;
+ struct mmc_host *host = data;
+
+ mmc_claim_host(host);
+ mmc_host_clk_hold(host);
+
+ /* change frequency from sysfs manually */
+ err = mmc_clk_update_freq(host, val, host->clk_scaling.state);
+ if (err == -EAGAIN)
+ err = 0;
+ else if (err)
+ pr_err("%s: clock scale to %llu failed with error %d\n",
+ mmc_hostname(host), val, err);
+ else
+ pr_debug("%s: clock change to %llu finished successfully (%s)\n",
+ mmc_hostname(host), val, current->comm);
+
+ mmc_host_clk_release(host);
+ mmc_release_host(host);
+
+ return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_scale_fops, mmc_scale_get, mmc_scale_set,
+ "%llu\n");
+
+static int mmc_max_clock_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ if (!host)
+ return -EINVAL;
+
+ *val = host->f_max;
+
+ return 0;
+}
+
+static int mmc_max_clock_set(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+ int err = -EINVAL;
+ unsigned long freq = val;
+ unsigned int old_freq;
+
+ if (!host || (val < host->f_min))
+ goto out;
+
+ mmc_claim_host(host);
+ if (host->bus_ops && host->bus_ops->change_bus_speed) {
+ old_freq = host->f_max;
+ host->f_max = freq;
+
+ err = host->bus_ops->change_bus_speed(host, &freq);
+
+ if (err)
+ host->f_max = old_freq;
+ }
+ mmc_release_host(host);
+out:
+ return err;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_max_clock_fops, mmc_max_clock_get,
+ mmc_max_clock_set, "%llu\n");
+
+static int mmc_force_err_set(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+
+ if (host && host->card && host->ops &&
+ host->ops->force_err_irq) {
+ /*
+ * To access the force error irq reg, we need to make
+ * sure the host is powered up and host clock is ticking.
+ */
+ mmc_get_card(host->card);
+ host->ops->force_err_irq(host, val);
+ mmc_put_card(host->card);
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_force_err_fops, NULL, mmc_force_err_set, "%llu\n");
+
+static int mmc_err_state_get(void *data, u64 *val)
+{
+ struct mmc_host *host = data;
+
+ if (!host)
+ return -EINVAL;
+
+ *val = host->err_occurred ? 1 : 0;
+
+ return 0;
+}
+
+static int mmc_err_state_clear(void *data, u64 val)
+{
+ struct mmc_host *host = data;
+
+ if (!host)
+ return -EINVAL;
+
+ host->err_occurred = false;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(mmc_err_state, mmc_err_state_get,
+ mmc_err_state_clear, "%llu\n");
+
void mmc_add_host_debugfs(struct mmc_host *host)
{
struct dentry *root;
@@ -255,6 +402,38 @@ void mmc_add_host_debugfs(struct mmc_host *host)
&mmc_clock_fops))
goto err_node;
+ if (!debugfs_create_file("max_clock", S_IRUSR | S_IWUSR, root, host,
+ &mmc_max_clock_fops))
+ goto err_node;
+
+ if (!debugfs_create_file("scale", S_IRUSR | S_IWUSR, root, host,
+ &mmc_scale_fops))
+ goto err_node;
+
+ if (!debugfs_create_bool("skip_clk_scale_freq_update",
+ S_IRUSR | S_IWUSR, root,
+ &host->clk_scaling.skip_clk_scale_freq_update))
+ goto err_node;
+
+ if (!debugfs_create_bool("cmdq_task_history",
+ S_IRUSR | S_IWUSR, root,
+ &host->cmdq_thist_enabled))
+ goto err_node;
+
+#ifdef CONFIG_MMC_RING_BUFFER
+ if (!debugfs_create_file("ring_buffer", S_IRUSR,
+ root, host, &mmc_ring_buffer_fops))
+ goto err_node;
+#endif
+ if (!debugfs_create_file("err_state", S_IRUSR | S_IWUSR, root, host,
+ &mmc_err_state))
+ goto err_node;
+
+#ifdef CONFIG_MMC_CLKGATE
+ if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
+ root, &host->clk_delay))
+ goto err_node;
+#endif
#ifdef CONFIG_FAIL_MMC_REQUEST
if (fail_request)
setup_fault_attr(&fail_default_attr, fail_request);
@@ -264,6 +443,10 @@ void mmc_add_host_debugfs(struct mmc_host *host)
&host->fail_mmc_request)))
goto err_node;
#endif
+ if (!debugfs_create_file("force_error", S_IWUSR, root, host,
+ &mmc_force_err_fops))
+ goto err_node;
+
return;
err_node:
@@ -285,11 +468,26 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
int ret;
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ ret = mmc_cmdq_halt_on_empty_queue(card->host);
+ if (ret) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host), __func__,
+ ret);
+ goto out;
+ }
+ }
ret = mmc_send_status(data, &status);
if (!ret)
*val = status;
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+out:
mmc_put_card(card);
return ret;
@@ -312,8 +510,18 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
return -ENOMEM;
mmc_get_card(card);
+ if (mmc_card_cmdq(card)) {
+ err = mmc_cmdq_halt_on_empty_queue(card->host);
+ if (err) {
+ pr_err("%s: halt failed while doing %s err (%d)\n",
+ mmc_hostname(card->host), __func__,
+ err);
+ mmc_put_card(card);
+ goto out_free_halt;
+ }
+ }
+
err = mmc_get_ext_csd(card, &ext_csd);
- mmc_put_card(card);
if (err)
goto out_free;
@@ -323,10 +531,25 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
BUG_ON(n != EXT_CSD_STR_LEN);
filp->private_data = buf;
+
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+
+ mmc_put_card(card);
kfree(ext_csd);
return 0;
out_free:
+ if (mmc_card_cmdq(card)) {
+ if (mmc_cmdq_halt(card->host, false))
+ pr_err("%s: %s: cmdq unhalt failed\n",
+ mmc_hostname(card->host), __func__);
+ }
+ mmc_put_card(card);
+out_free_halt:
kfree(buf);
return err;
}
@@ -353,6 +576,275 @@ static const struct file_operations mmc_dbg_ext_csd_fops = {
.llseek = default_llseek,
};
+static int mmc_wr_pack_stats_open(struct inode *inode, struct file *filp)
+{
+ struct mmc_card *card = inode->i_private;
+
+ filp->private_data = card;
+ card->wr_pack_stats.print_in_read = 1;
+ return 0;
+}
+
+#define TEMP_BUF_SIZE 256
+static ssize_t mmc_wr_pack_stats_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct mmc_card *card = filp->private_data;
+ struct mmc_wr_pack_stats *pack_stats;
+ int i;
+ int max_num_of_packed_reqs = 0;
+ char *temp_buf;
+
+ if (!card)
+ return cnt;
+
+ if (!access_ok(VERIFY_WRITE, ubuf, cnt))
+ return cnt;
+
+ if (!card->wr_pack_stats.print_in_read)
+ return 0;
+
+ if (!card->wr_pack_stats.enabled) {
+ pr_info("%s: write packing statistics are disabled\n",
+ mmc_hostname(card->host));
+ goto exit;
+ }
+
+ pack_stats = &card->wr_pack_stats;
+
+ if (!pack_stats->packing_events) {
+ pr_info("%s: NULL packing_events\n", mmc_hostname(card->host));
+ goto exit;
+ }
+
+ max_num_of_packed_reqs = card->ext_csd.max_packed_writes;
+
+ temp_buf = kmalloc(TEMP_BUF_SIZE, GFP_KERNEL);
+ if (!temp_buf)
+ goto exit;
+
+ spin_lock(&pack_stats->lock);
+
+ snprintf(temp_buf, TEMP_BUF_SIZE, "%s: write packing statistics:\n",
+ mmc_hostname(card->host));
+ strlcat(ubuf, temp_buf, cnt);
+
+ for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) {
+ if (pack_stats->packing_events[i]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: Packed %d reqs - %d times\n",
+ mmc_hostname(card->host), i,
+ pack_stats->packing_events[i]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ }
+
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: stopped packing due to the following reasons:\n",
+ mmc_hostname(card->host));
+ strlcat(ubuf, temp_buf, cnt);
+
+ if (pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: exceed max num of segments\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[EXCEEDS_SEGMENTS]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[EXCEEDS_SECTORS]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: exceed max num of sectors\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[EXCEEDS_SECTORS]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[WRONG_DATA_DIR]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: wrong data direction\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[WRONG_DATA_DIR]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: flush or discard\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[FLUSH_OR_DISCARD]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[EMPTY_QUEUE]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: empty queue\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[EMPTY_QUEUE]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[REL_WRITE]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: rel write\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[REL_WRITE]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[THRESHOLD]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: Threshold\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[THRESHOLD]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+
+ if (pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: Large sector alignment\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[LARGE_SEC_ALIGN]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[RANDOM]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: random request\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[RANDOM]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+ if (pack_stats->pack_stop_reason[FUA]) {
+ snprintf(temp_buf, TEMP_BUF_SIZE,
+ "%s: %d times: fua request\n",
+ mmc_hostname(card->host),
+ pack_stats->pack_stop_reason[FUA]);
+ strlcat(ubuf, temp_buf, cnt);
+ }
+
+ spin_unlock(&pack_stats->lock);
+
+ kfree(temp_buf);
+
+ pr_info("%s", ubuf);
+
+exit:
+ if (card->wr_pack_stats.print_in_read == 1) {
+ card->wr_pack_stats.print_in_read = 0;
+ return strnlen(ubuf, cnt);
+ }
+
+ return 0;
+}
+
+static ssize_t mmc_wr_pack_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mmc_card *card = filp->private_data;
+ int value;
+
+ if (!card)
+ return cnt;
+
+ if (!access_ok(VERIFY_READ, ubuf, cnt))
+ return cnt;
+
+ sscanf(ubuf, "%d", &value);
+ if (value) {
+ mmc_blk_init_packed_statistics(card);
+ } else {
+ spin_lock(&card->wr_pack_stats.lock);
+ card->wr_pack_stats.enabled = false;
+ spin_unlock(&card->wr_pack_stats.lock);
+ }
+
+ return cnt;
+}
+
+static const struct file_operations mmc_dbg_wr_pack_stats_fops = {
+ .open = mmc_wr_pack_stats_open,
+ .read = mmc_wr_pack_stats_read,
+ .write = mmc_wr_pack_stats_write,
+};
+
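A hedged userspace sketch of driving the wr_pack_stats file implemented above; the debugfs card path is an assumption (mmcblk naming varies):

/* Illustrative only: writing 1 re-arms packed statistics via
 * mmc_blk_init_packed_statistics(); writing 0 disables them under lock.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/mmc0/mmc0:0001/wr_pack_stats", "w");

	if (!f)
		return 1;
	fprintf(f, "1\n");	/* 1 = enable, 0 = disable */
	fclose(f);
	return 0;
}
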
+static int mmc_bkops_stats_read(struct seq_file *file, void *data)
+{
+ struct mmc_card *card = file->private;
+ struct mmc_bkops_stats *stats;
+ int i;
+
+ if (!card)
+ return -EINVAL;
+
+ stats = &card->bkops.stats;
+
+ if (!stats->enabled) {
+ pr_info("%s: bkops statistics are disabled\n",
+ mmc_hostname(card->host));
+ goto exit;
+ }
+
+ spin_lock(&stats->lock);
+
+ seq_printf(file, "%s: bkops statistics:\n",
+ mmc_hostname(card->host));
+ seq_printf(file, "%s: BKOPS: sent START_BKOPS to device: %u\n",
+ mmc_hostname(card->host), stats->manual_start);
+ seq_printf(file, "%s: BKOPS: stopped due to HPI: %u\n",
+ mmc_hostname(card->host), stats->hpi);
+ seq_printf(file, "%s: BKOPS: sent AUTO_EN set to 1: %u\n",
+ mmc_hostname(card->host), stats->auto_start);
+ seq_printf(file, "%s: BKOPS: sent AUTO_EN set to 0: %u\n",
+ mmc_hostname(card->host), stats->auto_stop);
+
+ for (i = 0 ; i < MMC_BKOPS_NUM_SEVERITY_LEVELS ; ++i)
+ seq_printf(file, "%s: BKOPS: due to level %d: %u\n",
+ mmc_hostname(card->host), i, stats->level[i]);
+
+ spin_unlock(&stats->lock);
+
+exit:
+
+ return 0;
+}
+
+static ssize_t mmc_bkops_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct mmc_card *card = filp->f_mapping->host->i_private;
+ int value;
+ struct mmc_bkops_stats *stats;
+ int err;
+
+ if (!card)
+ return cnt;
+
+ stats = &card->bkops.stats;
+
+ err = kstrtoint_from_user(ubuf, cnt, 0, &value);
+ if (err) {
+ pr_err("%s: %s: error parsing input from user (%d)\n",
+ mmc_hostname(card->host), __func__, err);
+ return err;
+ }
+ if (value) {
+ mmc_blk_init_bkops_statistics(card);
+ } else {
+ spin_lock(&stats->lock);
+ stats->enabled = false;
+ spin_unlock(&stats->lock);
+ }
+
+ return cnt;
+}
+
+static int mmc_bkops_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_bkops_stats_read, inode->i_private);
+}
+
+static const struct file_operations mmc_dbg_bkops_stats_fops = {
+ .open = mmc_bkops_stats_open,
+ .read = seq_read,
+ .write = mmc_bkops_stats_write,
+};
+
void mmc_add_card_debugfs(struct mmc_card *card)
{
struct mmc_host *host = card->host;
@@ -385,6 +877,19 @@ void mmc_add_card_debugfs(struct mmc_card *card)
&mmc_dbg_ext_csd_fops))
goto err;
+ if (mmc_card_mmc(card) && (card->ext_csd.rev >= 6) &&
+ (card->host->caps2 & MMC_CAP2_PACKED_WR))
+ if (!debugfs_create_file("wr_pack_stats", S_IRUSR, root, card,
+ &mmc_dbg_wr_pack_stats_fops))
+ goto err;
+
+ if (mmc_card_mmc(card) && (card->ext_csd.rev >= 5) &&
+ (mmc_card_configured_auto_bkops(card) ||
+ mmc_card_configured_manual_bkops(card)))
+ if (!debugfs_create_file("bkops_stats", S_IRUSR, root, card,
+ &mmc_dbg_bkops_stats_fops))
+ goto err;
+
return;
err:
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 443fdfc22d8a..ae54302be8fd 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -4,6 +4,7 @@
* Copyright (C) 2003 Russell King, All Rights Reserved.
* Copyright (C) 2007-2008 Pierre Ossman
* Copyright (C) 2010 Linus Walleij
+ * Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -25,6 +26,8 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/ring_buffer.h>
+
#include <linux/mmc/slot-gpio.h>
#include "core.h"
@@ -32,6 +35,11 @@
#include "slot-gpio.h"
#include "pwrseq.h"
+#define cls_dev_to_mmc_host(d) container_of(d, struct mmc_host, class_dev)
+#define MMC_DEVFRQ_DEFAULT_UP_THRESHOLD 35
+#define MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD 5
+#define MMC_DEVFRQ_DEFAULT_POLLING_MSEC 100
+
static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);
@@ -59,6 +67,259 @@ void mmc_unregister_host_class(void)
class_unregister(&mmc_host_class);
}
+#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clkgate_delay = value;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return count;
+}
+
+/*
+ * Enabling clock gating will make the core call out to the host
+ * once up and once down around each request or card operation,
+ * however these are intermingled. The driver sees this through
+ * set_ios() operations with the ios.clock field set to 0 to gate
+ * (disable) the block clock, and set back to the old frequency to
+ * enable it again.
+ */
+static void mmc_host_clk_gate_delayed(struct mmc_host *host)
+{
+ unsigned long tick_ns;
+ unsigned long freq = host->ios.clock;
+ unsigned long flags;
+
+ if (!freq) {
+ pr_debug("%s: frequency set to 0 in disable function, "
+ "this means the clock is already disabled.\n",
+ mmc_hostname(host));
+ return;
+ }
+ /*
+ * New requests may have appeared while we were scheduling; if so,
+ * there is no reason to delay the check before clk_disable().
+ */
+ spin_lock_irqsave(&host->clk_lock, flags);
+
+ /*
+ * Delay n bus cycles (at least 8 from MMC spec) before attempting
+ * to disable the MCI block clock. The reference count may have
+ * gone up again after this delay due to rescheduling!
+ */
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ tick_ns = DIV_ROUND_UP(1000000000, freq);
+ ndelay(host->clk_delay * tick_ns);
+ } else {
+ /* New users appeared while waiting for this work */
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return;
+ }
+ mutex_lock(&host->clk_gate_mutex);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (!host->clk_requests) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ /* This will set host->ios.clock to 0 */
+ mmc_gate_clock(host);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
+ }
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mutex_unlock(&host->clk_gate_mutex);
+}
+
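The comment above says the host driver observes gating purely through set_ios() with ios.clock == 0. A minimal sketch of that driver side, where every sdhci_foo_* name is hypothetical (only mmc_priv() and struct mmc_ios are real):

/* Sketch: host-driver reaction to core clock gating. */
struct sdhci_foo { void __iomem *ioaddr; };

static void sdhci_foo_gate_clk(struct sdhci_foo *priv);		/* hypothetical */
static void sdhci_foo_set_clk(struct sdhci_foo *priv, unsigned int hz);

static void sdhci_foo_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct sdhci_foo *priv = mmc_priv(mmc);

	if (ios->clock == 0)
		sdhci_foo_gate_clk(priv);		/* core gated the block clock */
	else
		sdhci_foo_set_clk(priv, ios->clock);	/* ungate at the old frequency */
}
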
+/*
+ * Internal work. Work to disable the clock at some later point.
+ */
+static void mmc_host_clk_gate_work(struct work_struct *work)
+{
+ struct mmc_host *host = container_of(work, struct mmc_host,
+ clk_gate_work.work);
+
+ mmc_host_clk_gate_delayed(host);
+}
+
+/**
+ * mmc_host_clk_hold - ungate hardware MCI clocks
+ * @host: host to ungate.
+ *
+ * Makes sure the host ios.clock is restored to a non-zero value
+ * past this call. Increase clock reference count and ungate clock
+ * if we're the first user.
+ */
+void mmc_host_clk_hold(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ /* cancel any clock gating work scheduled by mmc_host_clk_release() */
+ cancel_delayed_work_sync(&host->clk_gate_work);
+ mutex_lock(&host->clk_gate_mutex);
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated) {
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mmc_ungate_clock(host);
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ pr_debug("%s: ungated MCI clock\n", mmc_hostname(host));
+ }
+ host->clk_requests++;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ mutex_unlock(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_may_gate_card - check if this card may be gated
+ * @card: card to check.
+ */
+bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+ /* If there is no card we may gate it */
+ if (!card)
+ return true;
+
+ /*
+ * An SDIO 3.0 card allows the clock to be gated off, so check
+ * whether that is the case.
+ */
+ if (mmc_card_sdio(card) && card->cccr.async_intr_sup)
+ return true;
+
+ /*
+ * Don't gate SDIO cards! These need to be clocked at all times
+ * since they may be independent systems generating interrupts
+ * and other events. The clock requests counter from the core will
+ * go down to zero since the core does not need it, but we will not
+ * gate the clock, because there is somebody out there that may still
+ * be using it.
+ */
+ return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING);
+}
+
+/**
+ * mmc_host_clk_release - gate off hardware MCI clocks
+ * @host: host to gate.
+ *
+ * Calls the host driver with ios.clock set to zero as often as possible
+ * in order to gate off hardware MCI clocks. Decrease clock reference
+ * count and schedule disabling of clock.
+ */
+void mmc_host_clk_release(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clk_requests--;
+ if (mmc_host_may_gate_card(host->card) &&
+ !host->clk_requests)
+ schedule_delayed_work(&host->clk_gate_work,
+ msecs_to_jiffies(host->clkgate_delay));
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+}
+
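A short usage sketch of the hold/release pairing; the request-path function shown is an assumption, while the mmc_* calls are real core APIs:

/* Sketch: keep the MCI clock ungated for the duration of one request. */
static void foo_issue_request(struct mmc_host *host, struct mmc_request *mrq)
{
	mmc_host_clk_hold(host);	/* ungates; clk_requests++ */
	mmc_wait_for_req(host, mrq);
	mmc_host_clk_release(host);	/* clk_requests--; gates after clkgate_delay ms */
}
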
+/**
+ * mmc_host_clk_rate - get current clock frequency setting
+ * @host: host to get the clock frequency for.
+ *
+ * Returns current clock frequency regardless of gating.
+ */
+unsigned int mmc_host_clk_rate(struct mmc_host *host)
+{
+ unsigned long freq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ if (host->clk_gated)
+ freq = host->clk_old;
+ else
+ freq = host->ios.clock;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return freq;
+}
+
+/**
+ * mmc_host_clk_init - set up clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+ host->clk_requests = 0;
+ /* Hold MCI clock for 8 cycles by default */
+ host->clk_delay = 8;
+ /*
+ * The default clock gating delay is 0 ms to avoid wasting power.
+ * This value can be tuned by writing to the clkgate_delay sysfs
+ * entry.
+ */
+ host->clkgate_delay = 0;
+ host->clk_gated = false;
+ INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+ spin_lock_init(&host->clk_lock);
+ mutex_init(&host->clk_gate_mutex);
+}
+
+/**
+ * mmc_host_clk_exit - shut down clock gating code
+ * @host: host with potential clock to control
+ */
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+ /*
+ * Wait for any outstanding gate and then make sure we're
+ * ungated before exiting.
+ */
+ if (cancel_delayed_work_sync(&host->clk_gate_work))
+ mmc_host_clk_gate_delayed(host);
+ if (host->clk_gated)
+ mmc_host_clk_hold(host);
+ /* There should be only one user now */
+ WARN_ON(host->clk_requests > 1);
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+ host->clkgate_delay_attr.show = clkgate_delay_show;
+ host->clkgate_delay_attr.store = clkgate_delay_store;
+ sysfs_attr_init(&host->clkgate_delay_attr.attr);
+ host->clkgate_delay_attr.attr.name = "clkgate_delay";
+ host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+ pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+ mmc_hostname(host));
+}
+#else
+
+static inline void mmc_host_clk_init(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_exit(struct mmc_host *host)
+{
+}
+
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
+bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+ return false;
+}
+#endif
+
void mmc_retune_enable(struct mmc_host *host)
{
host->can_retune = 1;
@@ -66,6 +327,7 @@ void mmc_retune_enable(struct mmc_host *host)
mod_timer(&host->retune_timer,
jiffies + host->retune_period * HZ);
}
+EXPORT_SYMBOL(mmc_retune_enable);
void mmc_retune_disable(struct mmc_host *host)
{
@@ -74,6 +336,7 @@ void mmc_retune_disable(struct mmc_host *host)
host->retune_now = 0;
host->need_retune = 0;
}
+EXPORT_SYMBOL(mmc_retune_disable);
void mmc_retune_timer_stop(struct mmc_host *host)
{
@@ -343,6 +606,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
return NULL;
}
+ mmc_host_clk_init(host);
+
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
@@ -367,6 +632,217 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
EXPORT_SYMBOL(mmc_alloc_host);
+static ssize_t show_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", mmc_can_scale_clk(host));
+}
+
+static ssize_t store_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long value;
+
+ if (!host || !host->card || kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ mmc_get_card(host->card);
+
+ if (!value) {
+ /* Suspend the clock scaling and mask host capability */
+ if (host->clk_scaling.enable)
+ mmc_suspend_clk_scaling(host);
+ host->caps2 &= ~MMC_CAP2_CLK_SCALE;
+ host->clk_scaling.state = MMC_LOAD_HIGH;
+ /* Set to max. frequency when disabling */
+ mmc_clk_update_freq(host, host->card->clk_scaling_highest,
+ host->clk_scaling.state);
+ } else if (value) {
+ /* Unmask host capability and resume scaling */
+ host->caps2 |= MMC_CAP2_CLK_SCALE;
+ if (!host->clk_scaling.enable)
+ mmc_resume_clk_scaling(host);
+ }
+
+ mmc_put_card(host->card);
+
+ return count;
+}
+
+static ssize_t show_up_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", host->clk_scaling.upthreshold);
+}
+
+#define MAX_PERCENTAGE 100
+static ssize_t store_up_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long value;
+
+ if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
+ return -EINVAL;
+
+ host->clk_scaling.upthreshold = value;
+
+ pr_debug("%s: clkscale_up_thresh set to %lu\n",
+ mmc_hostname(host), value);
+ return count;
+}
+
+static ssize_t show_down_threshold(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ host->clk_scaling.downthreshold);
+}
+
+static ssize_t store_down_threshold(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long value;
+
+ if (!host || kstrtoul(buf, 0, &value) || (value > MAX_PERCENTAGE))
+ return -EINVAL;
+
+ host->clk_scaling.downthreshold = value;
+
+ pr_debug("%s: clkscale_down_thresh set to %lu\n",
+ mmc_hostname(host), value);
+ return count;
+}
+
+static ssize_t show_polling(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+ if (!host)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%lu milliseconds\n",
+ host->clk_scaling.polling_delay_ms);
+}
+
+static ssize_t store_polling(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long value;
+
+ if (!host || kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ host->clk_scaling.polling_delay_ms = value;
+
+ pr_debug("%s: clkscale_polling_delay_ms set to %lu\n",
+ mmc_hostname(host), value);
+ return count;
+}
+
+DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
+ show_enable, store_enable);
+DEVICE_ATTR(polling_interval, S_IRUGO | S_IWUSR,
+ show_polling, store_polling);
+DEVICE_ATTR(up_threshold, S_IRUGO | S_IWUSR,
+ show_up_threshold, store_up_threshold);
+DEVICE_ATTR(down_threshold, S_IRUGO | S_IWUSR,
+ show_down_threshold, store_down_threshold);
+
+static struct attribute *clk_scaling_attrs[] = {
+ &dev_attr_enable.attr,
+ &dev_attr_up_threshold.attr,
+ &dev_attr_down_threshold.attr,
+ &dev_attr_polling_interval.attr,
+ NULL,
+};
+
+static struct attribute_group clk_scaling_attr_grp = {
+ .name = "clk_scaling",
+ .attrs = clk_scaling_attrs,
+};
+
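A hedged userspace sketch of tuning the attribute group above; the class-device path is an assumption derived from the "clk_scaling" group name:

/* Illustrative only: raise the scale-up threshold (percent load). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/mmc_host/mmc0/clk_scaling/up_threshold", "w");

	if (!f)
		return 1;
	fprintf(f, "40\n");	/* store_up_threshold() rejects values > 100 */
	fclose(f);
	return 0;
}
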
+#ifdef CONFIG_MMC_PERF_PROFILING
+static ssize_t
+show_perf(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ int64_t rtime_drv, wtime_drv;
+ unsigned long rbytes_drv, wbytes_drv, flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ rbytes_drv = host->perf.rbytes_drv;
+ wbytes_drv = host->perf.wbytes_drv;
+
+ rtime_drv = ktime_to_us(host->perf.rtime_drv);
+ wtime_drv = ktime_to_us(host->perf.wtime_drv);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return snprintf(buf, PAGE_SIZE, "Write performance at driver level: "
+ "%lu bytes in %lld microseconds\n"
+ "Read performance at driver level: "
+ "%lu bytes in %lld microseconds\n",
+ wbytes_drv, wtime_drv,
+ rbytes_drv, rtime_drv);
+}
+
+static ssize_t
+set_perf(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ int64_t value;
+ unsigned long flags;
+
+ sscanf(buf, "%lld", &value);
+ spin_lock_irqsave(&host->lock, flags);
+ if (!value) {
+ memset(&host->perf, 0, sizeof(host->perf));
+ host->perf_enable = false;
+ } else {
+ host->perf_enable = true;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return count;
+}
+
+static DEVICE_ATTR(perf, S_IRUGO | S_IWUSR,
+ show_perf, set_perf);
+
+#endif
+
+static struct attribute *dev_attrs[] = {
+#ifdef CONFIG_MMC_PERF_PROFILING
+ &dev_attr_perf.attr,
+#endif
+ NULL,
+};
+static struct attribute_group dev_attr_grp = {
+ .attrs = dev_attrs,
+};
+
/**
* mmc_add_host - initialise host hardware
* @host: mmc host
@@ -388,9 +864,26 @@ int mmc_add_host(struct mmc_host *host)
led_trigger_register_simple(dev_name(&host->class_dev), &host->led);
+ host->clk_scaling.upthreshold = MMC_DEVFRQ_DEFAULT_UP_THRESHOLD;
+ host->clk_scaling.downthreshold = MMC_DEVFRQ_DEFAULT_DOWN_THRESHOLD;
+ host->clk_scaling.polling_delay_ms = MMC_DEVFRQ_DEFAULT_POLLING_MSEC;
+ host->clk_scaling.skip_clk_scale_freq_update = false;
+
#ifdef CONFIG_DEBUG_FS
mmc_add_host_debugfs(host);
#endif
+ mmc_host_clk_sysfs_init(host);
+ mmc_trace_init(host);
+
+ err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
+ if (err)
+ pr_err("%s: failed to create clk scale sysfs group with err %d\n",
+ __func__, err);
+
+ err = sysfs_create_group(&host->class_dev.kobj, &dev_attr_grp);
+ if (err)
+ pr_err("%s: failed to create sysfs group with err %d\n",
+ __func__, err);
#ifdef CONFIG_BLOCK
mmc_latency_hist_sysfs_init(host);
@@ -423,6 +916,8 @@ void mmc_remove_host(struct mmc_host *host)
#ifdef CONFIG_DEBUG_FS
mmc_remove_host_debugfs(host);
#endif
+ sysfs_remove_group(&host->parent->kobj, &dev_attr_grp);
+ sysfs_remove_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
#ifdef CONFIG_BLOCK
mmc_latency_hist_sysfs_exit(host);
@@ -431,6 +926,8 @@ void mmc_remove_host(struct mmc_host *host)
device_del(&host->class_dev);
led_trigger_unregister_simple(host->led);
+
+ mmc_host_clk_exit(host);
}
EXPORT_SYMBOL(mmc_remove_host);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 83b9bf880834..a28d6b98a042 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -19,6 +19,8 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
+#include <linux/reboot.h>
+#include <trace/events/mmc.h>
#include "core.h"
#include "host.h"
@@ -59,6 +61,7 @@ static const unsigned int tacc_mant[] = {
__res & __mask; \
})
+static int mmc_switch_status(struct mmc_card *card, bool ignore_crc);
/*
* Given the decoded CSD structure, decode the raw CID to our CID structure.
*/
@@ -124,6 +127,19 @@ static void mmc_set_erase_size(struct mmc_card *card)
mmc_init_erase(card);
}
+static const struct mmc_fixup mmc_fixups[] = {
+
+ /* avoid HPI for specific cards */
+ MMC_FIXUP_EXT_CSD_REV("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
+ add_quirk, MMC_QUIRK_BROKEN_HPI, MMC_V4_41),
+
+ /* Disable cache for specific cards */
+ MMC_FIXUP("MMC16G", CID_MANFID_KINGSTON, CID_OEMID_ANY,
+ add_quirk_mmc, MMC_QUIRK_CACHE_DISABLE),
+
+ END_FIXUP
+};
+
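For reference, a hypothetical sketch of extending the fixup-table pattern above; the "EXMPL0" name and the quirk pairing are placeholders, not entries from this patch:

static const struct mmc_fixup example_fixups[] = {
	/* placeholder entry: disable cache on a made-up card */
	MMC_FIXUP("EXMPL0", CID_MANFID_SANDISK, CID_OEMID_ANY,
		  add_quirk_mmc, MMC_QUIRK_CACHE_DISABLE),

	END_FIXUP
};
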
/*
* Given a 128-bit response, decode to our card CSD structure.
*/
@@ -494,22 +510,41 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
}
+ /* check whether the eMMC card supports HPI */
+ if ((ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) &&
+ !(card->quirks & MMC_QUIRK_BROKEN_HPI)) {
+ card->ext_csd.hpi = 1;
+ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
+ card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
+ else
+ card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
+ /*
+ * Indicate the maximum timeout to close
+ * a command interrupted by HPI
+ */
+ card->ext_csd.out_of_int_time =
+ ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
+ pr_info("%s: Out-of-interrupt timeout is %d[ms]\n",
+ mmc_hostname(card->host),
+ card->ext_csd.out_of_int_time);
+ }
+
if (card->ext_csd.rev >= 5) {
/* Adjust production date as per JEDEC JESD84-B451 */
if (card->cid.year < 2010)
card->cid.year += 16;
/* check whether the eMMC card supports BKOPS */
- if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
+ if ((ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) &&
+ card->ext_csd.hpi) {
card->ext_csd.bkops = 1;
- card->ext_csd.man_bkops_en =
- (ext_csd[EXT_CSD_BKOPS_EN] &
- EXT_CSD_MANUAL_BKOPS_MASK);
+ card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
card->ext_csd.raw_bkops_status =
ext_csd[EXT_CSD_BKOPS_STATUS];
- if (!card->ext_csd.man_bkops_en)
- pr_info("%s: MAN_BKOPS_EN bit is not set\n",
- mmc_hostname(card->host));
+ if (!card->ext_csd.bkops_en)
+ pr_info("%s: BKOPS_EN equals 0x%x\n",
+ mmc_hostname(card->host),
+ card->ext_csd.bkops_en);
}
/* check whether the eMMC card supports HPI */
@@ -531,6 +566,19 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
/*
+ * Some eMMC vendors violate the eMMC 5.0 spec and set the
+ * REL_WR_SEC_C register to 0x10 to indicate improved RPMB
+ * throughput, which leads to failures when the TZ module
+ * writes data to the RPMB partition. So check bit[4] of
+ * EXT_CSD[166], and if it is not set, change REL_WR_SEC_C
+ * to 0x1 directly, ignoring the value of EXT_CSD[222].
+ */
+ if (!(card->ext_csd.rel_param &
+ EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR))
+ card->ext_csd.rel_sectors = 0x1;
+
+ /*
* RPMB regions are defined in multiples of 128K.
*/
card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
@@ -585,6 +633,46 @@ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.data_sector_size = 512;
}
+ if (card->ext_csd.rev >= 7) {
+ /* Enhanced Strobe is supported since v5.1, whose EXT_CSD rev
+ * should be 8, but some eMMC devices report rev 7 while still
+ * supporting it. So handle Enhanced Strobe here.
+ */
+ card->ext_csd.strobe_support = ext_csd[EXT_CSD_STROBE_SUPPORT];
+ card->ext_csd.cmdq_support = ext_csd[EXT_CSD_CMDQ_SUPPORT];
+ card->ext_csd.fw_version = ext_csd[EXT_CSD_FIRMWARE_VERSION];
+ pr_info("%s: eMMC FW version: 0x%02x\n",
+ mmc_hostname(card->host),
+ card->ext_csd.fw_version);
+ if (card->ext_csd.cmdq_support) {
+ /*
+ * Queue Depth = N + 1,
+ * see JEDEC JESD84-B51 section 7.4.19
+ */
+ card->ext_csd.cmdq_depth =
+ ext_csd[EXT_CSD_CMDQ_DEPTH] + 1;
+ pr_info("%s: CMDQ supported: depth: %d\n",
+ mmc_hostname(card->host),
+ card->ext_csd.cmdq_depth);
+ }
+ card->ext_csd.barrier_support =
+ ext_csd[EXT_CSD_BARRIER_SUPPORT];
+ card->ext_csd.cache_flush_policy =
+ ext_csd[EXT_CSD_CACHE_FLUSH_POLICY];
+ pr_info("%s: cache barrier support %d flush policy %d\n",
+ mmc_hostname(card->host),
+ card->ext_csd.barrier_support,
+ card->ext_csd.cache_flush_policy);
+ card->ext_csd.enhanced_rpmb_supported =
+ (card->ext_csd.rel_param &
+ EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
+ } else {
+ card->ext_csd.cmdq_support = 0;
+ card->ext_csd.cmdq_depth = 0;
+ card->ext_csd.barrier_support = 0;
+ card->ext_csd.cache_flush_policy = 0;
+ }
+
/* eMMC v5 or later */
if (card->ext_csd.rev >= 7) {
memcpy(card->ext_csd.fwrev, &ext_csd[EXT_CSD_FIRMWARE_VERSION],
@@ -605,6 +693,7 @@ out:
static int mmc_read_ext_csd(struct mmc_card *card)
{
+ struct mmc_host *host = card->host;
u8 *ext_csd;
int err;
@@ -613,6 +702,9 @@ static int mmc_read_ext_csd(struct mmc_card *card)
err = mmc_get_ext_csd(card, &ext_csd);
if (err) {
+ pr_err("%s: %s: mmc_get_ext_csd() fails %d\n",
+ mmc_hostname(host), __func__, err);
+
/* If the host or the card can't do the switch,
* fail more gracefully. */
if ((err != -EINVAL)
@@ -737,6 +829,8 @@ MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
+MMC_DEV_ATTR(enhanced_rpmb_supported, "%#x\n",
+ card->ext_csd.enhanced_rpmb_supported);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);
static ssize_t mmc_fwrev_show(struct device *dev,
@@ -775,6 +869,7 @@ static struct attribute *mmc_std_attrs[] = {
&dev_attr_enhanced_area_offset.attr,
&dev_attr_enhanced_area_size.attr,
&dev_attr_raw_rpmb_size_mult.attr,
+ &dev_attr_enhanced_rpmb_supported.attr,
&dev_attr_rel_sectors.attr,
NULL,
};
@@ -909,11 +1004,11 @@ static void mmc_set_bus_speed(struct mmc_card *card)
*/
static int mmc_select_bus_width(struct mmc_card *card)
{
- static unsigned ext_csd_bits[] = {
+ static const unsigned ext_csd_bits[] = {
EXT_CSD_BUS_WIDTH_8,
EXT_CSD_BUS_WIDTH_4,
};
- static unsigned bus_widths[] = {
+ static const unsigned bus_widths[] = {
MMC_BUS_WIDTH_8,
MMC_BUS_WIDTH_4,
};
@@ -983,9 +1078,11 @@ static int mmc_select_hs(struct mmc_card *card)
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
card->ext_csd.generic_cmd6_time,
- true, true, true);
- if (!err)
+ true, false, true);
+ if (!err) {
mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ err = mmc_switch_status(card, false);
+ }
return err;
}
@@ -1009,10 +1106,11 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits,
- card->ext_csd.generic_cmd6_time);
+ card->ext_csd.generic_cmd6_time,
+ true, false, false);
if (err) {
pr_err("%s: switch to bus width %d ddr failed\n",
mmc_hostname(host), 1 << bus_width);
@@ -1055,19 +1153,21 @@ static int mmc_select_hs_ddr(struct mmc_card *card)
if (err)
err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);
- if (!err)
+ if (!err) {
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+ err = mmc_switch_status(card, false);
+ }
return err;
}
/* Caller must hold re-tuning */
-static int mmc_switch_status(struct mmc_card *card)
+static int mmc_switch_status(struct mmc_card *card, bool ignore_crc)
{
u32 status;
int err;
- err = mmc_send_status(card, &status);
+ err = __mmc_send_status(card, &status, ignore_crc);
if (err)
return err;
@@ -1085,17 +1185,32 @@ static int mmc_select_hs400(struct mmc_card *card)
/*
* HS400 mode requires 8-bit bus width
*/
- if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
- host->ios.bus_width == MMC_BUS_WIDTH_8))
- return 0;
+ if (card->ext_csd.strobe_support) {
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->caps & MMC_CAP_8_BIT_DATA))
+ return 0;
+
+ /* For the Enhanced Strobe flow. For non Enhanced Strobe, the
+ * signal voltage will not be set.
+ */
+ if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
+ err = __mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_120);
+
+ if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
+ err = __mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_180);
+ if (err)
+ return err;
+ } else {
+ if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ host->ios.bus_width == MMC_BUS_WIDTH_8))
+ return 0;
+ }
if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
send_status = false;
- /* Reduce frequency to HS frequency */
- max_dtr = card->ext_csd.hs_max_dtr;
- mmc_set_clock(host, max_dtr);
-
/* Switch card to HS mode */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1111,16 +1226,28 @@ static int mmc_select_hs400(struct mmc_card *card)
/* Set host controller to HS timing */
mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ /* Reduce frequency to HS frequency */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+
if (!send_status) {
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
}
+ val = EXT_CSD_DDR_BUS_WIDTH_8;
+ if (card->ext_csd.strobe_support) {
+ err = mmc_select_bus_width(card);
+ if (IS_ERR_VALUE(err))
+ return err;
+ val |= EXT_CSD_BUS_WIDTH_STROBE;
+ }
+
/* Switch card to DDR */
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
- EXT_CSD_DDR_BUS_WIDTH_8,
+ val,
card->ext_csd.generic_cmd6_time);
if (err) {
pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
@@ -1145,8 +1272,29 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
+ if (card->ext_csd.strobe_support && host->ops->enhanced_strobe) {
+ mmc_host_clk_hold(host);
+ err = host->ops->enhanced_strobe(host);
+ mmc_host_clk_release(host);
+ } else if ((host->caps2 & MMC_CAP2_HS400_POST_TUNING) &&
+ host->ops->execute_tuning) {
+ mmc_host_clk_hold(host);
+ err = host->ops->execute_tuning(host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ mmc_host_clk_release(host);
+
+ if (err)
+ pr_warn("%s: tuning execution failed\n",
+ mmc_hostname(host));
+ }
+
+ /*
+ * CMD13 should be sent only after the host calibration for
+ * enhanced_strobe or HS400 mode has completed; otherwise we
+ * may see CMD13 timeouts or CRC errors.
+ */
if (!send_status) {
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
}
@@ -1175,10 +1323,6 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
send_status = false;
- /* Reduce frequency to HS */
- max_dtr = card->ext_csd.hs_max_dtr;
- mmc_set_clock(host, max_dtr);
-
/* Switch HS400 to HS DDR */
val = EXT_CSD_TIMING_HS;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
@@ -1189,8 +1333,12 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_DDR52);
+ /* Reduce frequency to HS */
+ max_dtr = card->ext_csd.hs_max_dtr;
+ mmc_set_clock(host, max_dtr);
+
if (!send_status) {
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
}
@@ -1205,7 +1353,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS);
if (!send_status) {
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
}
@@ -1222,7 +1370,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
if (!send_status) {
- err = mmc_switch_status(card);
+ err = mmc_switch_status(card, false);
if (err)
goto out_err;
}
@@ -1301,7 +1449,12 @@ static int mmc_select_hs200(struct mmc_card *card)
old_timing = host->ios.timing;
mmc_set_timing(host, MMC_TIMING_MMC_HS200);
if (!send_status) {
- err = mmc_switch_status(card);
+ /*
+ * After switching to HS200, CRC errors might occur
+ * for commands sent before tuning, so ignore CRC
+ * errors for CMD13.
+ */
+ err = mmc_switch_status(card, true);
/*
* mmc_select_timing() assumes timing has not changed if
* it is a switch error.
@@ -1317,6 +1470,17 @@ err:
return err;
}
+static int mmc_reboot_notify(struct notifier_block *notify_block,
+ unsigned long event, void *unused)
+{
+ struct mmc_card *card = container_of(
+ notify_block, struct mmc_card, reboot_notify);
+
+ card->pon_type = (event != SYS_RESTART) ? MMC_LONG_PON : MMC_SHRT_PON;
+
+ return NOTIFY_OK;
+}
+
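The matching unregister_reboot_notifier() appears in mmc_remove() below, and notifier_call is wired up in mmc_init_card(); the registration site itself is not in this hunk, so the following is only an assumed sketch:

/* Sketch (assumed): register the PON-type notifier once per card. */
static void mmc_register_pon_notifier(struct mmc_card *card)
{
	register_reboot_notifier(&card->reboot_notify);
}
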
/*
* Activate High Speed or HS200 mode if supported.
*/
@@ -1327,7 +1491,12 @@ static int mmc_select_timing(struct mmc_card *card)
if (!mmc_can_ext_csd(card))
goto bus_speed;
- if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
+ /* For the Enhanced Strobe HS400 flow */
+ if (card->ext_csd.strobe_support &&
+ card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+ card->host->caps & MMC_CAP_8_BIT_DATA)
+ err = mmc_select_hs400(card);
+ else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
err = mmc_select_hs200(card);
else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
err = mmc_select_hs(card);
@@ -1366,12 +1535,242 @@ static int mmc_hs200_tuning(struct mmc_card *card)
*/
if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
host->ios.bus_width == MMC_BUS_WIDTH_8)
- if (host->ops->prepare_hs400_tuning)
- host->ops->prepare_hs400_tuning(host, &host->ios);
+ mmc_set_timing(host, MMC_TIMING_MMC_HS400);
return mmc_execute_tuning(card);
}
+static int mmc_select_cmdq(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int ret = 0;
+
+ if (!host->cmdq_ops) {
+ pr_err("%s: host controller doesn't support CMDQ\n",
+ mmc_hostname(host));
+ return 0;
+ }
+
+ ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE);
+ if (ret)
+ goto out;
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (ret)
+ goto out;
+
+ mmc_card_set_cmdq(card);
+ mmc_host_clk_hold(card->host);
+ ret = host->cmdq_ops->enable(card->host);
+ if (ret) {
+ mmc_host_clk_release(card->host);
+ pr_err("%s: failed (%d) enabling CMDQ on host\n",
+ mmc_hostname(host), ret);
+ mmc_card_clr_cmdq(card);
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CMDQ, 0,
+ card->ext_csd.generic_cmd6_time);
+ goto out;
+ }
+
+ mmc_host_clk_release(card->host);
+ pr_info_once("%s: CMDQ enabled on card\n", mmc_hostname(host));
+out:
+ return ret;
+}
+
+static int mmc_select_hs_ddr52(struct mmc_host *host)
+{
+ int err;
+
+ mmc_select_hs(host->card);
+ err = mmc_select_bus_width(host->card);
+ if (err < 0) {
+ pr_err("%s: %s: select_bus_width failed(%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ err = mmc_select_hs_ddr(host->card);
+ mmc_set_clock(host, MMC_HIGH_52_MAX_DTR);
+
+ return err;
+}
+
+/*
+ * Scale down from HS400 to HS in order to allow a frequency change.
+ * This is needed for cards that don't support changing the frequency
+ * while in HS400.
+ */
+static int mmc_scale_low(struct mmc_host *host, unsigned long freq)
+{
+ int err = 0;
+
+ mmc_set_timing(host, MMC_TIMING_LEGACY);
+ mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
+
+ if (host->clk_scaling.lower_bus_speed_mode &
+ MMC_SCALING_LOWER_DDR52_MODE) {
+ err = mmc_select_hs_ddr52(host);
+ if (err)
+ pr_err("%s: %s: failed to switch to DDR52: err: %d\n",
+ mmc_hostname(host), __func__, err);
+ else
+ return err;
+ }
+
+ err = mmc_select_hs(host->card);
+ if (err) {
+ pr_err("%s: %s: scaling low: failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ err = mmc_select_bus_width(host->card);
+ if (err < 0) {
+ pr_err("%s: %s: select_bus_width failed(%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ mmc_set_clock(host, freq);
+
+ return 0;
+}
+
+/*
+ * Scale up from HS to HS200/HS400.
+ */
+static int mmc_scale_high(struct mmc_host *host)
+{
+ int err = 0;
+
+ if (mmc_card_ddr52(host->card)) {
+ mmc_set_timing(host, MMC_TIMING_LEGACY);
+ mmc_set_clock(host, MMC_HIGH_26_MAX_DTR);
+ }
+
+ if (!host->card->ext_csd.strobe_support) {
+ if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)) {
+ pr_err("%s: %s: card does not support HS200\n",
+ mmc_hostname(host), __func__);
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ err = mmc_select_hs200(host->card);
+ if (err) {
+ pr_err("%s: %s: selecting HS200 failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ mmc_set_bus_speed(host->card);
+
+ err = mmc_hs200_tuning(host->card);
+ if (err) {
+ pr_err("%s: %s: hs200 tuning failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ if (!(host->card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400)) {
+ pr_debug("%s: card does not support HS400\n",
+ mmc_hostname(host));
+ return 0;
+ }
+ }
+
+ err = mmc_select_hs400(host->card);
+ if (err) {
+ pr_err("%s: %s: select hs400 failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ return err;
+}
+
+static int mmc_set_clock_bus_speed(struct mmc_card *card, unsigned long freq)
+{
+ int err = 0;
+
+ if (freq == MMC_HS200_MAX_DTR)
+ err = mmc_scale_high(card->host);
+ else
+ err = mmc_scale_low(card->host, freq);
+
+ return err;
+}
+
+static inline unsigned long mmc_ddr_freq_accommodation(unsigned long freq)
+{
+ if (freq == MMC_HIGH_DDR_MAX_DTR)
+ return freq;
+
+ return freq/2;
+}
+
+/**
+ * mmc_change_bus_speed() - Change MMC card bus frequency at runtime
+ * @host: pointer to mmc host structure
+ * @freq: pointer to desired frequency to be set
+ *
+ * Change the MMC card bus frequency at runtime after the card is
+ * initialized. Callers are expected to ensure the card is in a
+ * suitable state (DATA/RCV/TRANSFER) before changing the frequency
+ * at runtime.
+ *
+ * If the requested frequency is greater than the maximum supported
+ * by the card, *freq is changed to that maximum. If it is less than
+ * the minimum supported by the host, *freq is changed to that minimum.
+ * The host is assumed to be claimed while calling this function.
+ */
+static int mmc_change_bus_speed(struct mmc_host *host, unsigned long *freq)
+{
+ int err = 0;
+ struct mmc_card *card;
+ unsigned long actual_freq;
+
+ card = host->card;
+
+ if (!card || !freq) {
+ err = -EINVAL;
+ goto out;
+ }
+ actual_freq = *freq;
+
+ WARN_ON(!host->claimed);
+
+ /*
+ * Scaling HS400 up or down needs special handling; for other
+ * timings we can simply change the clock frequency.
+ */
+ if (mmc_card_hs400(card) ||
+ (!mmc_card_hs200(host->card) && *freq == MMC_HS200_MAX_DTR)) {
+ err = mmc_set_clock_bus_speed(card, *freq);
+ if (err) {
+ pr_err("%s: %s: failed (%d)to set bus and clock speed (freq=%lu)\n",
+ mmc_hostname(host), __func__, err, *freq);
+ goto out;
+ }
+ } else if (mmc_card_hs200(host->card)) {
+ mmc_set_clock(host, *freq);
+ err = mmc_hs200_tuning(host->card);
+ if (err) {
+ pr_warn("%s: %s: tuning execution failed %d\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ mmc_set_clock(host, host->clk_scaling.curr_freq);
+ }
+ } else {
+ if (mmc_card_ddr52(host->card))
+ actual_freq = mmc_ddr_freq_accommodation(*freq);
+ mmc_set_clock(host, actual_freq);
+ }
+
+out:
+ return err;
+}
+
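A minimal sketch of a caller honoring the claimed-host requirement above; the governor function name is hypothetical, while mmc_claim_host()/mmc_release_host() are core APIs:

static int foo_scale_to(struct mmc_host *host, unsigned long freq)
{
	int err;

	mmc_claim_host(host);
	err = mmc_change_bus_speed(host, &freq);	/* may clamp freq */
	mmc_release_host(host);

	return err;
}
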
/*
* Handle the detection and initialisation of a card.
*
@@ -1400,20 +1799,27 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* respond.
* mmc_go_idle is needed for eMMC that are asleep
*/
+reinit:
mmc_go_idle(host);
/* The extra bit indicates that we support high capacity */
err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_send_op_cond() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
+ }
/*
* For SPI, enable CRC as appropriate.
*/
if (mmc_host_is_spi(host)) {
err = mmc_spi_set_crc(host, use_spi_crc);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_spi_set_crc() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
+ }
}
/*
@@ -1423,12 +1829,17 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_send_cid(host, cid);
else
err = mmc_all_send_cid(host, cid);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_send_cid() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
+ }
if (oldcard) {
if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
err = -ENOENT;
+ pr_err("%s: %s: CID memcmp failed %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
}
@@ -1440,6 +1851,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
card = mmc_alloc_card(host, &mmc_type);
if (IS_ERR(card)) {
err = PTR_ERR(card);
+ pr_err("%s: %s: no memory to allocate for card %d\n",
+ mmc_hostname(host), __func__, err);
goto err;
}
@@ -1447,6 +1860,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
card->type = MMC_TYPE_MMC;
card->rca = 1;
memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
+ host->card = card;
+ card->reboot_notify.notifier_call = mmc_reboot_notify;
}
/*
@@ -1460,8 +1875,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (!mmc_host_is_spi(host)) {
err = mmc_set_relative_addr(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_set_relative_addr() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
}
@@ -1471,15 +1889,24 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* Fetch CSD from card.
*/
err = mmc_send_csd(card, card->raw_csd);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_send_csd() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
err = mmc_decode_csd(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_decode_csd() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
err = mmc_decode_cid(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_decode_cid() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
}
/*
@@ -1494,15 +1921,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (!mmc_host_is_spi(host)) {
err = mmc_select_card(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_select_card() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
}
if (!oldcard) {
/* Read extended CSD. */
err = mmc_read_ext_csd(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_read_ext_csd() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
/* If doing byte addressing, check if required to do sector
* addressing. Handle the case of <2GB cards needing sector
@@ -1514,6 +1947,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
/* Erase size depends on CSD and Extended CSD */
mmc_set_erase_size(card);
+
+ if (card->ext_csd.sectors && (rocr & MMC_CARD_SECTOR_ADDR))
+ mmc_card_set_blockaddr(card);
}
/*
@@ -1526,8 +1962,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
EXT_CSD_ERASE_GROUP_DEF, 1,
card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for ERASE_GRP_DEF fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
if (err) {
err = 0;
@@ -1557,8 +1996,13 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
card->ext_csd.part_config,
card->ext_csd.part_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for PART_CONFIG fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
+ card->part_curr = card->ext_csd.part_config &
+ EXT_CSD_PART_CONFIG_ACC_MASK;
}
/*
@@ -1569,8 +2013,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for POWER_ON PON fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
/*
* The err can be -EBADMSG or 0,
@@ -1584,8 +2031,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* Select timing interface
*/
err = mmc_select_timing(card);
- if (err)
+ if (err) {
+ pr_err("%s: %s: mmc_select_timing() fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
if (mmc_card_hs200(card)) {
err = mmc_hs200_tuning(card);
@@ -1595,7 +2045,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_select_hs400(card);
if (err)
goto free_card;
- } else {
+ } else if (!mmc_card_hs400(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
if (!IS_ERR_VALUE(err) && mmc_card_hs(card)) {
@@ -1605,6 +2055,16 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
}
+ card->clk_scaling_lowest = host->f_min;
+ if ((card->mmc_avail_type | EXT_CSD_CARD_TYPE_HS400) ||
+ (card->mmc_avail_type | EXT_CSD_CARD_TYPE_HS200))
+ card->clk_scaling_highest = card->ext_csd.hs200_max_dtr;
+ else if ((card->mmc_avail_type | EXT_CSD_CARD_TYPE_HS) ||
+ (card->mmc_avail_type | EXT_CSD_CARD_TYPE_DDR_52))
+ card->clk_scaling_highest = card->ext_csd.hs_max_dtr;
+ else
+ card->clk_scaling_highest = card->csd.max_dtr;
+
/*
* Choose the power class with selected bus interface
*/
@@ -1617,8 +2077,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_HPI_MGMT, 1,
card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for HPI_MGMT fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
if (err) {
pr_warn("%s: Enabling HPI failed\n",
mmc_hostname(card->host));
@@ -1630,24 +2093,69 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
/*
* If cache size is higher than 0, this indicates
* the existence of cache and it can be turned on.
+ * If HPI is not supported then cache shouldn't be enabled.
*/
if (card->ext_csd.cache_size > 0) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, 1,
- card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
- goto free_card;
+ if (card->ext_csd.hpi_en &&
+ (!(card->quirks & MMC_QUIRK_CACHE_DISABLE))) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: fail on CACHE_CTRL ON %d\n",
+ mmc_hostname(host), __func__, err);
+ goto free_card;
+ }
- /*
- * Only if no error, cache is turned on successfully.
- */
- if (err) {
- pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
- mmc_hostname(card->host), err);
- card->ext_csd.cache_ctrl = 0;
- err = 0;
+ /*
+ * Only if no error, cache is turned on successfully.
+ */
+ if (err) {
+ pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cache_ctrl = 0;
+ err = 0;
+ } else {
+ card->ext_csd.cache_ctrl = 1;
+ }
+ /* enable cache barrier if supported by the device */
+ if (card->ext_csd.cache_ctrl &&
+ card->ext_csd.barrier_support) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BARRIER_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for BARRIER_CTRL fails %d\n",
+ mmc_hostname(host), __func__,
+ err);
+ goto free_card;
+ }
+ if (err) {
+ pr_warn("%s: Barrier is supported but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.barrier_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.barrier_en = 1;
+ }
+ }
} else {
- card->ext_csd.cache_ctrl = 1;
+ /*
+ * The MMC standard doesn't specify the card's default
+ * value for EXT_CSD_CACHE_CTRL, so the cache may be
+ * enabled by default by card vendors. Thus it is best
+ * to disable the cache explicitly when we want to
+ * avoid it.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, 0,
+ card->ext_csd.generic_cmd6_time);
+ if (err) {
+ pr_err("%s: %s: fail on CACHE_CTRL OFF %d\n",
+ mmc_hostname(host), __func__, err);
+ goto free_card;
+ }
}
}
@@ -1662,8 +2170,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
EXT_CSD_EXP_EVENTS_CTRL,
EXT_CSD_PACKED_EVENT_EN,
card->ext_csd.generic_cmd6_time);
- if (err && err != -EBADMSG)
+ if (err && err != -EBADMSG) {
+ pr_err("%s: %s: mmc_switch() for EXP_EVENTS_CTRL fails %d\n",
+ mmc_hostname(host), __func__, err);
goto free_card;
+ }
if (err) {
pr_warn("%s: Enabling packed event failed\n",
mmc_hostname(card->host));
@@ -1672,42 +2183,125 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
} else {
card->ext_csd.packed_event_en = 1;
}
+
}
- if (!oldcard)
- host->card = card;
+ if (!oldcard) {
+ if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
+ (card->ext_csd.max_packed_writes > 0)) {
+ /*
+ * Keep the statistics at an index that equals the number
+ * of packed requests (1 to max_packed_writes).
+ */
+ card->wr_pack_stats.packing_events = kzalloc(
+ (card->ext_csd.max_packed_writes + 1) *
+ sizeof(*card->wr_pack_stats.packing_events),
+ GFP_KERNEL);
+ if (!card->wr_pack_stats.packing_events) {
+ pr_err("%s: %s: no memory for packing events\n",
+ mmc_hostname(host), __func__);
+ goto free_card;
+ }
+ }
+ }
+
+ /*
+ * Start auto bkops, if supported.
+ *
+ * Note: This leaves the possibility of having both manual and
+ * auto bkops running in parallel. The runtime implementation
+ * will allow this, but ignores bkops exceptions on the premise
+ * that auto bkops will eventually kick in and the device will
+ * handle bkops without START_BKOPS from the host.
+ */
+ if (mmc_card_support_auto_bkops(card)) {
+ /*
+ * Ignore the return value of setting auto bkops.
+ * If it failed, will run in backward compatible mode.
+ */
+ (void)mmc_set_auto_bkops(card, true);
+ }
+
+ if (card->ext_csd.cmdq_support && (card->host->caps2 &
+ MMC_CAP2_CMD_QUEUE)) {
+ err = mmc_select_cmdq(card);
+ if (err) {
+ pr_err("%s: selecting CMDQ mode: failed: %d\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cmdq_support = 0;
+ oldcard = card;
+ goto reinit;
+ }
+ }
return 0;
free_card:
- if (!oldcard)
+ if (!oldcard) {
+ host->card = NULL;
mmc_remove_card(card);
+ }
err:
return err;
}
-static int mmc_can_sleep(struct mmc_card *card)
+static int mmc_can_sleepawake(struct mmc_host *host)
{
- return (card && card->ext_csd.rev >= 3);
+ return host && (host->caps2 & MMC_CAP2_SLEEP_AWAKE) && host->card &&
+ (host->card->ext_csd.rev >= 3);
}
-static int mmc_sleep(struct mmc_host *host)
+static int mmc_sleepawake(struct mmc_host *host, bool sleep)
{
struct mmc_command cmd = {0};
struct mmc_card *card = host->card;
- unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ unsigned int timeout_ms;
int err;
+ if (!card) {
+ pr_err("%s: %s: invalid card\n", mmc_hostname(host), __func__);
+ return -EINVAL;
+ }
+
+ timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
+ if (card->ext_csd.rev >= 3 &&
+ card->part_curr == EXT_CSD_PART_CONFIG_ACC_RPMB) {
+ u8 part_config = card->ext_csd.part_config;
+
+ /*
+ * If the last access before suspend is RPMB access, then
+ * switch to default part config so that sleep command CMD5
+ * and deselect CMD7 can be sent to the card.
+ */
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG,
+ part_config,
+ card->ext_csd.part_time);
+ if (err) {
+ pr_err("%s: %s: failed to switch to default part config %x\n",
+ mmc_hostname(host), __func__, part_config);
+ return err;
+ }
+ card->ext_csd.part_config = part_config;
+ card->part_curr = card->ext_csd.part_config &
+ EXT_CSD_PART_CONFIG_ACC_MASK;
+ }
+
/* Re-tuning can't be done once the card is deselected */
mmc_retune_hold(host);
- err = mmc_deselect_cards(host);
- if (err)
- goto out_release;
+ if (sleep) {
+ err = mmc_deselect_cards(host);
+ if (err)
+ goto out_release;
+ }
cmd.opcode = MMC_SLEEP_AWAKE;
cmd.arg = card->rca << 16;
- cmd.arg |= 1 << 15;
+ if (sleep)
+ cmd.arg |= 1 << 15;
/*
* If the max_busy_timeout of the host is specified, validate it against
@@ -1735,6 +2329,9 @@ static int mmc_sleep(struct mmc_host *host)
if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
mmc_delay(timeout_ms);
+ if (!sleep)
+ err = mmc_select_card(card);
+
out_release:
mmc_retune_release(host);
return err;
@@ -1769,6 +2366,27 @@ static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
return err;
}
+int mmc_send_pon(struct mmc_card *card)
+{
+ int err = 0;
+ struct mmc_host *host = card->host;
+
+ if (!mmc_can_poweroff_notify(card))
+ goto out;
+
+ mmc_get_card(card);
+ if (card->pon_type & MMC_LONG_PON)
+ err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_LONG);
+ else if (card->pon_type & MMC_SHRT_PON)
+ err = mmc_poweroff_notify(host->card, EXT_CSD_POWER_OFF_SHORT);
+ if (err)
+ pr_warn("%s: error %d sending PON type %u\n",
+ mmc_hostname(host), err, card->pon_type);
+ mmc_put_card(card);
+out:
+ return err;
+}
+
/*
* Host is being removed. Free up the current card.
*/
@@ -1777,8 +2395,14 @@ static void mmc_remove(struct mmc_host *host)
BUG_ON(!host);
BUG_ON(!host->card);
+ unregister_reboot_notifier(&host->card->reboot_notify);
+
+ mmc_exit_clk_scaling(host);
mmc_remove_card(host->card);
+
+ mmc_claim_host(host);
host->card = NULL;
+ mmc_release_host(host);
}
/*
@@ -1818,44 +2442,228 @@ static void mmc_detect(struct mmc_host *host)
}
}
+static int mmc_cache_card_ext_csd(struct mmc_host *host)
+{
+ int err;
+ u8 *ext_csd;
+ struct mmc_card *card = host->card;
+
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err || !ext_csd) {
+ pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /* only cache read/write fields that the sw changes */
+ card->ext_csd.raw_ext_csd_cmdq = ext_csd[EXT_CSD_CMDQ];
+ card->ext_csd.raw_ext_csd_cache_ctrl = ext_csd[EXT_CSD_CACHE_CTRL];
+ card->ext_csd.raw_ext_csd_bus_width = ext_csd[EXT_CSD_BUS_WIDTH];
+ card->ext_csd.raw_ext_csd_hs_timing = ext_csd[EXT_CSD_HS_TIMING];
+
+ kfree(ext_csd);
+
+ return 0;
+}
+
+static int mmc_test_awake_ext_csd(struct mmc_host *host)
+{
+ int err;
+ u8 *ext_csd;
+ struct mmc_card *card = host->card;
+
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err || !ext_csd) {
+ pr_err("%s: %s: mmc_get_ext_csd failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
+ /* only compare read/write fields that the sw changes */
+ pr_debug("%s: %s: type(cached:current) cmdq(%d:%d) cache_ctrl(%d:%d) bus_width (%d:%d) timing(%d:%d)\n",
+ mmc_hostname(host), __func__,
+ card->ext_csd.raw_ext_csd_cmdq,
+ ext_csd[EXT_CSD_CMDQ],
+ card->ext_csd.raw_ext_csd_cache_ctrl,
+ ext_csd[EXT_CSD_CACHE_CTRL],
+ card->ext_csd.raw_ext_csd_bus_width,
+ ext_csd[EXT_CSD_BUS_WIDTH],
+ card->ext_csd.raw_ext_csd_hs_timing,
+ ext_csd[EXT_CSD_HS_TIMING]);
+
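+ /*
+ * A non-zero err below means at least one cached field differs from
+ * the current EXT_CSD contents, i.e. the card lost power during sleep.
+ */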
+ err = !((card->ext_csd.raw_ext_csd_cmdq ==
+ ext_csd[EXT_CSD_CMDQ]) &&
+ (card->ext_csd.raw_ext_csd_cache_ctrl ==
+ ext_csd[EXT_CSD_CACHE_CTRL]) &&
+ (card->ext_csd.raw_ext_csd_bus_width ==
+ ext_csd[EXT_CSD_BUS_WIDTH]) &&
+ (card->ext_csd.raw_ext_csd_hs_timing ==
+ ext_csd[EXT_CSD_HS_TIMING]));
+
+ kfree(ext_csd);
+
+ return err;
+}
+
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
- int err = 0;
- unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
- EXT_CSD_POWER_OFF_LONG;
+ int err = 0, ret;
BUG_ON(!host);
BUG_ON(!host->card);
+ err = mmc_suspend_clk_scaling(host);
+ if (err) {
+ pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+ if (host->card->cmdq_init)
+ wake_up(&host->cmdq_ctx.wait);
+ return err;
+ }
+
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
goto out;
+ if (host->card->cmdq_init) {
+ BUG_ON(host->cmdq_ctx.active_reqs);
+
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: halt: failed: %d\n", __func__, err);
+ goto out;
+ }
+ mmc_host_clk_hold(host);
+ host->cmdq_ops->disable(host, true);
+ mmc_host_clk_release(host);
+ }
+
if (mmc_card_doing_bkops(host->card)) {
err = mmc_stop_bkops(host->card);
if (err)
- goto out;
+ goto out_err;
}
err = mmc_flush_cache(host->card);
if (err)
- goto out;
+ goto out_err;
- if (mmc_can_poweroff_notify(host->card) &&
- ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
- err = mmc_poweroff_notify(host->card, notify_type);
- else if (mmc_can_sleep(host->card))
- err = mmc_sleep(host);
- else if (!mmc_host_is_spi(host))
+ if (mmc_can_sleepawake(host)) {
+ /*
+ * For caching host->ios to cached_ios we need to
+ * make sure that clocks are not gated otherwise
+ * cached_ios->clock will be 0.
+ */
+ mmc_host_clk_hold(host);
+ memcpy(&host->cached_ios, &host->ios, sizeof(host->cached_ios));
+ mmc_cache_card_ext_csd(host);
+ err = mmc_sleepawake(host, true);
+ mmc_host_clk_release(host);
+ } else if (!mmc_host_is_spi(host)) {
err = mmc_deselect_cards(host);
+ }
- if (!err) {
- mmc_power_off(host);
- mmc_card_set_suspended(host->card);
+ if (err)
+ goto out_err;
+ mmc_power_off(host);
+ mmc_card_set_suspended(host->card);
+
+ goto out;
+
+out_err:
+ /*
+ * In case of error, put the controller back in CMDQ mode and unhalt
+ * it. We expect cmdq_enable and unhalt not to return an error, since
+ * they only enable a few registers.
+ */
+ if (host->card->cmdq_init) {
+ mmc_host_clk_hold(host);
+ ret = host->cmdq_ops->enable(host);
+ if (ret)
+ pr_err("%s: %s: enabling CMDQ mode failed (%d)\n",
+ mmc_hostname(host), __func__, ret);
+ mmc_host_clk_release(host);
+ mmc_cmdq_halt(host, false);
}
+
out:
+ /* Kick the CMDQ thread to process any requests that came in while suspending */
+ if (host->card->cmdq_init)
+ wake_up(&host->cmdq_ctx.wait);
+
mmc_release_host(host);
+ if (err)
+ mmc_resume_clk_scaling(host);
+ return err;
+}
+
+static int mmc_partial_init(struct mmc_host *host)
+{
+ int err = 0;
+ struct mmc_card *card = host->card;
+
+ pr_debug("%s: %s: starting partial init\n",
+ mmc_hostname(host), __func__);
+
+ mmc_set_bus_width(host, host->cached_ios.bus_width);
+ mmc_set_timing(host, host->cached_ios.timing);
+ mmc_set_clock(host, host->cached_ios.clock);
+ mmc_set_bus_mode(host, host->cached_ios.bus_mode);
+
+ mmc_host_clk_hold(host);
+
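+ /*
+ * Restore the sampling point after sleep: HS400 uses enhanced strobe
+ * when available, otherwise HS200-style tuning is re-executed.
+ */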
+ if (mmc_card_hs400(card)) {
+ if (card->ext_csd.strobe_support && host->ops->enhanced_strobe)
+ err = host->ops->enhanced_strobe(host);
+ else if (host->ops->execute_tuning)
+ err = host->ops->execute_tuning(host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ } else if (mmc_card_hs200(card) && host->ops->execute_tuning) {
+ err = host->ops->execute_tuning(host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ if (err)
+ pr_warn("%s: %s: tuning execution failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ }
+
+ /*
+ * The ext_csd is read to make sure the card did not go through a
+ * power failure during the sleep period.
+ * A subset of the W/E_P, W/C_P registers is tested. If these
+ * register values differ from the values that were cached during
+ * suspend, we conclude that a power failure occurred and do the
+ * full initialization sequence.
+ * In addition, the full init sequence also transfers the ext_csd
+ * before moving to CMDQ mode, which has a side effect of configuring
+ * SDHCI registers that need to be set up before moving to CMDQ mode.
+ * The same registers need to be configured for partial init.
+ */
+ err = mmc_test_awake_ext_csd(host);
+ if (err) {
+ pr_debug("%s: %s: fail on ext_csd read (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto out;
+ }
+ pr_debug("%s: %s: reading and comparing ext_csd successful\n",
+ mmc_hostname(host), __func__);
+
+ if (card->ext_csd.cmdq_support && (card->host->caps2 &
+ MMC_CAP2_CMD_QUEUE)) {
+ err = mmc_select_cmdq(card);
+ if (err) {
+ pr_warn("%s: %s: enabling CMDQ mode failed (%d)\n",
+ mmc_hostname(card->host),
+ __func__, err);
+ }
+ }
+out:
+ mmc_host_clk_release(host);
+
+ pr_debug("%s: %s: done partial init (%d)\n",
+ mmc_hostname(host), __func__, err);
+
return err;
}
@@ -1865,13 +2673,18 @@ out:
static int mmc_suspend(struct mmc_host *host)
{
int err;
+ ktime_t start = ktime_get();
+ MMC_TRACE(host, "%s: Enter\n", __func__);
err = _mmc_suspend(host, true);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
}
+ trace_mmc_suspend(mmc_hostname(host), err,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -1881,43 +2694,61 @@ static int mmc_suspend(struct mmc_host *host)
*/
static int _mmc_resume(struct mmc_host *host)
{
- int err = 0;
+ int err = -ENOSYS;
+ int retries;
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
- if (!mmc_card_suspended(host->card))
+ if (!mmc_card_suspended(host->card)) {
+ mmc_release_host(host);
goto out;
+ }
mmc_power_up(host, host->card->ocr);
- err = mmc_init_card(host, host->card->ocr, host->card);
- mmc_card_clr_suspended(host->card);
+ retries = 3;
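+ /*
+ * Prefer the faster CMD5 awake plus partial init; if that fails, fall
+ * back to a full re-init, power cycling the card between attempts.
+ */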
+ while (retries) {
+ if (mmc_can_sleepawake(host)) {
+ err = mmc_sleepawake(host, false);
+ if (!err)
+ err = mmc_partial_init(host);
+ if (err)
+ pr_err("%s: %s: awake failed (%d), fallback to full init\n",
+ mmc_hostname(host), __func__, err);
+ }
-out:
- mmc_release_host(host);
- return err;
-}
+ if (err)
+ err = mmc_init_card(host, host->card->ocr, host->card);
-/*
- * Shutdown callback
- */
-static int mmc_shutdown(struct mmc_host *host)
-{
- int err = 0;
+ if (err) {
+ pr_err("%s: MMC card re-init failed rc = %d (retries = %d)\n",
+ mmc_hostname(host), err, retries);
+ retries--;
+ mmc_power_off(host);
+ usleep_range(5000, 5500);
+ mmc_power_up(host, host->card->ocr);
+ mmc_select_voltage(host, host->card->ocr);
+ continue;
+ }
+ break;
+ }
+ if (!err && mmc_card_cmdq(host->card)) {
+ err = mmc_cmdq_halt(host, false);
+ if (err)
+ pr_err("%s: un-halt: failed: %d\n", __func__, err);
+ }
+ mmc_card_clr_suspended(host->card);
- /*
- * In a specific case for poweroff notify, we need to resume the card
- * before we can shutdown it properly.
- */
- if (mmc_can_poweroff_notify(host->card) &&
- !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
- err = _mmc_resume(host);
+ mmc_release_host(host);
- if (!err)
- err = _mmc_suspend(host, false);
+ err = mmc_resume_clk_scaling(host);
+ if (err)
+ pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+out:
return err;
}
@@ -1927,7 +2758,9 @@ static int mmc_shutdown(struct mmc_host *host)
static int mmc_resume(struct mmc_host *host)
{
int err = 0;
+ ktime_t start = ktime_get();
+ MMC_TRACE(host, "%s: Enter\n", __func__);
if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
err = _mmc_resume(host);
pm_runtime_set_active(&host->card->dev);
@@ -1935,24 +2768,95 @@ static int mmc_resume(struct mmc_host *host)
}
pm_runtime_enable(&host->card->dev);
+ trace_mmc_resume(mmc_hostname(host), err,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
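+/*
+ * Runtime suspend is deferred while bkops is needed or still running,
+ * but only up to MAX_DEFER_SUSPEND_COUNTER attempts, so that suspend
+ * is not blocked indefinitely.
+ */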
+#define MAX_DEFER_SUSPEND_COUNTER 20
+static bool mmc_process_bkops(struct mmc_host *host)
+{
+ int err = 0;
+ bool is_running = false;
+ u32 status;
+
+ mmc_claim_host(host);
+ if (mmc_card_cmdq(host->card)) {
+ BUG_ON(host->cmdq_ctx.active_reqs);
+
+ err = mmc_cmdq_halt(host, true);
+ if (err) {
+ pr_err("%s: halt: failed: %d\n", __func__, err);
+ goto unhalt;
+ }
+ }
+
+ if (mmc_card_doing_bkops(host->card)) {
+ /* check that manual bkops finished */
+ err = mmc_send_status(host->card, &status);
+ if (err) {
+ pr_err("%s: Get card status fail\n", __func__);
+ goto unhalt;
+ }
+ if (R1_CURRENT_STATE(status) != R1_STATE_PRG) {
+ mmc_card_clr_doing_bkops(host->card);
+ goto unhalt;
+ }
+ } else {
+ mmc_check_bkops(host->card);
+ }
+
+ if (host->card->bkops.needs_bkops &&
+ !mmc_card_support_auto_bkops(host->card))
+ mmc_start_manual_bkops(host->card);
+
+unhalt:
+ if (mmc_card_cmdq(host->card)) {
+ err = mmc_cmdq_halt(host, false);
+ if (err)
+ pr_err("%s: unhalt: failed: %d\n", __func__, err);
+ }
+ mmc_release_host(host);
+
+ if (host->card->bkops.needs_bkops ||
+ mmc_card_doing_bkops(host->card)) {
+ if (host->card->bkops.retry_counter++ <
+ MAX_DEFER_SUSPEND_COUNTER) {
+ host->card->bkops.needs_check = true;
+ is_running = true;
+ } else {
+ host->card->bkops.retry_counter = 0;
+ }
+ }
+ return is_running;
+}
+
/*
* Callback for runtime_suspend.
*/
static int mmc_runtime_suspend(struct mmc_host *host)
{
int err;
+ ktime_t start = ktime_get();
if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
return 0;
+ if (mmc_process_bkops(host)) {
+ pm_runtime_mark_last_busy(&host->card->dev);
+ pr_debug("%s: deferred, need bkops\n", __func__);
+ return -EBUSY;
+ }
+
+ MMC_TRACE(host, "%s\n", __func__);
err = _mmc_suspend(host, true);
if (err)
pr_err("%s: error %d doing aggressive suspend\n",
mmc_hostname(host), err);
+ trace_mmc_runtime_suspend(mmc_hostname(host), err,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
return err;
}
@@ -1962,16 +2866,21 @@ static int mmc_runtime_suspend(struct mmc_host *host)
static int mmc_runtime_resume(struct mmc_host *host)
{
int err;
+ ktime_t start = ktime_get();
if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
return 0;
+ MMC_TRACE(host, "%s\n", __func__);
err = _mmc_resume(host);
if (err)
pr_err("%s: error %d doing aggressive resume\n",
mmc_hostname(host), err);
- return 0;
+ trace_mmc_runtime_resume(mmc_hostname(host), err,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+
+ return err;
}
int mmc_can_reset(struct mmc_card *card)
@@ -1988,21 +2897,58 @@ EXPORT_SYMBOL(mmc_can_reset);
static int mmc_reset(struct mmc_host *host)
{
struct mmc_card *card = host->card;
+ int ret;
+
+ if ((host->caps & MMC_CAP_HW_RESET) && host->ops->hw_reset &&
+ mmc_can_reset(card)) {
+ /* If the card accepts the RST_n signal, send it. */
+ mmc_set_clock(host, host->f_init);
+ host->ops->hw_reset(host);
+ /* Set initial state and call mmc_set_ios */
+ mmc_set_initial_state(host);
+ } else {
+ /* Do a brute force power cycle */
+ mmc_power_cycle(host, card->ocr);
+ }
- if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
- return -EOPNOTSUPP;
+ /* Suspend clk scaling to avoid switching frequencies intermittently */
- if (!mmc_can_reset(card))
- return -EOPNOTSUPP;
+ ret = mmc_suspend_clk_scaling(host);
+ if (ret) {
+ pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+ mmc_hostname(host), __func__, ret);
+ return ret;
+ }
- mmc_set_clock(host, host->f_init);
+ ret = mmc_init_card(host, host->card->ocr, host->card);
+ if (ret) {
+ pr_err("%s: %s: mmc_init_card failed (%d)\n",
+ mmc_hostname(host), __func__, ret);
+ return ret;
+ }
- host->ops->hw_reset(host);
+ ret = mmc_resume_clk_scaling(host);
+ if (ret)
+ pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+ mmc_hostname(host), __func__, ret);
- /* Set initial state and call mmc_set_ios */
- mmc_set_initial_state(host);
+ return ret;
+}
- return mmc_init_card(host, card->ocr, card);
+static int mmc_shutdown(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ /*
+ * Exit clock scaling so that it doesn't kick in after
+ * power off notification is sent
+ */
+ if (host->caps2 & MMC_CAP2_CLK_SCALE)
+ mmc_exit_clk_scaling(card->host);
+ /* send power off notification */
+ if (mmc_card_mmc(card))
+ mmc_send_pon(card);
+ return 0;
}
static const struct mmc_bus_ops mmc_ops = {
@@ -2013,8 +2959,9 @@ static const struct mmc_bus_ops mmc_ops = {
.runtime_suspend = mmc_runtime_suspend,
.runtime_resume = mmc_runtime_resume,
.alive = mmc_alive,
- .shutdown = mmc_shutdown,
+ .change_bus_speed = mmc_change_bus_speed,
.reset = mmc_reset,
+ .shutdown = mmc_shutdown,
};
/*
@@ -2072,6 +3019,14 @@ int mmc_attach_mmc(struct mmc_host *host)
goto remove_card;
mmc_claim_host(host);
+ err = mmc_init_clk_scaling(host);
+ if (err) {
+ mmc_release_host(host);
+ goto remove_card;
+ }
+
+ register_reboot_notifier(&host->card->reboot_notify);
+
return 0;
remove_card:
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 1f444269ebbe..de406431e5a4 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -54,7 +54,7 @@ static const u8 tuning_blk_pattern_8bit[] = {
0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
-static inline int __mmc_send_status(struct mmc_card *card, u32 *status,
+int __mmc_send_status(struct mmc_card *card, u32 *status,
bool ignore_crc)
{
int err;
@@ -466,6 +466,45 @@ int mmc_switch_status_error(struct mmc_host *host, u32 status)
}
/**
+ * mmc_prepare_switch - helper; prepare to modify EXT_CSD register
+ * @cmd: command to be initialized for the EXT_CSD write
+ * @index: EXT_CSD register index
+ * @value: value to program into EXT_CSD register
+ * @set: cmd set values
+ * @tout_ms: timeout (ms) for operation performed by register write,
+ * timeout of zero implies maximum possible timeout
+ * @use_busy_signal: use the busy signal as response type
+ *
+ * Helper to prepare to modify EXT_CSD register for selected card.
+ */
+static inline void mmc_prepare_switch(struct mmc_command *cmd, u8 index,
+ u8 value, u8 set, unsigned int tout_ms,
+ bool use_busy_signal)
+{
+ cmd->opcode = MMC_SWITCH;
+ cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (index << 16) |
+ (value << 8) |
+ set;
+ cmd->flags = MMC_CMD_AC;
+ cmd->busy_timeout = tout_ms;
+ if (use_busy_signal)
+ cmd->flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
+ else
+ cmd->flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1;
+}
+
+int __mmc_switch_cmdq_mode(struct mmc_command *cmd, u8 set, u8 index, u8 value,
+ unsigned int timeout_ms, bool use_busy_signal,
+ bool ignore_timeout)
+{
+ mmc_prepare_switch(cmd, index, value, set, timeout_ms, use_busy_signal);
+ return 0;
+}
+EXPORT_SYMBOL(__mmc_switch_cmdq_mode);
+
+/**
* __mmc_switch - modify EXT_CSD register
* @card: the MMC card associated with the data transfer
* @set: cmd set values
@@ -489,6 +528,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned long timeout;
u32 status = 0;
bool use_r1b_resp = use_busy_signal;
+ int retries = 5;
mmc_retune_hold(host);
@@ -502,12 +542,8 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
(timeout_ms > host->max_busy_timeout))
use_r1b_resp = false;
- cmd.opcode = MMC_SWITCH;
- cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
- (index << 16) |
- (value << 8) |
- set;
- cmd.flags = MMC_CMD_AC;
+ mmc_prepare_switch(&cmd, index, value, set, timeout_ms,
+ use_r1b_resp);
if (use_r1b_resp) {
cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B;
/*
@@ -521,6 +557,8 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (index == EXT_CSD_SANITIZE_START)
cmd.sanitize_busy = true;
+ else if (index == EXT_CSD_BKOPS_START)
+ cmd.bkops_busy = true;
err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
if (err)
@@ -566,10 +604,17 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
/* Timeout if the device never leaves the program state. */
if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state! %s\n",
- mmc_hostname(host), __func__);
- err = -ETIMEDOUT;
- goto out;
+ pr_err("%s: Card stuck in programming state! %s, timeout:%ums, retries:%d\n",
+ mmc_hostname(host), __func__,
+ timeout_ms, retries);
+ if (retries)
+ timeout = jiffies +
+ msecs_to_jiffies(timeout_ms);
+ else {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+ retries--;
}
} while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
@@ -713,7 +758,10 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode,
data.sg = &sg;
data.sg_len = 1;
+ data.timeout_ns = 1000000;
+ data.timeout_clks = 0;
mmc_set_data_timeout(&data, card);
+
sg_init_one(&sg, data_buf, len);
mmc_wait_for_req(host, &mrq);
err = 0;
@@ -762,7 +810,7 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
unsigned int opcode;
int err;
- if (!card->ext_csd.hpi) {
+ if (!card->ext_csd.hpi_en) {
pr_warn("%s: Card didn't support HPI command\n",
mmc_hostname(card->host));
return -EINVAL;
@@ -779,7 +827,7 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
err = mmc_wait_for_cmd(card->host, &cmd, 0);
if (err) {
- pr_warn("%s: error %d interrupting operation. "
+ pr_debug("%s: error %d interrupting operation. "
"HPI command response %#x\n", mmc_hostname(card->host),
err, cmd.resp[0]);
return err;
@@ -794,3 +842,21 @@ int mmc_can_ext_csd(struct mmc_card *card)
{
return (card && card->csd.mmca_vsn > CSD_SPEC_VER_3);
}
+
+int mmc_discard_queue(struct mmc_host *host, u32 tasks)
+{
+ struct mmc_command cmd = {0};
+
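+ /*
+ * CMDQ task management: a non-zero 'tasks' value selects the task(s)
+ * to discard (placed in bits 16 and above of the argument), while
+ * zero discards the entire queue.
+ */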
+ cmd.opcode = MMC_CMDQ_TASK_MGMT;
+ if (tasks) {
+ cmd.arg = DISCARD_TASK;
+ cmd.arg |= (tasks << 16);
+ } else {
+ cmd.arg = DISCARD_QUEUE;
+ }
+
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ return mmc_wait_for_cmd(host, &cmd, 0);
+}
+EXPORT_SYMBOL(mmc_discard_queue);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index f1b8e81aaa28..ad1058c1adfd 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -27,10 +27,12 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
int mmc_can_ext_csd(struct mmc_card *card);
+int mmc_discard_queue(struct mmc_host *host, u32 tasks);
int mmc_switch_status_error(struct mmc_host *host, u32 status);
int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
unsigned int timeout_ms, bool use_busy_signal, bool send_status,
bool ignore_crc);
-
+int __mmc_send_status(struct mmc_card *card, u32 *status,
+ bool ignore_crc);
#endif
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index fad660b95809..071adc101158 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -35,7 +35,85 @@
#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128
#endif
+#ifndef SDIO_VENDOR_ID_MSM
+#define SDIO_VENDOR_ID_MSM 0x0070
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_WCN1314
+#define SDIO_DEVICE_ID_MSM_WCN1314 0x2881
+#endif
+
+#ifndef SDIO_VENDOR_ID_MSM_QCA
+#define SDIO_VENDOR_ID_MSM_QCA 0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_1 0x300
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6003_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6003_2 0x301
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_1
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_1 0x400
+#endif
+
+#ifndef SDIO_DEVICE_ID_MSM_QCA_AR6004_2
+#define SDIO_DEVICE_ID_MSM_QCA_AR6004_2 0x401
+#endif
+
+#ifndef SDIO_VENDOR_ID_QCA6574
+#define SDIO_VENDOR_ID_QCA6574 0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_QCA6574
+#define SDIO_DEVICE_ID_QCA6574 0x50a
+#endif
+
+#ifndef SDIO_VENDOR_ID_QCA9377
+#define SDIO_VENDOR_ID_QCA9377 0x271
+#endif
+
+#ifndef SDIO_DEVICE_ID_QCA9377
+#define SDIO_DEVICE_ID_QCA9377 0x701
+#endif
+
+
+/*
+ * This hook just adds a quirk for all sdio devices
+ */
+static void add_quirk_for_sdio_devices(struct mmc_card *card, int data)
+{
+ if (mmc_card_sdio(card))
+ card->quirks |= data;
+}
+
static const struct mmc_fixup mmc_fixup_methods[] = {
+ /*
+ * By default SDIO devices are considered CLK_GATING broken;
+ * good cards will be whitelisted as they are tested.
+ */
+ SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID,
+ add_quirk_for_sdio_devices,
+ MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM, SDIO_DEVICE_ID_MSM_WCN1314,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_1,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6003_2,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_1,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_MSM_QCA, SDIO_DEVICE_ID_MSM_QCA_AR6004_2,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
@@ -48,6 +126,11 @@ static const struct mmc_fixup mmc_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0,
add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING),
+ SDIO_FIXUP(SDIO_VENDOR_ID_QCA6574, SDIO_DEVICE_ID_QCA6574,
+ add_quirk, MMC_QUIRK_QCA6574_SETTINGS),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_QCA9377, SDIO_DEVICE_ID_QCA9377,
+ add_quirk, MMC_QUIRK_QCA9377_SETTINGS),
END_FIXUP
};
@@ -68,6 +151,8 @@ void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table)
(f->name == CID_NAME_ANY ||
!strncmp(f->name, card->cid.prod_name,
sizeof(card->cid.prod_name))) &&
+ (f->ext_csd_rev == EXT_CSD_REV_ANY ||
+ f->ext_csd_rev == card->ext_csd.rev) &&
(f->cis_vendor == card->cis.vendor ||
f->cis_vendor == (u16) SDIO_ANY_ID) &&
(f->cis_device == card->cis.device ||
diff --git a/drivers/mmc/core/ring_buffer.c b/drivers/mmc/core/ring_buffer.c
new file mode 100644
index 000000000000..83945e1cae40
--- /dev/null
+++ b/drivers/mmc/core/ring_buffer.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mmc/ring_buffer.h>
+#include <linux/mmc/host.h>
+
+void mmc_stop_tracing(struct mmc_host *mmc)
+{
+ mmc->trace_buf.stop_tracing = true;
+}
+
+void mmc_trace_write(struct mmc_host *mmc,
+ const char *fmt, ...)
+{
+ unsigned int idx;
+ va_list args;
+ char *event;
+ unsigned long flags;
+ char str[MMC_TRACE_EVENT_SZ];
+
+ if (unlikely(!mmc->trace_buf.data) ||
+ unlikely(mmc->trace_buf.stop_tracing))
+ return;
+
+ /*
+ * An increment and modulus are used to keep the index within array
+ * bounds. The cast to unsigned is necessary so the increment and
+ * rollover wrap to 0 correctly.
+ */
+ spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
+ mmc->trace_buf.wr_idx += 1;
+ idx = ((unsigned int)mmc->trace_buf.wr_idx) &
+ (MMC_TRACE_RBUF_NUM_EVENTS - 1);
+ spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
+
+ /* Catch an unlikely machine-specific wrap-around bug */
+ if (unlikely(idx > (MMC_TRACE_RBUF_NUM_EVENTS - 1))) {
+ pr_err("%s: %s: Invalid idx:%d for mmc trace, tracing stopped !\n",
+ mmc_hostname(mmc), __func__, idx);
+ mmc_stop_tracing(mmc);
+ return;
+ }
+
+ event = &mmc->trace_buf.data[idx * MMC_TRACE_EVENT_SZ];
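+ /*
+ * Build the event in two steps: prepend CPU, timestamp and host name
+ * to the caller's format string, then expand the caller's arguments
+ * directly into the ring-buffer slot.
+ */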
+ va_start(args, fmt);
+ snprintf(str, MMC_TRACE_EVENT_SZ, "<%d> %lld: %s: %s",
+ raw_smp_processor_id(),
+ ktime_to_ns(ktime_get()),
+ mmc_hostname(mmc), fmt);
+ memset(event, '\0', MMC_TRACE_EVENT_SZ);
+ vscnprintf(event, MMC_TRACE_EVENT_SZ, str, args);
+ va_end(args);
+}
+
+void mmc_trace_init(struct mmc_host *mmc)
+{
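+ /* the index masking in mmc_trace_write() relies on a power-of-2 size */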
+ BUILD_BUG_ON_NOT_POWER_OF_2(MMC_TRACE_RBUF_NUM_EVENTS);
+
+ mmc->trace_buf.data = (char *)
+ __get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ MMC_TRACE_RBUF_SZ_ORDER);
+
+ if (!mmc->trace_buf.data) {
+ pr_err("%s: %s: Unable to allocate trace for mmc\n",
+ __func__, mmc_hostname(mmc));
+ return;
+ }
+
+ spin_lock_init(&mmc->trace_buf.trace_lock);
+ mmc->trace_buf.wr_idx = -1;
+}
+
+void mmc_trace_free(struct mmc_host *mmc)
+{
+ if (mmc->trace_buf.data)
+ free_pages((unsigned long)mmc->trace_buf.data,
+ MMC_TRACE_RBUF_SZ_ORDER);
+}
+
+void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s)
+{
+ unsigned int idx, cur_idx;
+ unsigned int N = MMC_TRACE_RBUF_NUM_EVENTS - 1;
+ char *event;
+ unsigned long flags;
+
+ if (!mmc->trace_buf.data)
+ return;
+
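+ /*
+ * wr_idx points at the newest event; start the dump at the next slot
+ * (the oldest entry) and walk the ring once around.
+ */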
+ spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
+ idx = ((unsigned int)mmc->trace_buf.wr_idx) & N;
+ cur_idx = (idx + 1) & N;
+
+ do {
+ event = &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
+ if (s)
+ seq_printf(s, "%s", (char *)event);
+ else
+ pr_err("%s", (char *)event);
+ cur_idx = (cur_idx + 1) & N;
+ if (cur_idx == idx) {
+ event =
+ &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
+ if (s)
+ seq_printf(s, "latest_event: %s",
+ (char *)event);
+ else
+ pr_err("latest_event: %s", (char *)event);
+ break;
+ }
+ } while (1);
+ spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
+}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index ee145d4cc541..5033107f6e26 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -27,6 +27,12 @@
#include "sd.h"
#include "sd_ops.h"
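+/* host f_max thresholds used when selecting a UHS bus speed mode */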
+#define UHS_SDR104_MIN_DTR (100 * 1000 * 1000)
+#define UHS_DDR50_MIN_DTR (50 * 1000 * 1000)
+#define UHS_SDR50_MIN_DTR (50 * 1000 * 1000)
+#define UHS_SDR25_MIN_DTR (25 * 1000 * 1000)
+#define UHS_SDR12_MIN_DTR (12.5 * 1000 * 1000)
+
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -369,9 +375,9 @@ int mmc_sd_switch_hs(struct mmc_card *card)
goto out;
if ((status[16] & 0xF) != 1) {
- pr_warn("%s: Problem switching card into high-speed mode!\n",
- mmc_hostname(card->host));
- err = 0;
+ pr_warn("%s: Problem switching card into high-speed mode!, status:%x\n",
+ mmc_hostname(card->host), (status[16] & 0xF));
+ err = -EBUSY;
} else {
err = 1;
}
@@ -425,24 +431,28 @@ static void sd_update_bus_speed_mode(struct mmc_card *card)
}
if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
- card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
- } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
- card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104) &&
+ (card->host->f_max > UHS_SDR104_MIN_DTR)) {
+ card->sd_bus_speed = UHS_SDR104_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
- SD_MODE_UHS_SDR50)) {
- card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ SD_MODE_UHS_SDR50) &&
+ (card->host->f_max > UHS_SDR50_MIN_DTR)) {
+ card->sd_bus_speed = UHS_SDR50_BUS_SPEED;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50) &&
+ (card->host->f_max > UHS_DDR50_MIN_DTR)) {
+ card->sd_bus_speed = UHS_DDR50_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
- (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
- card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25) &&
+ (card->host->f_max > UHS_SDR25_MIN_DTR)) {
+ card->sd_bus_speed = UHS_SDR25_BUS_SPEED;
} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
SD_MODE_UHS_SDR12)) {
- card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
+ card->sd_bus_speed = UHS_SDR12_BUS_SPEED;
}
}
@@ -480,15 +490,17 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status)
if (err)
return err;
- if ((status[16] & 0xF) != card->sd_bus_speed)
- pr_warn("%s: Problem setting bus speed mode!\n",
- mmc_hostname(card->host));
- else {
+ if ((status[16] & 0xF) != card->sd_bus_speed) {
+ pr_warn("%s: Problem setting bus speed mode(%u)! max_dtr:%u, timing:%u, status:%x\n",
+ mmc_hostname(card->host), card->sd_bus_speed,
+ card->sw_caps.uhs_max_dtr, timing, (status[16] & 0xF));
+ err = -EBUSY;
+ } else {
mmc_set_timing(card->host, timing);
mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
}
- return 0;
+ return err;
}
/* Get host's max current setting at its current voltage */
@@ -569,6 +581,64 @@ static int sd_set_current_limit(struct mmc_card *card, u8 *status)
return 0;
}
+/**
+ * mmc_sd_change_bus_speed() - Change SD card bus frequency at runtime
+ * @host: pointer to mmc host structure
+ * @freq: pointer to desired frequency to be set
+ *
+ * Change the SD card bus frequency at runtime after the card is
+ * initialized. Callers are expected to make sure of the card's
+ * state (DATA/RCV/TRANSFER) before changing the frequency at runtime.
+ *
+ * If the frequency to change is greater than max. supported by card,
+ * *freq is changed to max. supported by card and if it is less than min.
+ * supported by host, *freq is changed to min. supported by host.
+ */
+static int mmc_sd_change_bus_speed(struct mmc_host *host, unsigned long *freq)
+{
+ int err = 0;
+ struct mmc_card *card;
+
+ mmc_claim_host(host);
+ /*
+ * Assign card pointer after claiming host to avoid race
+ * conditions that may arise during removal of the card.
+ */
+ card = host->card;
+
+ /* sanity checks */
+ if (!card || !freq) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ mmc_set_clock(host, (unsigned int) (*freq));
+
+ if (!mmc_host_is_spi(card->host) && mmc_card_uhs(card)
+ && card->host->ops->execute_tuning) {
+ /*
+ * We probe the host driver for tuning at any frequency; it is
+ * the host driver's responsibility to perform the actual tuning
+ * only when required.
+ */
+ mmc_host_clk_hold(card->host);
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK);
+ mmc_host_clk_release(card->host);
+
+ if (err) {
+ pr_warn("%s: %s: tuning execution failed %d. Restoring to previous clock %lu\n",
+ mmc_hostname(card->host), __func__, err,
+ host->clk_scaling.curr_freq);
+ mmc_set_clock(host, host->clk_scaling.curr_freq);
+ }
+ }
+
+out:
+ mmc_release_host(host);
+ return err;
+}
+
/*
* UHS-I specific initialization procedure
*/
@@ -800,7 +870,9 @@ static int mmc_sd_get_ro(struct mmc_host *host)
if (!host->ops->get_ro)
return -1;
+ mmc_host_clk_hold(host);
ro = host->ops->get_ro(host);
+ mmc_host_clk_release(host);
return ro;
}
@@ -895,7 +967,10 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)
{
unsigned max_dtr = (unsigned int)-1;
- if (mmc_card_hs(card)) {
+ if (mmc_card_uhs(card)) {
+ if (max_dtr > card->sw_caps.uhs_max_dtr)
+ max_dtr = card->sw_caps.uhs_max_dtr;
+ } else if (mmc_card_hs(card)) {
if (max_dtr > card->sw_caps.hs_max_dtr)
max_dtr = card->sw_caps.hs_max_dtr;
} else if (max_dtr > card->csd.max_dtr) {
@@ -957,6 +1032,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
err = mmc_send_relative_addr(host, &card->rca);
if (err)
goto free_card;
+ host->card = card;
}
if (!oldcard) {
@@ -1020,12 +1096,16 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
}
}
- host->card = card;
+ card->clk_scaling_highest = mmc_sd_get_max_clock(card);
+ card->clk_scaling_lowest = host->f_min;
+
return 0;
free_card:
- if (!oldcard)
+ if (!oldcard) {
+ host->card = NULL;
mmc_remove_card(card);
+ }
return err;
}
@@ -1038,8 +1118,12 @@ static void mmc_sd_remove(struct mmc_host *host)
BUG_ON(!host);
BUG_ON(!host->card);
+ mmc_exit_clk_scaling(host);
mmc_remove_card(host->card);
+
+ mmc_claim_host(host);
host->card = NULL;
+ mmc_release_host(host);
}
/*
@@ -1063,7 +1147,17 @@ static void mmc_sd_detect(struct mmc_host *host)
BUG_ON(!host);
BUG_ON(!host->card);
- mmc_get_card(host->card);
+ /*
+ * Try to claim the host. If we fail to get the lock within 2 seconds,
+ * just return; this ensures that when this call is invoked due to
+ * pm_suspend, it does not block suspend for a longer duration.
+ */
+ pm_runtime_get_sync(&host->card->dev);
+ if (!mmc_try_claim_host(host, 2000)) {
+ pm_runtime_mark_last_busy(&host->card->dev);
+ pm_runtime_put_autosuspend(&host->card->dev);
+ return;
+ }
/*
* Just check if our card has been removed.
@@ -1081,6 +1175,7 @@ static void mmc_sd_detect(struct mmc_host *host)
if (!retries) {
printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
__func__, mmc_hostname(host), err);
+ err = _mmc_detect_card_removed(host);
}
#else
err = _mmc_detect_card_removed(host);
@@ -1105,6 +1200,13 @@ static int _mmc_sd_suspend(struct mmc_host *host)
BUG_ON(!host);
BUG_ON(!host->card);
+ err = mmc_suspend_clk_scaling(host);
+ if (err) {
+ pr_err("%s: %s: fail to suspend clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+ return err;
+ }
+
mmc_claim_host(host);
if (mmc_card_suspended(host->card))
@@ -1130,11 +1232,13 @@ static int mmc_sd_suspend(struct mmc_host *host)
{
int err;
+ MMC_TRACE(host, "%s: Enter\n", __func__);
err = _mmc_sd_suspend(host);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
}
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -1167,8 +1271,11 @@ static int _mmc_sd_resume(struct mmc_host *host)
if (err) {
printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
mmc_hostname(host), err, retries);
- mdelay(5);
retries--;
+ mmc_power_off(host);
+ usleep_range(5000, 5500);
+ mmc_power_up(host, host->card->ocr);
+ mmc_select_voltage(host, host->card->ocr);
continue;
}
break;
@@ -1176,8 +1283,22 @@ static int _mmc_sd_resume(struct mmc_host *host)
#else
err = mmc_sd_init_card(host, host->card->ocr, host->card);
#endif
+ if (err) {
+ pr_err("%s: %s: mmc_sd_init_card failed (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto out;
+ }
mmc_card_clr_suspended(host->card);
+ if (host->card->sdr104_blocked)
+ goto out;
+ err = mmc_resume_clk_scaling(host);
+ if (err) {
+ pr_err("%s: %s: fail to resume clock scaling (%d)\n",
+ mmc_hostname(host), __func__, err);
+ goto out;
+ }
+
out:
mmc_release_host(host);
return err;
@@ -1190,12 +1311,14 @@ static int mmc_sd_resume(struct mmc_host *host)
{
int err = 0;
+ MMC_TRACE(host, "%s: Enter\n", __func__);
if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
err = _mmc_sd_resume(host);
pm_runtime_set_active(&host->card->dev);
pm_runtime_mark_last_busy(&host->card->dev);
}
pm_runtime_enable(&host->card->dev);
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -1250,7 +1373,7 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.suspend = mmc_sd_suspend,
.resume = mmc_sd_resume,
.alive = mmc_sd_alive,
- .shutdown = mmc_sd_suspend,
+ .change_bus_speed = mmc_sd_change_bus_speed,
.reset = mmc_sd_reset,
};
@@ -1306,6 +1429,10 @@ int mmc_attach_sd(struct mmc_host *host)
err = mmc_sd_init_card(host, rocr, NULL);
if (err) {
retries--;
+ mmc_power_off(host);
+ usleep_range(5000, 5500);
+ mmc_power_up(host, rocr);
+ mmc_select_voltage(host, rocr);
continue;
}
break;
@@ -1328,6 +1455,13 @@ int mmc_attach_sd(struct mmc_host *host)
goto remove_card;
mmc_claim_host(host);
+
+ err = mmc_init_clk_scaling(host);
+ if (err) {
+ mmc_release_host(host);
+ goto remove_card;
+ }
+
return 0;
remove_card:
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index b47957122fd7..13a2f2d14d12 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -188,6 +188,23 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
if (data & SDIO_DRIVE_SDTD)
card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_INTERRUPT_EXTENSION, 0, &data);
+ if (ret)
+ goto out;
+ if (data & SDIO_SUPPORT_ASYNC_INTR) {
+ if (card->host->caps2 &
+ MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE) {
+ data |= SDIO_ENABLE_ASYNC_INTR;
+ ret = mmc_io_rw_direct(card, 1, 0,
+ SDIO_CCCR_INTERRUPT_EXTENSION,
+ data, NULL);
+ if (ret)
+ goto out;
+ card->cccr.async_intr_sup = 1;
+ }
+ }
}
/* if no uhs mode ensure we check for high speed */
@@ -206,12 +223,60 @@ out:
return ret;
}
+static void sdio_enable_vendor_specific_settings(struct mmc_card *card)
+{
+ int ret;
+ u8 settings;
+
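+ /*
+ * The function 0 registers 0xF0 - 0xF2 accessed below are
+ * vendor-specific settings on QCA6574/QCA9377 cards.
+ */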
+ if (mmc_enable_qca6574_settings(card) ||
+ mmc_enable_qca9377_settings(card)) {
+ ret = mmc_io_rw_direct(card, 1, 0, 0xF2, 0x0F, NULL);
+ if (ret) {
+ pr_crit("%s: failed to write to fn 0xf2 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0, 0xF1, 0, &settings);
+ if (ret) {
+ pr_crit("%s: failed to read fn 0xf1 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+
+ settings |= 0x80;
+ ret = mmc_io_rw_direct(card, 1, 0, 0xF1, settings, NULL);
+ if (ret) {
+ pr_crit("%s: failed to write to fn 0xf1 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0, 0xF0, 0, &settings);
+ if (ret) {
+ pr_crit("%s: failed to read fn 0xf0 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+
+ settings |= 0x20;
+ ret = mmc_io_rw_direct(card, 1, 0, 0xF0, settings, NULL);
+ if (ret) {
+ pr_crit("%s: failed to write to fn 0xf0 %d\n",
+ mmc_hostname(card->host), ret);
+ goto out;
+ }
+ }
+out:
+ return;
+}
+
static int sdio_enable_wide(struct mmc_card *card)
{
int ret;
u8 ctrl;
- if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -227,7 +292,10 @@ static int sdio_enable_wide(struct mmc_card *card)
/* set as 4-bit bus width */
ctrl &= ~SDIO_BUS_WIDTH_MASK;
- ctrl |= SDIO_BUS_WIDTH_4BIT;
+ if (card->host->caps & MMC_CAP_8_BIT_DATA)
+ ctrl |= SDIO_BUS_WIDTH_8BIT;
+ else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+ ctrl |= SDIO_BUS_WIDTH_4BIT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
if (ret)
@@ -268,7 +336,7 @@ static int sdio_disable_wide(struct mmc_card *card)
int ret;
u8 ctrl;
- if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ if (!(card->host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
if (card->cccr.low_speed && !card->cccr.wide_bus)
@@ -278,10 +346,10 @@ static int sdio_disable_wide(struct mmc_card *card)
if (ret)
return ret;
- if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
+ if (!(ctrl & (SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT)))
return 0;
- ctrl &= ~SDIO_BUS_WIDTH_4BIT;
+ ctrl &= ~(SDIO_BUS_WIDTH_4BIT | SDIO_BUS_WIDTH_8BIT);
ctrl |= SDIO_BUS_ASYNC_INT;
ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
@@ -498,6 +566,9 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card)
if (err)
return err;
+ /* Vendor specific settings based on card quirks */
+ sdio_enable_vendor_specific_settings(card);
+
speed &= ~SDIO_SPEED_BSS_MASK;
speed |= bus_speed;
err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
@@ -624,8 +695,11 @@ try_again:
/*
* Call the optional HC's init_card function to handle quirks.
*/
- if (host->ops->init_card)
+ if (host->ops->init_card) {
+ mmc_host_clk_hold(host);
host->ops->init_card(host, card);
+ mmc_host_clk_release(host);
+ }
/*
* If the host and card support UHS-I mode request the card
@@ -792,7 +866,12 @@ try_again:
* Switch to wider bus (if supported).
*/
err = sdio_enable_4bit_bus(card);
- if (err)
+ if (err > 0) {
+ if (card->host->caps & MMC_CAP_8_BIT_DATA)
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_8);
+ else if (card->host->caps & MMC_CAP_4_BIT_DATA)
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ } else if (err)
goto remove;
}
finish:
@@ -919,6 +998,7 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
*/
static int mmc_sdio_suspend(struct mmc_host *host)
{
+ MMC_TRACE(host, "%s: Enter\n", __func__);
mmc_claim_host(host);
if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
@@ -926,13 +1006,15 @@ static int mmc_sdio_suspend(struct mmc_host *host)
if (!mmc_card_keep_power(host)) {
mmc_power_off(host);
+ } else if (host->ios.clock) {
+ mmc_gate_clock(host);
} else if (host->retune_period) {
mmc_retune_timer_stop(host);
mmc_retune_needed(host);
}
mmc_release_host(host);
-
+ MMC_TRACE(host, "%s: Exit\n", __func__);
return 0;
}
@@ -943,6 +1025,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
BUG_ON(!host);
BUG_ON(!host->card);
+ MMC_TRACE(host, "%s: Enter\n", __func__);
/* Basic card reinitialization. */
mmc_claim_host(host);
@@ -975,18 +1058,30 @@ static int mmc_sdio_resume(struct mmc_host *host)
} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
/* We may have switched to 1-bit mode during suspend */
err = sdio_enable_4bit_bus(host->card);
+ if (err > 0) {
+ if (host->caps & MMC_CAP_8_BIT_DATA)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_8);
+ else if (host->caps & MMC_CAP_4_BIT_DATA)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
}
if (!err && host->sdio_irqs) {
- if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD))
+ if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) {
wake_up_process(host->sdio_irq_thread);
- else if (host->caps & MMC_CAP_SDIO_IRQ)
+ } else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
}
mmc_release_host(host);
host->pm_flags &= ~MMC_PM_KEEP_POWER;
+ host->pm_flags &= ~MMC_PM_WAKE_SDIO_IRQ;
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -1221,40 +1316,6 @@ err:
int sdio_reset_comm(struct mmc_card *card)
{
- struct mmc_host *host = card->host;
- u32 ocr;
- u32 rocr;
- int err;
-
- printk("%s():\n", __func__);
- mmc_claim_host(host);
-
- mmc_retune_disable(host);
-
- mmc_go_idle(host);
-
- mmc_set_clock(host, host->f_min);
-
- err = mmc_send_io_op_cond(host, 0, &ocr);
- if (err)
- goto err;
-
- rocr = mmc_select_voltage(host, ocr);
- if (!rocr) {
- err = -EINVAL;
- goto err;
- }
-
- err = mmc_sdio_init_card(host, rocr, card, 0);
- if (err)
- goto err;
-
- mmc_release_host(host);
- return 0;
-err:
- printk("%s: Error resetting SDIO communications (%d)\n",
- mmc_hostname(host), err);
- mmc_release_host(host);
- return err;
+ return mmc_power_restore_host(card->host);
}
EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 8e94e555b788..8b4266a11ee0 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -55,7 +55,7 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func,
for (i = 0; i < nr_strings; i++) {
buffer[i] = string;
- strcpy(string, buf);
+ strlcpy(string, buf, strlen(buf) + 1);
string += strlen(string) + 1;
buf += strlen(buf) + 1;
}
@@ -270,8 +270,16 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
break;
/* null entries have no link field or data */
- if (tpl_code == 0x00)
- continue;
+ if (tpl_code == 0x00) {
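+ /*
+ * For these known devices, treat the first null tuple
+ * as the end of the CIS instead of skipping over it.
+ */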
+ if (card->cis.vendor == 0x70 &&
+ (card->cis.device == 0x2460 ||
+ card->cis.device == 0x0460 ||
+ card->cis.device == 0x23F1 ||
+ card->cis.device == 0x23F0))
+ break;
+ else
+ continue;
+ }
ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_link);
if (ret)
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 91bbbfb29f3f..95589d1fef18 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -93,7 +93,9 @@ void sdio_run_irqs(struct mmc_host *host)
{
mmc_claim_host(host);
host->sdio_irq_pending = true;
+ mmc_host_clk_hold(host);
process_sdio_pending_irqs(host);
+ mmc_host_clk_release(host);
mmc_release_host(host);
}
EXPORT_SYMBOL_GPL(sdio_run_irqs);
@@ -104,6 +106,7 @@ static int sdio_irq_thread(void *_host)
struct sched_param param = { .sched_priority = 1 };
unsigned long period, idle_period;
int ret;
+ bool ws;
sched_setscheduler(current, SCHED_FIFO, &param);
@@ -137,6 +140,17 @@ static int sdio_irq_thread(void *_host)
ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
if (ret)
break;
+ ws = false;
+ /*
+ * Prevent suspend if it has already started by the time we are
+ * scheduled; 100 msec (an approximate value) should be enough for
+ * the system to resume and attend to the card's request
+ */
+ if ((host->dev_status == DEV_SUSPENDING) ||
+ (host->dev_status == DEV_SUSPENDED)) {
+ pm_wakeup_event(&host->card->dev, 100);
+ ws = true;
+ }
ret = process_sdio_pending_irqs(host);
host->sdio_irq_pending = false;
mmc_release_host(host);
@@ -168,15 +182,27 @@ static int sdio_irq_thread(void *_host)
}
set_current_state(TASK_INTERRUPTIBLE);
- if (host->caps & MMC_CAP_SDIO_IRQ)
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
+ /*
+ * Function drivers will have processed the event from the card
+ * unless we are suspended, hence release the wake source.
+ */
+ if (ws && (host->dev_status == DEV_RESUMED))
+ pm_relax(&host->card->dev);
if (!kthread_should_stop())
schedule_timeout(period);
set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
- if (host->caps & MMC_CAP_SDIO_IRQ)
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
+ }
pr_debug("%s: IRQ thread exiting with code %d\n",
mmc_hostname(host), ret);
@@ -202,7 +228,9 @@ static int sdio_card_irq_get(struct mmc_card *card)
return err;
}
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
}
}
@@ -221,7 +249,9 @@ static int sdio_card_irq_put(struct mmc_card *card)
atomic_set(&host->sdio_irq_thread_abort, 1);
kthread_stop(host->sdio_irq_thread);
} else if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
}
}
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2e6d2fff1096..01959bd2d523 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -406,18 +406,39 @@ config MMC_ATMELMCI
If unsure, say N.
config MMC_SDHCI_MSM
- tristate "Qualcomm SDHCI Controller Support"
- depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ tristate "Qualcomm Technologies, Inc. SDHCI Controller Support"
+ depends on ARCH_QCOM || ARCH_MSM || (ARM && COMPILE_TEST)
depends on MMC_SDHCI_PLTFM
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
This selects the Secure Digital Host Controller Interface (SDHCI)
- support present in Qualcomm SOCs. The controller supports
- SD/MMC/SDIO devices.
+ support present in Qualcomm Technologies, Inc. SOCs. The controller
+ supports SD/MMC/SDIO devices.
If you have a controller with this interface, say Y or M here.
If unsure, say N.
+config MMC_SDHCI_MSM_ICE
+ bool "Qualcomm Technologies, Inc Inline Crypto Engine for SDHCI core"
+ depends on MMC_SDHCI_MSM && CRYPTO_DEV_QCOM_ICE
+ help
+ This selects the QTI-specific additions to support the Inline
+ Crypto Engine (ICE). ICE accelerates crypto operations while
+ maintaining high SDHCI performance.
+
+ Select this if you have ICE supported for SDHCI on QTI chipset.
+ If unsure, say N.
+
+config MMC_MSM
+ tristate "Qualcomm SDCC Controller Support"
+ depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)
+ help
+ This provides support for the SD/MMC cell found in the
+ MSM and QSD SOCs from Qualcomm. The controller also has
+ support for SDIO devices.
+
config MMC_MXC
tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support"
depends on ARCH_MXC || PPC_MPC512x
@@ -773,6 +794,19 @@ config MMC_SUNXI
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
+config MMC_CQ_HCI
+ tristate "Command Queue Support"
+ depends on HAS_DMA
+ help
+ This selects the Command Queue Host Controller Interface (CQHCI)
+ support present in host controllers of Qualcomm Technologies, Inc.,
+ amongst others.
+ Such controllers support eMMC devices with command queueing.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_TOSHIBA_PCI
tristate "Toshiba Type A SD/MMC Card Interface Driver"
depends on PCI
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3595f83e89dd..b9cbe592f5e3 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -72,9 +72,11 @@ obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o
obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o
obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o
obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o
-obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o
+obj-$(CONFIG_MMC_SDHCI_MSM_ICE) += sdhci-msm-ice.o
+obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o
obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o
+obj-$(CONFIG_MMC_CQ_HCI) += cmdq_hci.o
ifeq ($(CONFIG_CB710_DEBUG),y)
CFLAGS-cb710-mmc += -DDEBUG
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
new file mode 100644
index 000000000000..3f741f83a436
--- /dev/null
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -0,0 +1,1362 @@
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <linux/platform_device.h>
+#include <linux/blkdev.h>
+
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/pm_runtime.h>
+#include <linux/workqueue.h>
+
+#include "cmdq_hci.h"
+#include "sdhci.h"
+#include "sdhci-msm.h"
+
+#define DCMD_SLOT 31
+#define NUM_SLOTS 32
+
+/* 10 sec */
+#define HALT_TIMEOUT_MS 10000
+
+static int cmdq_halt_poll(struct mmc_host *mmc, bool halt);
+static int cmdq_halt(struct mmc_host *mmc, bool halt);
+
+#ifdef CONFIG_PM_RUNTIME
+static int cmdq_runtime_pm_get(struct cmdq_host *host)
+{
+ return pm_runtime_get_sync(host->mmc->parent);
+}
+static int cmdq_runtime_pm_put(struct cmdq_host *host)
+{
+ pm_runtime_mark_last_busy(host->mmc->parent);
+ return pm_runtime_put_autosuspend(host->mmc->parent);
+}
+#else
+static inline int cmdq_runtime_pm_get(struct cmdq_host *host)
+{
+ return 0;
+}
+static inline int cmdq_runtime_pm_put(struct cmdq_host *host)
+{
+ return 0;
+}
+#endif
+static inline struct mmc_request *get_req_by_tag(struct cmdq_host *cq_host,
+ unsigned int tag)
+{
+ return cq_host->mrq_slot[tag];
+}
+
+static inline u8 *get_desc(struct cmdq_host *cq_host, u8 tag)
+{
+ return cq_host->desc_base + (tag * cq_host->slot_sz);
+}
+
+static inline u8 *get_link_desc(struct cmdq_host *cq_host, u8 tag)
+{
+ u8 *desc = get_desc(cq_host, tag);
+
+ return desc + cq_host->task_desc_len;
+}
+
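+/* each tag owns a contiguous region of max_segs transfer descriptors */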
+static inline dma_addr_t get_trans_desc_dma(struct cmdq_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_dma_base +
+ (cq_host->mmc->max_segs * tag *
+ cq_host->trans_desc_len);
+}
+
+static inline u8 *get_trans_desc(struct cmdq_host *cq_host, u8 tag)
+{
+ return cq_host->trans_desc_base +
+ (cq_host->trans_desc_len * cq_host->mmc->max_segs * tag);
+}
+
+static void setup_trans_desc(struct cmdq_host *cq_host, u8 tag)
+{
+ u8 *link_temp;
+ dma_addr_t trans_temp;
+
+ link_temp = get_link_desc(cq_host, tag);
+ trans_temp = get_trans_desc_dma(cq_host, tag);
+
+ memset(link_temp, 0, cq_host->link_desc_len);
+ if (cq_host->link_desc_len > 8)
+ *(link_temp + 8) = 0;
+
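+ /* DCMD carries no data, so terminate its link descriptor right away */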
+ if (tag == DCMD_SLOT) {
+ *link_temp = VALID(0) | ACT(0) | END(1);
+ return;
+ }
+
+ *link_temp = VALID(1) | ACT(0x6) | END(0);
+
+ if (cq_host->dma64) {
+ __le64 *data_addr = (__le64 __force *)(link_temp + 4);
+ data_addr[0] = cpu_to_le64(trans_temp);
+ } else {
+ __le32 *data_addr = (__le32 __force *)(link_temp + 4);
+ data_addr[0] = cpu_to_le32(trans_temp);
+ }
+}
+
+static void cmdq_set_halt_irq(struct cmdq_host *cq_host, bool enable)
+{
+ u32 ier;
+
+ ier = cmdq_readl(cq_host, CQISTE);
+ if (enable) {
+ cmdq_writel(cq_host, ier | HALT, CQISTE);
+ cmdq_writel(cq_host, ier | HALT, CQISGE);
+ } else {
+ cmdq_writel(cq_host, ier & ~HALT, CQISTE);
+ cmdq_writel(cq_host, ier & ~HALT, CQISGE);
+ }
+}
+
+static void cmdq_clear_set_irqs(struct cmdq_host *cq_host, u32 clear, u32 set)
+{
+ u32 ier;
+
+ ier = cmdq_readl(cq_host, CQISTE);
+ ier &= ~clear;
+ ier |= set;
+ cmdq_writel(cq_host, ier, CQISTE);
+ cmdq_writel(cq_host, ier, CQISGE);
+ /* ensure the writes are done */
+ mb();
+}
+
+static int cmdq_clear_task_poll(struct cmdq_host *cq_host, unsigned int tag)
+{
+ int retries = 100;
+
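+ /* poll up to 100 times, 5us apart, until CQTCLR and CQTDBR both clear */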
+ cmdq_clear_set_irqs(cq_host, CQIS_TCL, 0);
+ cmdq_writel(cq_host, 1<<tag, CQTCLR);
+ while (retries) {
+ /*
+ * Task Clear register and doorbell,
+ * both should indicate that task is cleared
+ */
+ if ((cmdq_readl(cq_host, CQTCLR) & 1<<tag) ||
+ (cmdq_readl(cq_host, CQTDBR) & 1<<tag)) {
+ udelay(5);
+ retries--;
+ continue;
+ } else
+ break;
+ }
+
+ cmdq_clear_set_irqs(cq_host, 0, CQIS_TCL);
+ return retries ? 0 : -ETIMEDOUT;
+}
+
+#define DRV_NAME "cmdq-host"
+
+static void cmdq_dump_task_history(struct cmdq_host *cq_host)
+{
+ int i;
+
+ if (likely(!cq_host->mmc->cmdq_thist_enabled))
+ return;
+
+ if (!cq_host->thist) {
+ pr_err("%s: %s: CMDQ task history buffer not allocated\n",
+ mmc_hostname(cq_host->mmc), __func__);
+ return;
+ }
+
+ pr_err("---- Circular Task History ----\n");
+ pr_err(DRV_NAME ": Last entry index: %d", cq_host->thist_idx - 1);
+
+ for (i = 0; i < cq_host->num_slots; i++) {
+ pr_err(DRV_NAME ": [%02d]%s Task: 0x%08x | Args: 0x%08x\n", i,
+ (cq_host->thist[i].is_dcmd) ? "DCMD" : "DATA",
+ lower_32_bits(cq_host->thist[i].task),
+ upper_32_bits(cq_host->thist[i].task));
+ }
+ pr_err("-------------------------\n");
+}
+
+static void cmdq_dump_adma_mem(struct cmdq_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ dma_addr_t desc_dma;
+ int tag = 0;
+ unsigned long data_active_reqs =
+ mmc->cmdq_ctx.data_active_reqs;
+ unsigned long desc_size =
+ (cq_host->mmc->max_segs * cq_host->trans_desc_len);
+
+ for_each_set_bit(tag, &data_active_reqs, cq_host->num_slots) {
+ desc_dma = get_trans_desc_dma(cq_host, tag);
+ pr_err("%s: %s: tag = %d, trans_dma(phys) = %pad, trans_desc(virt) = 0x%p\n",
+ mmc_hostname(mmc), __func__, tag,
+ &desc_dma, get_trans_desc(cq_host, tag));
+ print_hex_dump(KERN_ERR, "cmdq-adma:", DUMP_PREFIX_ADDRESS,
+ 32, 8, get_trans_desc(cq_host, tag),
+ (desc_size), false);
+ }
+}
+
+static void cmdq_dumpregs(struct cmdq_host *cq_host)
+{
+ struct mmc_host *mmc = cq_host->mmc;
+ int offset = 0;
+
+ if (cq_host->offset_changed)
+ offset = CQ_V5_VENDOR_CFG;
+
+ MMC_TRACE(mmc,
+ "%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
+ __func__, cmdq_readl(cq_host, CQCTL), cmdq_readl(cq_host, CQIS),
+ cmdq_readl(cq_host, CQISTE), cmdq_readl(cq_host, CQISGE),
+ cmdq_readl(cq_host, CQTDBR), cmdq_readl(cq_host, CQTCN),
+ cmdq_readl(cq_host, CQDQS), cmdq_readl(cq_host, CQDPT),
+ cmdq_readl(cq_host, CQTERRI), cmdq_readl(cq_host, CQCRI),
+ cmdq_readl(cq_host, CQCRA), cmdq_readl(cq_host, CQCRDCT));
+ pr_err(DRV_NAME ": ========== REGISTER DUMP (%s)==========\n",
+ mmc_hostname(mmc));
+
+ pr_err(DRV_NAME ": Caps: 0x%08x | Version: 0x%08x\n",
+ cmdq_readl(cq_host, CQCAP),
+ cmdq_readl(cq_host, CQVER));
+ pr_err(DRV_NAME ": Queing config: 0x%08x | Queue Ctrl: 0x%08x\n",
+ cmdq_readl(cq_host, CQCFG),
+ cmdq_readl(cq_host, CQCTL));
+ pr_err(DRV_NAME ": Int stat: 0x%08x | Int enab: 0x%08x\n",
+ cmdq_readl(cq_host, CQIS),
+ cmdq_readl(cq_host, CQISTE));
+ pr_err(DRV_NAME ": Int sig: 0x%08x | Int Coal: 0x%08x\n",
+ cmdq_readl(cq_host, CQISGE),
+ cmdq_readl(cq_host, CQIC));
+ pr_err(DRV_NAME ": TDL base: 0x%08x | TDL up32: 0x%08x\n",
+ cmdq_readl(cq_host, CQTDLBA),
+ cmdq_readl(cq_host, CQTDLBAU));
+ pr_err(DRV_NAME ": Doorbell: 0x%08x | Comp Notif: 0x%08x\n",
+ cmdq_readl(cq_host, CQTDBR),
+ cmdq_readl(cq_host, CQTCN));
+ pr_err(DRV_NAME ": Dev queue: 0x%08x | Dev Pend: 0x%08x\n",
+ cmdq_readl(cq_host, CQDQS),
+ cmdq_readl(cq_host, CQDPT));
+ pr_err(DRV_NAME ": Task clr: 0x%08x | Send stat 1: 0x%08x\n",
+ cmdq_readl(cq_host, CQTCLR),
+ cmdq_readl(cq_host, CQSSC1));
+ pr_err(DRV_NAME ": Send stat 2: 0x%08x | DCMD resp: 0x%08x\n",
+ cmdq_readl(cq_host, CQSSC2),
+ cmdq_readl(cq_host, CQCRDCT));
+ pr_err(DRV_NAME ": Resp err mask: 0x%08x | Task err: 0x%08x\n",
+ cmdq_readl(cq_host, CQRMEM),
+ cmdq_readl(cq_host, CQTERRI));
+ pr_err(DRV_NAME ": Resp idx 0x%08x | Resp arg: 0x%08x\n",
+ cmdq_readl(cq_host, CQCRI),
+ cmdq_readl(cq_host, CQCRA));
+ pr_err(DRV_NAME": Vendor cfg 0x%08x\n",
+ cmdq_readl(cq_host, CQ_VENDOR_CFG + offset));
+ pr_err(DRV_NAME ": ===========================================\n");
+
+ cmdq_dump_task_history(cq_host);
+ if (cq_host->ops->dump_vendor_regs)
+ cq_host->ops->dump_vendor_regs(mmc);
+}
+
+/**
+ * The allocated descriptor table for task, link & transfer descritors
+ * looks like:
+ * |----------|
+ * |task desc | |->|----------|
+ * |----------| | |trans desc|
+ * |link desc-|->| |----------|
+ * |----------| .
+ * . .
+ * no. of slots max-segs
+ * . |----------|
+ * |----------|
+ * The idea here is to create the [task+trans] table and mark & point the
+ * link desc to the transfer desc table on a per slot basis.
+ */
+static int cmdq_host_alloc_tdl(struct cmdq_host *cq_host)
+{
+
+ size_t desc_size;
+ size_t data_size;
+ int i = 0;
+
+ /* task descriptor can be 64/128 bit irrespective of arch */
+ if (cq_host->caps & CMDQ_TASK_DESC_SZ_128) {
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) |
+ CQ_TASK_DESC_SZ, CQCFG);
+ cq_host->task_desc_len = 16;
+ } else {
+ cq_host->task_desc_len = 8;
+ }
+
+ /*
+	 * The transfer descriptor is 96 bits long instead of 128 bits,
+	 * which means ADMA expects the next valid descriptor at the
+	 * 96th bit rather than the 128th bit
+ */
+ if (cq_host->dma64) {
+ if (cq_host->quirks & CMDQ_QUIRK_SHORT_TXFR_DESC_SZ)
+ cq_host->trans_desc_len = 12;
+ else
+ cq_host->trans_desc_len = 16;
+ cq_host->link_desc_len = 16;
+ } else {
+ cq_host->trans_desc_len = 8;
+ cq_host->link_desc_len = 8;
+ }
+
+ /* total size of a slot: 1 task & 1 transfer (link) */
+ cq_host->slot_sz = cq_host->task_desc_len + cq_host->link_desc_len;
+
+ desc_size = cq_host->slot_sz * cq_host->num_slots;
+
+ data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
+ (cq_host->num_slots - 1);
+
+ pr_info("%s: desc_size: %d data_sz: %d slot-sz: %d\n", __func__,
+ (int)desc_size, (int)data_size, cq_host->slot_sz);
+
+ /*
+	 * allocate a dma-mapped chunk of memory for the task/link
+	 * descriptors, another one for the transfer descriptors, and
+	 * point each slot's link descriptor at its per-slot transfer
+	 * descriptor table.
+ */
+ cq_host->desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ desc_size,
+ &cq_host->desc_dma_base,
+ GFP_KERNEL);
+ cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
+ data_size,
+ &cq_host->trans_desc_dma_base,
+ GFP_KERNEL);
+ cq_host->thist = devm_kzalloc(mmc_dev(cq_host->mmc),
+ (sizeof(*cq_host->thist) *
+ cq_host->num_slots),
+ GFP_KERNEL);
+ if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ return -ENOMEM;
+
+ pr_info("desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
+ cq_host->desc_base, cq_host->trans_desc_base,
+ (unsigned long long)cq_host->desc_dma_base,
+ (unsigned long long) cq_host->trans_desc_dma_base);
+
+ for (; i < (cq_host->num_slots); i++)
+ setup_trans_desc(cq_host, i);
+
+ return 0;
+}
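+/*
+ * Illustrative sizing for the allocation above (the actual numbers depend
+ * on NUM_SLOTS and max_segs, which are host specific): with 32 slots,
+ * 128-bit task descriptors and 128-bit link descriptors, slot_sz is
+ * 16 + 16 = 32 bytes and desc_size is 32 * 32 = 1024 bytes; with
+ * max_segs = 128 and 16-byte transfer descriptors, data_size is
+ * 16 * 128 * 31 = 63488 bytes (the DCMD slot carries no data).
+ */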
+
+static int cmdq_enable(struct mmc_host *mmc)
+{
+ int err = 0;
+ u32 cqcfg;
+ u32 cqcap = 0;
+ bool dcmd_enable;
+ struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
+
+ if (!cq_host || !mmc->card || !mmc_card_cmdq(mmc->card)) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (cq_host->enabled)
+ goto out;
+
+ cmdq_runtime_pm_get(cq_host);
+ cqcfg = cmdq_readl(cq_host, CQCFG);
+ if (cqcfg & 0x1) {
+ pr_info("%s: %s: cq_host is already enabled\n",
+ mmc_hostname(mmc), __func__);
+ WARN_ON(1);
+ goto pm_ref_count;
+ }
+
+ if (cq_host->quirks & CMDQ_QUIRK_NO_DCMD)
+ dcmd_enable = false;
+ else
+ dcmd_enable = true;
+
+ cqcfg = ((cq_host->caps & CMDQ_TASK_DESC_SZ_128 ? CQ_TASK_DESC_SZ : 0) |
+ (dcmd_enable ? CQ_DCMD : 0));
+
+ cqcap = cmdq_readl(cq_host, CQCAP);
+ if (cqcap & CQCAP_CS) {
+ /*
+		 * If the host controller supports cryptographic operations,
+		 * it uses a 128-bit task descriptor; the upper 64 bits are
+		 * used to pass crypto-specific information.
+ */
+ cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
+ CMDQ_TASK_DESC_SZ_128;
+ cqcfg |= CQ_ICE_ENABLE;
+ /*
+		 * For SDHC v5.0 onwards, ICE 3.0 specific registers are added
+		 * in the CQ register space, due to which a few CQ registers
+		 * are shifted. Set the offset_changed flag to use the updated
+		 * addresses.
+ */
+ cq_host->offset_changed = true;
+ }
+
+ cmdq_writel(cq_host, cqcfg, CQCFG);
+ /* enable CQ_HOST */
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE,
+ CQCFG);
+
+ if (!cq_host->desc_base ||
+ !cq_host->trans_desc_base) {
+ err = cmdq_host_alloc_tdl(cq_host);
+ if (err)
+ goto pm_ref_count;
+ }
+
+ cmdq_writel(cq_host, lower_32_bits(cq_host->desc_dma_base), CQTDLBA);
+ cmdq_writel(cq_host, upper_32_bits(cq_host->desc_dma_base), CQTDLBAU);
+
+ /*
+ * disable all vendor interrupts
+ * enable CMDQ interrupts
+ * enable the vendor error interrupts
+ */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+
+ cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);
+
+ /* cq_host would use this rca to address the card */
+ cmdq_writel(cq_host, mmc->card->rca, CQSSC2);
+
+	/* send QSR at shorter intervals than the default */
+ cmdq_writel(cq_host, SEND_QSR_INTERVAL, CQSSC1);
+
+ /* enable bkops exception indication */
+ if (mmc_card_configured_manual_bkops(mmc->card) &&
+ !mmc_card_configured_auto_bkops(mmc->card))
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQRMEM) | CQ_EXCEPTION,
+ CQRMEM);
+
+ /* ensure the writes are done before enabling CQE */
+ mb();
+
+ cq_host->enabled = true;
+ mmc_host_clr_cq_disable(mmc);
+
+ if (cq_host->ops->set_transfer_params)
+ cq_host->ops->set_transfer_params(mmc);
+
+ if (cq_host->ops->set_block_size)
+ cq_host->ops->set_block_size(cq_host->mmc);
+
+ if (cq_host->ops->set_data_timeout)
+ cq_host->ops->set_data_timeout(mmc, 0xf);
+
+ if (cq_host->ops->clear_set_dumpregs)
+ cq_host->ops->clear_set_dumpregs(mmc, 1);
+
+ if (cq_host->ops->enhanced_strobe_mask)
+ cq_host->ops->enhanced_strobe_mask(mmc, true);
+
+pm_ref_count:
+ cmdq_runtime_pm_put(cq_host);
+out:
+ MMC_TRACE(mmc, "%s: CQ enabled err: %d\n", __func__, err);
+ return err;
+}
+
+static void cmdq_disable_nosync(struct mmc_host *mmc, bool soft)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+
+ if (soft) {
+ cmdq_writel(cq_host, cmdq_readl(
+ cq_host, CQCFG) & ~(CQ_ENABLE),
+ CQCFG);
+ }
+ if (cq_host->ops->enhanced_strobe_mask)
+ cq_host->ops->enhanced_strobe_mask(mmc, false);
+
+ cq_host->enabled = false;
+ mmc_host_set_cq_disable(mmc);
+ MMC_TRACE(mmc, "%s: CQ disabled\n", __func__);
+}
+
+static void cmdq_disable(struct mmc_host *mmc, bool soft)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+
+ cmdq_runtime_pm_get(cq_host);
+ cmdq_disable_nosync(mmc, soft);
+ cmdq_runtime_pm_put(cq_host);
+}
+
+static void cmdq_reset(struct mmc_host *mmc, bool soft)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ unsigned int cqcfg;
+ unsigned int tdlba;
+ unsigned int tdlbau;
+ unsigned int rca;
+ int ret;
+
+ cmdq_runtime_pm_get(cq_host);
+ cqcfg = cmdq_readl(cq_host, CQCFG);
+ tdlba = cmdq_readl(cq_host, CQTDLBA);
+ tdlbau = cmdq_readl(cq_host, CQTDLBAU);
+ rca = cmdq_readl(cq_host, CQSSC2);
+
+ cmdq_disable(mmc, true);
+
+ if (cq_host->ops->reset) {
+ ret = cq_host->ops->reset(mmc);
+ if (ret) {
+ pr_crit("%s: reset CMDQ controller: failed\n",
+ mmc_hostname(mmc));
+ BUG();
+ }
+ }
+
+ cmdq_writel(cq_host, tdlba, CQTDLBA);
+ cmdq_writel(cq_host, tdlbau, CQTDLBAU);
+
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+
+ cmdq_clear_set_irqs(cq_host, 0x0, CQ_INT_ALL);
+
+ /* cq_host would use this rca to address the card */
+ cmdq_writel(cq_host, rca, CQSSC2);
+
+ /* ensure the writes are done before enabling CQE */
+ mb();
+
+ cmdq_writel(cq_host, cqcfg, CQCFG);
+ cmdq_runtime_pm_put(cq_host);
+ cq_host->enabled = true;
+ mmc_host_clr_cq_disable(mmc);
+}
+
+static void cmdq_prep_task_desc(struct mmc_request *mrq,
+ u64 *data, bool intr, bool qbr)
+{
+ struct mmc_cmdq_req *cmdq_req = mrq->cmdq_req;
+ u32 req_flags = cmdq_req->cmdq_req_flags;
+
+ pr_debug("%s: %s: data-tag: 0x%08x - dir: %d - prio: %d - cnt: 0x%08x - addr: 0x%llx\n",
+ mmc_hostname(mrq->host), __func__,
+ !!(req_flags & DAT_TAG), !!(req_flags & DIR),
+ !!(req_flags & PRIO), cmdq_req->data.blocks,
+ (u64)mrq->cmdq_req->blk_addr);
+
+ *data = VALID(1) |
+ END(1) |
+ INT(intr) |
+ ACT(0x5) |
+ FORCED_PROG(!!(req_flags & FORCED_PRG)) |
+ CONTEXT(mrq->cmdq_req->ctx_id) |
+ DATA_TAG(!!(req_flags & DAT_TAG)) |
+ DATA_DIR(!!(req_flags & DIR)) |
+ PRIORITY(!!(req_flags & PRIO)) |
+ QBAR(qbr) |
+ REL_WRITE(!!(req_flags & REL_WR)) |
+ BLK_COUNT(mrq->cmdq_req->data.blocks) |
+ BLK_ADDR((u64)mrq->cmdq_req->blk_addr);
+
+ MMC_TRACE(mrq->host,
+ "%s: Task: 0x%08x | Args: 0x%08x | cnt: 0x%08x\n", __func__,
+ lower_32_bits(*data),
+ upper_32_bits(*data),
+ mrq->cmdq_req->data.blocks);
+}
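+/*
+ * Worked encoding example for the task descriptor built above, using the
+ * field macros from cmdq_hci.h (the request values are hypothetical): an
+ * 8-block read at block address 0x1000 with intr = 1, DATA_DIR = 1 (read,
+ * per the eMMC task descriptor encoding) and all other flags clear gives
+ * lower 32 bits = VALID(1) | END(1) | INT(1) | ACT(0x5) | DATA_DIR(1) |
+ * BLK_COUNT(8) = 0x0008102F, and upper 32 bits = 0x00001000 (from
+ * BLK_ADDR(0x1000)).
+ */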
+
+static int cmdq_dma_map(struct mmc_host *host, struct mmc_request *mrq)
+{
+ int sg_count;
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return -EINVAL;
+
+ sg_count = dma_map_sg(mmc_dev(host), data->sg,
+ data->sg_len,
+ (data->flags & MMC_DATA_WRITE) ?
+ DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!sg_count) {
+ pr_err("%s: sg-len: %d\n", __func__, data->sg_len);
+ return -ENOMEM;
+ }
+
+ return sg_count;
+}
+
+static void cmdq_set_tran_desc(u8 *desc, dma_addr_t addr, int len,
+ bool end, bool is_dma64)
+{
+ __le32 *attr = (__le32 __force *)desc;
+
+ *attr = (VALID(1) |
+ END(end ? 1 : 0) |
+ INT(0) |
+ ACT(0x4) |
+ DAT_LENGTH(len));
+
+ if (is_dma64) {
+ __le64 *dataddr = (__le64 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le64(addr);
+ } else {
+ __le32 *dataddr = (__le32 __force *)(desc + 4);
+
+ dataddr[0] = cpu_to_le32(addr);
+ }
+}
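+/*
+ * Worked encoding example for the attribute word above (hypothetical
+ * values): a final 4 KB segment (len = 4096, end = true) encodes as
+ * VALID(1) | END(1) | INT(0) | ACT(0x4) | DAT_LENGTH(4096)
+ * = 0x1 | 0x2 | 0x0 | 0x20 | 0x10000000 = 0x10000023, followed by the
+ * 32- or 64-bit DMA address of the segment.
+ */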
+
+static int cmdq_prep_tran_desc(struct mmc_request *mrq,
+ struct cmdq_host *cq_host, int tag)
+{
+ struct mmc_data *data = mrq->data;
+ int i, sg_count, len;
+ bool end = false;
+ dma_addr_t addr;
+ u8 *desc;
+ struct scatterlist *sg;
+
+ sg_count = cmdq_dma_map(mrq->host, mrq);
+ if (sg_count < 0) {
+ pr_err("%s: %s: unable to map sg lists, %d\n",
+ mmc_hostname(mrq->host), __func__, sg_count);
+ return sg_count;
+ }
+
+ desc = get_trans_desc(cq_host, tag);
+ memset(desc, 0, cq_host->trans_desc_len * cq_host->mmc->max_segs);
+
+ for_each_sg(data->sg, sg, sg_count, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+
+ if ((i+1) == sg_count)
+ end = true;
+ cmdq_set_tran_desc(desc, addr, len, end, cq_host->dma64);
+ desc += cq_host->trans_desc_len;
+ }
+
+ pr_debug("%s: req: 0x%p tag: %d calc_trans_des: 0x%p sg-cnt: %d\n",
+ __func__, mrq->req, tag, desc, sg_count);
+
+ return 0;
+}
+
+static void cmdq_log_task_desc_history(struct cmdq_host *cq_host, u64 task,
+ bool is_dcmd)
+{
+ if (likely(!cq_host->mmc->cmdq_thist_enabled))
+ return;
+
+ if (!cq_host->thist) {
+ pr_err("%s: %s: CMDQ task history buffer not allocated\n",
+ mmc_hostname(cq_host->mmc), __func__);
+ return;
+ }
+
+ if (cq_host->thist_idx >= cq_host->num_slots)
+ cq_host->thist_idx = 0;
+
+ cq_host->thist[cq_host->thist_idx].is_dcmd = is_dcmd;
+ memcpy(&cq_host->thist[cq_host->thist_idx++].task,
+ &task, cq_host->task_desc_len);
+}
+
+static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ u64 *task_desc = NULL;
+ u64 data = 0;
+ u8 resp_type;
+ u8 *desc;
+ __le64 *dataddr;
+ struct cmdq_host *cq_host = mmc_cmdq_private(mmc);
+ u8 timing;
+
+ if (!(mrq->cmd->flags & MMC_RSP_PRESENT)) {
+ resp_type = 0x0;
+ timing = 0x1;
+ } else {
+ if (mrq->cmd->flags & MMC_RSP_BUSY) {
+ resp_type = 0x3;
+ timing = 0x0;
+ } else {
+ resp_type = 0x2;
+ timing = 0x1;
+ }
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, cq_host->dcmd_slot);
+ memset(task_desc, 0, cq_host->task_desc_len);
+ data |= (VALID(1) |
+ END(1) |
+ INT(1) |
+ QBAR(1) |
+ ACT(0x5) |
+ CMD_INDEX(mrq->cmd->opcode) |
+ CMD_TIMING(timing) | RESP_TYPE(resp_type));
+ *task_desc |= data;
+ desc = (u8 *)task_desc;
+ pr_debug("cmdq: dcmd: cmd: %d timing: %d resp: %d\n",
+ mrq->cmd->opcode, timing, resp_type);
+ dataddr = (__le64 __force *)(desc + 4);
+ dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
+ cmdq_log_task_desc_history(cq_host, *task_desc, true);
+ MMC_TRACE(mrq->host,
+ "%s: DCMD: Task: 0x%08x | Args: 0x%08x\n",
+ __func__,
+ lower_32_bits(*task_desc),
+ upper_32_bits(*task_desc));
+}
+
+static inline
+void cmdq_prep_crypto_desc(struct cmdq_host *cq_host, u64 *task_desc,
+ u64 ice_ctx)
+{
+ u64 *ice_desc = NULL;
+
+ if (cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) {
+ /*
+		 * Get the address of the ice context for the given task
+		 * descriptor. The ice context occupies the upper 64 bits of
+		 * the task descriptor:
+		 * ice_context_base_address = task_desc + 8 bytes
+ */
+ ice_desc = (__le64 __force *)((u8 *)task_desc +
+ CQ_TASK_DESC_TASK_PARAMS_SIZE);
+ memset(ice_desc, 0, CQ_TASK_DESC_ICE_PARAMS_SIZE);
+
+ /*
+		 * Fill the upper 64 bits of the task descriptor with the
+		 * ice context
+ */
+ if (ice_ctx)
+ *ice_desc = cpu_to_le64(ice_ctx);
+ }
+}
+
+static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ sdhci_msm_pm_qos_cpu_vote(host,
+ msm_host->pdata->pm_qos_data.cmdq_latency, mrq->req->cpu);
+}
+
+static void cmdq_pm_qos_unvote(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ /* use async as we're inside an atomic context (soft-irq) */
+ sdhci_msm_pm_qos_cpu_unvote(host, mrq->req->cpu, true);
+}
+
+static int cmdq_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+ int err = 0;
+ u64 data = 0;
+ u64 *task_desc = NULL;
+ u32 tag = mrq->cmdq_req->tag;
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+ u64 ice_ctx = 0;
+
+ if (!cq_host->enabled) {
+ pr_err("%s: CMDQ host not enabled yet !!!\n",
+ mmc_hostname(mmc));
+ err = -EINVAL;
+ goto out;
+ }
+
+ cmdq_runtime_pm_get(cq_host);
+
+ if (mrq->cmdq_req->cmdq_req_flags & DCMD) {
+ cmdq_prep_dcmd_desc(mmc, mrq);
+ cq_host->mrq_slot[DCMD_SLOT] = mrq;
+ /* DCMD's are always issued on a fixed slot */
+ tag = DCMD_SLOT;
+ goto ring_doorbell;
+ }
+
+ if (cq_host->ops->crypto_cfg) {
+ err = cq_host->ops->crypto_cfg(mmc, mrq, tag, &ice_ctx);
+ if (err) {
+ pr_err("%s: failed to configure crypto: err %d tag %d\n",
+ mmc_hostname(mmc), err, tag);
+ goto out;
+ }
+ }
+
+ task_desc = (__le64 __force *)get_desc(cq_host, tag);
+
+ cmdq_prep_task_desc(mrq, &data, 1,
+ (mrq->cmdq_req->cmdq_req_flags & QBR));
+ *task_desc = cpu_to_le64(data);
+
+ cmdq_prep_crypto_desc(cq_host, task_desc, ice_ctx);
+
+ cmdq_log_task_desc_history(cq_host, *task_desc, false);
+
+ err = cmdq_prep_tran_desc(mrq, cq_host, tag);
+ if (err) {
+ pr_err("%s: %s: failed to setup tx desc: %d\n",
+ mmc_hostname(mmc), __func__, err);
+ goto out;
+ }
+
+ cq_host->mrq_slot[tag] = mrq;
+
+ /* PM QoS */
+ sdhci_msm_pm_qos_irq_vote(host);
+ cmdq_pm_qos_vote(host, mrq);
+ring_doorbell:
+ /* Ensure the task descriptor list is flushed before ringing doorbell */
+ wmb();
+ if (cmdq_readl(cq_host, CQTDBR) & (1 << tag)) {
+ cmdq_dumpregs(cq_host);
+ BUG_ON(1);
+ }
+ MMC_TRACE(mmc, "%s: tag: %d\n", __func__, tag);
+ cmdq_writel(cq_host, 1 << tag, CQTDBR);
+ /* Commit the doorbell write immediately */
+ wmb();
+
+out:
+ return err;
+}
+
+static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
+{
+ struct mmc_request *mrq;
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ int offset = 0;
+ int err = 0;
+
+ if (cq_host->offset_changed)
+ offset = CQ_V5_VENDOR_CFG;
+ mrq = get_req_by_tag(cq_host, tag);
+ if (tag == cq_host->dcmd_slot)
+ mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);
+
+ if (mrq->cmdq_req->cmdq_req_flags & DCMD)
+ cmdq_writel(cq_host,
+ cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) |
+ CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG + offset);
+
+ cmdq_runtime_pm_put(cq_host);
+
+ if (cq_host->ops->crypto_cfg_end) {
+ err = cq_host->ops->crypto_cfg_end(mmc, mrq);
+ if (err) {
+ pr_err("%s: failed to end ice config: err %d tag %d\n",
+ mmc_hostname(mmc), err, tag);
+ }
+ }
+ if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
+ cq_host->ops->crypto_cfg_reset)
+ cq_host->ops->crypto_cfg_reset(mmc, tag);
+ mrq->done(mrq);
+}
+
+irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
+{
+ u32 status;
+ unsigned long tag = 0, comp_status;
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ unsigned long err_info = 0;
+ struct mmc_request *mrq;
+ int ret;
+ u32 dbr_set = 0;
+ u32 dev_pend_set = 0;
+ int stat_err = 0;
+
+ status = cmdq_readl(cq_host, CQIS);
+
+ if (!status && !err)
+ return IRQ_NONE;
+ MMC_TRACE(mmc, "%s: CQIS: 0x%x err: %d\n",
+ __func__, status, err);
+
+ stat_err = status & (CQIS_RED | CQIS_GCE | CQIS_ICCE);
+
+ if (err || stat_err) {
+ err_info = cmdq_readl(cq_host, CQTERRI);
+ pr_err("%s: err: %d status: 0x%08x task-err-info (0x%08lx)\n",
+ mmc_hostname(mmc), err, status, err_info);
+
+ /*
+		 * CQE needs to be halted in interrupt context itself in case
+		 * of error, otherwise CQE may proceed to send CMDs to the
+		 * device even though CQE/card is in an error state. CMDQ
+		 * error handling will make sure it is unhalted after all the
+		 * errors have been handled.
+ */
+ ret = cmdq_halt_poll(mmc, true);
+ if (ret)
+ pr_err("%s: %s: halt failed ret=%d\n",
+ mmc_hostname(mmc), __func__, ret);
+
+ /*
+		 * Clear the CQIS after halting in case of error. If CQIS were
+		 * cleared before halting, the CQ would continue issuing
+		 * commands for the remaining requests whose doorbell is rung,
+		 * which would overwrite the Resp Arg register. So the CQ must
+		 * be halted first and CQIS cleared afterwards in case of
+		 * error.
+ */
+ cmdq_writel(cq_host, status, CQIS);
+
+ cmdq_dumpregs(cq_host);
+
+ if (!err_info) {
+ /*
+			 * For a few errors (like ADMA errors) the HW sometimes
+			 * cannot provide CQTERRI info. The HW workaround for
+			 * recovering from such a scenario is to halt/disable
+			 * CQE and do reset_all. Since there is no way to know
+			 * which tag caused the error, pick the first bit set
+			 * in the doorbell and proceed with an error.
+ */
+ dbr_set = cmdq_readl(cq_host, CQTDBR);
+ if (!dbr_set) {
+ pr_err("%s: spurious/force error interrupt\n",
+ mmc_hostname(mmc));
+ cmdq_halt_poll(mmc, false);
+ mmc_host_clr_halt(mmc);
+ return IRQ_HANDLED;
+ }
+
+ tag = ffs(dbr_set) - 1;
+ pr_err("%s: error tag selected: tag = %lu\n",
+ mmc_hostname(mmc), tag);
+ mrq = get_req_by_tag(cq_host, tag);
+ if (mrq->data)
+ mrq->data->error = err;
+ else
+ mrq->cmd->error = err;
+ /*
+ * Get ADMA descriptor memory in case of ADMA
+ * error for debug.
+ */
+ if (err == -EIO)
+ cmdq_dump_adma_mem(cq_host);
+ goto skip_cqterri;
+ }
+
+ if (err_info & CQ_RMEFV) {
+ tag = GET_CMD_ERR_TAG(err_info);
+ pr_err("%s: CMD err tag: %lu\n", __func__, tag);
+
+ mrq = get_req_by_tag(cq_host, tag);
+ /* CMD44/45/46/47 will not have a valid cmd */
+ if (mrq->cmd)
+ mrq->cmd->error = err;
+ else
+ mrq->data->error = err;
+ } else if (err_info & CQ_DTEFV) {
+ tag = GET_DAT_ERR_TAG(err_info);
+ pr_err("%s: Dat err tag: %lu\n", __func__, tag);
+ mrq = get_req_by_tag(cq_host, tag);
+ mrq->data->error = err;
+ }
+
+skip_cqterri:
+ /*
+		 * If the CQE halt fails, disable CQE so that it does
+		 * not process any further requests
+ */
+ if (ret) {
+ cmdq_disable_nosync(mmc, true);
+ /*
+ * Enable legacy interrupts as CQE halt has failed.
+ * This is needed to send legacy commands like status
+ * cmd as part of error handling work.
+ */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, false);
+ }
+
+ /*
+		 * CQE detected a response error from the device.
+		 * In most cases, this requires a reset.
+ */
+ if (stat_err & CQIS_RED) {
+ /*
+ * will check if the RED error is due to a bkops
+ * exception once the queue is empty
+ */
+ BUG_ON(!mmc->card);
+ if (mmc_card_configured_manual_bkops(mmc->card) ||
+ mmc_card_configured_auto_bkops(mmc->card))
+ mmc->card->bkops.needs_check = true;
+
+ mrq->cmdq_req->resp_err = true;
+ pr_err("%s: Response error (0x%08x) from card !!!",
+ mmc_hostname(mmc), cmdq_readl(cq_host, CQCRA));
+
+ } else {
+ mrq->cmdq_req->resp_idx = cmdq_readl(cq_host, CQCRI);
+ mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
+ }
+
+ /*
+ * Generic Crypto error detected by CQE.
+		 * It is fatal and requires a cmdq reset.
+ */
+ if (stat_err & CQIS_GCE) {
+ if (mrq->data)
+ mrq->data->error = -EIO;
+ pr_err("%s: Crypto generic error while processing task %lu!",
+ mmc_hostname(mmc), tag);
+ MMC_TRACE(mmc, "%s: GCE error detected with tag %lu\n",
+ __func__, tag);
+ }
+ /*
+ * Invalid crypto config error detected by CQE, clear the task.
+ * Task can be cleared only when CQE is halt state.
+ */
+ if (stat_err & CQIS_ICCE) {
+ /*
+			 * An Invalid Crypto Config Error is detected at the
+			 * beginning of the transfer, before actual execution
+			 * has started, so just clear the task in CQE; there is
+			 * no need to clear it in the device. Only the task
+			 * which caused the ICCE has to be cleared; other tasks
+			 * can continue processing. The first task about to be
+			 * prepared is the one that causes the ICCE error.
+ */
+ dbr_set = cmdq_readl(cq_host, CQTDBR);
+ dev_pend_set = cmdq_readl(cq_host, CQDPT);
+ if (dbr_set ^ dev_pend_set)
+ tag = ffs(dbr_set ^ dev_pend_set) - 1;
+ mrq = get_req_by_tag(cq_host, tag);
+ pr_err("%s: Crypto config error while processing task %lu!",
+ mmc_hostname(mmc), tag);
+ MMC_TRACE(mmc, "%s: ICCE error with tag %lu\n",
+ __func__, tag);
+ if (mrq->data)
+ mrq->data->error = -EIO;
+ else if (mrq->cmd)
+ mrq->cmd->error = -EIO;
+ /*
+			 * If CQE is halted and the tag is valid, clear the
+			 * task, un-halt CQE and set a flag to skip error
+			 * recovery. If any of these conditions is not met,
+			 * the default error recovery path is taken.
+ */
+ if (!ret && (dbr_set ^ dev_pend_set)) {
+ ret = cmdq_clear_task_poll(cq_host, tag);
+ if (ret) {
+ pr_err("%s: %s: task[%lu] clear failed ret=%d\n",
+ mmc_hostname(mmc),
+ __func__, tag, ret);
+ } else if (!cmdq_halt_poll(mmc, false)) {
+ mrq->cmdq_req->skip_err_handling = true;
+ }
+ }
+ }
+ cmdq_finish_data(mmc, tag);
+ } else {
+ cmdq_writel(cq_host, status, CQIS);
+ }
+
+ if (status & CQIS_TCC) {
+ /* read CQTCN and complete the request */
+ comp_status = cmdq_readl(cq_host, CQTCN);
+ if (!comp_status)
+ goto out;
+ /*
+ * The CQTCN must be cleared before notifying req completion
+ * to upper layers to avoid missing completion notification
+ * of new requests with the same tag.
+ */
+ cmdq_writel(cq_host, comp_status, CQTCN);
+ /*
+		 * A write memory barrier would normally be needed to
+		 * guarantee that CQTCN is cleared before the next doorbell
+		 * for the same tag is set, but the barrier placed before
+		 * setting the doorbell already achieves this, so none is
+		 * needed here.
+ */
+ for_each_set_bit(tag, &comp_status, cq_host->num_slots) {
+ mrq = get_req_by_tag(cq_host, tag);
+ if (!((mrq->cmd && mrq->cmd->error) ||
+ mrq->cmdq_req->resp_err ||
+ (mrq->data && mrq->data->error))) {
+ /* complete the corresponding mrq */
+ pr_debug("%s: completing tag -> %lu\n",
+ mmc_hostname(mmc), tag);
+ MMC_TRACE(mmc, "%s: completing tag -> %lu\n",
+ __func__, tag);
+ cmdq_finish_data(mmc, tag);
+ }
+ }
+ }
+
+ if (status & CQIS_HAC) {
+ if (cq_host->ops->post_cqe_halt)
+ cq_host->ops->post_cqe_halt(mmc);
+ /* halt done: re-enable legacy interrupts */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, false);
+ /* halt is completed, wakeup waiting thread */
+ complete(&cq_host->halt_comp);
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(cmdq_irq);
+
+/**
+ * cmdq_halt_poll - halt or un-halt the CQE using polling
+ * @mmc: struct mmc_host
+ * @halt: true to halt, false to un-halt
+ *
+ * Used mainly from interrupt context to halt/unhalt the CQE engine.
+ */
+static int cmdq_halt_poll(struct mmc_host *mmc, bool halt)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ int retries = 100;
+
+ if (!halt) {
+ if (cq_host->ops->set_data_timeout)
+ cq_host->ops->set_data_timeout(mmc, 0xf);
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
+ CQCTL);
+ mmc_host_clr_halt(mmc);
+ return 0;
+ }
+
+ cmdq_set_halt_irq(cq_host, false);
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT, CQCTL);
+ while (retries) {
+ if (!(cmdq_readl(cq_host, CQCTL) & HALT)) {
+ udelay(5);
+ retries--;
+ continue;
+ } else {
+ if (cq_host->ops->post_cqe_halt)
+ cq_host->ops->post_cqe_halt(mmc);
+ /* halt done: re-enable legacy interrupts */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc,
+ false);
+ mmc_host_set_halt(mmc);
+ break;
+ }
+ }
+ cmdq_set_halt_irq(cq_host, true);
+ return retries ? 0 : -ETIMEDOUT;
+}
+
+/* May sleep */
+static int cmdq_halt(struct mmc_host *mmc, bool halt)
+{
+ struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ u32 ret = 0;
+ u32 config = 0;
+ int retries = 3;
+
+ cmdq_runtime_pm_get(cq_host);
+ if (halt) {
+ while (retries) {
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) | HALT,
+ CQCTL);
+ ret = wait_for_completion_timeout(&cq_host->halt_comp,
+ msecs_to_jiffies(HALT_TIMEOUT_MS));
+ if (!ret) {
+ pr_warn("%s: %s: HAC int timeout\n",
+ mmc_hostname(mmc), __func__);
+ if ((cmdq_readl(cq_host, CQCTL) & HALT)) {
+ /*
+				 * Don't retry if CQE is halted but the irq
+				 * did not trigger within the timeout period.
+				 * Since we are returning an error, un-halt
+				 * CQE. As the irq was never fired, there is
+				 * no need to set other params.
+ */
+ retries = 0;
+ config = cmdq_readl(cq_host, CQCTL);
+ config &= ~HALT;
+ cmdq_writel(cq_host, config, CQCTL);
+ } else {
+ pr_warn("%s: %s: retryng halt (%d)\n",
+ mmc_hostname(mmc), __func__,
+ retries);
+ retries--;
+ continue;
+ }
+ } else {
+ MMC_TRACE(mmc, "%s: halt done , retries: %d\n",
+ __func__, retries);
+ break;
+ }
+ }
+ ret = retries ? 0 : -ETIMEDOUT;
+ } else {
+ if (cq_host->ops->set_transfer_params)
+ cq_host->ops->set_transfer_params(mmc);
+ if (cq_host->ops->set_block_size)
+ cq_host->ops->set_block_size(mmc);
+ if (cq_host->ops->set_data_timeout)
+ cq_host->ops->set_data_timeout(mmc, 0xf);
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, true);
+ MMC_TRACE(mmc, "%s: unhalt done\n", __func__);
+ cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
+ CQCTL);
+ }
+ cmdq_runtime_pm_put(cq_host);
+ return ret;
+}
+
+static void cmdq_post_req(struct mmc_host *mmc, int tag, int err)
+{
+ struct cmdq_host *cq_host;
+ struct mmc_request *mrq;
+ struct mmc_data *data;
+ struct sdhci_host *sdhci_host = mmc_priv(mmc);
+
+ if (WARN_ON(!mmc))
+ return;
+
+ cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ mrq = get_req_by_tag(cq_host, tag);
+ data = mrq->data;
+
+ if (data) {
+ data->error = err;
+ dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
+ (data->flags & MMC_DATA_READ) ?
+ DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ if (err)
+ data->bytes_xfered = 0;
+ else
+ data->bytes_xfered = blk_rq_bytes(mrq->req);
+
+ /* we're in atomic context (soft-irq) so unvote async. */
+ sdhci_msm_pm_qos_irq_unvote(sdhci_host, true);
+ cmdq_pm_qos_unvote(sdhci_host, mrq);
+ }
+}
+
+static void cmdq_dumpstate(struct mmc_host *mmc)
+{
+	struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+
+	cmdq_runtime_pm_get(cq_host);
+ cmdq_dumpregs(cq_host);
+ cmdq_runtime_pm_put(cq_host);
+}
+
+static int cmdq_late_init(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ /*
+ * TODO: This should basically move to something like "sdhci-cmdq-msm"
+ * for msm specific implementation.
+ */
+ sdhci_msm_pm_qos_irq_init(host);
+
+ if (msm_host->pdata->pm_qos_data.cmdq_valid)
+ sdhci_msm_pm_qos_cpu_init(host,
+ msm_host->pdata->pm_qos_data.cmdq_latency);
+ return 0;
+}
+
+static const struct mmc_cmdq_host_ops cmdq_host_ops = {
+ .init = cmdq_late_init,
+ .enable = cmdq_enable,
+ .disable = cmdq_disable,
+ .request = cmdq_request,
+ .post_req = cmdq_post_req,
+ .halt = cmdq_halt,
+ .reset = cmdq_reset,
+ .dumpstate = cmdq_dumpstate,
+};
+
+struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev)
+{
+ struct cmdq_host *cq_host;
+ struct resource *cmdq_memres = NULL;
+
+ /* check and setup CMDQ interface */
+ cmdq_memres = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cmdq_mem");
+ if (!cmdq_memres) {
+ dev_dbg(&pdev->dev, "CMDQ not supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cq_host = kzalloc(sizeof(*cq_host), GFP_KERNEL);
+ if (!cq_host) {
+ dev_err(&pdev->dev, "failed to allocate memory for CMDQ\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ cq_host->mmio = devm_ioremap(&pdev->dev,
+ cmdq_memres->start,
+ resource_size(cmdq_memres));
+ if (!cq_host->mmio) {
+ dev_err(&pdev->dev, "failed to remap cmdq regs\n");
+ kfree(cq_host);
+ return ERR_PTR(-EBUSY);
+ }
+ dev_dbg(&pdev->dev, "CMDQ ioremap: done\n");
+
+ return cq_host;
+}
+EXPORT_SYMBOL(cmdq_pltfm_init);
+
+int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
+ bool dma64)
+{
+ int err = 0;
+
+ cq_host->dma64 = dma64;
+ cq_host->mmc = mmc;
+ cq_host->mmc->cmdq_private = cq_host;
+
+ cq_host->num_slots = NUM_SLOTS;
+ cq_host->dcmd_slot = DCMD_SLOT;
+
+ mmc->cmdq_ops = &cmdq_host_ops;
+ mmc->num_cq_slots = NUM_SLOTS;
+ mmc->dcmd_cq_slot = DCMD_SLOT;
+
+ cq_host->mrq_slot = kzalloc(sizeof(cq_host->mrq_slot) *
+ cq_host->num_slots, GFP_KERNEL);
+ if (!cq_host->mrq_slot)
+ return -ENOMEM;
+
+ init_completion(&cq_host->halt_comp);
+ return err;
+}
+EXPORT_SYMBOL(cmdq_init);
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
new file mode 100644
index 000000000000..ee5e6549fa4a
--- /dev/null
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -0,0 +1,251 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef LINUX_MMC_CQ_HCI_H
+#define LINUX_MMC_CQ_HCI_H
+#include <linux/mmc/core.h>
+
+/* registers */
+/* version */
+#define CQVER 0x00
+/* capabilities */
+#define CQCAP 0x04
+#define CQCAP_CS (1 << 28)
+/* configuration */
+#define CQCFG 0x08
+#define CQ_DCMD 0x00001000
+#define CQ_TASK_DESC_SZ 0x00000100
+#define CQ_ENABLE 0x00000001
+#define CQ_ICE_ENABLE 0x00000002
+
+/* control */
+#define CQCTL 0x0C
+#define CLEAR_ALL_TASKS 0x00000100
+#define HALT 0x00000001
+
+/* interrupt status */
+#define CQIS 0x10
+#define CQIS_HAC (1 << 0)
+#define CQIS_TCC (1 << 1)
+#define CQIS_RED (1 << 2)
+#define CQIS_TCL (1 << 3)
+#define CQIS_GCE (1 << 4)
+#define CQIS_ICCE (1 << 5)
+
+/* interrupt status enable */
+#define CQISTE 0x14
+
+/* interrupt signal enable */
+#define CQISGE 0x18
+
+/* interrupt coalescing */
+#define CQIC 0x1C
+#define CQIC_ENABLE (1 << 31)
+#define CQIC_RESET (1 << 16)
+#define CQIC_ICCTHWEN (1 << 15)
+#define CQIC_ICCTH(x) ((x & 0x1F) << 8)
+#define CQIC_ICTOVALWEN (1 << 7)
+#define CQIC_ICTOVAL(x) (x & 0x7F)
+
+/* task list base address */
+#define CQTDLBA 0x20
+
+/* task list base address upper */
+#define CQTDLBAU 0x24
+
+/* door-bell */
+#define CQTDBR 0x28
+
+/* task completion notification */
+#define CQTCN 0x2C
+
+/* device queue status */
+#define CQDQS 0x30
+
+/* device pending tasks */
+#define CQDPT 0x34
+
+/* task clear */
+#define CQTCLR 0x38
+
+/* send status config 1 */
+#define CQSSC1 0x40
+/*
+ * Value n means CQE would send CMD13 during the transfer of data block
+ * BLOCK_CNT-n
+ */
+#define SEND_QSR_INTERVAL 0x70001
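+/*
+ * Decoding the value above, assuming the standard CQHCI CQSSC1 layout
+ * (idle timer CIT in bits 15:0, block counter CBC in bits 19:16):
+ * 0x70001 sets CBC = 7 and CIT = 1, i.e. CMD13 is sent while 7 blocks
+ * of the transfer remain, with a short idle timer.
+ */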
+
+/* send status config 2 */
+#define CQSSC2 0x44
+
+/* response for dcmd */
+#define CQCRDCT 0x48
+
+/* response mode error mask */
+#define CQRMEM 0x50
+#define CQ_EXCEPTION (1 << 6)
+
+/* task error info */
+#define CQTERRI 0x54
+
+/* CQTERRI bit fields */
+#define CQ_RMECI 0x1F
+#define CQ_RMETI (0x1F << 8)
+#define CQ_RMEFV (1 << 15)
+#define CQ_DTECI (0x3F << 16)
+#define CQ_DTETI (0x1F << 24)
+#define CQ_DTEFV (1 << 31)
+
+#define GET_CMD_ERR_TAG(__r__) ((__r__ & CQ_RMETI) >> 8)
+#define GET_DAT_ERR_TAG(__r__) ((__r__ & CQ_DTETI) >> 24)
+
+/* command response index */
+#define CQCRI 0x58
+
+/* command response argument */
+#define CQCRA 0x5C
+
+#define CQ_INT_ALL 0x3F
+#define CQIC_DEFAULT_ICCTH 31
+#define CQIC_DEFAULT_ICTOVAL 1
+
+/* attribute fields */
+#define VALID(x) ((x & 1) << 0)
+#define END(x) ((x & 1) << 1)
+#define INT(x) ((x & 1) << 2)
+#define ACT(x) ((x & 0x7) << 3)
+
+/* data command task descriptor fields */
+#define FORCED_PROG(x) ((x & 1) << 6)
+#define CONTEXT(x) ((x & 0xF) << 7)
+#define DATA_TAG(x) ((x & 1) << 11)
+#define DATA_DIR(x) ((x & 1) << 12)
+#define PRIORITY(x) ((x & 1) << 13)
+#define QBAR(x) ((x & 1) << 14)
+#define REL_WRITE(x) ((x & 1) << 15)
+#define BLK_COUNT(x) ((x & 0xFFFF) << 16)
+#define BLK_ADDR(x) ((x & 0xFFFFFFFF) << 32)
+
+/* direct command task descriptor fields */
+#define CMD_INDEX(x) ((x & 0x3F) << 16)
+#define CMD_TIMING(x) ((x & 1) << 22)
+#define RESP_TYPE(x) ((x & 0x3) << 23)
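+/*
+ * Worked DCMD example using the fields above (cf. cmdq_prep_dcmd_desc in
+ * cmdq_hci.c): a CMD13 with an R1 (non-busy) response uses timing = 1 and
+ * resp_type = 0x2, so the lower 32 bits of the task descriptor are
+ * VALID(1) | END(1) | INT(1) | QBAR(1) | ACT(0x5) | CMD_INDEX(13) |
+ * CMD_TIMING(1) | RESP_TYPE(0x2) = 0x014D402F.
+ */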
+
+/* transfer descriptor fields */
+#define DAT_LENGTH(x) ((x & 0xFFFF) << 16)
+#define DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32)
+#define DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0)
+
+/*
+ * Add new macro for updated CQ vendor specific
+ * register address for SDHC v5.0 onwards.
+ */
+#define CQ_V5_VENDOR_CFG 0x900
+#define CQ_VENDOR_CFG 0x100
+#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
+
+#define CQ_TASK_DESC_TASK_PARAMS_SIZE 8
+#define CQ_TASK_DESC_ICE_PARAMS_SIZE 8
+
+struct task_history {
+ u64 task;
+ bool is_dcmd;
+};
+
+struct cmdq_host {
+ const struct cmdq_host_ops *ops;
+ void __iomem *mmio;
+ struct mmc_host *mmc;
+
+ /* 64 bit DMA */
+ bool dma64;
+ int num_slots;
+
+ u32 dcmd_slot;
+ u32 caps;
+#define CMDQ_TASK_DESC_SZ_128 0x1
+#define CMDQ_CAP_CRYPTO_SUPPORT 0x2
+
+ u32 quirks;
+#define CMDQ_QUIRK_SHORT_TXFR_DESC_SZ 0x1
+#define CMDQ_QUIRK_NO_DCMD 0x2
+
+ bool enabled;
+ bool halted;
+ bool init_done;
+ bool offset_changed;
+
+ u8 *desc_base;
+
+ /* total descriptor size */
+ u8 slot_sz;
+
+ /* 64/128 bit depends on CQCFG */
+ u8 task_desc_len;
+
+ /* 64 bit on 32-bit arch, 128 bit on 64-bit */
+ u8 link_desc_len;
+
+ u8 *trans_desc_base;
+ /* same length as transfer descriptor */
+ u8 trans_desc_len;
+
+ dma_addr_t desc_dma_base;
+ dma_addr_t trans_desc_dma_base;
+
+ struct task_history *thist;
+ u8 thist_idx;
+
+ struct completion halt_comp;
+ struct mmc_request **mrq_slot;
+ void *private;
+};
+
+struct cmdq_host_ops {
+ void (*set_transfer_params)(struct mmc_host *mmc);
+ void (*set_data_timeout)(struct mmc_host *mmc, u32 val);
+ void (*clear_set_irqs)(struct mmc_host *mmc, bool clear);
+ void (*set_block_size)(struct mmc_host *mmc);
+ void (*dump_vendor_regs)(struct mmc_host *mmc);
+ void (*write_l)(struct cmdq_host *host, u32 val, int reg);
+ u32 (*read_l)(struct cmdq_host *host, int reg);
+ void (*clear_set_dumpregs)(struct mmc_host *mmc, bool set);
+ void (*enhanced_strobe_mask)(struct mmc_host *mmc, bool set);
+ int (*reset)(struct mmc_host *mmc);
+ int (*crypto_cfg)(struct mmc_host *mmc, struct mmc_request *mrq,
+ u32 slot, u64 *ice_ctx);
+ int (*crypto_cfg_end)(struct mmc_host *mmc, struct mmc_request *mrq);
+ void (*crypto_cfg_reset)(struct mmc_host *mmc, unsigned int slot);
+ void (*post_cqe_halt)(struct mmc_host *mmc);
+};
+
+static inline void cmdq_writel(struct cmdq_host *host, u32 val, int reg)
+{
+ if (unlikely(host->ops && host->ops->write_l))
+ host->ops->write_l(host, val, reg);
+ else
+ writel_relaxed(val, host->mmio + reg);
+}
+
+static inline u32 cmdq_readl(struct cmdq_host *host, int reg)
+{
+ if (unlikely(host->ops && host->ops->read_l))
+ return host->ops->read_l(host, reg);
+ else
+ return readl_relaxed(host->mmio + reg);
+}
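+/*
+ * Typical accessor usage (see cmdq_enable() in cmdq_hci.c): read-modify-write
+ * of a CQE register goes through these hooks so that a host driver can
+ * override register access, e.g.:
+ *
+ *	cmdq_writel(cq_host, cmdq_readl(cq_host, CQCFG) | CQ_ENABLE, CQCFG);
+ */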
+
+extern irqreturn_t cmdq_irq(struct mmc_host *mmc, int err);
+extern int cmdq_init(struct cmdq_host *cq_host, struct mmc_host *mmc,
+ bool dma64);
+extern struct cmdq_host *cmdq_pltfm_init(struct platform_device *pdev);
+#endif
diff --git a/drivers/mmc/host/sdhci-msm-ice.c b/drivers/mmc/host/sdhci-msm-ice.c
new file mode 100644
index 000000000000..e73bdfd424cc
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm-ice.c
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sdhci-msm-ice.h"
+
+static void sdhci_msm_ice_error_cb(void *host_ctrl, u32 error)
+{
+ struct sdhci_msm_host *msm_host = (struct sdhci_msm_host *)host_ctrl;
+
+ dev_err(&msm_host->pdev->dev, "%s: Error in ice operation 0x%x",
+ __func__, error);
+
+ if (msm_host->ice.state == SDHCI_MSM_ICE_STATE_ACTIVE)
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
+}
+
+static struct platform_device *sdhci_msm_ice_get_pdevice(struct device *dev)
+{
+ struct device_node *node;
+ struct platform_device *ice_pdev = NULL;
+
+ node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0);
+ if (!node) {
+ dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n",
+ __func__);
+ goto out;
+ }
+ ice_pdev = qcom_ice_get_pdevice(node);
+out:
+ return ice_pdev;
+}
+
+static
+struct qcom_ice_variant_ops *sdhci_msm_ice_get_vops(struct device *dev)
+{
+ struct qcom_ice_variant_ops *ice_vops = NULL;
+ struct device_node *node;
+
+ node = of_parse_phandle(dev->of_node, SDHC_MSM_CRYPTO_LABEL, 0);
+ if (!node) {
+ dev_dbg(dev, "%s: sdhc-msm-crypto property not specified\n",
+ __func__);
+ goto out;
+ }
+ ice_vops = qcom_ice_get_variant_ops(node);
+ of_node_put(node);
+out:
+ return ice_vops;
+}
+
+static
+void sdhci_msm_enable_ice_hci(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ u32 config = 0;
+ u32 ice_cap = 0;
+
+ /*
+ * Enable the cryptographic support inside SDHC.
+ * This is a global config which needs to be enabled
+ * all the time.
+	 * Only when it is enabled does the ICE_HCI capability
+	 * get reflected in the CQCAP register.
+ */
+ config = readl_relaxed(host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
+
+ if (enable)
+ config &= ~DISABLE_CRYPTO;
+ else
+ config |= DISABLE_CRYPTO;
+ writel_relaxed(config, host->ioaddr + HC_VENDOR_SPECIFIC_FUNC4);
+
+ /*
+	 * The CQCAP register is in a different register space from the
+	 * ICE global enable register above, so an mb() is required to
+	 * ensure the above write completes before CQCAP is read.
+ */
+ mb();
+
+ /*
+	 * Check if ICE HCI capability support is present.
+	 * If present, enable it.
+ */
+ ice_cap = readl_relaxed(msm_host->cryptoio + ICE_CQ_CAPABILITIES);
+ if (ice_cap & ICE_HCI_SUPPORT) {
+ config = readl_relaxed(msm_host->cryptoio + ICE_CQ_CONFIG);
+
+ if (enable)
+ config |= CRYPTO_GENERAL_ENABLE;
+ else
+ config &= ~CRYPTO_GENERAL_ENABLE;
+ writel_relaxed(config, msm_host->cryptoio + ICE_CQ_CONFIG);
+ }
+}
+
+int sdhci_msm_ice_get_dev(struct sdhci_host *host)
+{
+ struct device *sdhc_dev;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (!msm_host || !msm_host->pdev) {
+ pr_err("%s: invalid msm_host %p or msm_host->pdev\n",
+ __func__, msm_host);
+ return -EINVAL;
+ }
+
+ sdhc_dev = &msm_host->pdev->dev;
+ msm_host->ice.vops = sdhci_msm_ice_get_vops(sdhc_dev);
+ msm_host->ice.pdev = sdhci_msm_ice_get_pdevice(sdhc_dev);
+
+ if (msm_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) {
+ dev_err(sdhc_dev, "%s: ICE device not probed yet\n",
+ __func__);
+ msm_host->ice.pdev = NULL;
+ msm_host->ice.vops = NULL;
+ return -EPROBE_DEFER;
+ }
+
+ if (!msm_host->ice.pdev) {
+ dev_dbg(sdhc_dev, "%s: invalid platform device\n", __func__);
+ msm_host->ice.vops = NULL;
+ return -ENODEV;
+ }
+ if (!msm_host->ice.vops) {
+ dev_dbg(sdhc_dev, "%s: invalid ice vops\n", __func__);
+ msm_host->ice.pdev = NULL;
+ return -ENODEV;
+ }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_DISABLED;
+ return 0;
+}
+
+static
+int sdhci_msm_ice_pltfm_init(struct sdhci_msm_host *msm_host)
+{
+ struct resource *ice_memres = NULL;
+ struct platform_device *pdev = msm_host->pdev;
+ int err = 0;
+
+ if (!msm_host->ice_hci_support)
+ goto out;
+ /*
+	 * ICE HCI registers live in the cmdq register space,
+	 * so map the cmdq mem in order to access them.
+ */
+ ice_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "cmdq_mem");
+ if (!ice_memres) {
+ dev_err(&pdev->dev, "Failed to get iomem resource for ice\n");
+ err = -EINVAL;
+ goto out;
+ }
+ msm_host->cryptoio = devm_ioremap(&pdev->dev,
+ ice_memres->start,
+ resource_size(ice_memres));
+ if (!msm_host->cryptoio) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ err = -ENOMEM;
+ }
+out:
+ return err;
+}
+
+int sdhci_msm_ice_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+
+ if (msm_host->ice.vops->init) {
+ err = sdhci_msm_ice_pltfm_init(msm_host);
+ if (err)
+ goto out;
+
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, true);
+
+ err = msm_host->ice.vops->init(msm_host->ice.pdev,
+ msm_host,
+ sdhci_msm_ice_error_cb);
+ if (err) {
+ pr_err("%s: ice init err %d\n",
+ mmc_hostname(host->mmc), err);
+ sdhci_msm_ice_print_regs(host);
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, false);
+ goto out;
+ }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
+ }
+
+out:
+ return err;
+}
+
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+ writel_relaxed(SDHCI_MSM_ICE_ENABLE_BYPASS,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
+}
+
+static
+int sdhci_msm_ice_get_cfg(struct sdhci_msm_host *msm_host, struct request *req,
+ unsigned int *bypass, short *key_index)
+{
+ int err = 0;
+ struct ice_data_setting ice_set;
+
+ memset(&ice_set, 0, sizeof(struct ice_data_setting));
+ if (msm_host->ice.vops->config_start) {
+ err = msm_host->ice.vops->config_start(
+ msm_host->ice.pdev,
+ req, &ice_set, false);
+ if (err) {
+ pr_err("%s: ice config failed %d\n",
+ mmc_hostname(msm_host->mmc), err);
+ return err;
+ }
+ }
+ /* if writing data command */
+ if (rq_data_dir(req) == WRITE)
+ *bypass = ice_set.encr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ /* if reading data command */
+ else if (rq_data_dir(req) == READ)
+ *bypass = ice_set.decr_bypass ?
+ SDHCI_MSM_ICE_ENABLE_BYPASS :
+ SDHCI_MSM_ICE_DISABLE_BYPASS;
+ *key_index = ice_set.crypto_data.key_index;
+ return err;
+}
+
+static
+void sdhci_msm_ice_update_cfg(struct sdhci_host *host, u64 lba,
+ u32 slot, unsigned int bypass, short key_index)
+{
+ unsigned int ctrl_info_val = 0;
+
+ /* Configure ICE index */
+ ctrl_info_val =
+ (key_index &
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX)
+ << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX;
+
+ /* Configure data unit size of transfer request */
+ ctrl_info_val |=
+ (SDHCI_MSM_ICE_TR_DATA_UNIT_512_B &
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU)
+ << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU;
+
+ /* Configure ICE bypass mode */
+ ctrl_info_val |=
+ (bypass & MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS)
+ << OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS;
+
+ writel_relaxed((lba & 0xFFFFFFFF),
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n + 16 * slot);
+ writel_relaxed(((lba >> 32) & 0xFFFFFFFF),
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n + 16 * slot);
+ writel_relaxed(ctrl_info_val,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n + 16 * slot);
+ /* Ensure ICE registers are configured before issuing SDHCI request */
+ mb();
+}
+
+static inline
+void sdhci_msm_ice_hci_update_cmdq_cfg(u64 dun, unsigned int bypass,
+ short key_index, u64 *ice_ctx)
+{
+ /*
+	 * The naming convention changed between the ICE2.0 and ICE3.0
+	 * register fields. Below are the ICE3.0 names and their ICE2.0
+	 * equivalents:
+ * Data Unit Number(DUN) == Logical Base address(LBA)
+ * Crypto Configuration index (CCI) == Key Index
+ * Crypto Enable (CE) == !BYPASS
+ */
+ if (ice_ctx)
+ *ice_ctx = DATA_UNIT_NUM(dun) |
+ CRYPTO_CONFIG_INDEX(key_index) |
+ CRYPTO_ENABLE(!bypass);
+}
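+/*
+ * Worked encoding example for ice_ctx (hypothetical values): dun = 0x1000,
+ * key_index = 2 and bypass = 0 give
+ * DATA_UNIT_NUM(0x1000) | CRYPTO_CONFIG_INDEX(2) | CRYPTO_ENABLE(1)
+ * = 0x0000800200001000, i.e. crypto enabled with config index 2 for the
+ * data unit starting at 0x1000.
+ */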
+
+static
+void sdhci_msm_ice_hci_update_noncq_cfg(struct sdhci_host *host,
+ u64 dun, unsigned int bypass, short key_index)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ unsigned int crypto_params = 0;
+ /*
+	 * The naming convention changed between the ICE2.0 and ICE3.0
+	 * register fields. Below are the ICE3.0 names and their ICE2.0
+	 * equivalents:
+ * Data Unit Number(DUN) == Logical Base address(LBA)
+ * Crypto Configuration index (CCI) == Key Index
+ * Crypto Enable (CE) == !BYPASS
+ */
+ /* Configure ICE bypass mode */
+ crypto_params |=
+ (!bypass & MASK_SDHCI_MSM_ICE_HCI_PARAM_CE)
+ << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE;
+ /* Configure Crypto Configure Index (CCI) */
+ crypto_params |= (key_index &
+ MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI)
+ << OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI;
+
+ writel_relaxed((crypto_params & 0xFFFFFFFF),
+ msm_host->cryptoio + ICE_NONCQ_CRYPTO_PARAMS);
+
+ /* Update DUN */
+ writel_relaxed((dun & 0xFFFFFFFF),
+ msm_host->cryptoio + ICE_NONCQ_CRYPTO_DUN);
+ /* Ensure ICE registers are configured before issuing SDHCI request */
+ mb();
+}
+
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+ short key_index = 0;
+ sector_t lba = 0;
+ unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+ struct request *req;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mrq);
+ if (!mrq)
+ return -EINVAL;
+ req = mrq->req;
+ if (req) {
+ lba = req->__sector;
+ err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+ if (err)
+ return err;
+ pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
+ mmc_hostname(host->mmc),
+ (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+ slot, bypass, key_index);
+ }
+
+ if (msm_host->ice_hci_support) {
+ /* For ICE HCI / ICE3.0 */
+ sdhci_msm_ice_hci_update_noncq_cfg(host, lba, bypass,
+ key_index);
+ } else {
+		/* For ICE versions earlier than ICE3.0 */
+ sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+ }
+ return 0;
+}
+
+int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+ short key_index = 0;
+ sector_t lba = 0;
+ unsigned int bypass = SDHCI_MSM_ICE_ENABLE_BYPASS;
+ struct request *req;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ WARN_ON(!mrq);
+ if (!mrq)
+ return -EINVAL;
+ req = mrq->req;
+ if (req) {
+ lba = req->__sector;
+ err = sdhci_msm_ice_get_cfg(msm_host, req, &bypass, &key_index);
+ if (err)
+ return err;
+ pr_debug("%s: %s: slot %d bypass %d key_index %d\n",
+ mmc_hostname(host->mmc),
+ (rq_data_dir(req) == WRITE) ? "WRITE" : "READ",
+ slot, bypass, key_index);
+ }
+
+ if (msm_host->ice_hci_support) {
+ /* For ICE HCI / ICE3.0 */
+ sdhci_msm_ice_hci_update_cmdq_cfg(lba, bypass, key_index,
+ ice_ctx);
+ } else {
+		/* For ICE versions earlier than ICE3.0 */
+ sdhci_msm_ice_update_cfg(host, lba, slot, bypass, key_index);
+ }
+ return 0;
+}
+
+int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+ struct request *req;
+
+ if (!host->is_crypto_en)
+ return 0;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ req = mrq->req;
+ if (req) {
+ if (msm_host->ice.vops->config_end) {
+ err = msm_host->ice.vops->config_end(req);
+ if (err) {
+ pr_err("%s: ice config end failed %d\n",
+ mmc_hostname(host->mmc), err);
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int sdhci_msm_ice_reset(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state before reset %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ if (msm_host->ice.vops->reset) {
+ err = msm_host->ice.vops->reset(msm_host->ice.pdev);
+ if (err) {
+ pr_err("%s: ice reset failed %d\n",
+ mmc_hostname(host->mmc), err);
+ sdhci_msm_ice_print_regs(host);
+ return err;
+ }
+ }
+
+ /* If ICE HCI support is present then re-enable it */
+ if (msm_host->ice_hci_support)
+ sdhci_msm_enable_ice_hci(host, true);
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state after reset %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int sdhci_msm_ice_resume(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+
+ if (msm_host->ice.state !=
+ SDHCI_MSM_ICE_STATE_SUSPENDED) {
+ pr_err("%s: ice is in invalid state before resume %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ if (msm_host->ice.vops->resume) {
+ err = msm_host->ice.vops->resume(msm_host->ice.pdev);
+ if (err) {
+ pr_err("%s: ice resume failed %d\n",
+ mmc_hostname(host->mmc), err);
+ return err;
+ }
+ }
+
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_ACTIVE;
+ return 0;
+}
+
+int sdhci_msm_ice_suspend(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int err = 0;
+
+ if (msm_host->ice.state !=
+ SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state before resume %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ if (msm_host->ice.vops->suspend) {
+ err = msm_host->ice.vops->suspend(msm_host->ice.pdev);
+ if (err) {
+ pr_err("%s: ice suspend failed %d\n",
+ mmc_hostname(host->mmc), err);
+ return -EINVAL;
+ }
+ }
+ msm_host->ice.state = SDHCI_MSM_ICE_STATE_SUSPENDED;
+ return 0;
+}
+
+int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int stat = -EINVAL;
+
+ if (msm_host->ice.state != SDHCI_MSM_ICE_STATE_ACTIVE) {
+ pr_err("%s: ice is in invalid state %d\n",
+ mmc_hostname(host->mmc), msm_host->ice.state);
+ return -EINVAL;
+ }
+
+ if (msm_host->ice.vops->status) {
+ *ice_status = 0;
+ stat = msm_host->ice.vops->status(msm_host->ice.pdev);
+ if (stat < 0) {
+ pr_err("%s: ice get sts failed %d\n",
+ mmc_hostname(host->mmc), stat);
+ return -EINVAL;
+ }
+ *ice_status = stat;
+ }
+ return 0;
+}
+
+void sdhci_msm_ice_print_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (msm_host->ice.vops->debug)
+ msm_host->ice.vops->debug(msm_host->ice.pdev);
+}
diff --git a/drivers/mmc/host/sdhci-msm-ice.h b/drivers/mmc/host/sdhci-msm-ice.h
new file mode 100644
index 000000000000..7699464cf71e
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm-ice.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_MSM_ICE_H__
+#define __SDHCI_MSM_ICE_H__
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/blkdev.h>
+#include <crypto/ice.h>
+
+#include "sdhci-msm.h"
+
+#define SDHC_MSM_CRYPTO_LABEL "sdhc-msm-crypto"
+/* Timeout waiting for ICE initialization, which requires TZ access */
+#define SDHCI_MSM_ICE_COMPLETION_TIMEOUT_MS 500
+
+/*
+ * SDHCI host controller ICE registers. There are 32 instances
+ * [0..31] of each of these registers.
+ */
+#define NUM_SDHCI_MSM_ICE_CTRL_INFO_n_REGS 32
+
+#define CORE_VENDOR_SPEC_ICE_CTRL 0x300
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_1_n 0x304
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_2_n 0x308
+#define CORE_VENDOR_SPEC_ICE_CTRL_INFO_3_n 0x30C
+
+/* ICE3.0 registers added in the cmdq reg space */
+#define ICE_CQ_CAPABILITIES 0x04
+#define ICE_HCI_SUPPORT (1 << 28)
+#define ICE_CQ_CONFIG 0x08
+#define CRYPTO_GENERAL_ENABLE (1 << 1)
+#define ICE_NONCQ_CRYPTO_PARAMS 0x70
+#define ICE_NONCQ_CRYPTO_DUN 0x74
+
+/* ICE3.0 registers added in the HC reg space */
+#define HC_VENDOR_SPECIFIC_FUNC4 0x260
+#define DISABLE_CRYPTO (1 << 15)
+#define HC_VENDOR_SPECIFIC_ICE_CTRL 0x800
+#define ICE_SW_RST_EN (1 << 0)
+
+/* SDHCI MSM ICE CTRL Info register offset */
+enum {
+ OFFSET_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0,
+ OFFSET_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 1,
+ OFFSET_SDHCI_MSM_ICE_CTRL_INFO_CDU = 6,
+ OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0,
+ OFFSET_SDHCI_MSM_ICE_HCI_PARAM_CE = 8,
+};
+
+/* SDHCI MSM ICE CTRL Info register masks */
+enum {
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_BYPASS = 0x1,
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_KEY_INDEX = 0x1F,
+ MASK_SDHCI_MSM_ICE_CTRL_INFO_CDU = 0x7,
+ MASK_SDHCI_MSM_ICE_HCI_PARAM_CE = 0x1,
+ MASK_SDHCI_MSM_ICE_HCI_PARAM_CCI = 0xff
+};
+
+/* SDHCI MSM ICE encryption/decryption bypass state */
+enum {
+ SDHCI_MSM_ICE_DISABLE_BYPASS = 0,
+ SDHCI_MSM_ICE_ENABLE_BYPASS = 1,
+};
+
+/* SDHCI MSM ICE Crypto Data Unit of target DUN of Transfer Request */
+enum {
+ SDHCI_MSM_ICE_TR_DATA_UNIT_512_B = 0,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_1_KB = 1,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_2_KB = 2,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_4_KB = 3,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_8_KB = 4,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_16_KB = 5,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_32_KB = 6,
+ SDHCI_MSM_ICE_TR_DATA_UNIT_64_KB = 7,
+};
+
+/* SDHCI MSM ICE internal state */
+enum {
+ SDHCI_MSM_ICE_STATE_DISABLED = 0,
+ SDHCI_MSM_ICE_STATE_ACTIVE = 1,
+ SDHCI_MSM_ICE_STATE_SUSPENDED = 2,
+};
+
+/* crypto context fields in cmdq data command task descriptor */
+#define DATA_UNIT_NUM(x) (((u64)(x) & 0xFFFFFFFF) << 0)
+#define CRYPTO_CONFIG_INDEX(x) (((u64)(x) & 0xFF) << 32)
+#define CRYPTO_ENABLE(x) (((u64)(x) & 0x1) << 47)
+
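
The three macros above slice a 64-bit cmdq task-descriptor word into DUN,
crypto-config-index and enable fields. A sketch of the intended composition,
assuming hypothetical inputs lba and cfg_index derived from the request:

	/* illustrative only: build the crypto context for a cmdq task */
	u64 ice_ctx = DATA_UNIT_NUM(lba) |
		      CRYPTO_CONFIG_INDEX(cfg_index) |
		      CRYPTO_ENABLE(1);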
+#ifdef CONFIG_MMC_SDHCI_MSM_ICE
+int sdhci_msm_ice_get_dev(struct sdhci_host *host);
+int sdhci_msm_ice_init(struct sdhci_host *host);
+void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot);
+int sdhci_msm_ice_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot);
+int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
+int sdhci_msm_ice_cfg_end(struct sdhci_host *host, struct mmc_request *mrq);
+int sdhci_msm_ice_reset(struct sdhci_host *host);
+int sdhci_msm_ice_resume(struct sdhci_host *host);
+int sdhci_msm_ice_suspend(struct sdhci_host *host);
+int sdhci_msm_ice_get_status(struct sdhci_host *host, int *ice_status);
+void sdhci_msm_ice_print_regs(struct sdhci_host *host);
+#else
+/*
+ * Stub implementations used when ICE support is compiled out. They are
+ * static inline so that including this header from several files does not
+ * create duplicate or missing symbols.
+ */
+static inline int sdhci_msm_ice_get_dev(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	if (msm_host) {
+		msm_host->ice.pdev = NULL;
+		msm_host->ice.vops = NULL;
+	}
+	return -ENODEV;
+}
+static inline int sdhci_msm_ice_init(struct sdhci_host *host)
+{
+	return 0;
+}
+
+static inline void sdhci_msm_ice_cfg_reset(struct sdhci_host *host, u32 slot)
+{
+}
+
+static inline int sdhci_msm_ice_cfg(struct sdhci_host *host,
+		struct mmc_request *mrq, u32 slot)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_cmdq_cfg(struct sdhci_host *host,
+		struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_cfg_end(struct sdhci_host *host,
+		struct mmc_request *mrq)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_reset(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_resume(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_suspend(struct sdhci_host *host)
+{
+	return 0;
+}
+static inline int sdhci_msm_ice_get_status(struct sdhci_host *host,
+		int *ice_status)
+{
+	return 0;
+}
+static inline void sdhci_msm_ice_print_regs(struct sdhci_host *host)
+{
+}
+#endif /* CONFIG_MMC_SDHCI_MSM_ICE */
+#endif /* __SDHCI_MSM_ICE_H__ */
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 4695bee203ea..907763ddf234 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1,7 +1,8 @@
/*
- * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver
+ * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
+ * driver source file
*
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -15,97 +16,497 @@
*/
#include <linux/module.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/gfp.h>
+#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/mmc/mmc.h>
+#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/msm-bus.h>
+#include <linux/pm_runtime.h>
+#include <trace/events/mmc.h>
-#include "sdhci-pltfm.h"
+#include "sdhci-msm.h"
+#include "sdhci-msm-ice.h"
+#include "cmdq_hci.h"
-#define CORE_MCI_VERSION 0x50
+#define QOS_REMOVE_DELAY_MS 10
+#define CORE_POWER 0x0
+#define CORE_SW_RST (1 << 7)
+
+#define SDHCI_VER_100 0x2B
+
+#define CORE_VERSION_STEP_MASK 0x0000FFFF
+#define CORE_VERSION_MINOR_MASK 0x0FFF0000
+#define CORE_VERSION_MINOR_SHIFT 16
+#define CORE_VERSION_MAJOR_MASK 0xF0000000
#define CORE_VERSION_MAJOR_SHIFT 28
-#define CORE_VERSION_MAJOR_MASK (0xf << CORE_VERSION_MAJOR_SHIFT)
-#define CORE_VERSION_MINOR_MASK 0xff
+#define CORE_VERSION_TARGET_MASK 0x000000FF
+#define SDHCI_MSM_VER_420 0x49
+
+#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
#define CORE_HC_MODE 0x78
#define HC_MODE_EN 0x1
-#define CORE_POWER 0x0
-#define CORE_SW_RST BIT(7)
+#define FF_CLK_SW_RST_DIS (1 << 13)
+
+#define CORE_PWRCTL_BUS_OFF 0x01
+#define CORE_PWRCTL_BUS_ON (1 << 1)
+#define CORE_PWRCTL_IO_LOW (1 << 2)
+#define CORE_PWRCTL_IO_HIGH (1 << 3)
+#define CORE_PWRCTL_BUS_SUCCESS 0x01
+#define CORE_PWRCTL_BUS_FAIL (1 << 1)
+#define CORE_PWRCTL_IO_SUCCESS (1 << 2)
+#define CORE_PWRCTL_IO_FAIL (1 << 3)
+
+#define INT_MASK 0xF
#define MAX_PHASES 16
-#define CORE_DLL_LOCK BIT(7)
-#define CORE_DLL_EN BIT(16)
-#define CORE_CDR_EN BIT(17)
-#define CORE_CK_OUT_EN BIT(18)
-#define CORE_CDR_EXT_EN BIT(19)
-#define CORE_DLL_PDN BIT(29)
-#define CORE_DLL_RST BIT(30)
-#define CORE_DLL_CONFIG 0x100
-#define CORE_DLL_STATUS 0x108
-
-#define CORE_VENDOR_SPEC 0x10c
-#define CORE_CLK_PWRSAVE BIT(1)
-
-#define CORE_VENDOR_SPEC_CAPABILITIES0 0x11c
-
-#define CDR_SELEXT_SHIFT 20
-#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT)
-#define CMUX_SHIFT_PHASE_SHIFT 24
-#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT)
-
-struct sdhci_msm_host {
- struct platform_device *pdev;
- void __iomem *core_mem; /* MSM SDCC mapped address */
- struct clk *clk; /* main SD/MMC bus clock */
- struct clk *pclk; /* SDHC peripheral bus clock */
- struct clk *bus_clk; /* SDHC bus voter clock */
- struct mmc_host *mmc;
- struct sdhci_pltfm_data sdhci_msm_pdata;
+
+#define CORE_CMD_DAT_TRACK_SEL (1 << 0)
+#define CORE_DLL_EN (1 << 16)
+#define CORE_CDR_EN (1 << 17)
+#define CORE_CK_OUT_EN (1 << 18)
+#define CORE_CDR_EXT_EN (1 << 19)
+#define CORE_DLL_PDN (1 << 29)
+#define CORE_DLL_RST (1 << 30)
+
+#define CORE_DLL_LOCK (1 << 7)
+#define CORE_DDR_DLL_LOCK (1 << 11)
+
+#define CORE_CLK_PWRSAVE (1 << 1)
+#define CORE_HC_MCLK_SEL_DFLT (2 << 8)
+#define CORE_HC_MCLK_SEL_HS400 (3 << 8)
+#define CORE_HC_MCLK_SEL_MASK (3 << 8)
+#define CORE_HC_AUTO_CMD21_EN (1 << 6)
+#define CORE_IO_PAD_PWR_SWITCH_EN (1 << 15)
+#define CORE_IO_PAD_PWR_SWITCH (1 << 16)
+#define CORE_HC_SELECT_IN_EN (1 << 18)
+#define CORE_HC_SELECT_IN_HS400 (6 << 19)
+#define CORE_HC_SELECT_IN_MASK (7 << 19)
+#define CORE_VENDOR_SPEC_POR_VAL 0xA1C
+
+#define HC_SW_RST_WAIT_IDLE_DIS (1 << 20)
+#define HC_SW_RST_REQ (1 << 21)
+#define CORE_ONE_MID_EN (1 << 25)
+
+#define CORE_8_BIT_SUPPORT (1 << 18)
+#define CORE_3_3V_SUPPORT (1 << 24)
+#define CORE_3_0V_SUPPORT (1 << 25)
+#define CORE_1_8V_SUPPORT (1 << 26)
+#define CORE_SYS_BUS_SUPPORT_64_BIT BIT(28)
+
+#define CORE_CSR_CDC_CTLR_CFG0 0x130
+#define CORE_SW_TRIG_FULL_CALIB (1 << 16)
+#define CORE_HW_AUTOCAL_ENA (1 << 17)
+
+#define CORE_CSR_CDC_CTLR_CFG1 0x134
+#define CORE_CSR_CDC_CAL_TIMER_CFG0 0x138
+#define CORE_TIMER_ENA (1 << 16)
+
+#define CORE_CSR_CDC_CAL_TIMER_CFG1 0x13C
+#define CORE_CSR_CDC_REFCOUNT_CFG 0x140
+#define CORE_CSR_CDC_COARSE_CAL_CFG 0x144
+#define CORE_CDC_OFFSET_CFG 0x14C
+#define CORE_CSR_CDC_DELAY_CFG 0x150
+#define CORE_CDC_SLAVE_DDA_CFG 0x160
+#define CORE_CSR_CDC_STATUS0 0x164
+#define CORE_CALIBRATION_DONE (1 << 0)
+
+#define CORE_CDC_ERROR_CODE_MASK 0x7000000
+
+#define CQ_CMD_DBG_RAM 0x110
+#define CQ_CMD_DBG_RAM_WA 0x150
+#define CQ_CMD_DBG_RAM_OL 0x154
+
+#define CORE_CSR_CDC_GEN_CFG 0x178
+#define CORE_CDC_SWITCH_BYPASS_OFF (1 << 0)
+#define CORE_CDC_SWITCH_RC_EN (1 << 1)
+
+#define CORE_CDC_T4_DLY_SEL (1 << 0)
+#define CORE_CMDIN_RCLK_EN (1 << 1)
+#define CORE_START_CDC_TRAFFIC (1 << 6)
+
+#define CORE_PWRSAVE_DLL (1 << 3)
+#define CORE_CMDEN_HS400_INPUT_MASK_CNT (1 << 13)
+
+#define CORE_DDR_CAL_EN (1 << 0)
+#define CORE_FLL_CYCLE_CNT (1 << 18)
+#define CORE_DLL_CLOCK_DISABLE (1 << 21)
+
+#define DDR_CONFIG_POR_VAL 0x80040853
+#define DDR_CONFIG_PRG_RCLK_DLY_MASK 0x1FF
+#define DDR_CONFIG_PRG_RCLK_DLY 115
+#define DDR_CONFIG_2_POR_VAL 0x80040873
+
+/* 512 descriptors */
+#define SDHCI_MSM_MAX_SEGMENTS (1 << 9)
+#define SDHCI_MSM_MMC_CLK_GATE_DELAY 200 /* msecs */
+
+#define CORE_FREQ_100MHZ (100 * 1000 * 1000)
+#define TCXO_FREQ 19200000
+
+#define INVALID_TUNING_PHASE -1
+#define sdhci_is_valid_gpio_wakeup_int(_h) ((_h)->pdata->sdiowakeup_irq >= 0)
+
+#define NUM_TUNING_PHASES 16
+#define MAX_DRV_TYPES_SUPPORTED_HS200 4
+#define MSM_AUTOSUSPEND_DELAY_MS 100
+
+struct sdhci_msm_offset {
+ u32 CORE_MCI_DATA_CNT;
+ u32 CORE_MCI_STATUS;
+ u32 CORE_MCI_FIFO_CNT;
+ u32 CORE_MCI_VERSION;
+ u32 CORE_GENERICS;
+ u32 CORE_TESTBUS_CONFIG;
+ u32 CORE_TESTBUS_SEL2_BIT;
+ u32 CORE_TESTBUS_ENA;
+ u32 CORE_TESTBUS_SEL2;
+ u32 CORE_PWRCTL_STATUS;
+ u32 CORE_PWRCTL_MASK;
+ u32 CORE_PWRCTL_CLEAR;
+ u32 CORE_PWRCTL_CTL;
+ u32 CORE_SDCC_DEBUG_REG;
+ u32 CORE_DLL_CONFIG;
+ u32 CORE_DLL_STATUS;
+ u32 CORE_VENDOR_SPEC;
+ u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR0;
+ u32 CORE_VENDOR_SPEC_ADMA_ERR_ADDR1;
+ u32 CORE_VENDOR_SPEC_FUNC2;
+ u32 CORE_VENDOR_SPEC_CAPABILITIES0;
+ u32 CORE_DDR_200_CFG;
+ u32 CORE_VENDOR_SPEC3;
+ u32 CORE_DLL_CONFIG_2;
+ u32 CORE_DDR_CONFIG;
+ u32 CORE_DDR_CONFIG_2;
+};
+
+struct sdhci_msm_offset sdhci_msm_offset_mci_removed = {
+ .CORE_MCI_DATA_CNT = 0x35C,
+ .CORE_MCI_STATUS = 0x324,
+ .CORE_MCI_FIFO_CNT = 0x308,
+ .CORE_MCI_VERSION = 0x318,
+ .CORE_GENERICS = 0x320,
+ .CORE_TESTBUS_CONFIG = 0x32C,
+ .CORE_TESTBUS_SEL2_BIT = 3,
+ .CORE_TESTBUS_ENA = (1 << 31),
+ .CORE_TESTBUS_SEL2 = (1 << 3),
+ .CORE_PWRCTL_STATUS = 0x240,
+ .CORE_PWRCTL_MASK = 0x244,
+ .CORE_PWRCTL_CLEAR = 0x248,
+ .CORE_PWRCTL_CTL = 0x24C,
+ .CORE_SDCC_DEBUG_REG = 0x358,
+ .CORE_DLL_CONFIG = 0x200,
+ .CORE_DLL_STATUS = 0x208,
+ .CORE_VENDOR_SPEC = 0x20C,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x214,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x218,
+ .CORE_VENDOR_SPEC_FUNC2 = 0x210,
+ .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x21C,
+ .CORE_DDR_200_CFG = 0x224,
+ .CORE_VENDOR_SPEC3 = 0x250,
+ .CORE_DLL_CONFIG_2 = 0x254,
+ .CORE_DDR_CONFIG = 0x258,
+ .CORE_DDR_CONFIG_2 = 0x25C,
};
-/* Platform specific tuning */
-static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll)
+struct sdhci_msm_offset sdhci_msm_offset_mci_present = {
+ .CORE_MCI_DATA_CNT = 0x30,
+ .CORE_MCI_STATUS = 0x34,
+ .CORE_MCI_FIFO_CNT = 0x44,
+ .CORE_MCI_VERSION = 0x050,
+ .CORE_GENERICS = 0x70,
+ .CORE_TESTBUS_CONFIG = 0x0CC,
+ .CORE_TESTBUS_SEL2_BIT = 4,
+ .CORE_TESTBUS_ENA = (1 << 3),
+ .CORE_TESTBUS_SEL2 = (1 << 4),
+ .CORE_PWRCTL_STATUS = 0xDC,
+ .CORE_PWRCTL_MASK = 0xE0,
+ .CORE_PWRCTL_CLEAR = 0xE4,
+ .CORE_PWRCTL_CTL = 0xE8,
+ .CORE_SDCC_DEBUG_REG = 0x124,
+ .CORE_DLL_CONFIG = 0x100,
+ .CORE_DLL_STATUS = 0x108,
+ .CORE_VENDOR_SPEC = 0x10C,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR0 = 0x114,
+ .CORE_VENDOR_SPEC_ADMA_ERR_ADDR1 = 0x118,
+ .CORE_VENDOR_SPEC_FUNC2 = 0x110,
+ .CORE_VENDOR_SPEC_CAPABILITIES0 = 0x11C,
+ .CORE_DDR_200_CFG = 0x184,
+ .CORE_VENDOR_SPEC3 = 0x1B0,
+ .CORE_DLL_CONFIG_2 = 0x1B4,
+ .CORE_DDR_CONFIG = 0x1B8,
+ .CORE_DDR_CONFIG_2 = 0x1BC,
+};
+
+u8 sdhci_msm_readb_relaxed(struct sdhci_host *host, u32 offset)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ return readb_relaxed(base_addr + offset);
+}
+
+u32 sdhci_msm_readl_relaxed(struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ return readl_relaxed(base_addr + offset);
+}
+
+void sdhci_msm_writeb_relaxed(u8 val, struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ writeb_relaxed(val, base_addr + offset);
+}
+
+void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ void __iomem *base_addr;
+
+ if (msm_host->mci_removed)
+ base_addr = host->ioaddr;
+ else
+ base_addr = msm_host->core_mem;
+
+ writel_relaxed(val, base_addr + offset);
+}
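
These accessors exist because controllers with the MCI block removed expose
the same logical registers inside the SDHC region at different offsets, as
captured by the two offset tables above. A minimal usage sketch, assuming
msm_host->offset points at the table matching the controller:

	/* illustrative: read PWRCTL_STATUS regardless of register layout */
	u32 irq_status = sdhci_msm_readl_relaxed(host,
			msm_host->offset->CORE_PWRCTL_STATUS);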
+
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
+static const u32 tuning_block_64[] = {
+ 0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
+ 0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
+ 0xF0FFF0FF, 0x3CCCFC0F, 0xCFCC33CC, 0xEEFFEFFF,
+ 0xFDFFFDFF, 0xFFBFFFDF, 0xFFF7FFBB, 0xDE7B7FF7
+};
+
+static const u32 tuning_block_128[] = {
+ 0xFF00FFFF, 0x0000FFFF, 0xCCCCFFFF, 0xCCCC33CC,
+ 0xCC3333CC, 0xFFFFCCCC, 0xFFFFEEFF, 0xFFEEEEFF,
+ 0xFFDDFFFF, 0xDDDDFFFF, 0xBBFFFFFF, 0xBBFFFFFF,
+ 0xFFFFFFBB, 0xFFFFFF77, 0x77FF7777, 0xFFEEDDBB,
+ 0x00FFFFFF, 0x00FFFFFF, 0xCCFFFF00, 0xCC33CCCC,
+ 0x3333CCCC, 0xFFCCCCCC, 0xFFEEFFFF, 0xEEEEFFFF,
+ 0xDDFFFFFF, 0xDDFFFFFF, 0xFFFFFFDD, 0xFFFFFFBB,
+ 0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
+};
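
These are the standard tuning block patterns defined by the SD and eMMC
specifications: the 64-byte pattern is used for 4-bit bus width and the
128-byte pattern for 8-bit HS200, which is why the tuning routine below
switches to tuning_block_128 when the bus width is MMC_BUS_WIDTH_8.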
+
+/* global to hold each slot instance for debug */
+static struct sdhci_msm_host *sdhci_slot[2];
+
+static int disable_slots;
+/* root can write, others read */
+module_param(disable_slots, int, S_IRUGO|S_IWUSR);
+
+static bool nocmdq;
+module_param(nocmdq, bool, S_IRUGO|S_IWUSR);
+
+enum vdd_io_level {
+ /* set vdd_io_data->low_vol_level */
+ VDD_IO_LOW,
+ /* set vdd_io_data->high_vol_level */
+ VDD_IO_HIGH,
+ /*
+	 * set to the voltage_level passed as the third argument of
+	 * sdhci_msm_set_vdd_io_vol().
+ */
+ VDD_IO_SET_LEVEL,
+};
+
+/* MSM platform specific tuning */
+static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host,
+ u8 poll)
+{
+ int rc = 0;
u32 wait_cnt = 50;
- u8 ck_out_en;
+ u8 ck_out_en = 0;
struct mmc_host *mmc = host->mmc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
- /* Poll for CK_OUT_EN bit. max. poll time = 50us */
- ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
- CORE_CK_OUT_EN);
+ /* poll for CK_OUT_EN bit. max. poll time = 50us */
+ ck_out_en = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
while (ck_out_en != poll) {
if (--wait_cnt == 0) {
- dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n",
- mmc_hostname(mmc), poll);
- return -ETIMEDOUT;
+ pr_err("%s: %s: CK_OUT_EN bit is not %d\n",
+ mmc_hostname(mmc), __func__, poll);
+ rc = -ETIMEDOUT;
+ goto out;
}
udelay(1);
- ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) &
- CORE_CK_OUT_EN);
+ ck_out_en = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & CORE_CK_OUT_EN);
}
+out:
+ return rc;
+}
- return 0;
+/*
+ * Enable CDR to track changes of DAT lines and adjust sampling
+ * point according to voltage/temperature variations
+ */
+static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
+{
+ int rc = 0;
+ u32 config;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ config |= CORE_CDR_EN;
+ config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ rc = msm_dll_poll_ck_out_en(host, 0);
+ if (rc)
+ goto err;
+
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+ rc = msm_dll_poll_ck_out_en(host, 1);
+ if (rc)
+ goto err;
+ goto out;
+err:
+ pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
+out:
+ return rc;
+}
+
+static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
+ *attr, const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ u32 tmp;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &tmp)) {
+ spin_lock_irqsave(&host->lock, flags);
+ msm_host->en_auto_cmd21 = !!tmp;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
+static ssize_t show_auto_cmd21(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
+}
+
+/* MSM auto-tuning handler */
+static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
+ bool enable,
+ u32 type)
+{
+ int rc = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u32 val = 0;
+
+ if (!msm_host->en_auto_cmd21)
+ return 0;
+
+ if (type == MMC_SEND_TUNING_BLOCK_HS200)
+ val = CORE_HC_AUTO_CMD21_EN;
+ else
+ return 0;
+
+ if (enable) {
+ rc = msm_enable_cdr_cm_sdc4_dll(host);
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) | val,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+ } else {
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & ~val,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+ }
+ return rc;
}
static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
{
- int rc;
- static const u8 grey_coded_phase_table[] = {
- 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
- 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
- };
+ int rc = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+	static const u8 grey_coded_phase_table[] = {
+					0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
+					0xC, 0xD, 0xF, 0xE, 0xA, 0xB, 0x9, 0x8};
unsigned long flags;
u32 config;
struct mmc_host *mmc = host->mmc;
+ pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
spin_lock_irqsave(&host->lock, flags);
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN);
config |= (CORE_CDR_EXT_EN | CORE_DLL_EN);
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */
rc = msm_dll_poll_ck_out_en(host, 0);
@@ -116,31 +517,36 @@ static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
* Write the selected DLL clock output phase (0 ... 15)
* to CDR_SELEXT bit field of DLL_CONFIG register.
*/
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
- config &= ~CDR_SELEXT_MASK;
- config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ & ~(0xF << 20))
+ | (grey_coded_phase_table[phase] << 20)),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_CK_OUT_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */
rc = msm_dll_poll_ck_out_en(host, 1);
if (rc)
goto err_out;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+ config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
config |= CORE_CDR_EN;
config &= ~CORE_CDR_EXT_EN;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
goto out;
err_out:
- dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n",
- mmc_hostname(mmc), phase);
+ pr_err("%s: %s: Failed to set DLL phase: %d\n",
+ mmc_hostname(mmc), __func__, phase);
out:
spin_unlock_irqrestore(&host->lock, flags);
+ pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
return rc;
}
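
The grey_coded_phase_table used above is the binary-reflected Gray code of
0..15, so stepping to an adjacent phase flips exactly one bit of the
CDR_SELEXT field. Equivalently, it could be computed instead of tabulated
(an illustrative helper, not part of the driver):

	static inline u8 msm_gray_code(u8 n)
	{
		/* binary-reflected Gray code: gray(n) = n ^ (n >> 1) */
		return n ^ (n >> 1);
	}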
@@ -148,26 +554,27 @@ out:
 * Find out the greatest range of consecutive selected
* DLL clock output phases that can be used as sampling
* setting for SD3.0 UHS-I card read operation (in SDR104
- * timing mode) or for eMMC4.5 card read operation (in HS200
- * timing mode).
+ * timing mode) or for eMMC4.5 card read operation (in
+ * HS400/HS200 timing mode).
* Select the 3/4 of the range and configure the DLL with the
* selected DLL clock output phase.
*/
static int msm_find_most_appropriate_phase(struct sdhci_host *host,
- u8 *phase_table, u8 total_phases)
+ u8 *phase_table, u8 total_phases)
{
int ret;
u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} };
- u8 phases_per_row[MAX_PHASES] = { 0 };
+ u8 phases_per_row[MAX_PHASES] = {0};
int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0;
int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0;
bool phase_0_found = false, phase_15_found = false;
struct mmc_host *mmc = host->mmc;
+ pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
if (!total_phases || (total_phases > MAX_PHASES)) {
- dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n",
- mmc_hostname(mmc), total_phases);
+ pr_err("%s: %s: invalid argument: total_phases=%d\n",
+ mmc_hostname(mmc), __func__, total_phases);
return -EINVAL;
}
@@ -225,7 +632,7 @@ static int msm_find_most_appropriate_phase(struct sdhci_host *host,
i = phases_15;
for (cnt = 0; cnt < phases_0; cnt++) {
ranges[phase_15_raw_index][i] =
- ranges[phase_0_raw_index][cnt];
+ ranges[phase_0_raw_index][cnt];
if (++i >= MAX_PHASES)
break;
}
@@ -241,24 +648,29 @@ static int msm_find_most_appropriate_phase(struct sdhci_host *host,
}
}
- i = (curr_max * 3) / 4;
+ i = ((curr_max * 3) / 4);
if (i)
i--;
- ret = ranges[selected_row_index][i];
+ ret = (int)ranges[selected_row_index][i];
if (ret >= MAX_PHASES) {
ret = -EINVAL;
- dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n",
- mmc_hostname(mmc), ret);
+ pr_err("%s: %s: invalid phase selected=%d\n",
+ mmc_hostname(mmc), __func__, ret);
}
+ pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
return ret;
}
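
A concrete example of the 3/4 selection: if the longest run of passing
phases has curr_max = 12 entries, then i = (12 * 3) / 4 - 1 = 8 and the
ninth phase of that window is chosen, deliberately biasing the sampling
point toward the later edge of the passing window rather than its center.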
static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
{
- u32 mclk_freq = 0, config;
+ u32 mclk_freq = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
/* Program the MCLK value to MCLK_FREQ bit field */
if (host->clock <= 112000000)
@@ -278,117 +690,639 @@ static inline void msm_cm_dll_set_freq(struct sdhci_host *host)
else if (host->clock <= 200000000)
mclk_freq = 7;
- config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
- config &= ~CMUX_SHIFT_PHASE_MASK;
- config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT;
- writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ & ~(7 << 24)) | (mclk_freq << 24)),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
}
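
The write above programs the 3-bit MCLK_FREQ field at bits [26:24] of
DLL_CONFIG, hence the ~(7 << 24) mask; for example, a 200MHz host clock
selects mclk_freq = 7, the highest setting.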
-/* Initialize the DLL (Programmable Delay Line) */
+/* Initialize the DLL (Programmable Delay Line) */
static int msm_init_cm_dll(struct sdhci_host *host)
{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
struct mmc_host *mmc = host->mmc;
- int wait_cnt = 50;
+ int rc = 0;
unsigned long flags;
+ u32 wait_cnt;
+ bool prev_pwrsave, curr_pwrsave;
+ pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
spin_lock_irqsave(&host->lock, flags);
-
+ prev_pwrsave = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
+ curr_pwrsave = prev_pwrsave;
/*
* Make sure that clock is always enabled when DLL
* tuning is in progress. Keeping PWRSAVE ON may
- * turn off the clock.
+ * turn off the clock. So let's disable the PWRSAVE
+ * here and re-enable it once tuning is completed.
*/
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
- & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC);
+ if (prev_pwrsave) {
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_CLK_PWRSAVE), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ curr_pwrsave = false;
+ }
+
+ if (msm_host->use_updated_dll_reset) {
+ /* Disable the DLL clock */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ & ~CORE_CK_OUT_EN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ | CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2);
+ }
/* Write 1 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_RST),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_PDN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
msm_cm_dll_set_freq(host);
+ if (msm_host->use_updated_dll_reset) {
+ u32 mclk_freq = 0;
+
+ if ((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ & CORE_FLL_CYCLE_CNT))
+ mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 8);
+ else
+ mclk_freq = (u32) ((host->clock / TCXO_FREQ) * 4);
+
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ & ~(0xFF << 10)) | (mclk_freq << 10)),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+ /* wait for 5us before enabling DLL clock */
+ udelay(5);
+ }
+
/* Write 0 to DLL_RST bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_RST),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Write 0 to DLL_PDN bit of DLL_CONFIG register */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) & ~CORE_DLL_PDN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
+
+ if (msm_host->use_updated_dll_reset) {
+ msm_cm_dll_set_freq(host);
+ /* Enable the DLL clock */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ & ~CORE_DLL_CLOCK_DISABLE), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2);
+ }
/* Set DLL_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG) | CORE_DLL_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG);
/* Set CK_OUT_EN bit to 1. */
- writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
- | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_CK_OUT_EN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ wait_cnt = 50;
/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */
- while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) &
- CORE_DLL_LOCK)) {
+ while (!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS) & CORE_DLL_LOCK)) {
		/* max. wait of 50us for the LOCK bit to be set */
if (--wait_cnt == 0) {
- dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n",
- mmc_hostname(mmc));
- spin_unlock_irqrestore(&host->lock, flags);
- return -ETIMEDOUT;
+ pr_err("%s: %s: DLL failed to LOCK\n",
+ mmc_hostname(mmc), __func__);
+ rc = -ETIMEDOUT;
+ goto out;
}
+ /* wait for 1us before polling again */
udelay(1);
}
+out:
+ /* Restore the correct PWRSAVE state */
+ if (prev_pwrsave ^ curr_pwrsave) {
+ u32 reg = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+
+ if (prev_pwrsave)
+ reg |= CORE_CLK_PWRSAVE;
+ else
+ reg &= ~CORE_CLK_PWRSAVE;
+
+ writel_relaxed(reg, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ }
+
spin_unlock_irqrestore(&host->lock, flags);
- return 0;
+ pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
+ return rc;
}
-static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
{
+ u32 calib_done;
+ int ret = 0;
+ int cdc_err = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+ /* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ & ~CORE_CDC_T4_DLY_SEL),
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+
+ /* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+ & ~CORE_CDC_SWITCH_BYPASS_OFF),
+ host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ /* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+ | CORE_CDC_SWITCH_RC_EN),
+ host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+ /* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ & ~CORE_START_CDC_TRAFFIC),
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+
+ /*
+ * Perform CDC Register Initialization Sequence
+ *
+ * CORE_CSR_CDC_CTLR_CFG0 0x11800EC
+ * CORE_CSR_CDC_CTLR_CFG1 0x3011111
+ * CORE_CSR_CDC_CAL_TIMER_CFG0 0x1201000
+ * CORE_CSR_CDC_CAL_TIMER_CFG1 0x4
+ * CORE_CSR_CDC_REFCOUNT_CFG 0xCB732020
+ * CORE_CSR_CDC_COARSE_CAL_CFG 0xB19
+	 * CORE_CSR_CDC_DELAY_CFG		0x4E2
+ * CORE_CDC_OFFSET_CFG 0x0
+ * CORE_CDC_SLAVE_DDA_CFG 0x16334
+ */
+
+ writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+ writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
+ writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+ writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
+ writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
+ writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
+ writel_relaxed(0x4E2, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+ writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
+ writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
+
+ /* CDC HW Calibration */
+
+ /* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+ | CORE_SW_TRIG_FULL_CALIB),
+ host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ /* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+ & ~CORE_SW_TRIG_FULL_CALIB),
+ host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ /* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
+ writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+ | CORE_HW_AUTOCAL_ENA),
+ host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+ /* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
+ host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+
+ mb();
+
+ /* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
+ ret = readl_poll_timeout(host->ioaddr + CORE_CSR_CDC_STATUS0,
+ calib_done, (calib_done & CORE_CALIBRATION_DONE), 1, 50);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CDC Calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ /* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
+ cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+ & CORE_CDC_ERROR_CODE_MASK;
+ if (cdc_err) {
+ pr_err("%s: %s: CDC Error Code %d\n",
+ mmc_hostname(host->mmc), __func__, cdc_err);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ | CORE_START_CDC_TRAFFIC),
+ host->ioaddr + msm_host_offset->CORE_DDR_200_CFG);
+out:
+ pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_cm_dll_sdc4_calibration(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u32 dll_status, ddr_config;
+ int ret = 0;
+
+ pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+	 * Reprogram the value in case it was modified by the
+	 * bootloader.
+ */
+ if (msm_host->rclk_delay_fix) {
+ writel_relaxed(DDR_CONFIG_2_POR_VAL, host->ioaddr +
+ msm_host_offset->CORE_DDR_CONFIG_2);
+ } else {
+ ddr_config = DDR_CONFIG_POR_VAL &
+ ~DDR_CONFIG_PRG_RCLK_DLY_MASK;
+ ddr_config |= DDR_CONFIG_PRG_RCLK_DLY;
+ writel_relaxed(ddr_config, host->ioaddr +
+ msm_host_offset->CORE_DDR_CONFIG);
+ }
+
+ if (msm_host->enhanced_strobe && mmc_card_strobe(msm_host->mmc->card))
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG)
+ | CORE_CMDIN_RCLK_EN), host->ioaddr +
+ msm_host_offset->CORE_DDR_200_CFG);
+
+ /* Write 1 to DDR_CAL_EN field in CORE_DLL_CONFIG_2 */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG_2)
+ | CORE_DDR_CAL_EN),
+ host->ioaddr + msm_host_offset->CORE_DLL_CONFIG_2);
+
+ /* Poll on DDR_DLL_LOCK bit in CORE_DLL_STATUS to be set */
+ ret = readl_poll_timeout(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS,
+ dll_status, (dll_status & CORE_DDR_DLL_LOCK), 10, 1000);
+
+ if (ret == -ETIMEDOUT) {
+ pr_err("%s: %s: CM_DLL_SDC4 Calibration was not completed\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ /*
+	 * Set the CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3.
+	 * When MCLK is gated off, it must not be gated for less than
+	 * 0.5us, and MCLK must be switched back on for at least 1us
+	 * before DATA starts coming in. Controllers with the 14lpp
+	 * tech DLL cannot guarantee this requirement, so PWRSAVE_DLL
+	 * must not be turned on for host controllers using that DLL.
+ */
+ if (!msm_host->use_14lpp_dll)
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ | CORE_PWRSAVE_DLL), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
+ mb();
+out:
+ pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_enhanced_strobe(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct mmc_host *mmc = host->mmc;
+
+ pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+ if (!msm_host->enhanced_strobe || !mmc_card_strobe(mmc->card)) {
+ pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
+ mmc_hostname(mmc));
+ return -EINVAL;
+ }
+
+ if (msm_host->calibration_done ||
+ !(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
+ return 0;
+ }
+
+ /*
+ * Reset the tuning block.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+ if (!ret)
+ msm_host->calibration_done = true;
+ pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static int sdhci_msm_hs400_dll_calibration(struct sdhci_host *host)
+{
+ int ret = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+ /*
+ * Retuning in HS400 (DDR mode) will fail, just reset the
+ * tuning block and restore the saved tuning phase.
+ */
+ ret = msm_init_cm_dll(host);
+ if (ret)
+ goto out;
+
+ /* Set the selected phase in delay line hw block */
+ ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+ if (ret)
+ goto out;
+
+ /* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_CMD_DAT_TRACK_SEL), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ if (msm_host->use_cdclp533)
+ /* Calibrate CDCLP533 DLL HW */
+ ret = sdhci_msm_cdclp533_calibration(host);
+ else
+ /* Calibrate CM_DLL_SDC4 HW */
+ ret = sdhci_msm_cm_dll_sdc4_calibration(host);
+out:
+ pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+ __func__, ret);
+ return ret;
+}
+
+static void sdhci_msm_set_mmc_drv_type(struct sdhci_host *host, u32 opcode,
+ u8 drv_type)
+{
+ struct mmc_command cmd = {0};
+ struct mmc_request mrq = {NULL};
+ struct mmc_host *mmc = host->mmc;
+ u8 val = ((drv_type << 4) | 2);
+
+ cmd.opcode = MMC_SWITCH;
+ cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
+ (EXT_CSD_HS_TIMING << 16) |
+ (val << 8) |
+ EXT_CSD_CMD_SET_NORMAL;
+ cmd.flags = MMC_CMD_AC | MMC_RSP_R1B;
+ /* 1 sec */
+ cmd.busy_timeout = 1000 * 1000;
+
+ memset(cmd.resp, 0, sizeof(cmd.resp));
+ cmd.retries = 3;
+
+ mrq.cmd = &cmd;
+ cmd.data = NULL;
+
+ mmc_wait_for_req(mmc, &mrq);
+ pr_debug("%s: %s: set card drive type to %d\n",
+ mmc_hostname(mmc), __func__,
+ drv_type);
+}
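
For reference, val here follows the EXT_CSD_HS_TIMING encoding: the
requested driver strength goes into bits [7:4] while the timing field in
bits [3:0] stays at 2 (HS200), so e.g. drv_type = 1 yields val = 0x12.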
+
+int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
+{
+ unsigned long flags;
int tuning_seq_cnt = 3;
- u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
+ u8 phase, *data_buf, tuned_phases[NUM_TUNING_PHASES], tuned_phase_cnt;
+ const u32 *tuning_block_pattern = tuning_block_64;
+ int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
int rc;
struct mmc_host *mmc = host->mmc;
- struct mmc_ios ios = host->mmc->ios;
+ struct mmc_ios ios = host->mmc->ios;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ u8 drv_type = 0;
+ bool drv_type_changed = false;
+ struct mmc_card *card = host->mmc->card;
+ int sts_retry;
+ u8 last_good_phase = 0;
/*
	 * Tuning is required for SDR104, HS200 and HS400 modes, and only
	 * if the clock frequency is greater than 100MHz in these modes.
*/
- if (host->clock <= 100 * 1000 * 1000 ||
- !((ios.timing == MMC_TIMING_MMC_HS200) ||
- (ios.timing == MMC_TIMING_UHS_SDR104)))
+ if (host->clock <= CORE_FREQ_100MHZ ||
+ !((ios.timing == MMC_TIMING_MMC_HS400) ||
+ (ios.timing == MMC_TIMING_MMC_HS200) ||
+ (ios.timing == MMC_TIMING_UHS_SDR104)))
+ return 0;
+
+ /*
+ * Don't allow re-tuning for CRC errors observed for any commands
+ * that are sent during tuning sequence itself.
+ */
+ if (msm_host->tuning_in_progress)
return 0;
+ msm_host->tuning_in_progress = true;
+ pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
+
+	/* CDC/SDC4 DLL HW calibration is only required for HS400 mode */
+ if (msm_host->tuning_done && !msm_host->calibration_done &&
+ (mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
+ rc = sdhci_msm_hs400_dll_calibration(host);
+ spin_lock_irqsave(&host->lock, flags);
+ if (!rc)
+ msm_host->calibration_done = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
+ (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) {
+ tuning_block_pattern = tuning_block_128;
+ size = sizeof(tuning_block_128);
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ data_buf = kmalloc(size, GFP_KERNEL);
+ if (!data_buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
retry:
- /* First of all reset the tuning block */
+ tuned_phase_cnt = 0;
+
+ /* first of all reset the tuning block */
rc = msm_init_cm_dll(host);
if (rc)
- return rc;
+ goto kfree;
phase = 0;
do {
- /* Set the phase in delay line hw block */
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct mmc_request mrq = {
+ .cmd = &cmd,
+ .data = &data
+ };
+ struct scatterlist sg;
+ struct mmc_command sts_cmd = {0};
+
+ /* set the phase in delay line hw block */
rc = msm_config_cm_dll_phase(host, phase);
if (rc)
- return rc;
+ goto kfree;
- rc = mmc_send_tuning(mmc, opcode, NULL);
- if (!rc) {
- /* Tuning is successful at this tuning point */
+ cmd.opcode = opcode;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ data.blksz = size;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.timeout_ns = 1000 * 1000 * 1000; /* 1 sec */
+
+ data.sg = &sg;
+ data.sg_len = 1;
+ sg_init_one(&sg, data_buf, size);
+ memset(data_buf, 0, size);
+ mmc_wait_for_req(mmc, &mrq);
+
+ if (card && (cmd.error || data.error)) {
+ /*
+ * Set the dll to last known good phase while sending
+ * status command to ensure that status command won't
+ * fail due to bad phase.
+ */
+ if (tuned_phase_cnt)
+ last_good_phase =
+ tuned_phases[tuned_phase_cnt-1];
+ else if (msm_host->saved_tuning_phase !=
+ INVALID_TUNING_PHASE)
+ last_good_phase = msm_host->saved_tuning_phase;
+
+ rc = msm_config_cm_dll_phase(host, last_good_phase);
+ if (rc)
+ goto kfree;
+
+ sts_cmd.opcode = MMC_SEND_STATUS;
+ sts_cmd.arg = card->rca << 16;
+ sts_cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ sts_retry = 5;
+ while (sts_retry) {
+ mmc_wait_for_cmd(mmc, &sts_cmd, 0);
+
+ if (sts_cmd.error ||
+ (R1_CURRENT_STATE(sts_cmd.resp[0])
+ != R1_STATE_TRAN)) {
+ sts_retry--;
+ /*
+					 * Wait for at least 146 MCLK cycles
+					 * for the card to move to the TRANS
+					 * state. As the MCLK would be at
+					 * least 200MHz during tuning, we
+					 * need at most a 0.73us delay; to be
+					 * on the safer side a 1ms delay is
+					 * used.
+ */
+ usleep_range(1000, 1200);
+ pr_debug("%s: phase %d sts cmd err %d resp 0x%x\n",
+ mmc_hostname(mmc), phase,
+ sts_cmd.error, sts_cmd.resp[0]);
+ continue;
+ }
+ break;
+			}
+ }
+
+ if (!cmd.error && !data.error &&
+ !memcmp(data_buf, tuning_block_pattern, size)) {
+ /* tuning is successful at this tuning point */
tuned_phases[tuned_phase_cnt++] = phase;
- dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n",
- mmc_hostname(mmc), phase);
+ pr_debug("%s: %s: found *** good *** phase = %d\n",
+ mmc_hostname(mmc), __func__, phase);
+ } else {
+ pr_debug("%s: %s: found ## bad ## phase = %d\n",
+ mmc_hostname(mmc), __func__, phase);
+ }
+ } while (++phase < 16);
+
+ if ((tuned_phase_cnt == NUM_TUNING_PHASES) &&
+ card && mmc_card_mmc(card)) {
+ /*
+		 * If all phases pass then it's a problem. So change the card's
+		 * drive type to a different value, if supported, and repeat
+ * tuning until at least one phase fails. Then set the original
+ * drive type back.
+ *
+ * If all the phases still pass after trying all possible
+ * drive types, then one of those 16 phases will be picked.
+ * This is no different from what was going on before the
+ * modification to change drive type and retune.
+ */
+ pr_debug("%s: tuned phases count: %d\n", mmc_hostname(mmc),
+ tuned_phase_cnt);
+
+		/* set drive type to another value; the default setting is 0x0 */
+ while (++drv_type <= MAX_DRV_TYPES_SUPPORTED_HS200) {
+ pr_debug("%s: trying different drive strength (%d)\n",
+ mmc_hostname(mmc), drv_type);
+ if (card->ext_csd.raw_driver_strength &
+ (1 << drv_type)) {
+ sdhci_msm_set_mmc_drv_type(host, opcode,
+ drv_type);
+ if (!drv_type_changed)
+ drv_type_changed = true;
+ goto retry;
+ }
}
- } while (++phase < ARRAY_SIZE(tuned_phases));
+ }
+
+ /* reset drive type to default (50 ohm) if changed */
+ if (drv_type_changed)
+ sdhci_msm_set_mmc_drv_type(host, opcode, 0);
if (tuned_phase_cnt) {
rc = msm_find_most_appropriate_phase(host, tuned_phases,
- tuned_phase_cnt);
+ tuned_phase_cnt);
if (rc < 0)
- return rc;
+ goto kfree;
else
- phase = rc;
+ phase = (u8)rc;
/*
* Finally set the selected phase in delay
@@ -396,70 +1330,3020 @@ retry:
*/
rc = msm_config_cm_dll_phase(host, phase);
if (rc)
- return rc;
- dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n",
- mmc_hostname(mmc), phase);
+ goto kfree;
+ msm_host->saved_tuning_phase = phase;
+ pr_debug("%s: %s: finally setting the tuning phase to %d\n",
+ mmc_hostname(mmc), __func__, phase);
} else {
if (--tuning_seq_cnt)
goto retry;
- /* Tuning failed */
- dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n",
- mmc_hostname(mmc));
+ /* tuning failed */
+ pr_err("%s: %s: no tuning point found\n",
+ mmc_hostname(mmc), __func__);
rc = -EIO;
}
+kfree:
+ kfree(data_buf);
+out:
+ spin_lock_irqsave(&host->lock, flags);
+ if (!rc)
+ msm_host->tuning_done = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+ msm_host->tuning_in_progress = false;
+ pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
return rc;
}
-static const struct of_device_id sdhci_msm_dt_match[] = {
- { .compatible = "qcom,sdhci-msm-v4" },
- {},
-};
+static int sdhci_msm_setup_gpio(struct sdhci_msm_pltfm_data *pdata, bool enable)
+{
+ struct sdhci_msm_gpio_data *curr;
+ int i, ret = 0;
-MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+ curr = pdata->pin_data->gpio_data;
+ for (i = 0; i < curr->size; i++) {
+ if (!gpio_is_valid(curr->gpio[i].no)) {
+ ret = -EINVAL;
+ pr_err("%s: Invalid gpio = %d\n", __func__,
+ curr->gpio[i].no);
+ goto free_gpios;
+ }
+ if (enable) {
+ ret = gpio_request(curr->gpio[i].no,
+ curr->gpio[i].name);
+ if (ret) {
+ pr_err("%s: gpio_request(%d, %s) failed %d\n",
+ __func__, curr->gpio[i].no,
+ curr->gpio[i].name, ret);
+ goto free_gpios;
+ }
+ curr->gpio[i].is_enabled = true;
+ } else {
+ gpio_free(curr->gpio[i].no);
+ curr->gpio[i].is_enabled = false;
+ }
+ }
+ return ret;
+
+free_gpios:
+ for (i--; i >= 0; i--) {
+ gpio_free(curr->gpio[i].no);
+ curr->gpio[i].is_enabled = false;
+ }
+ return ret;
+}
+
+static int sdhci_msm_setup_pinctrl(struct sdhci_msm_pltfm_data *pdata,
+ bool enable)
+{
+ int ret = 0;
+
+ if (enable)
+ ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+ pdata->pctrl_data->pins_active);
+ else
+ ret = pinctrl_select_state(pdata->pctrl_data->pctrl,
+ pdata->pctrl_data->pins_sleep);
+
+ if (ret < 0)
+ pr_err("%s state for pinctrl failed with %d\n",
+ enable ? "Enabling" : "Disabling", ret);
+
+ return ret;
+}
+
+static int sdhci_msm_setup_pins(struct sdhci_msm_pltfm_data *pdata, bool enable)
+{
+ int ret = 0;
+
+ if (pdata->pin_cfg_sts == enable) {
+ return 0;
+ } else if (pdata->pctrl_data) {
+ ret = sdhci_msm_setup_pinctrl(pdata, enable);
+ goto out;
+ } else if (!pdata->pin_data) {
+ return 0;
+ }
+ if (pdata->pin_data->is_gpio)
+ ret = sdhci_msm_setup_gpio(pdata, enable);
+out:
+ if (!ret)
+ pdata->pin_cfg_sts = enable;
+
+ return ret;
+}
+
+static int sdhci_msm_dt_get_array(struct device *dev, const char *prop_name,
+ u32 **out, int *len, u32 size)
+{
+ int ret = 0;
+ struct device_node *np = dev->of_node;
+ size_t sz;
+ u32 *arr = NULL;
+
+ if (!of_get_property(np, prop_name, len)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ sz = *len = *len / sizeof(*arr);
+ if (sz <= 0 || (size > 0 && (sz > size))) {
+ dev_err(dev, "%s invalid size\n", prop_name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ arr = devm_kzalloc(dev, sz * sizeof(*arr), GFP_KERNEL);
+ if (!arr) {
+ dev_err(dev, "%s failed allocating memory\n", prop_name);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = of_property_read_u32_array(np, prop_name, arr, sz);
+ if (ret < 0) {
+ dev_err(dev, "%s failed reading array %d\n", prop_name, ret);
+ goto out;
+ }
+ *out = arr;
+out:
+ if (ret)
+ *len = 0;
+ return ret;
+}
+
+#define MAX_PROP_SIZE 32
+static int sdhci_msm_dt_parse_vreg_info(struct device *dev,
+ struct sdhci_msm_reg_data **vreg_data, const char *vreg_name)
+{
+ int len, ret = 0;
+ const __be32 *prop;
+ char prop_name[MAX_PROP_SIZE];
+ struct sdhci_msm_reg_data *vreg;
+ struct device_node *np = dev->of_node;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
+ if (!of_parse_phandle(np, prop_name, 0)) {
+ dev_info(dev, "No vreg data found for %s\n", vreg_name);
+ return ret;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg) {
+ dev_err(dev, "No memory for vreg: %s\n", vreg_name);
+ ret = -ENOMEM;
+ return ret;
+ }
+
+ vreg->name = vreg_name;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-always-on", vreg_name);
+ if (of_get_property(np, prop_name, NULL))
+ vreg->is_always_on = true;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-lpm-sup", vreg_name);
+ if (of_get_property(np, prop_name, NULL))
+ vreg->lpm_sup = true;
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-voltage-level", vreg_name);
+ prop = of_get_property(np, prop_name, &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_warn(dev, "%s %s property\n",
+ prop ? "invalid format" : "no", prop_name);
+ } else {
+ vreg->low_vol_level = be32_to_cpup(&prop[0]);
+ vreg->high_vol_level = be32_to_cpup(&prop[1]);
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE,
+ "qcom,%s-current-level", vreg_name);
+ prop = of_get_property(np, prop_name, &len);
+ if (!prop || (len != (2 * sizeof(__be32)))) {
+ dev_warn(dev, "%s %s property\n",
+ prop ? "invalid format" : "no", prop_name);
+ } else {
+ vreg->lpm_uA = be32_to_cpup(&prop[0]);
+ vreg->hpm_uA = be32_to_cpup(&prop[1]);
+ }
+
+ *vreg_data = vreg;
+ dev_dbg(dev, "%s: %s %s vol=[%d %d]uV, curr=[%d %d]uA\n",
+ vreg->name, vreg->is_always_on ? "always_on," : "",
+ vreg->lpm_sup ? "lpm_sup," : "", vreg->low_vol_level,
+ vreg->high_vol_level, vreg->lpm_uA, vreg->hpm_uA);
+
+ return ret;
+}
+
+/* GPIO/Pad data extraction */
+static int sdhci_msm_parse_pinctrl_info(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ struct sdhci_pinctrl_data *pctrl_data;
+ struct pinctrl *pctrl;
+ int ret = 0;
+
+ /* Try to obtain pinctrl handle */
+ pctrl = devm_pinctrl_get(dev);
+ if (IS_ERR(pctrl)) {
+ ret = PTR_ERR(pctrl);
+ goto out;
+ }
+ pctrl_data = devm_kzalloc(dev, sizeof(*pctrl_data), GFP_KERNEL);
+ if (!pctrl_data) {
+ dev_err(dev, "No memory for sdhci_pinctrl_data\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ pctrl_data->pctrl = pctrl;
+ /* Look-up and keep the states handy to be used later */
+ pctrl_data->pins_active = pinctrl_lookup_state(
+ pctrl_data->pctrl, "active");
+ if (IS_ERR(pctrl_data->pins_active)) {
+ ret = PTR_ERR(pctrl_data->pins_active);
+ dev_err(dev, "Could not get active pinstates, err:%d\n", ret);
+ goto out;
+ }
+ pctrl_data->pins_sleep = pinctrl_lookup_state(
+ pctrl_data->pctrl, "sleep");
+ if (IS_ERR(pctrl_data->pins_sleep)) {
+ ret = PTR_ERR(pctrl_data->pins_sleep);
+ dev_err(dev, "Could not get sleep pinstates, err:%d\n", ret);
+ goto out;
+ }
+ pdata->pctrl_data = pctrl_data;
+out:
+ return ret;
+}
+
+#define GPIO_NAME_MAX_LEN 32
+static int sdhci_msm_dt_parse_gpio_info(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ int ret = 0, cnt, i;
+ struct sdhci_msm_pin_data *pin_data;
+ struct device_node *np = dev->of_node;
+
+ ret = sdhci_msm_parse_pinctrl_info(dev, pdata);
+ if (!ret) {
+ goto out;
+ } else if (ret == -EPROBE_DEFER) {
+ dev_err(dev, "Pinctrl framework not registered, err:%d\n", ret);
+ goto out;
+ } else {
+ dev_err(dev, "Parsing Pinctrl failed with %d, falling back on GPIO lib\n",
+ ret);
+ ret = 0;
+ }
+ pin_data = devm_kzalloc(dev, sizeof(*pin_data), GFP_KERNEL);
+ if (!pin_data) {
+ dev_err(dev, "No memory for pin_data\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cnt = of_gpio_count(np);
+ if (cnt > 0) {
+ pin_data->is_gpio = true;
+ pin_data->gpio_data = devm_kzalloc(dev,
+ sizeof(struct sdhci_msm_gpio_data), GFP_KERNEL);
+ if (!pin_data->gpio_data) {
+ dev_err(dev, "No memory for gpio_data\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ pin_data->gpio_data->size = cnt;
+ pin_data->gpio_data->gpio = devm_kzalloc(dev, cnt *
+ sizeof(struct sdhci_msm_gpio), GFP_KERNEL);
+
+ if (!pin_data->gpio_data->gpio) {
+ dev_err(dev, "No memory for gpio\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < cnt; i++) {
+			const char *name = NULL;
+			char *result;
+
+			pin_data->gpio_data->gpio[i].no = of_get_gpio(np, i);
+			of_property_read_string_index(np,
+					"qcom,gpio-names", i, &name);
+
+			/* devm allocation: a stack buffer would dangle */
+			result = devm_kzalloc(dev, GPIO_NAME_MAX_LEN,
+					GFP_KERNEL);
+			if (!result) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			snprintf(result, GPIO_NAME_MAX_LEN, "%s-%s",
+					dev_name(dev), name ? name : "?");
+			pin_data->gpio_data->gpio[i].name = result;
+ dev_dbg(dev, "%s: gpio[%s] = %d\n", __func__,
+ pin_data->gpio_data->gpio[i].name,
+ pin_data->gpio_data->gpio[i].no);
+ }
+ }
+ pdata->pin_data = pin_data;
+out:
+ if (ret)
+ dev_err(dev, "%s failed with err %d\n", __func__, ret);
+ return ret;
+}
+
+#ifdef CONFIG_SMP
+static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata)
+{
+ pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_IRQ;
+}
+#else
+static inline void parse_affine_irq(struct sdhci_msm_pltfm_data *pdata) { }
+#endif
+
+static int sdhci_msm_pm_qos_parse_irq(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ const char *str;
+ u32 cpu;
+ int ret = 0;
+ int i;
+
+ pdata->pm_qos_data.irq_valid = false;
+ pdata->pm_qos_data.irq_req_type = PM_QOS_REQ_AFFINE_CORES;
+ if (!of_property_read_string(np, "qcom,pm-qos-irq-type", &str) &&
+ !strcmp(str, "affine_irq")) {
+ parse_affine_irq(pdata);
+ }
+
+ /* must specify cpu for "affine_cores" type */
+ if (pdata->pm_qos_data.irq_req_type == PM_QOS_REQ_AFFINE_CORES) {
+ pdata->pm_qos_data.irq_cpu = -1;
+ ret = of_property_read_u32(np, "qcom,pm-qos-irq-cpu", &cpu);
+ if (ret) {
+ dev_err(dev, "%s: error %d reading irq cpu\n", __func__,
+ ret);
+ goto out;
+ }
+		if (cpu >= num_possible_cpus()) {
+ dev_err(dev, "%s: invalid irq cpu %d (NR_CPUS=%d)\n",
+ __func__, cpu, num_possible_cpus());
+ ret = -EINVAL;
+ goto out;
+ }
+ pdata->pm_qos_data.irq_cpu = cpu;
+ }
+
+ if (of_property_count_u32_elems(np, "qcom,pm-qos-irq-latency") !=
+ SDHCI_POWER_POLICY_NUM) {
+ dev_err(dev, "%s: could not read %d values for 'qcom,pm-qos-irq-latency'\n",
+ __func__, SDHCI_POWER_POLICY_NUM);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < SDHCI_POWER_POLICY_NUM; i++)
+ of_property_read_u32_index(np, "qcom,pm-qos-irq-latency", i,
+ &pdata->pm_qos_data.irq_latency.latency[i]);
+
+ pdata->pm_qos_data.irq_valid = true;
+out:
+ return ret;
+}
+
+static int sdhci_msm_pm_qos_parse_cpu_groups(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ u32 mask;
+ int nr_groups;
+ int ret;
+ int i;
+
+ /* Read cpu group mapping */
+ nr_groups = of_property_count_u32_elems(np, "qcom,pm-qos-cpu-groups");
+ if (nr_groups <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+ pdata->pm_qos_data.cpu_group_map.nr_groups = nr_groups;
+ pdata->pm_qos_data.cpu_group_map.mask =
+ kcalloc(nr_groups, sizeof(cpumask_t), GFP_KERNEL);
+ if (!pdata->pm_qos_data.cpu_group_map.mask) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < nr_groups; i++) {
+ of_property_read_u32_index(np, "qcom,pm-qos-cpu-groups",
+ i, &mask);
+
+ pdata->pm_qos_data.cpu_group_map.mask[i].bits[0] = mask;
+ if (!cpumask_subset(&pdata->pm_qos_data.cpu_group_map.mask[i],
+ cpu_possible_mask)) {
+ dev_err(dev, "%s: invalid mask 0x%x of cpu group #%d\n",
+ __func__, mask, i);
+ ret = -EINVAL;
+ goto free_res;
+ }
+ }
+ return 0;
+
+free_res:
+ kfree(pdata->pm_qos_data.cpu_group_map.mask);
+out:
+ return ret;
+}
+
+static int sdhci_msm_pm_qos_parse_latency(struct device *dev, const char *name,
+ int nr_groups, struct sdhci_msm_pm_qos_latency **latency)
+{
+ struct device_node *np = dev->of_node;
+ struct sdhci_msm_pm_qos_latency *values;
+ int ret;
+ int i;
+ int group;
+ int cfg;
+
+ ret = of_property_count_u32_elems(np, name);
+ if (ret > 0 && ret != SDHCI_POWER_POLICY_NUM * nr_groups) {
+ dev_err(dev, "%s: invalid number of values for property %s: expected=%d actual=%d\n",
+ __func__, name, SDHCI_POWER_POLICY_NUM * nr_groups,
+ ret);
+ return -EINVAL;
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ values = kcalloc(nr_groups, sizeof(struct sdhci_msm_pm_qos_latency),
+ GFP_KERNEL);
+ if (!values)
+ return -ENOMEM;
+
+ for (i = 0; i < SDHCI_POWER_POLICY_NUM * nr_groups; i++) {
+ group = i / SDHCI_POWER_POLICY_NUM;
+ cfg = i % SDHCI_POWER_POLICY_NUM;
+ of_property_read_u32_index(np, name, i,
+ &(values[group].latency[cfg]));
+ }
+
+ *latency = values;
+ return 0;
+}
+
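+/*
+ * The latency property parsed above is a flat array of
+ * nr_groups * SDHCI_POWER_POLICY_NUM values, grouped by cpu group.
+ * For example (hypothetical values, two groups, two power policies):
+ *
+ *	qcom,pm-qos-legacy-latency-us = <3 100 3 100>;
+ *
+ * maps to values[0].latency[0..1] = {3, 100} for group 0 and
+ * values[1].latency[0..1] = {3, 100} for group 1.
+ */
+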
+static void sdhci_msm_pm_qos_parse(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata)
+{
+ if (sdhci_msm_pm_qos_parse_irq(dev, pdata))
+ dev_notice(dev, "%s: PM QoS voting for IRQ will be disabled\n",
+ __func__);
+
+ if (!sdhci_msm_pm_qos_parse_cpu_groups(dev, pdata)) {
+ pdata->pm_qos_data.cmdq_valid =
+ !sdhci_msm_pm_qos_parse_latency(dev,
+ "qcom,pm-qos-cmdq-latency-us",
+ pdata->pm_qos_data.cpu_group_map.nr_groups,
+ &pdata->pm_qos_data.cmdq_latency);
+ pdata->pm_qos_data.legacy_valid =
+ !sdhci_msm_pm_qos_parse_latency(dev,
+ "qcom,pm-qos-legacy-latency-us",
+ pdata->pm_qos_data.cpu_group_map.nr_groups,
+ &pdata->pm_qos_data.latency);
+ if (!pdata->pm_qos_data.cmdq_valid &&
+ !pdata->pm_qos_data.legacy_valid) {
+ /* clean-up previously allocated arrays */
+ kfree(pdata->pm_qos_data.latency);
+ kfree(pdata->pm_qos_data.cmdq_latency);
+ dev_err(dev, "%s: invalid PM QoS latency values. Voting for cpu group will be disabled\n",
+ __func__);
+ }
+ } else {
+ dev_notice(dev, "%s: PM QoS voting for cpu group will be disabled\n",
+ __func__);
+ }
+}
+
+/* Parse platform data */
+static
+struct sdhci_msm_pltfm_data *sdhci_msm_populate_pdata(struct device *dev,
+ struct sdhci_msm_host *msm_host)
+{
+ struct sdhci_msm_pltfm_data *pdata = NULL;
+ struct device_node *np = dev->of_node;
+ u32 bus_width = 0;
+ int len, i;
+ int clk_table_len;
+ u32 *clk_table = NULL;
+ int ice_clk_table_len;
+ u32 *ice_clk_table = NULL;
+ enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+ const char *lower_bus_speed = NULL;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "failed to allocate memory for platform data\n");
+ goto out;
+ }
+
+ pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
+ if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
+ pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
+ of_property_read_u32(np, "qcom,bus-width", &bus_width);
+ if (bus_width == 8)
+ pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
+ else if (bus_width == 4)
+ pdata->mmc_bus_width = MMC_CAP_4_BIT_DATA;
+ else {
+ dev_notice(dev, "invalid bus-width, default to 1-bit mode\n");
+ pdata->mmc_bus_width = 0;
+ }
+
+ if (sdhci_msm_dt_get_array(dev, "qcom,devfreq,freq-table",
+ &msm_host->mmc->clk_scaling.pltfm_freq_table,
+ &msm_host->mmc->clk_scaling.pltfm_freq_table_sz, 0))
+ pr_debug("%s: no clock scaling frequencies were supplied\n",
+ dev_name(dev));
+ else if (!msm_host->mmc->clk_scaling.pltfm_freq_table ||
+ !msm_host->mmc->clk_scaling.pltfm_freq_table_sz)
+ dev_err(dev, "bad dts clock scaling frequencies\n");
+
+	/*
+	 * A few hosts can support DDR52 mode at the same lower
+	 * system voltage corner as high-speed mode. In such cases,
+	 * it is always better to put the card in DDR mode, which
+	 * improves performance without any power impact.
+	 */
+ if (!of_property_read_string(np, "qcom,scaling-lower-bus-speed-mode",
+ &lower_bus_speed)) {
+ if (!strcmp(lower_bus_speed, "DDR52"))
+ msm_host->mmc->clk_scaling.lower_bus_speed_mode |=
+ MMC_SCALING_LOWER_DDR52_MODE;
+ }
+
+ if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
+ &clk_table, &clk_table_len, 0)) {
+ dev_err(dev, "failed parsing supported clock rates\n");
+ goto out;
+ }
+ if (!clk_table || !clk_table_len) {
+ dev_err(dev, "Invalid clock table\n");
+ goto out;
+ }
+ pdata->sup_clk_table = clk_table;
+ pdata->sup_clk_cnt = clk_table_len;
+
+ if (msm_host->ice.pdev) {
+ if (sdhci_msm_dt_get_array(dev, "qcom,ice-clk-rates",
+ &ice_clk_table, &ice_clk_table_len, 0)) {
+ dev_err(dev, "failed parsing supported ice clock rates\n");
+ goto out;
+ }
+		if (!ice_clk_table || !ice_clk_table_len) {
+			dev_err(dev, "Invalid ICE clock table\n");
+			goto out;
+		}
+		if (ice_clk_table_len != 2) {
+			dev_err(dev, "ICE clock table must contain the max and min frequencies\n");
+			goto out;
+		}
+ pdata->sup_ice_clk_table = ice_clk_table;
+ pdata->sup_ice_clk_cnt = ice_clk_table_len;
+ pdata->ice_clk_max = pdata->sup_ice_clk_table[0];
+ pdata->ice_clk_min = pdata->sup_ice_clk_table[1];
+ dev_dbg(dev, "supported ICE clock rates (Hz): max: %u min: %u\n",
+ pdata->ice_clk_max, pdata->ice_clk_min);
+ }
+
+	pdata->vreg_data = devm_kzalloc(dev,
+					sizeof(struct sdhci_msm_slot_reg_data),
+					GFP_KERNEL);
+ if (!pdata->vreg_data) {
+ dev_err(dev, "failed to allocate memory for vreg data\n");
+ goto out;
+ }
+
+ if (sdhci_msm_dt_parse_vreg_info(dev, &pdata->vreg_data->vdd_data,
+ "vdd")) {
+ dev_err(dev, "failed parsing vdd data\n");
+ goto out;
+ }
+ if (sdhci_msm_dt_parse_vreg_info(dev,
+ &pdata->vreg_data->vdd_io_data,
+ "vdd-io")) {
+ dev_err(dev, "failed parsing vdd-io data\n");
+ goto out;
+ }
+
+ if (sdhci_msm_dt_parse_gpio_info(dev, pdata)) {
+ dev_err(dev, "failed parsing gpio data\n");
+ goto out;
+ }
+
+ len = of_property_count_strings(np, "qcom,bus-speed-mode");
+
+ for (i = 0; i < len; i++) {
+ const char *name = NULL;
+
+ of_property_read_string_index(np,
+ "qcom,bus-speed-mode", i, &name);
+ if (!name)
+ continue;
+
+ if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
+ pdata->caps2 |= MMC_CAP2_HS400_1_8V;
+ else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
+ pdata->caps2 |= MMC_CAP2_HS400_1_2V;
+ else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
+ pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
+ else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
+ pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
+ else if (!strncmp(name, "DDR_1p8v", sizeof("DDR_1p8v")))
+ pdata->caps |= MMC_CAP_1_8V_DDR
+ | MMC_CAP_UHS_DDR50;
+ else if (!strncmp(name, "DDR_1p2v", sizeof("DDR_1p2v")))
+ pdata->caps |= MMC_CAP_1_2V_DDR
+ | MMC_CAP_UHS_DDR50;
+ }
+
+ if (of_get_property(np, "qcom,nonremovable", NULL))
+ pdata->nonremovable = true;
+
+ if (of_get_property(np, "qcom,nonhotplug", NULL))
+ pdata->nonhotplug = true;
+
+ pdata->largeaddressbus =
+ of_property_read_bool(np, "qcom,large-address-bus");
+
+ if (of_property_read_bool(np, "qcom,wakeup-on-idle"))
+ msm_host->mmc->wakeup_on_idle = true;
+
+ sdhci_msm_pm_qos_parse(dev, pdata);
+
+ if (of_get_property(np, "qcom,core_3_0v_support", NULL))
+ msm_host->core_3_0v_support = true;
+
+ pdata->sdr104_wa = of_property_read_bool(np, "qcom,sdr104-wa");
+
+ return pdata;
+out:
+ return NULL;
+}
+
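+/*
+ * Minimal illustrative DT node consumed by sdhci_msm_populate_pdata();
+ * the property values below are examples only:
+ *
+ *	qcom,bus-width = <4>;
+ *	qcom,clk-rates = <400000 25000000 50000000 100000000 200000000>;
+ *	qcom,bus-speed-mode = "HS200_1p8v", "DDR_1p8v";
+ *	qcom,nonremovable;
+ */
+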
+/* Returns required bandwidth in Bytes per Sec */
+static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
+ struct mmc_ios *ios)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ unsigned int bw;
+
+ bw = msm_host->clk_rate;
+	/*
+	 * For DDR mode, the SDCC controller clock runs at double
+	 * the rate of the actual clock that goes to the card.
+	 */
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ bw /= 2;
+ else if (ios->bus_width == MMC_BUS_WIDTH_1)
+ bw /= 8;
+
+ return bw;
+}
+
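+/*
+ * Example: with the controller clock at 100 MHz and a 4-bit bus,
+ * sdhci_get_bw_required() returns 100000000 / 2 = 50000000 bytes/sec;
+ * an 8-bit bus uses the full clock rate as the bandwidth estimate.
+ */
+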
+static int sdhci_msm_bus_get_vote_for_bw(struct sdhci_msm_host *host,
+ unsigned int bw)
+{
+ unsigned int *table = host->pdata->voting_data->bw_vecs;
+ unsigned int size = host->pdata->voting_data->bw_vecs_size;
+ int i;
+
+ if (host->msm_bus_vote.is_max_bw_needed && bw)
+ return host->msm_bus_vote.max_bw_vote;
+
+ for (i = 0; i < size; i++) {
+ if (bw <= table[i])
+ break;
+ }
+
+ if (i && (i == size))
+ i--;
+
+ return i;
+}
+
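+/*
+ * Example (hypothetical bw_vecs table): with bw_vecs = {0, 25000000,
+ * 50000000, 200000000}, a request of 30000000 bytes/sec falls between
+ * entries 1 and 2, so the returned vote index is 2; anything above the
+ * last entry is clamped to the last index.
+ */
+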
+/*
+ * This function must be called with host lock acquired.
+ * Caller of this function should also ensure that msm bus client
+ * handle is not null.
+ */
+static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
+ int vote,
+ unsigned long *flags)
+{
+ struct sdhci_host *host = platform_get_drvdata(msm_host->pdev);
+ int rc = 0;
+
+ BUG_ON(!flags);
+
+ if (vote != msm_host->msm_bus_vote.curr_vote) {
+ spin_unlock_irqrestore(&host->lock, *flags);
+ rc = msm_bus_scale_client_update_request(
+ msm_host->msm_bus_vote.client_handle, vote);
+ spin_lock_irqsave(&host->lock, *flags);
+ if (rc) {
+ pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
+ mmc_hostname(host->mmc),
+ msm_host->msm_bus_vote.client_handle, vote, rc);
+ goto out;
+ }
+ msm_host->msm_bus_vote.curr_vote = vote;
+ }
+out:
+ return rc;
+}
+
+/*
+ * Internal work function: sets the msm bus bandwidth vote to 0.
+ */
+static void sdhci_msm_bus_work(struct work_struct *work)
+{
+ struct sdhci_msm_host *msm_host;
+ struct sdhci_host *host;
+ unsigned long flags;
+
+ msm_host = container_of(work, struct sdhci_msm_host,
+ msm_bus_vote.vote_work.work);
+ host = platform_get_drvdata(msm_host->pdev);
+
+ if (!msm_host->msm_bus_vote.client_handle)
+ return;
+
+ spin_lock_irqsave(&host->lock, flags);
+ /* don't vote for 0 bandwidth if any request is in progress */
+ if (!host->mrq) {
+ sdhci_msm_bus_set_vote(msm_host,
+ msm_host->msm_bus_vote.min_bw_vote, &flags);
+	} else
+		pr_warn("%s: %s: Transfer in progress. Skipping bus voting to 0 bandwidth\n",
+			mmc_hostname(host->mmc), __func__);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+/*
+ * This function cancels any scheduled delayed work and sets the bus
+ * vote based on bw (bandwidth) argument.
+ */
+static void sdhci_msm_bus_cancel_work_and_set_vote(struct sdhci_host *host,
+ unsigned int bw)
+{
+ int vote;
+ unsigned long flags;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
+ spin_lock_irqsave(&host->lock, flags);
+ vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
+ sdhci_msm_bus_set_vote(msm_host, vote, &flags);
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+#define MSM_MMC_BUS_VOTING_DELAY 200 /* msecs */
+
+/* This function queues a work which will set the bandwidth requirement to 0 */
+static void sdhci_msm_bus_queue_work(struct sdhci_host *host)
+{
+ unsigned long flags;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (msm_host->msm_bus_vote.min_bw_vote !=
+ msm_host->msm_bus_vote.curr_vote)
+ queue_delayed_work(system_wq,
+ &msm_host->msm_bus_vote.vote_work,
+ msecs_to_jiffies(MSM_MMC_BUS_VOTING_DELAY));
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int sdhci_msm_bus_register(struct sdhci_msm_host *host,
+ struct platform_device *pdev)
+{
+ int rc = 0;
+ struct msm_bus_scale_pdata *bus_pdata;
+
+ struct sdhci_msm_bus_voting_data *data;
+ struct device *dev = &pdev->dev;
+
+ data = devm_kzalloc(dev,
+ sizeof(struct sdhci_msm_bus_voting_data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev,
+ "%s: failed to allocate memory\n", __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ data->bus_pdata = msm_bus_cl_get_pdata(pdev);
+ if (data->bus_pdata) {
+ rc = sdhci_msm_dt_get_array(dev, "qcom,bus-bw-vectors-bps",
+ &data->bw_vecs, &data->bw_vecs_size, 0);
+ if (rc) {
+ dev_err(&pdev->dev,
+ "%s: Failed to get bus-bw-vectors-bps\n",
+ __func__);
+ goto out;
+ }
+ host->pdata->voting_data = data;
+ }
+ if (host->pdata->voting_data &&
+ host->pdata->voting_data->bus_pdata &&
+ host->pdata->voting_data->bw_vecs &&
+ host->pdata->voting_data->bw_vecs_size) {
+
+ bus_pdata = host->pdata->voting_data->bus_pdata;
+ host->msm_bus_vote.client_handle =
+ msm_bus_scale_register_client(bus_pdata);
+ if (!host->msm_bus_vote.client_handle) {
+ dev_err(&pdev->dev, "msm_bus_scale_register_client()\n");
+ rc = -EFAULT;
+ goto out;
+ }
+ /* cache the vote index for minimum and maximum bandwidth */
+ host->msm_bus_vote.min_bw_vote =
+ sdhci_msm_bus_get_vote_for_bw(host, 0);
+ host->msm_bus_vote.max_bw_vote =
+ sdhci_msm_bus_get_vote_for_bw(host, UINT_MAX);
+ } else {
+ devm_kfree(dev, data);
+ }
+
+out:
+ return rc;
+}
+
+static void sdhci_msm_bus_unregister(struct sdhci_msm_host *host)
+{
+ if (host->msm_bus_vote.client_handle)
+ msm_bus_scale_unregister_client(
+ host->msm_bus_vote.client_handle);
+}
+
+static void sdhci_msm_bus_voting(struct sdhci_host *host, u32 enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct mmc_ios *ios = &host->mmc->ios;
+ unsigned int bw;
+
+ if (!msm_host->msm_bus_vote.client_handle)
+ return;
+
+ bw = sdhci_get_bw_required(host, ios);
+ if (enable) {
+ sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
+ } else {
+ /*
+ * If clock gating is enabled, then remove the vote
+ * immediately because clocks will be disabled only
+ * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
+ * additional delay is required to remove the bus vote.
+ */
+#ifdef CONFIG_MMC_CLKGATE
+ if (host->mmc->clkgate_delay)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ else
+#endif
+ sdhci_msm_bus_queue_work(host);
+ }
+}
+
+/* Regulator utility functions */
+static int sdhci_msm_vreg_init_reg(struct device *dev,
+ struct sdhci_msm_reg_data *vreg)
+{
+ int ret = 0;
+
+ /* check if regulator is already initialized? */
+ if (vreg->reg)
+ goto out;
+
+ /* Get the regulator handle */
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ pr_err("%s: devm_regulator_get(%s) failed. ret=%d\n",
+ __func__, vreg->name, ret);
+ goto out;
+ }
+
+ if (regulator_count_voltages(vreg->reg) > 0) {
+ vreg->set_voltage_sup = true;
+ /* sanity check */
+ if (!vreg->high_vol_level || !vreg->hpm_uA) {
+ pr_err("%s: %s invalid constraints specified\n",
+ __func__, vreg->name);
+ ret = -EINVAL;
+ }
+ }
+
+out:
+ return ret;
+}
+
+static void sdhci_msm_vreg_deinit_reg(struct sdhci_msm_reg_data *vreg)
+{
+ if (vreg->reg)
+ devm_regulator_put(vreg->reg);
+}
+
+static int sdhci_msm_vreg_set_optimum_mode(struct sdhci_msm_reg_data
+ *vreg, int uA_load)
+{
+ int ret = 0;
+
+	/*
+	 * Regulators that do not support regulator_set_voltage also
+	 * do not support changing the load via regulator_set_load().
+	 */
+ if (vreg->set_voltage_sup) {
+ ret = regulator_set_load(vreg->reg, uA_load);
+ if (ret < 0)
+ pr_err("%s: regulator_set_load(reg=%s,uA_load=%d) failed. ret=%d\n",
+ __func__, vreg->name, uA_load, ret);
+ else
+			/*
+			 * regulator_set_load() may return a positive
+			 * value even on success, so normalize it to 0.
+			 */
+ ret = 0;
+ }
+ return ret;
+}
+
+static int sdhci_msm_vreg_set_voltage(struct sdhci_msm_reg_data *vreg,
+ int min_uV, int max_uV)
+{
+ int ret = 0;
+ if (vreg->set_voltage_sup) {
+ ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
+ if (ret) {
+ pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
+ __func__, vreg->name, min_uV, max_uV, ret);
+ }
+ }
+
+ return ret;
+}
+
+static int sdhci_msm_vreg_enable(struct sdhci_msm_reg_data *vreg)
+{
+ int ret = 0;
+
+ /* Put regulator in HPM (high power mode) */
+ ret = sdhci_msm_vreg_set_optimum_mode(vreg, vreg->hpm_uA);
+ if (ret < 0)
+ return ret;
+
+ if (!vreg->is_enabled) {
+ /* Set voltage level */
+ ret = sdhci_msm_vreg_set_voltage(vreg, vreg->high_vol_level,
+ vreg->high_vol_level);
+ if (ret)
+ return ret;
+ }
+ ret = regulator_enable(vreg->reg);
+ if (ret) {
+ pr_err("%s: regulator_enable(%s) failed. ret=%d\n",
+ __func__, vreg->name, ret);
+ return ret;
+ }
+ vreg->is_enabled = true;
+ return ret;
+}
+
+static int sdhci_msm_vreg_disable(struct sdhci_msm_reg_data *vreg)
+{
+ int ret = 0;
+
+ /* Never disable regulator marked as always_on */
+ if (vreg->is_enabled && !vreg->is_always_on) {
+ ret = regulator_disable(vreg->reg);
+ if (ret) {
+ pr_err("%s: regulator_disable(%s) failed. ret=%d\n",
+ __func__, vreg->name, ret);
+ goto out;
+ }
+ vreg->is_enabled = false;
+
+ ret = sdhci_msm_vreg_set_optimum_mode(vreg, 0);
+ if (ret < 0)
+ goto out;
+
+ /* Set min. voltage level to 0 */
+ ret = sdhci_msm_vreg_set_voltage(vreg, 0, vreg->high_vol_level);
+ if (ret)
+ goto out;
+ } else if (vreg->is_enabled && vreg->is_always_on) {
+ if (vreg->lpm_sup) {
+ /* Put always_on regulator in LPM (low power mode) */
+ ret = sdhci_msm_vreg_set_optimum_mode(vreg,
+ vreg->lpm_uA);
+ if (ret < 0)
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
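+/*
+ * Summary of the disable paths above: a normal regulator is disabled,
+ * its load vote is dropped to 0 and its voltage floor relaxed to 0; an
+ * always_on regulator is never disabled but, when lpm_sup is set, is
+ * instead moved to its low power mode load (lpm_uA).
+ */
+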
+static int sdhci_msm_setup_vreg(struct sdhci_msm_pltfm_data *pdata,
+ bool enable, bool is_init)
+{
+ int ret = 0, i;
+ struct sdhci_msm_slot_reg_data *curr_slot;
+ struct sdhci_msm_reg_data *vreg_table[2];
+
+ curr_slot = pdata->vreg_data;
+ if (!curr_slot) {
+ pr_debug("%s: vreg info unavailable,assuming the slot is powered by always on domain\n",
+ __func__);
+ goto out;
+ }
+
+ vreg_table[0] = curr_slot->vdd_data;
+ vreg_table[1] = curr_slot->vdd_io_data;
+
+ for (i = 0; i < ARRAY_SIZE(vreg_table); i++) {
+ if (vreg_table[i]) {
+ if (enable)
+ ret = sdhci_msm_vreg_enable(vreg_table[i]);
+ else
+ ret = sdhci_msm_vreg_disable(vreg_table[i]);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+/* This init function should be called only once for each SDHC slot */
+static int sdhci_msm_vreg_init(struct device *dev,
+ struct sdhci_msm_pltfm_data *pdata,
+ bool is_init)
+{
+ int ret = 0;
+ struct sdhci_msm_slot_reg_data *curr_slot;
+ struct sdhci_msm_reg_data *curr_vdd_reg, *curr_vdd_io_reg;
+
+ curr_slot = pdata->vreg_data;
+ if (!curr_slot)
+ goto out;
+
+ curr_vdd_reg = curr_slot->vdd_data;
+ curr_vdd_io_reg = curr_slot->vdd_io_data;
+
+ if (!is_init)
+ /* Deregister all regulators from regulator framework */
+ goto vdd_io_reg_deinit;
+
+ /*
+ * Get the regulator handle from voltage regulator framework
+ * and then try to set the voltage level for the regulator
+ */
+ if (curr_vdd_reg) {
+ ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_reg);
+ if (ret)
+ goto out;
+ }
+ if (curr_vdd_io_reg) {
+ ret = sdhci_msm_vreg_init_reg(dev, curr_vdd_io_reg);
+ if (ret)
+ goto vdd_reg_deinit;
+ }
+
+	goto out;
+
+vdd_io_reg_deinit:
+ if (curr_vdd_io_reg)
+ sdhci_msm_vreg_deinit_reg(curr_vdd_io_reg);
+vdd_reg_deinit:
+ if (curr_vdd_reg)
+ sdhci_msm_vreg_deinit_reg(curr_vdd_reg);
+out:
+ return ret;
+}
+
+
+static int sdhci_msm_set_vdd_io_vol(struct sdhci_msm_pltfm_data *pdata,
+ enum vdd_io_level level,
+ unsigned int voltage_level)
+{
+ int ret = 0;
+ int set_level;
+ struct sdhci_msm_reg_data *vdd_io_reg;
+
+ if (!pdata->vreg_data)
+ return ret;
+
+ vdd_io_reg = pdata->vreg_data->vdd_io_data;
+ if (vdd_io_reg && vdd_io_reg->is_enabled) {
+ switch (level) {
+ case VDD_IO_LOW:
+ set_level = vdd_io_reg->low_vol_level;
+ break;
+ case VDD_IO_HIGH:
+ set_level = vdd_io_reg->high_vol_level;
+ break;
+ case VDD_IO_SET_LEVEL:
+ set_level = voltage_level;
+ break;
+ default:
+ pr_err("%s: invalid argument level = %d",
+ __func__, level);
+ ret = -EINVAL;
+ return ret;
+ }
+ ret = sdhci_msm_vreg_set_voltage(vdd_io_reg, set_level,
+ set_level);
+ }
+ return ret;
+}
+
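+/*
+ * Example usage (hypothetical levels): a caller switching the pad
+ * voltage for UHS signalling could use
+ *
+ *	sdhci_msm_set_vdd_io_vol(pdata, VDD_IO_SET_LEVEL, 1800000);
+ *
+ * to request exactly 1.8 V, while VDD_IO_LOW/VDD_IO_HIGH pick the
+ * levels parsed from DT.
+ */
+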
+/*
+ * Acquire spin-lock host->lock before calling this function
+ */
+static void sdhci_msm_cfg_sdiowakeup_gpio_irq(struct sdhci_host *host,
+ bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (enable && !msm_host->is_sdiowakeup_enabled)
+ enable_irq(msm_host->pdata->sdiowakeup_irq);
+ else if (!enable && msm_host->is_sdiowakeup_enabled)
+ disable_irq_nosync(msm_host->pdata->sdiowakeup_irq);
+ else
+		dev_warn(&msm_host->pdev->dev, "%s: wakeup irq already in requested state: requested %d, current %d\n",
+			__func__, enable, msm_host->is_sdiowakeup_enabled);
+ msm_host->is_sdiowakeup_enabled = enable;
+}
+
+static irqreturn_t sdhci_msm_sdiowakeup_irq(int irq, void *data)
+{
+ struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ unsigned long flags;
+
+ pr_debug("%s: irq (%d) received\n", __func__, irq);
+
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+ spin_unlock_irqrestore(&host->lock, flags);
+ msm_host->sdio_pending_processing = true;
+
+ return IRQ_HANDLED;
+}
+
+void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ unsigned int irq_flags = 0;
+ struct irq_desc *pwr_irq_desc = irq_to_desc(msm_host->pwr_irq);
+
+ if (pwr_irq_desc)
+ irq_flags = pwr_irq_desc->irq_data.common->state_use_accessors;
+
+ pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x, pwr isr state=0x%x\n",
+ mmc_hostname(host->mmc),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_MASK),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
+
+ MMC_TRACE(host->mmc,
+ "%s: Sts: 0x%08x | Mask: 0x%08x | Ctrl: 0x%08x, pwr isr state=0x%x\n",
+ __func__,
+ sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS),
+ sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_MASK),
+ sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_CTL), irq_flags);
+}
+
+static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data)
+{
+ struct sdhci_host *host = (struct sdhci_host *)data;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u8 irq_status = 0;
+ u8 irq_ack = 0;
+ int ret = 0;
+ int pwr_state = 0, io_level = 0;
+ unsigned long flags;
+ int retry = 10;
+
+ irq_status = sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS);
+
+ pr_debug("%s: Received IRQ(%d), status=0x%x\n",
+ mmc_hostname(msm_host->mmc), irq, irq_status);
+
+ /* Clear the interrupt */
+ sdhci_msm_writeb_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
+
+ /*
+ * SDHC has core_mem and hc_mem device memory and these memory
+ * addresses do not fall within 1KB region. Hence, any update to
+ * core_mem address space would require an mb() to ensure this gets
+ * completed before its next update to registers within hc_mem.
+ */
+ mb();
+	/*
+	 * There is a rare HW scenario where the first clear pulse could be
+	 * lost when the actual reset and the clear/read of the status
+	 * register happen at the same time. Hence, retry up to 10 times to
+	 * make sure the status register is cleared. Otherwise, a spurious
+	 * power IRQ would result in system instability.
+	 */
+ while (irq_status & sdhci_msm_readb_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS)) {
+		if (retry == 0) {
+			pr_err("%s: Timed out clearing (0x%x) pwrctl status register\n",
+				mmc_hostname(host->mmc), irq_status);
+			sdhci_msm_dump_pwr_ctrl_regs(host);
+			BUG();
+		}
+ sdhci_msm_writeb_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
+ retry--;
+ udelay(10);
+ }
+ if (likely(retry < 10))
+ pr_debug("%s: success clearing (0x%x) pwrctl status register, retries left %d\n",
+ mmc_hostname(host->mmc), irq_status, retry);
+
+	/* Handle BUS ON/OFF */
+ if (irq_status & CORE_PWRCTL_BUS_ON) {
+ ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
+ if (!ret) {
+ ret = sdhci_msm_setup_pins(msm_host->pdata, true);
+ ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+ VDD_IO_HIGH, 0);
+ }
+ if (ret)
+ irq_ack |= CORE_PWRCTL_BUS_FAIL;
+ else
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+ pwr_state = REQ_BUS_ON;
+ io_level = REQ_IO_HIGH;
+ }
+ if (irq_status & CORE_PWRCTL_BUS_OFF) {
+ if (msm_host->pltfm_init_done)
+ ret = sdhci_msm_setup_vreg(msm_host->pdata,
+ false, false);
+ if (!ret) {
+ ret = sdhci_msm_setup_pins(msm_host->pdata, false);
+ ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+ VDD_IO_LOW, 0);
+ }
+ if (ret)
+ irq_ack |= CORE_PWRCTL_BUS_FAIL;
+ else
+ irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+ pwr_state = REQ_BUS_OFF;
+ io_level = REQ_IO_LOW;
+ }
+ /* Handle IO LOW/HIGH */
+ if (irq_status & CORE_PWRCTL_IO_LOW) {
+ /* Switch voltage Low */
+ ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_LOW, 0);
+ if (ret)
+ irq_ack |= CORE_PWRCTL_IO_FAIL;
+ else
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+ io_level = REQ_IO_LOW;
+ }
+ if (irq_status & CORE_PWRCTL_IO_HIGH) {
+ /* Switch voltage High */
+ ret = sdhci_msm_set_vdd_io_vol(msm_host->pdata, VDD_IO_HIGH, 0);
+ if (ret)
+ irq_ack |= CORE_PWRCTL_IO_FAIL;
+ else
+ irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+ io_level = REQ_IO_HIGH;
+ }
+
+ /* ACK status to the core */
+ sdhci_msm_writeb_relaxed(irq_ack, host,
+ msm_host_offset->CORE_PWRCTL_CTL);
+ /*
+ * SDHC has core_mem and hc_mem device memory and these memory
+ * addresses do not fall within 1KB region. Hence, any update to
+ * core_mem address space would require an mb() to ensure this gets
+ * completed before its next update to registers within hc_mem.
+ */
+ mb();
+
+ if ((io_level & REQ_IO_HIGH) &&
+ (msm_host->caps_0 & CORE_3_0V_SUPPORT) &&
+ !msm_host->core_3_0v_support)
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) &
+ ~CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ else if ((io_level & REQ_IO_LOW) ||
+ (msm_host->caps_0 & CORE_1_8V_SUPPORT))
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) |
+ CORE_IO_PAD_PWR_SWITCH), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ mb();
+
+ pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
+ mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
+ spin_lock_irqsave(&host->lock, flags);
+ if (pwr_state)
+ msm_host->curr_pwr_state = pwr_state;
+ if (io_level)
+ msm_host->curr_io_level = io_level;
+ complete(&msm_host->pwr_irq_completion);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return IRQ_HANDLED;
+}
+
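+/*
+ * Handshake summary for the power IRQ above: the core raises a request
+ * in PWRCTL_STATUS (e.g. CORE_PWRCTL_BUS_ON), the driver clears it via
+ * PWRCTL_CLEAR, performs the regulator/pin work, and then acks with
+ * CORE_PWRCTL_BUS_SUCCESS or CORE_PWRCTL_BUS_FAIL in PWRCTL_CTL.
+ */
+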
+static ssize_t
+show_polling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ int poll;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", poll);
+}
+
+static ssize_t
+store_polling(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+	u32 value;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ spin_lock_irqsave(&host->lock, flags);
+ if (value) {
+ host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+ mmc_detect_change(host->mmc, 0);
+ } else {
+ host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
+static ssize_t
+show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n",
+ msm_host->msm_bus_vote.is_max_bw_needed);
+}
+
+static ssize_t
+store_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ uint32_t value;
+ unsigned long flags;
+
+ if (!kstrtou32(buf, 0, &value)) {
+ spin_lock_irqsave(&host->lock, flags);
+ msm_host->msm_bus_vote.is_max_bw_needed = !!value;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ return count;
+}
+
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ unsigned long flags;
+ bool done = false;
+ u32 io_sig_sts = SWITCHABLE_SIGNALLING_VOL;
+
+ spin_lock_irqsave(&host->lock, flags);
+ pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+ mmc_hostname(host->mmc), __func__, req_type,
+ msm_host->curr_pwr_state, msm_host->curr_io_level);
+ if (!msm_host->mci_removed)
+ io_sig_sts = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_GENERICS);
+
+ /*
+ * The IRQ for request type IO High/Low will be generated when -
+ * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
+ * 2. If 1 is true and when there is a state change in 1.8V enable
+ * bit (bit 3) of SDHCI_HOST_CONTROL2 register. The reset state of
+ * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
+ * layer tries to set it to 3.3V before card detection happens, the
+ * IRQ doesn't get triggered as there is no state change in this bit.
+ * The driver already handles this case by changing the IO voltage
+ * level to high as part of controller power up sequence. Hence, check
+ * for host->pwr to handle a case where IO voltage high request is
+ * issued even before controller power up.
+ */
+ if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
+ if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
+ ((req_type & REQ_IO_HIGH) && !host->pwr)) {
+ pr_debug("%s: do not wait for power IRQ that never comes\n",
+ mmc_hostname(host->mmc));
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+ }
+
+ if ((req_type & msm_host->curr_pwr_state) ||
+ (req_type & msm_host->curr_io_level))
+ done = true;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+	/*
+	 * This is needed here to handle a case where the IRQ gets
+	 * triggered even before this function is called, so that the
+	 * x->done counter of the completion gets reset. Otherwise,
+	 * the next call to wait_for_completion returns immediately
+	 * without actually waiting for the IRQ to be handled.
+	 */
+ if (done)
+ init_completion(&msm_host->pwr_irq_completion);
+ else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) {
+		WARN(1, "%s: request(%d) timed out waiting for pwr_irq\n",
+				mmc_hostname(host->mmc), req_type);
+ MMC_TRACE(host->mmc,
+ "%s: request(%d) timed out waiting for pwr_irq\n",
+ __func__, req_type);
+ sdhci_msm_dump_pwr_ctrl_regs(host);
+ }
+ pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+ __func__, req_type);
+}
+
+static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u32 config = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ if (enable) {
+ config |= CORE_CDR_EN;
+ config &= ~CORE_CDR_EXT_EN;
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ } else {
+ config &= ~CORE_CDR_EN;
+ config |= CORE_CDR_EXT_EN;
+ writel_relaxed(config, host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ }
+}
+
+static unsigned int sdhci_msm_max_segs(void)
+{
+ return SDHCI_MSM_MAX_SEGMENTS;
+}
+
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return msm_host->pdata->sup_clk_table[0];
+}
+
+static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int max_clk_index = msm_host->pdata->sup_clk_cnt;
+
+ return msm_host->pdata->sup_clk_table[max_clk_index - 1];
+}
+
+static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
+ u32 req_clk)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ unsigned int sel_clk = -1;
+ unsigned char cnt;
+
+ if (req_clk < sdhci_msm_get_min_clock(host)) {
+ sel_clk = sdhci_msm_get_min_clock(host);
+ return sel_clk;
+ }
+
+ for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
+ if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
+ break;
+ } else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
+ sel_clk = msm_host->pdata->sup_clk_table[cnt];
+ break;
+ } else {
+ sel_clk = msm_host->pdata->sup_clk_table[cnt];
+ }
+ }
+ return sel_clk;
+}
+
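+/*
+ * Example (hypothetical table): with sup_clk_table = {400000, 25000000,
+ * 50000000, 100000000, 200000000}, a request of 52000000 returns
+ * 50000000 (the highest supported rate not above the request), while a
+ * request below 400000 returns the minimum supported rate.
+ */
+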
+static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int rc = 0;
+
+ if (atomic_read(&msm_host->controller_clock))
+ return 0;
+
+ sdhci_msm_bus_voting(host, 1);
+
+ if (!IS_ERR(msm_host->pclk)) {
+ rc = clk_prepare_enable(msm_host->pclk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the pclk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto remove_vote;
+ }
+ }
+
+ rc = clk_prepare_enable(msm_host->clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_pclk;
+ }
+
+ if (!IS_ERR(msm_host->ice_clk)) {
+ rc = clk_prepare_enable(msm_host->ice_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the ice-clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_host_clk;
+ }
+ }
+ atomic_set(&msm_host->controller_clock, 1);
+ pr_debug("%s: %s: enabled controller clock\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+
+disable_host_clk:
+ if (!IS_ERR(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
+disable_pclk:
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+remove_vote:
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+ return rc;
+}
+
+static void sdhci_msm_disable_controller_clock(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (atomic_read(&msm_host->controller_clock)) {
+ if (!IS_ERR(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR(msm_host->ice_clk))
+ clk_disable_unprepare(msm_host->ice_clk);
+ sdhci_msm_bus_voting(host, 0);
+ atomic_set(&msm_host->controller_clock, 0);
+ pr_debug("%s: %s: disabled controller clock\n",
+ mmc_hostname(host->mmc), __func__);
+ }
+}
+
+static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int rc = 0;
+
+ if (enable && !atomic_read(&msm_host->clks_on)) {
+ pr_debug("%s: request to enable clocks\n",
+ mmc_hostname(host->mmc));
+
+		/*
+		 * The bus-width or the clock rate might have changed
+		 * after the controller clocks were enabled; update the
+		 * bus vote in that case.
+		 */
+ if (atomic_read(&msm_host->controller_clock))
+ sdhci_msm_bus_voting(host, 1);
+
+ rc = sdhci_msm_enable_controller_clock(host);
+ if (rc)
+ goto remove_vote;
+
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
+ rc = clk_prepare_enable(msm_host->bus_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_controller_clk;
+ }
+ }
+ if (!IS_ERR(msm_host->ff_clk)) {
+ rc = clk_prepare_enable(msm_host->ff_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_bus_clk;
+ }
+ }
+ if (!IS_ERR(msm_host->sleep_clk)) {
+ rc = clk_prepare_enable(msm_host->sleep_clk);
+ if (rc) {
+ pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
+ mmc_hostname(host->mmc), __func__, rc);
+ goto disable_ff_clk;
+ }
+ }
+ mb();
+
+ } else if (!enable && atomic_read(&msm_host->clks_on)) {
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ mb();
+		/*
+		 * During 1.8V signal switching the clock source must
+		 * still be ON as it requires accessing SDHC
+		 * registers (SDHCI host control2 register bit 3 must
+		 * be written and polled after stopping the SDCLK).
+		 */
+ if (host->mmc->card_clock_off)
+ return 0;
+ pr_debug("%s: request to disable clocks\n",
+ mmc_hostname(host->mmc));
+ if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
+ clk_disable_unprepare(msm_host->sleep_clk);
+ if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+ clk_disable_unprepare(msm_host->ff_clk);
+ clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->ice_clk))
+ clk_disable_unprepare(msm_host->ice_clk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+
+ atomic_set(&msm_host->controller_clock, 0);
+ sdhci_msm_bus_voting(host, 0);
+ }
+ atomic_set(&msm_host->clks_on, enable);
+ goto out;
+disable_ff_clk:
+ if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+ clk_disable_unprepare(msm_host->ff_clk);
+disable_bus_clk:
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk))
+ clk_disable_unprepare(msm_host->bus_clk);
+disable_controller_clk:
+ if (!IS_ERR_OR_NULL(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->ice_clk))
+ clk_disable_unprepare(msm_host->ice_clk);
+ if (!IS_ERR_OR_NULL(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
+ atomic_set(&msm_host->controller_clock, 0);
+remove_vote:
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+ return rc;
+}
+
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+ int rc;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ struct mmc_card *card = host->mmc->card;
+ struct mmc_ios curr_ios = host->mmc->ios;
+ u32 sup_clock, ddr_clock, dll_lock;
+ bool curr_pwrsave;
+
+ if (!clock) {
+		/*
+		 * Disable pwrsave to ensure the clock is not auto-gated
+		 * until the rate is above 400 kHz (i.e. initialization
+		 * is complete).
+		 */
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) &
+ ~CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ sdhci_msm_prepare_clocks(host, false);
+ host->clock = clock;
+ goto out;
+ }
+
+ rc = sdhci_msm_prepare_clocks(host, true);
+ if (rc)
+ goto out;
+
+ curr_pwrsave = !!(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) & CORE_CLK_PWRSAVE);
+ if ((clock > 400000) &&
+ !curr_pwrsave && card && mmc_host_may_gate_card(card))
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ | CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+	/*
+	 * Disable pwrsave for a newly added card if it doesn't
+	 * allow clock gating.
+	 */
+ else if (curr_pwrsave && card && !mmc_host_may_gate_card(card))
+ writel_relaxed(readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_CLK_PWRSAVE, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+
+ sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
+ if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
+ (curr_ios.timing == MMC_TIMING_MMC_DDR52) ||
+ (curr_ios.timing == MMC_TIMING_MMC_HS400)) {
+		/*
+		 * The SDHC requires the internal clock frequency to be double
+		 * the actual clock that will be set for DDR mode. The
+		 * controller uses the faster clock (100/400 MHz) for some of
+		 * its parts and sends the actual required clock
+		 * (50/200 MHz) to the card.
+		 */
+ ddr_clock = clock * 2;
+ sup_clock = sdhci_msm_get_sup_clk_rate(host,
+ ddr_clock);
+ }
+
+	/*
+	 * In general all timing modes are controlled via the UHS mode select
+	 * in the Host Control2 register. The eMMC-specific HS200/HS400 modes
+	 * don't have their own values defined there, hence we use these:
+	 *
+	 * HS200 - SDR104 (since the two are functionally equivalent)
+	 * HS400 - This involves multiple configurations
+	 *		Initially SDR104 - when tuning is required as HS200
+	 *		Then when switching to DDR @ 400MHz (HS400) we use
+	 *		the vendor specific HC_SELECT_IN to control the mode.
+	 *
+	 * In addition to controlling the modes we also need to select the
+	 * correct input clock for the DLL depending on the mode.
+	 *
+	 * HS400 - divided clock (free running MCLK/2)
+	 * All other modes - default (free running MCLK)
+	 */
+ if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
+ /* Select the divided clock (free running MCLK/2) */
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_HC_MCLK_SEL_MASK)
+ | CORE_HC_MCLK_SEL_HS400), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ /*
+ * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+ * register
+ */
+ if ((msm_host->tuning_done ||
+ (card && mmc_card_strobe(card) &&
+ msm_host->enhanced_strobe)) &&
+ !msm_host->calibration_done) {
+ /*
+ * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
+ * field in VENDOR_SPEC_FUNC
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr + \
+ msm_host_offset->CORE_VENDOR_SPEC)
+ | CORE_HC_SELECT_IN_HS400
+ | CORE_HC_SELECT_IN_EN), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ }
+ if (!host->mmc->ios.old_rate && !msm_host->use_cdclp533) {
+			/*
+			 * Poll on DLL_LOCK and DDR_DLL_LOCK bits in
+			 * CORE_DLL_STATUS to be set. This should get set
+			 * within 15 us at 200 MHz.
+			 */
+ rc = readl_poll_timeout(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS,
+ dll_lock, (dll_lock & (CORE_DLL_LOCK |
+ CORE_DDR_DLL_LOCK)), 10, 1000);
+ if (rc == -ETIMEDOUT)
+ pr_err("%s: Unable to get DLL_LOCK/DDR_DLL_LOCK, dll_status: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ dll_lock);
+ }
+ } else {
+ if (!msm_host->use_cdclp533)
+ /* set CORE_PWRSAVE_DLL bit in CORE_VENDOR_SPEC3 */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ & ~CORE_PWRSAVE_DLL), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3);
+
+ /* Select the default clock (free running MCLK) */
+ writel_relaxed(((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_HC_MCLK_SEL_MASK)
+ | CORE_HC_MCLK_SEL_DFLT), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+
+ /*
+ * Disable HC_SELECT_IN to be able to use the UHS mode select
+ * configuration from Host Control2 register for all other
+ * modes.
+ *
+ * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+ * in VENDOR_SPEC_FUNC
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC)
+ & ~CORE_HC_SELECT_IN_EN
+ & ~CORE_HC_SELECT_IN_MASK), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ }
+ mb();
+
+ if (sup_clock != msm_host->clk_rate) {
+ pr_debug("%s: %s: setting clk rate to %u\n",
+ mmc_hostname(host->mmc), __func__, sup_clock);
+ rc = clk_set_rate(msm_host->clk, sup_clock);
+ if (rc) {
+ pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
+ mmc_hostname(host->mmc), __func__,
+ sup_clock, rc);
+ goto out;
+ }
+ msm_host->clk_rate = sup_clock;
+ host->clock = clock;
+ /*
+ * Update the bus vote in case of frequency change due to
+ * clock scaling.
+ */
+ sdhci_msm_bus_voting(host, 1);
+ }
+out:
+ sdhci_set_clock(host, clock);
+}
+
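+/*
+ * Example for the DDR handling above: a request for a 50 MHz card clock
+ * in MMC_TIMING_MMC_DDR52 is doubled to 100 MHz before the supported
+ * rate lookup, since the controller clock must run at twice the card
+ * clock in DDR modes.
+ */
+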
+static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
+ unsigned int uhs)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ u16 ctrl_2;
+
+ ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+ /* Select Bus Speed Mode for host */
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+ if ((uhs == MMC_TIMING_MMC_HS400) ||
+ (uhs == MMC_TIMING_MMC_HS200) ||
+ (uhs == MMC_TIMING_UHS_SDR104))
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+ else if (uhs == MMC_TIMING_UHS_SDR12)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+ else if (uhs == MMC_TIMING_UHS_SDR25)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+ else if (uhs == MMC_TIMING_UHS_SDR50)
+ ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+ else if ((uhs == MMC_TIMING_UHS_DDR50) ||
+ (uhs == MMC_TIMING_MMC_DDR52))
+ ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+	/*
+	 * When the clock frequency is less than 100MHz, the feedback clock
+	 * must be provided and DLL must not be used so that tuning can be
+	 * skipped. To provide the feedback clock, the mode selection can be
+	 * any value less than 3'b011 in bits [2:0] of the HOST CONTROL2
+	 * register.
+	 */
+ if (host->clock <= CORE_FREQ_100MHZ) {
+ if ((uhs == MMC_TIMING_MMC_HS400) ||
+ (uhs == MMC_TIMING_MMC_HS200) ||
+ (uhs == MMC_TIMING_UHS_SDR104))
+ ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+
+ /*
+ * Make sure DLL is disabled when not required
+ *
+ * Write 1 to DLL_RST bit of DLL_CONFIG register
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_DLL_RST), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+
+ /* Write 1 to DLL_PDN bit of DLL_CONFIG register */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG)
+ | CORE_DLL_PDN), host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG);
+ mb();
+
+ /*
+ * The DLL needs to be restored and CDCLP533 recalibrated
+ * when the clock frequency is set back to 400MHz.
+ */
+ msm_host->calibration_done = false;
+ }
+
+ pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
+ mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
+ sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+}
+
+#define MAX_TEST_BUS 60
+#define DRV_NAME "cmdq-host"
+static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
+{
+ int i = 0;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ struct cmdq_host *cq_host = host->cq_host;
+
+ u32 version = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION);
+ u16 minor = version & CORE_VERSION_TARGET_MASK;
+	/* register offsets changed starting from 4.2.0 */
+ int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
+
+ if (cq_host->offset_changed)
+ offset += CQ_V5_VENDOR_CFG;
+ pr_err("---- Debug RAM dump ----\n");
+ pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
+ cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
+ cmdq_readl(cq_host, CQ_CMD_DBG_RAM_OL + offset));
+
+ while (i < 16) {
+ pr_err(DRV_NAME ": Debug RAM dump [%d]: 0x%08x\n", i,
+ cmdq_readl(cq_host, CQ_CMD_DBG_RAM + offset + (4 * i)));
+ i++;
+ }
+ pr_err("-------------------------\n");
+}
+
+static void sdhci_msm_cache_debug_data(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_debug_data *cached_data = &msm_host->cached_data;
+
+ memcpy(&cached_data->copy_mmc, msm_host->mmc,
+ sizeof(struct mmc_host));
+ if (msm_host->mmc->card)
+ memcpy(&cached_data->copy_card, msm_host->mmc->card,
+ sizeof(struct mmc_card));
+ memcpy(&cached_data->copy_host, host,
+ sizeof(struct sdhci_host));
+}
+
+void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+ int tbsel, tbsel2;
+ int i, index = 0;
+ u32 test_bus_val = 0;
+ u32 debug_reg[MAX_TEST_BUS] = {0};
+ u32 sts = 0;
+
+ sdhci_msm_cache_debug_data(host);
+ pr_info("----------- VENDOR REGISTER DUMP -----------\n");
+ if (host->cq_host)
+ sdhci_msm_cmdq_dump_debug_ram(host);
+
+ MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_DATA_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_FIFO_CNT));
+ pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_DATA_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_FIFO_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_STATUS));
+ pr_info("DLL cfg: 0x%08x | DLL sts: 0x%08x | SDCC ver: 0x%08x\n",
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_CONFIG),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_DLL_STATUS),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION));
+ pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
+ pr_info("Vndr func2: 0x%08x\n",
+ readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2));
+
+ /*
+ * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
+ * of CORE_TESTBUS_CONFIG register.
+ *
+	 * To select test buses 0 to 7 use tbsel, and to select any test bus
+	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For
+	 * example, to select test bus 14, write 0x1E to the
+	 * CORE_TESTBUS_CONFIG register, i.e., tbsel2[7:4] = 0001,
+	 * tbsel[2:0] = 110.
+ */
+ for (tbsel2 = 0; tbsel2 < 7; tbsel2++) {
+ for (tbsel = 0; tbsel < 8; tbsel++) {
+ if (index >= MAX_TEST_BUS)
+ break;
+ test_bus_val =
+ (tbsel2 << msm_host_offset->CORE_TESTBUS_SEL2_BIT) |
+ tbsel | msm_host_offset->CORE_TESTBUS_ENA;
+ sdhci_msm_writel_relaxed(test_bus_val, host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ debug_reg[index++] = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_SDCC_DEBUG_REG);
+ }
+ }
+ for (i = 0; i < MAX_TEST_BUS; i = i + 4)
+ pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, i + 3, debug_reg[i], debug_reg[i+1],
+ debug_reg[i+2], debug_reg[i+3]);
+ if (host->is_crypto_en) {
+ sdhci_msm_ice_get_status(host, &sts);
+ pr_info("%s: ICE status %x\n", mmc_hostname(host->mmc), sts);
+ sdhci_msm_ice_print_regs(host);
+ }
+}
+
+void sdhci_msm_reset(struct sdhci_host *host, u8 mask)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ /* Set ICE core to be reset in sync with SDHC core */
+ if (msm_host->ice.pdev) {
+ if (msm_host->ice_hci_support)
+ writel_relaxed(1, host->ioaddr +
+ HC_VENDOR_SPECIFIC_ICE_CTRL);
+ else
+ writel_relaxed(1,
+ host->ioaddr + CORE_VENDOR_SPEC_ICE_CTRL);
+ }
+
+ sdhci_reset(host, mask);
+}
+
+/*
+ * sdhci_msm_enhanced_strobe_mask :-
+ * Before running CMDQ transfers in HS400 Enhanced Strobe mode,
+ * SW should write 3 to
+ * HC_VENDOR_SPECIFIC_FUNC3.CMDEN_HS400_INPUT_MASK_CNT register.
+ * The default reset value of this register is 2.
+ */
+static void sdhci_msm_enhanced_strobe_mask(struct sdhci_host *host, bool set)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ if (!msm_host->enhanced_strobe ||
+ !mmc_card_strobe(msm_host->mmc->card)) {
+ pr_debug("%s: host/card does not support hs400 enhanced strobe\n",
+ mmc_hostname(host->mmc));
+ return;
+ }
+
+ if (set) {
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ | CORE_CMDEN_HS400_INPUT_MASK_CNT),
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
+ } else {
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC3)
+ & ~CORE_CMDEN_HS400_INPUT_MASK_CNT),
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC3);
+ }
+}
+
+static void sdhci_msm_clear_set_dumpregs(struct sdhci_host *host, bool set)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ if (set) {
+ sdhci_msm_writel_relaxed(msm_host_offset->CORE_TESTBUS_ENA,
+ host, msm_host_offset->CORE_TESTBUS_CONFIG);
+ } else {
+ u32 value;
+ value = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ value &= ~(msm_host_offset->CORE_TESTBUS_ENA);
+ sdhci_msm_writel_relaxed(value, host,
+ msm_host_offset->CORE_TESTBUS_CONFIG);
+ }
+}
+
+int sdhci_msm_notify_load(struct sdhci_host *host, enum mmc_load state)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int ret = 0;
+ u32 clk_rate = 0;
+
+ if (!IS_ERR(msm_host->ice_clk)) {
+ clk_rate = (state == MMC_LOAD_LOW) ?
+ msm_host->pdata->ice_clk_min :
+ msm_host->pdata->ice_clk_max;
+ if (msm_host->ice_clk_rate == clk_rate)
+ return 0;
+ pr_debug("%s: changing ICE clk rate to %u\n",
+ mmc_hostname(host->mmc), clk_rate);
+ ret = clk_set_rate(msm_host->ice_clk, clk_rate);
+ if (ret) {
+ pr_err("%s: ICE_CLK rate set failed (%d) for %u\n",
+ mmc_hostname(host->mmc), ret, clk_rate);
+ return ret;
+ }
+ msm_host->ice_clk_rate = clk_rate;
+ }
+ return 0;
+}
+
+void sdhci_msm_reset_workaround(struct sdhci_host *host, u32 enable)
+{
+ u32 vendor_func2;
+ unsigned long timeout;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ vendor_func2 = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+
+ if (enable) {
+ writel_relaxed(vendor_func2 | HC_SW_RST_REQ, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ timeout = 10000;
+ while (readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2) & HC_SW_RST_REQ) {
+ if (timeout == 0) {
+ pr_info("%s: Applying wait idle disable workaround\n",
+ mmc_hostname(host->mmc));
+ /*
+ * Apply the reset workaround to not wait for
+ * pending data transfers on AXI before
+ * resetting the controller. This could be
+ * risky if the transfers were stuck on the
+ * AXI bus.
+ */
+ vendor_func2 = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ writel_relaxed(vendor_func2 |
+ HC_SW_RST_WAIT_IDLE_DIS, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ host->reset_wa_t = ktime_get();
+ return;
+ }
+ timeout--;
+ udelay(10);
+ }
+ pr_info("%s: waiting for SW_RST_REQ is successful\n",
+ mmc_hostname(host->mmc));
+ } else {
+ writel_relaxed(vendor_func2 & ~HC_SW_RST_WAIT_IDLE_DIS,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ }
+}
+
+static void sdhci_msm_pm_qos_irq_unvote_work(struct work_struct *work)
+{
+ struct sdhci_msm_pm_qos_irq *pm_qos_irq =
+ container_of(work, struct sdhci_msm_pm_qos_irq,
+ unvote_work.work);
+
+ if (atomic_read(&pm_qos_irq->counter))
+ return;
+
+ pm_qos_irq->latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&pm_qos_irq->req, pm_qos_irq->latency);
+}
+
+void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pm_qos_latency *latency =
+ &msm_host->pdata->pm_qos_data.irq_latency;
+ int counter;
+
+ if (!msm_host->pm_qos_irq.enabled)
+ return;
+
+ counter = atomic_inc_return(&msm_host->pm_qos_irq.counter);
+ /* Make sure to update the voting in case power policy has changed */
+ if (msm_host->pm_qos_irq.latency == latency->latency[host->power_policy]
+ && counter > 1)
+ return;
+
+ cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+ msm_host->pm_qos_irq.latency = latency->latency[host->power_policy];
+ pm_qos_update_request(&msm_host->pm_qos_irq.req,
+ msm_host->pm_qos_irq.latency);
+}
+
+void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int counter;
+
+ if (!msm_host->pm_qos_irq.enabled)
+ return;
+
+ if (atomic_read(&msm_host->pm_qos_irq.counter)) {
+ counter = atomic_dec_return(&msm_host->pm_qos_irq.counter);
+ } else {
+ WARN(1, "attempt to decrement pm_qos_irq.counter when it's 0");
+ return;
+ }
+
+ if (counter)
+ return;
+
+ if (async) {
+ schedule_delayed_work(&msm_host->pm_qos_irq.unvote_work,
+ msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+ return;
+ }
+
+ msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&msm_host->pm_qos_irq.req,
+ msm_host->pm_qos_irq.latency);
+}
+
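+/*
+ * Vote/unvote lifecycle: each request path calls
+ * sdhci_msm_pm_qos_irq_vote() (counter++, latency vote applied) and
+ * later sdhci_msm_pm_qos_irq_unvote() (counter--); when the counter
+ * reaches 0 the vote is dropped, either immediately or via the delayed
+ * unvote_work when async is true.
+ */
+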
+static ssize_t
+sdhci_msm_pm_qos_irq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pm_qos_irq *irq = &msm_host->pm_qos_irq;
+
+ return snprintf(buf, PAGE_SIZE,
+ "IRQ PM QoS: enabled=%d, counter=%d, latency=%d\n",
+ irq->enabled, atomic_read(&irq->counter), irq->latency);
+}
+
+static ssize_t
+sdhci_msm_pm_qos_irq_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", msm_host->pm_qos_irq.enabled);
+}
+
+static ssize_t
+sdhci_msm_pm_qos_irq_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ uint32_t value;
+ bool enable;
+ int ret;
+
+ ret = kstrtou32(buf, 0, &value);
+ if (ret)
+ goto out;
+ enable = !!value;
+
+ if (enable == msm_host->pm_qos_irq.enabled)
+ goto out;
+
+ msm_host->pm_qos_irq.enabled = enable;
+ if (!enable) {
+ cancel_delayed_work_sync(&msm_host->pm_qos_irq.unvote_work);
+ atomic_set(&msm_host->pm_qos_irq.counter, 0);
+ msm_host->pm_qos_irq.latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&msm_host->pm_qos_irq.req,
+ msm_host->pm_qos_irq.latency);
+ }
+
+out:
+ return count;
+}
+
+#ifdef CONFIG_SMP
+static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
+ struct sdhci_host *host)
+{
+ msm_host->pm_qos_irq.req.irq = host->irq;
+}
+#else
+static inline void set_affine_irq(struct sdhci_msm_host *msm_host,
+ struct sdhci_host *host) { }
+#endif
+
+void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pm_qos_latency *irq_latency;
+ int ret;
+
+ if (!msm_host->pdata->pm_qos_data.irq_valid)
+ return;
+
+ /* Initialize only once as this gets called per partition */
+ if (msm_host->pm_qos_irq.enabled)
+ return;
+
+ atomic_set(&msm_host->pm_qos_irq.counter, 0);
+ msm_host->pm_qos_irq.req.type =
+ msm_host->pdata->pm_qos_data.irq_req_type;
+ if ((msm_host->pm_qos_irq.req.type != PM_QOS_REQ_AFFINE_CORES) &&
+ (msm_host->pm_qos_irq.req.type != PM_QOS_REQ_ALL_CORES))
+ set_affine_irq(msm_host, host);
+ else
+ cpumask_copy(&msm_host->pm_qos_irq.req.cpus_affine,
+ cpumask_of(msm_host->pdata->pm_qos_data.irq_cpu));
+
+ INIT_DELAYED_WORK(&msm_host->pm_qos_irq.unvote_work,
+ sdhci_msm_pm_qos_irq_unvote_work);
+ /* For initialization phase, set the performance latency */
+ irq_latency = &msm_host->pdata->pm_qos_data.irq_latency;
+ msm_host->pm_qos_irq.latency =
+ irq_latency->latency[SDHCI_PERFORMANCE_MODE];
+ pm_qos_add_request(&msm_host->pm_qos_irq.req, PM_QOS_CPU_DMA_LATENCY,
+ msm_host->pm_qos_irq.latency);
+ msm_host->pm_qos_irq.enabled = true;
+
+ /* sysfs */
+ msm_host->pm_qos_irq.enable_attr.show =
+ sdhci_msm_pm_qos_irq_enable_show;
+ msm_host->pm_qos_irq.enable_attr.store =
+ sdhci_msm_pm_qos_irq_enable_store;
+ sysfs_attr_init(&msm_host->pm_qos_irq.enable_attr.attr);
+ msm_host->pm_qos_irq.enable_attr.attr.name = "pm_qos_irq_enable";
+ msm_host->pm_qos_irq.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&msm_host->pdev->dev,
+ &msm_host->pm_qos_irq.enable_attr);
+ if (ret)
+ pr_err("%s: fail to create pm_qos_irq_enable (%d)\n",
+ __func__, ret);
+
+ msm_host->pm_qos_irq.status_attr.show = sdhci_msm_pm_qos_irq_show;
+ msm_host->pm_qos_irq.status_attr.store = NULL;
+ sysfs_attr_init(&msm_host->pm_qos_irq.status_attr.attr);
+ msm_host->pm_qos_irq.status_attr.attr.name = "pm_qos_irq_status";
+ msm_host->pm_qos_irq.status_attr.attr.mode = S_IRUGO;
+ ret = device_create_file(&msm_host->pdev->dev,
+ &msm_host->pm_qos_irq.status_attr);
+ if (ret)
+ pr_err("%s: fail to create pm_qos_irq_status (%d)\n",
+ __func__, ret);
+}
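The attribute setup above fills in struct device_attribute fields at runtime because each host instance carries its own attributes. For comparison, the equivalent static pattern would look roughly like this (names illustrative; read-only variant):

	static ssize_t pm_qos_irq_enable_show(struct device *dev,
			struct device_attribute *attr, char *buf)
	{
		/* the driver reports per-host state here */
		return snprintf(buf, PAGE_SIZE, "%u\n", 1);
	}
	static DEVICE_ATTR(pm_qos_irq_enable, 0444, pm_qos_irq_enable_show, NULL);

	/* registered with: device_create_file(dev, &dev_attr_pm_qos_irq_enable); */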
+
+static ssize_t sdhci_msm_pm_qos_group_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pm_qos_group *group;
+ int i;
+ int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+ int offset = 0;
+
+ for (i = 0; i < nr_groups; i++) {
+ group = &msm_host->pm_qos[i];
+ offset += snprintf(&buf[offset], PAGE_SIZE - offset,
+ "Group #%d (mask=0x%lx) PM QoS: enabled=%d, counter=%d, latency=%d\n",
+ i, group->req.cpus_affine.bits[0],
+ msm_host->pm_qos_group_enable,
+ atomic_read(&group->counter),
+ group->latency);
+ }
+
+ return offset;
+}
+
+static ssize_t sdhci_msm_pm_qos_group_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ msm_host->pm_qos_group_enable ? "enabled" : "disabled");
+}
+
+static ssize_t sdhci_msm_pm_qos_group_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+ uint32_t value;
+ bool enable;
+ int ret;
+ int i;
+
+ ret = kstrtou32(buf, 0, &value);
+ if (ret)
+ goto out;
+ enable = !!value;
+
+ if (enable == msm_host->pm_qos_group_enable)
+ goto out;
+
+ msm_host->pm_qos_group_enable = enable;
+ if (!enable) {
+ for (i = 0; i < nr_groups; i++) {
+ cancel_delayed_work_sync(
+ &msm_host->pm_qos[i].unvote_work);
+ atomic_set(&msm_host->pm_qos[i].counter, 0);
+ msm_host->pm_qos[i].latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&msm_host->pm_qos[i].req,
+ msm_host->pm_qos[i].latency);
+ }
+ }
+
+out:
+ return count;
+}
+
+static int sdhci_msm_get_cpu_group(struct sdhci_msm_host *msm_host, int cpu)
+{
+ int i;
+ struct sdhci_msm_cpu_group_map *map =
+ &msm_host->pdata->pm_qos_data.cpu_group_map;
+
+ if (cpu < 0)
+ goto not_found;
+
+ for (i = 0; i < map->nr_groups; i++)
+ if (cpumask_test_cpu(cpu, &map->mask[i]))
+ return i;
+
+not_found:
+ return -EINVAL;
+}
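sdhci_msm_get_cpu_group() maps a CPU to the first group whose cpumask contains it. The same lookup over plain bitmasks, as an illustrative standalone sketch:

	static int get_cpu_group(const unsigned long *masks, int nr_groups, int cpu)
	{
		int i;

		if (cpu < 0)
			return -1;	/* -EINVAL in the driver */
		for (i = 0; i < nr_groups; i++)
			if (masks[i] & (1UL << cpu))
				return i;
		return -1;
	}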
+
+void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
+ struct sdhci_msm_pm_qos_latency *latency, int cpu)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int group = sdhci_msm_get_cpu_group(msm_host, cpu);
+ struct sdhci_msm_pm_qos_group *pm_qos_group;
+ int counter;
+
+ if (!msm_host->pm_qos_group_enable || group < 0)
+ return;
+
+ pm_qos_group = &msm_host->pm_qos[group];
+ counter = atomic_inc_return(&pm_qos_group->counter);
+
+ /* Make sure to update the voting in case power policy has changed */
+ if (pm_qos_group->latency == latency->latency[host->power_policy]
+ && counter > 1)
+ return;
+
+ cancel_delayed_work_sync(&pm_qos_group->unvote_work);
+
+ pm_qos_group->latency = latency->latency[host->power_policy];
+ pm_qos_update_request(&pm_qos_group->req, pm_qos_group->latency);
+}
+
+static void sdhci_msm_pm_qos_cpu_unvote_work(struct work_struct *work)
+{
+ struct sdhci_msm_pm_qos_group *group =
+ container_of(work, struct sdhci_msm_pm_qos_group,
+ unvote_work.work);
+
+ if (atomic_read(&group->counter))
+ return;
+
+ group->latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&group->req, group->latency);
+}
+
+bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int group = sdhci_msm_get_cpu_group(msm_host, cpu);
+
+ if (!msm_host->pm_qos_group_enable || group < 0 ||
+ atomic_dec_return(&msm_host->pm_qos[group].counter))
+ return false;
+
+ if (async) {
+ schedule_delayed_work(&msm_host->pm_qos[group].unvote_work,
+ msecs_to_jiffies(QOS_REMOVE_DELAY_MS));
+ return true;
+ }
+
+ msm_host->pm_qos[group].latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_update_request(&msm_host->pm_qos[group].req,
+ msm_host->pm_qos[group].latency);
+ return true;
+}
+
+void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
+ struct sdhci_msm_pm_qos_latency *latency)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int nr_groups = msm_host->pdata->pm_qos_data.cpu_group_map.nr_groups;
+ struct sdhci_msm_pm_qos_group *group;
+ int i;
+ int ret;
+
+ if (msm_host->pm_qos_group_enable)
+ return;
+
+ msm_host->pm_qos = kcalloc(nr_groups, sizeof(*msm_host->pm_qos),
+ GFP_KERNEL);
+ if (!msm_host->pm_qos)
+ return;
+
+ for (i = 0; i < nr_groups; i++) {
+ group = &msm_host->pm_qos[i];
+ INIT_DELAYED_WORK(&group->unvote_work,
+ sdhci_msm_pm_qos_cpu_unvote_work);
+ atomic_set(&group->counter, 0);
+ group->req.type = PM_QOS_REQ_AFFINE_CORES;
+ cpumask_copy(&group->req.cpus_affine,
+ &msm_host->pdata->pm_qos_data.cpu_group_map.mask[i]);
+ /* We set default latency here for all pm_qos cpu groups. */
+ group->latency = PM_QOS_DEFAULT_VALUE;
+ pm_qos_add_request(&group->req, PM_QOS_CPU_DMA_LATENCY,
+ group->latency);
+ pr_info("%s(): voted for group #%d (mask=0x%lx) latency=%d (0x%p)\n",
+ __func__, i,
+ group->req.cpus_affine.bits[0],
+ group->latency,
+ &latency[i].latency[SDHCI_PERFORMANCE_MODE]);
+ }
+ msm_host->pm_qos_prev_cpu = -1;
+ msm_host->pm_qos_group_enable = true;
+
+ /* sysfs */
+ msm_host->pm_qos_group_status_attr.show = sdhci_msm_pm_qos_group_show;
+ msm_host->pm_qos_group_status_attr.store = NULL;
+ sysfs_attr_init(&msm_host->pm_qos_group_status_attr.attr);
+ msm_host->pm_qos_group_status_attr.attr.name =
+ "pm_qos_cpu_groups_status";
+ msm_host->pm_qos_group_status_attr.attr.mode = S_IRUGO;
+ ret = device_create_file(&msm_host->pdev->dev,
+ &msm_host->pm_qos_group_status_attr);
+ if (ret)
+ dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_status_attr (%d)\n",
+ __func__, ret);
+ msm_host->pm_qos_group_enable_attr.show =
+ sdhci_msm_pm_qos_group_enable_show;
+ msm_host->pm_qos_group_enable_attr.store =
+ sdhci_msm_pm_qos_group_enable_store;
+ sysfs_attr_init(&msm_host->pm_qos_group_enable_attr.attr);
+ msm_host->pm_qos_group_enable_attr.attr.name =
+ "pm_qos_cpu_groups_enable";
+ msm_host->pm_qos_group_enable_attr.attr.mode = S_IRUGO;
+ ret = device_create_file(&msm_host->pdev->dev,
+ &msm_host->pm_qos_group_enable_attr);
+ if (ret)
+ dev_err(&msm_host->pdev->dev, "%s: fail to create pm_qos_group_enable_attr (%d)\n",
+ __func__, ret);
+}
+
+static void sdhci_msm_pre_req(struct sdhci_host *host,
+ struct mmc_request *mmc_req)
+{
+ int cpu;
+ int group;
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int prev_group = sdhci_msm_get_cpu_group(msm_host,
+ msm_host->pm_qos_prev_cpu);
+
+ sdhci_msm_pm_qos_irq_vote(host);
+
+ cpu = get_cpu();
+ put_cpu();
+ group = sdhci_msm_get_cpu_group(msm_host, cpu);
+ if (group < 0)
+ return;
+
+ if (group != prev_group && prev_group >= 0) {
+ sdhci_msm_pm_qos_cpu_unvote(host,
+ msm_host->pm_qos_prev_cpu, false);
+ prev_group = -1; /* make sure to vote for new group */
+ }
+
+ if (prev_group < 0) {
+ sdhci_msm_pm_qos_cpu_vote(host,
+ msm_host->pdata->pm_qos_data.latency, cpu);
+ msm_host->pm_qos_prev_cpu = cpu;
+ }
+}
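The group handling in sdhci_msm_pre_req() boils down to: if the issuing CPU has moved to a different PM QoS group since the last request, drop the old group's vote first, then vote for the current group. A compact sketch of that state machine (illustrative only, tracking groups instead of CPUs):

	static void migrate_group_vote(int *prev, int cur)
	{
		if (cur != *prev && *prev >= 0) {
			/* sdhci_msm_pm_qos_cpu_unvote() on the old group */
			*prev = -1;
		}
		if (*prev < 0) {
			/* sdhci_msm_pm_qos_cpu_vote() on the current group */
			*prev = cur;
		}
	}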
+
+static void sdhci_msm_post_req(struct sdhci_host *host,
+ struct mmc_request *mmc_req)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ sdhci_msm_pm_qos_irq_unvote(host, false);
+
+ if (sdhci_msm_pm_qos_cpu_unvote(host, msm_host->pm_qos_prev_cpu, false))
+ msm_host->pm_qos_prev_cpu = -1;
+}
+
+static void sdhci_msm_init(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ sdhci_msm_pm_qos_irq_init(host);
+
+ if (msm_host->pdata->pm_qos_data.legacy_valid)
+ sdhci_msm_pm_qos_cpu_init(host,
+ msm_host->pdata->pm_qos_data.latency);
+}
+
+static unsigned int sdhci_msm_get_current_limit(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_slot_reg_data *curr_slot = msm_host->pdata->vreg_data;
+ u32 max_curr = 0;
+
+ if (curr_slot && curr_slot->vdd_data)
+ max_curr = curr_slot->vdd_data->hpm_uA;
+
+ return max_curr;
+}
static struct sdhci_ops sdhci_msm_ops = {
+ .crypto_engine_cfg = sdhci_msm_ice_cfg,
+ .crypto_engine_cmdq_cfg = sdhci_msm_ice_cmdq_cfg,
+ .crypto_engine_cfg_end = sdhci_msm_ice_cfg_end,
+ .crypto_cfg_reset = sdhci_msm_ice_cfg_reset,
+ .crypto_engine_reset = sdhci_msm_ice_reset,
+ .set_uhs_signaling = sdhci_msm_set_uhs_signaling,
+ .check_power_status = sdhci_msm_check_power_status,
.platform_execute_tuning = sdhci_msm_execute_tuning,
- .reset = sdhci_reset,
- .set_clock = sdhci_set_clock,
+ .enhanced_strobe = sdhci_msm_enhanced_strobe,
+ .toggle_cdr = sdhci_msm_toggle_cdr,
+ .get_max_segments = sdhci_msm_max_segs,
+ .set_clock = sdhci_msm_set_clock,
+ .get_min_clock = sdhci_msm_get_min_clock,
+ .get_max_clock = sdhci_msm_get_max_clock,
+ .dump_vendor_regs = sdhci_msm_dump_vendor_regs,
+ .config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
+ .enable_controller_clock = sdhci_msm_enable_controller_clock,
.set_bus_width = sdhci_set_bus_width,
- .set_uhs_signaling = sdhci_set_uhs_signaling,
+ .reset = sdhci_msm_reset,
+ .clear_set_dumpregs = sdhci_msm_clear_set_dumpregs,
+ .enhanced_strobe_mask = sdhci_msm_enhanced_strobe_mask,
+ .notify_load = sdhci_msm_notify_load,
+ .reset_workaround = sdhci_msm_reset_workaround,
+ .init = sdhci_msm_init,
+ .pre_req = sdhci_msm_pre_req,
+ .post_req = sdhci_msm_post_req,
+ .get_current_limit = sdhci_msm_get_current_limit,
};
+static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
+ struct sdhci_host *host)
+{
+ u32 version, caps = 0;
+ u16 minor;
+ u8 major;
+ u32 val;
+ const struct sdhci_msm_offset *msm_host_offset =
+ msm_host->offset;
+
+ version = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_VERSION);
+ major = (version & CORE_VERSION_MAJOR_MASK) >>
+ CORE_VERSION_MAJOR_SHIFT;
+ minor = version & CORE_VERSION_TARGET_MASK;
+
+ caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
+
+ /*
+ * Starting with the SDCC 5 controller (core major version = 1),
+ * the controller won't advertise 3.0v, 1.8v and 8-bit features
+ * except for some targets.
+ */
+ if (major >= 1 && minor != 0x11 && minor != 0x12) {
+ struct sdhci_msm_reg_data *vdd_io_reg;
+ /*
+ * Enable 1.8V support capability on controllers that
+ * support dual voltage
+ */
+ vdd_io_reg = msm_host->pdata->vreg_data->vdd_io_data;
+ if (vdd_io_reg && (vdd_io_reg->high_vol_level > 2700000))
+ caps |= CORE_3_0V_SUPPORT;
+ if (vdd_io_reg && (vdd_io_reg->low_vol_level < 1950000))
+ caps |= CORE_1_8V_SUPPORT;
+ if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
+ caps |= CORE_8_BIT_SUPPORT;
+ }
+
+ /*
+ * Enable ONE_MID mode for SDCC5 (major 1) on 8916/8939 (minor 0x2e) and
+ * on 8992 (minor 0x3e) as a workaround for the data-stuck issue on reset.
+ */
+ if (major == 1 && (minor == 0x2e || minor == 0x3e)) {
+ host->quirks2 |= SDHCI_QUIRK2_USE_RESET_WORKAROUND;
+ val = readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ writel_relaxed((val | CORE_ONE_MID_EN),
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC_FUNC2);
+ }
+ /*
+ * SDCC 5 controllers (major version 1) with minor version 0x34 or later
+ * and HS400 support use the CM DLL instead of the CDC LP 533 DLL.
+ */
+ if ((major == 1) && (minor < 0x34))
+ msm_host->use_cdclp533 = true;
+
+ /*
+ * SDCC 5 controllers with major version 1, minor version 0x42 and later
+ * require additional steps when resetting the DLL.
+ * They also support HS400 enhanced strobe mode.
+ */
+ if ((major == 1) && (minor >= 0x42)) {
+ msm_host->use_updated_dll_reset = true;
+ msm_host->enhanced_strobe = true;
+ }
+
+ /*
+ * SDCC 5 controllers with major version 1 and minor version 0x42,
+ * 0x46 or 0x49 currently use the 14lpp tech DLL, whose internal
+ * gating cannot guarantee the MCLK timing requirement, i.e.
+ * when MCLK is gated OFF it must stay off for at least 0.5us,
+ * and MCLK must be switched on for at least 1us before DATA
+ * starts coming in.
+ */
+ if ((major == 1) && ((minor == 0x42) || (minor == 0x46) ||
+ (minor == 0x49)))
+ msm_host->use_14lpp_dll = true;
+
+ /* Fake 3.0V support for SDIO devices which require such voltage */
+ if (msm_host->core_3_0v_support) {
+ caps |= CORE_3_0V_SUPPORT;
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ SDHCI_CAPABILITIES) | caps), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+ }
+
+ if ((major == 1) && (minor >= 0x49))
+ msm_host->rclk_delay_fix = true;
+ /*
+ * Mask 64-bit support on controllers with a 32-bit address bus so that
+ * the smaller descriptor size is used, reducing memory consumption.
+ */
+ if (!msm_host->pdata->largeaddressbus)
+ caps &= ~CORE_SYS_BUS_SUPPORT_64_BIT;
+
+ writel_relaxed(caps, host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC_CAPABILITIES0);
+ /* keep track of the value in SDHCI_CAPABILITIES */
+ msm_host->caps_0 = caps;
+
+ if ((major == 1) && (minor >= 0x6b)) {
+ msm_host->ice_hci_support = true;
+ host->cdr_support = true;
+ }
+}
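All of the quirk decisions above key off the major/minor fields of CORE_MCI_VERSION. A standalone sketch of the decode, assuming the usual layout (major version in bits 31:28, minor/target version in the low byte; the real mask/shift macros live in the driver headers):

	#include <stdio.h>

	int main(void)
	{
		unsigned int version = 0x1000002e;	/* e.g. an SDCC5 on 8916 */
		unsigned char major = (version >> 28) & 0xf;
		unsigned short minor = version & 0xff;

		printf("major=0x%x minor=0x%x\n", major, minor);	/* 0x1 0x2e */
		return 0;
	}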
+
+#ifdef CONFIG_MMC_CQ_HCI
+static void sdhci_msm_cmdq_init(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+ if (nocmdq) {
+ dev_dbg(&pdev->dev, "CMDQ disabled via cmdline\n");
+ return;
+ }
+
+ host->cq_host = cmdq_pltfm_init(pdev);
+ if (IS_ERR(host->cq_host)) {
+ dev_dbg(&pdev->dev, "cmdq-pltfm init: failed: %ld\n",
+ PTR_ERR(host->cq_host));
+ host->cq_host = NULL;
+ } else {
+ msm_host->mmc->caps2 |= MMC_CAP2_CMD_QUEUE;
+ }
+}
+#else
+static void sdhci_msm_cmdq_init(struct sdhci_host *host,
+ struct platform_device *pdev)
+{
+}
+#endif
+
+static bool sdhci_msm_is_bootdevice(struct device *dev)
+{
+ if (strnstr(saved_command_line, "androidboot.bootdevice=",
+ strlen(saved_command_line))) {
+ char search_string[50];
+
+ snprintf(search_string, ARRAY_SIZE(search_string),
+ "androidboot.bootdevice=%s", dev_name(dev));
+ if (strnstr(saved_command_line, search_string,
+ strlen(saved_command_line)))
+ return true;
+ else
+ return false;
+ }
+
+ /*
+ * If the "androidboot.bootdevice=" argument is not present, then
+ * return true as we don't know the boot device anyway.
+ */
+ return true;
+}
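sdhci_msm_is_bootdevice() lets the eMMC slot probe proceed only when its device name matches the androidboot.bootdevice= argument, or when no such argument is given. A userspace sketch of the same matching (strstr standing in for the kernel's strnstr; the device name shown is a made-up example):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	static bool is_bootdevice(const char *cmdline, const char *dev_name)
	{
		char token[50];

		if (!strstr(cmdline, "androidboot.bootdevice="))
			return true;	/* boot device unknown: don't skip the probe */
		snprintf(token, sizeof(token), "androidboot.bootdevice=%s", dev_name);
		return strstr(cmdline, token) != NULL;
	}

	int main(void)
	{
		const char *cl = "console=ttyMSM0 androidboot.bootdevice=7824900.sdhci";

		printf("%d %d\n", is_bootdevice(cl, "7824900.sdhci"),
		       is_bootdevice(cl, "7824000.sdhci"));	/* 1 0 */
		return 0;
	}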
+
static int sdhci_msm_probe(struct platform_device *pdev)
{
+ const struct sdhci_msm_offset *msm_host_offset;
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_msm_host *msm_host;
- struct resource *core_memres;
- int ret;
- u16 host_version, core_minor;
- u32 core_version, caps;
- u8 core_major;
+ struct resource *core_memres = NULL;
+ int ret = 0, dead = 0;
+ u16 host_version;
+ u32 irq_status, irq_ctl;
+ struct resource *tlmm_memres = NULL;
+ void __iomem *tlmm_mem;
+ unsigned long flags;
- msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL);
- if (!msm_host)
- return -ENOMEM;
+ pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
+ msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
+ GFP_KERNEL);
+ if (!msm_host) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (of_find_compatible_node(NULL, NULL, "qcom,sdhci-msm-v5")) {
+ msm_host->mci_removed = true;
+ msm_host->offset = &sdhci_msm_offset_mci_removed;
+ } else {
+ msm_host->mci_removed = false;
+ msm_host->offset = &sdhci_msm_offset_mci_present;
+ }
+ msm_host_offset = msm_host->offset;
msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
- if (IS_ERR(host))
- return PTR_ERR(host);
+ if (IS_ERR(host)) {
+ ret = PTR_ERR(host);
+ goto out_host_free;
+ }
pltfm_host = sdhci_priv(host);
pltfm_host->priv = msm_host;
msm_host->mmc = host->mmc;
msm_host->pdev = pdev;
- ret = mmc_of_parse(host->mmc);
- if (ret)
+ /* get the ice device vops if present */
+ ret = sdhci_msm_ice_get_dev(host);
+ if (ret == -EPROBE_DEFER) {
+ /*
+ * The SDHCI driver might be probed before the ICE driver is.
+ * In that case, return EPROBE_DEFER to delay this probe until
+ * the ICE device is ready.
+ */
+ dev_err(&pdev->dev, "%s: required ICE device not probed yet, err = %d\n",
+ __func__, ret);
goto pltfm_free;
- sdhci_get_of_property(pdev);
+ } else if (ret == -ENODEV) {
+ /*
+ * The ICE device is not enabled in the DTS file, so no further
+ * initialization of the ICE driver is needed.
+ */
+ dev_warn(&pdev->dev, "%s: ICE device is not enabled\n",
+ __func__);
+ } else if (ret) {
+ dev_err(&pdev->dev, "%s: sdhci_msm_ice_get_dev failed %d\n",
+ __func__, ret);
+ goto pltfm_free;
+ }
+
+ /* Extract platform data */
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "Failed to get slot index %d\n",
+ ret);
+ goto pltfm_free;
+ }
+
+ /* skip the probe if eMMC isn't a boot device */
+ if ((ret == 1) && !sdhci_msm_is_bootdevice(&pdev->dev)) {
+ ret = -ENODEV;
+ goto pltfm_free;
+ }
+
+ if (disable_slots & (1 << (ret - 1))) {
+ dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
+ ret);
+ ret = -ENODEV;
+ goto pltfm_free;
+ }
+
+ if (ret <= 2)
+ sdhci_slot[ret-1] = msm_host;
+
+ msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev,
+ msm_host);
+ if (!msm_host->pdata) {
+ dev_err(&pdev->dev, "DT parsing error\n");
+ goto pltfm_free;
+ }
+ } else {
+ dev_err(&pdev->dev, "No device tree node\n");
+ goto pltfm_free;
+ }
+
+ /* Setup Clocks */
/* Setup SDCC bus voter clock. */
- msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus");
- if (!IS_ERR(msm_host->bus_clk)) {
+ msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
/* Vote for max. clk rate for max. performance */
ret = clk_set_rate(msm_host->bus_clk, INT_MAX);
if (ret)
@@ -470,99 +4354,420 @@ static int sdhci_msm_probe(struct platform_device *pdev)
}
/* Setup main peripheral bus clock */
- msm_host->pclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(msm_host->pclk)) {
- ret = PTR_ERR(msm_host->pclk);
- dev_err(&pdev->dev, "Perpheral clk setup failed (%d)\n", ret);
- goto bus_clk_disable;
+ msm_host->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (!IS_ERR(msm_host->pclk)) {
+ ret = clk_prepare_enable(msm_host->pclk);
+ if (ret)
+ goto bus_clk_disable;
}
+ atomic_set(&msm_host->controller_clock, 1);
- ret = clk_prepare_enable(msm_host->pclk);
- if (ret)
- goto bus_clk_disable;
+ if (msm_host->ice.pdev) {
+ /* Setup SDC ICE clock */
+ msm_host->ice_clk = devm_clk_get(&pdev->dev, "ice_core_clk");
+ if (!IS_ERR(msm_host->ice_clk)) {
+ /* ICE core has only one clock frequency for now */
+ ret = clk_set_rate(msm_host->ice_clk,
+ msm_host->pdata->ice_clk_max);
+ if (ret) {
+ dev_err(&pdev->dev, "ICE_CLK rate set failed (%d) for %u\n",
+ ret,
+ msm_host->pdata->ice_clk_max);
+ goto pclk_disable;
+ }
+ ret = clk_prepare_enable(msm_host->ice_clk);
+ if (ret)
+ goto pclk_disable;
+
+ msm_host->ice_clk_rate =
+ msm_host->pdata->ice_clk_max;
+ }
+ }
/* Setup SDC MMC clock */
- msm_host->clk = devm_clk_get(&pdev->dev, "core");
+ msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
if (IS_ERR(msm_host->clk)) {
ret = PTR_ERR(msm_host->clk);
- dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret);
goto pclk_disable;
}
- /* Vote for maximum clock rate for maximum performance */
- ret = clk_set_rate(msm_host->clk, INT_MAX);
- if (ret)
- dev_warn(&pdev->dev, "core clock boost failed\n");
-
+ /* Set to the minimum supported clock frequency */
+ ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
+ if (ret) {
+ dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
+ goto pclk_disable;
+ }
ret = clk_prepare_enable(msm_host->clk);
if (ret)
goto pclk_disable;
- core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres);
+ msm_host->clk_rate = sdhci_msm_get_min_clock(host);
+ atomic_set(&msm_host->clks_on, 1);
+
+ /* Setup CDC calibration fixed feedback clock */
+ msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
+ if (!IS_ERR(msm_host->ff_clk)) {
+ ret = clk_prepare_enable(msm_host->ff_clk);
+ if (ret)
+ goto clk_disable;
+ }
- if (IS_ERR(msm_host->core_mem)) {
- dev_err(&pdev->dev, "Failed to remap registers\n");
- ret = PTR_ERR(msm_host->core_mem);
- goto clk_disable;
+ /* Setup CDC calibration sleep clock */
+ msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
+ if (!IS_ERR(msm_host->sleep_clk)) {
+ ret = clk_prepare_enable(msm_host->sleep_clk);
+ if (ret)
+ goto ff_clk_disable;
+ }
+
+ msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
+ ret = sdhci_msm_bus_register(msm_host, pdev);
+ if (ret)
+ goto sleep_clk_disable;
+
+ if (msm_host->msm_bus_vote.client_handle)
+ INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
+ sdhci_msm_bus_work);
+ sdhci_msm_bus_voting(host, 1);
+
+ /* Setup regulators */
+ ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
+ if (ret) {
+ dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
+ goto bus_unregister;
}
/* Reset the core and Enable SDHC mode */
- writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
- CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+ core_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "core_mem");
+ if (!msm_host->mci_removed) {
+ if (!core_memres) {
+ dev_err(&pdev->dev, "Failed to get iomem resource\n");
+ goto vreg_deinit;
+ }
+ msm_host->core_mem = devm_ioremap(&pdev->dev,
+ core_memres->start, resource_size(core_memres));
+
+ if (!msm_host->core_mem) {
+ dev_err(&pdev->dev, "Failed to remap registers\n");
+ ret = -ENOMEM;
+ goto vreg_deinit;
+ }
+ }
+
+ tlmm_memres = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "tlmm_mem");
+ if (tlmm_memres) {
+ tlmm_mem = devm_ioremap(&pdev->dev, tlmm_memres->start,
+ resource_size(tlmm_memres));
+
+ if (!tlmm_mem) {
+ dev_err(&pdev->dev, "Failed to remap tlmm registers\n");
+ ret = -ENOMEM;
+ goto vreg_deinit;
+ }
+ writel_relaxed(readl_relaxed(tlmm_mem) | 0x2, tlmm_mem);
+ dev_dbg(&pdev->dev, "tlmm reg %pa value 0x%08x\n",
+ &tlmm_memres->start, readl_relaxed(tlmm_mem));
+ }
+
+ /*
+ * Reset the vendor spec register to its power-on reset state.
+ */
+ writel_relaxed(CORE_VENDOR_SPEC_POR_VAL,
+ host->ioaddr + msm_host_offset->CORE_VENDOR_SPEC);
+
+ if (!msm_host->mci_removed) {
+ /* Set HC_MODE_EN bit in HC_MODE register */
+ writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
- /* SW reset can take upto 10HCLK + 15MCLK cycles. (min 40us) */
- usleep_range(1000, 5000);
- if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) {
- dev_err(&pdev->dev, "Stuck in reset\n");
- ret = -ETIMEDOUT;
- goto clk_disable;
+ /* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
+ writel_relaxed(readl_relaxed(msm_host->core_mem +
+ CORE_HC_MODE) | FF_CLK_SW_RST_DIS,
+ msm_host->core_mem + CORE_HC_MODE);
}
+ sdhci_set_default_hw_caps(msm_host, host);
+
+ /*
+ * Set the PAD_PWR_SWITCH_EN bit so that the PAD_PWR_SWITCH bit can
+ * be used as required later on.
+ */
+ writel_relaxed((readl_relaxed(host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC) |
+ CORE_IO_PAD_PWR_SWITCH_EN), host->ioaddr +
+ msm_host_offset->CORE_VENDOR_SPEC);
+ /*
+ * CORE_SW_RST above may trigger a power irq if the previous status of
+ * PWRCTL was either BUS_ON or IO_HIGH_V. So before we enable the power
+ * irq interrupt in the GIC (by registering the interrupt handler), we
+ * need to ensure that any pending power irq status is acknowledged;
+ * otherwise the power irq handler would fire prematurely.
+ */
+ irq_status = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_STATUS);
+ sdhci_msm_writel_relaxed(irq_status, host,
+ msm_host_offset->CORE_PWRCTL_CLEAR);
+ irq_ctl = sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_PWRCTL_CTL);
+
+ if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+ irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
+ if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
+ irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
+ sdhci_msm_writel_relaxed(irq_ctl, host,
+ msm_host_offset->CORE_PWRCTL_CTL);
- /* Set HC_MODE_EN bit in HC_MODE register */
- writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
+ /*
+ * Ensure that the above writes are propagated before interrupt enablement
+ * in GIC.
+ */
+ mb();
+ /*
+ * The following are deviations from the SDHC spec v3.0:
+ * 1. Card detection is handled via a separate GPIO.
+ * 2. Bus power control is handled by interacting with the PMIC.
+ */
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
+ host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
+ host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
+ host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
+ host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
+ host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
+ host->quirks2 |= SDHCI_QUIRK2_NON_STANDARD_TUNING;
+ host->quirks2 |= SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING;
+
+ if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+ host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
- SDHCI_VENDOR_VER_SHIFT));
+ SDHCI_VENDOR_VER_SHIFT));
+ if (((host_version & SDHCI_VENDOR_VER_MASK) >>
+ SDHCI_VENDOR_VER_SHIFT) == SDHCI_VER_100) {
+ /*
+ * Add 40us delay in interrupt handler when
+ * Add a 40us delay in the interrupt handler when
+ * operating at the initialization frequency (400KHz).
+ host->quirks2 |= SDHCI_QUIRK2_SLOW_INT_CLR;
+ /*
+ * Set Software Reset for DAT line in Software
+ * Reset Register (Bit 2).
+ */
+ host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
+ }
- core_version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
- core_major = (core_version & CORE_VERSION_MAJOR_MASK) >>
- CORE_VERSION_MAJOR_SHIFT;
- core_minor = core_version & CORE_VERSION_MINOR_MASK;
- dev_dbg(&pdev->dev, "MCI Version: 0x%08x, major: 0x%04x, minor: 0x%02x\n",
- core_version, core_major, core_minor);
+ host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
- /*
- * Support for some capabilities is not advertised by newer
- * controller versions and must be explicitly enabled.
- */
- if (core_major >= 1 && core_minor != 0x11 && core_minor != 0x12) {
- caps = readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES);
- caps |= SDHCI_CAN_VDD_300 | SDHCI_CAN_DO_8BIT;
- writel_relaxed(caps, host->ioaddr +
- CORE_VENDOR_SPEC_CAPABILITIES0);
+ /* Setup PWRCTL irq */
+ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
+ if (msm_host->pwr_irq < 0) {
+ dev_err(&pdev->dev, "Failed to get pwr_irq by name (%d)\n",
+ msm_host->pwr_irq);
+ goto vreg_deinit;
+ }
+ ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL,
+ sdhci_msm_pwr_irq, IRQF_ONESHOT,
+ dev_name(&pdev->dev), host);
+ if (ret) {
+ dev_err(&pdev->dev, "Request threaded irq(%d) failed (%d)\n",
+ msm_host->pwr_irq, ret);
+ goto vreg_deinit;
}
+ /* Enable pwr irq interrupts */
+ sdhci_msm_writel_relaxed(INT_MASK, host,
+ msm_host_offset->CORE_PWRCTL_MASK);
+
+#ifdef CONFIG_MMC_CLKGATE
+ /* Set clock gating delay to be used when CONFIG_MMC_CLKGATE is set */
+ msm_host->mmc->clkgate_delay = SDHCI_MSM_MMC_CLK_GATE_DELAY;
+#endif
+
+ /* Set host capabilities */
+ msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
+ msm_host->mmc->caps |= msm_host->pdata->caps;
+ msm_host->mmc->caps |= MMC_CAP_AGGRESSIVE_PM;
+ msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ msm_host->mmc->caps2 |= msm_host->pdata->caps2;
+ msm_host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
+ msm_host->mmc->caps2 |= MMC_CAP2_HS400_POST_TUNING;
+ msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
+ msm_host->mmc->caps2 |= MMC_CAP2_SANITIZE;
+ msm_host->mmc->caps2 |= MMC_CAP2_MAX_DISCARD_SIZE;
+ msm_host->mmc->caps2 |= MMC_CAP2_SLEEP_AWAKE;
+ msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+
+ if (msm_host->pdata->nonremovable)
+ msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+
+ if (msm_host->pdata->nonhotplug)
+ msm_host->mmc->caps2 |= MMC_CAP2_NONHOTPLUG;
+
+ msm_host->mmc->sdr104_wa = msm_host->pdata->sdr104_wa;
+
+ /* Initialize ICE if present */
+ if (msm_host->ice.pdev) {
+ ret = sdhci_msm_ice_init(host);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: SDHCI ICE init failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ ret = -EINVAL;
+ goto vreg_deinit;
+ }
+ host->is_crypto_en = true;
+ /* Packed commands cannot be encrypted/decrypted using ICE */
+ msm_host->mmc->caps2 &= ~(MMC_CAP2_PACKED_WR |
+ MMC_CAP2_PACKED_WR_CONTROL);
+ }
+
+ init_completion(&msm_host->pwr_irq_completion);
+
+ if (gpio_is_valid(msm_host->pdata->status_gpio)) {
+ /*
+ * Set up the card detect GPIO in active configuration before
+ * Set up the card detect GPIO in its active configuration before
+ * configuring it as an IRQ. Otherwise, it can be left in an
+ * inconsistent state, resulting in a flood of interrupts.
+ sdhci_msm_setup_pins(msm_host->pdata, true);
+
+ /*
+ * This delay is needed to stabilize the card detect GPIO
+ * line after changing the pull configuration.
+ */
+ usleep_range(10000, 10500);
+ ret = mmc_gpio_request_cd(msm_host->mmc,
+ msm_host->pdata->status_gpio, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
+ __func__, ret);
+ goto vreg_deinit;
+ }
+ }
+
+ if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
+ (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
+ host->dma_mask = DMA_BIT_MASK(64);
+ mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+ mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
+ } else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
+ host->dma_mask = DMA_BIT_MASK(32);
+ mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+ mmc_dev(host->mmc)->coherent_dma_mask = host->dma_mask;
+ } else {
+ dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
+ }
+
+ msm_host->pdata->sdiowakeup_irq = platform_get_irq_byname(pdev,
+ "sdiowakeup_irq");
+ if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+ dev_info(&pdev->dev, "%s: sdiowakeup_irq = %d\n", __func__,
+ msm_host->pdata->sdiowakeup_irq);
+ msm_host->is_sdiowakeup_enabled = true;
+ ret = request_irq(msm_host->pdata->sdiowakeup_irq,
+ sdhci_msm_sdiowakeup_irq,
+ IRQF_SHARED | IRQF_TRIGGER_HIGH,
+ "sdhci-msm sdiowakeup", host);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: request sdiowakeup IRQ %d: failed: %d\n",
+ __func__, msm_host->pdata->sdiowakeup_irq, ret);
+ msm_host->pdata->sdiowakeup_irq = -1;
+ msm_host->is_sdiowakeup_enabled = false;
+ goto vreg_deinit;
+ } else {
+ spin_lock_irqsave(&host->lock, flags);
+ sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+ msm_host->sdio_pending_processing = false;
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+ }
+
+ sdhci_msm_cmdq_init(host, pdev);
ret = sdhci_add_host(host);
+ if (ret) {
+ dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
+ goto vreg_deinit;
+ }
+
+ msm_host->pltfm_init_done = true;
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, MSM_AUTOSUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+
+ msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
+ msm_host->msm_bus_vote.max_bus_bw.store = store_sdhci_max_bus_bw;
+ sysfs_attr_init(&msm_host->msm_bus_vote.max_bus_bw.attr);
+ msm_host->msm_bus_vote.max_bus_bw.attr.name = "max_bus_bw";
+ msm_host->msm_bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev,
+ &msm_host->msm_bus_vote.max_bus_bw);
if (ret)
- goto clk_disable;
+ goto remove_host;
- return 0;
+ if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
+ msm_host->polling.show = show_polling;
+ msm_host->polling.store = store_polling;
+ sysfs_attr_init(&msm_host->polling.attr);
+ msm_host->polling.attr.name = "polling";
+ msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev, &msm_host->polling);
+ if (ret)
+ goto remove_max_bus_bw_file;
+ }
+ msm_host->auto_cmd21_attr.show = show_auto_cmd21;
+ msm_host->auto_cmd21_attr.store = store_auto_cmd21;
+ sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
+ msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
+ msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+ if (ret) {
+ pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
+ mmc_hostname(host->mmc), __func__, ret);
+ device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+ }
+ /* Successful initialization */
+ goto out;
+
+remove_max_bus_bw_file:
+ device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
+remove_host:
+ dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+ pm_runtime_disable(&pdev->dev);
+ sdhci_remove_host(host, dead);
+vreg_deinit:
+ sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+bus_unregister:
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ sdhci_msm_bus_unregister(msm_host);
+sleep_clk_disable:
+ if (!IS_ERR(msm_host->sleep_clk))
+ clk_disable_unprepare(msm_host->sleep_clk);
+ff_clk_disable:
+ if (!IS_ERR(msm_host->ff_clk))
+ clk_disable_unprepare(msm_host->ff_clk);
clk_disable:
- clk_disable_unprepare(msm_host->clk);
+ if (!IS_ERR(msm_host->clk))
+ clk_disable_unprepare(msm_host->clk);
pclk_disable:
- clk_disable_unprepare(msm_host->pclk);
+ if (!IS_ERR(msm_host->pclk))
+ clk_disable_unprepare(msm_host->pclk);
bus_clk_disable:
- if (!IS_ERR(msm_host->bus_clk))
+ if (!IS_ERR_OR_NULL(msm_host->bus_clk))
clk_disable_unprepare(msm_host->bus_clk);
pltfm_free:
sdhci_pltfm_free(pdev);
+out_host_free:
+ devm_kfree(&pdev->dev, msm_host);
+out:
+ pr_debug("%s: Exit %s\n", dev_name(&pdev->dev), __func__);
return ret;
}
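The probe error handling above follows the standard kernel goto-unwind ladder: each label undoes exactly what succeeded before the failing step, in reverse order. The bare shape of the pattern (step_a/step_b/undo_step_a are hypothetical):

	int setup_example(void)
	{
		int ret;

		ret = step_a();
		if (ret)
			goto out;
		ret = step_b();
		if (ret)
			goto undo_a;	/* only step_a needs undoing */
		return 0;

	undo_a:
		undo_step_a();
	out:
		return ret;
	}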
@@ -571,28 +4776,270 @@ static int sdhci_msm_remove(struct platform_device *pdev)
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ struct sdhci_msm_pltfm_data *pdata = msm_host->pdata;
int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) ==
- 0xffffffff);
+ 0xffffffff);
+ pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
+ if (!gpio_is_valid(msm_host->pdata->status_gpio))
+ device_remove_file(&pdev->dev, &msm_host->polling);
+ device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
+ pm_runtime_disable(&pdev->dev);
sdhci_remove_host(host, dead);
sdhci_pltfm_free(pdev);
- clk_disable_unprepare(msm_host->clk);
- clk_disable_unprepare(msm_host->pclk);
- if (!IS_ERR(msm_host->bus_clk))
- clk_disable_unprepare(msm_host->bus_clk);
+
+ sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+
+ sdhci_msm_setup_pins(pdata, true);
+ sdhci_msm_setup_pins(pdata, false);
+
+ if (msm_host->msm_bus_vote.client_handle) {
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ sdhci_msm_bus_unregister(msm_host);
+ }
return 0;
}
+#ifdef CONFIG_PM
+static int sdhci_msm_cfg_sdio_wakeup(struct sdhci_host *host, bool enable)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!(host->mmc->card && mmc_card_sdio(host->mmc->card) &&
+ sdhci_is_valid_gpio_wakeup_int(msm_host) &&
+ mmc_card_wake_sdio_irq(host->mmc))) {
+ msm_host->sdio_pending_processing = false;
+ return 1;
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (enable) {
+ /* configure DAT1 gpio if applicable */
+ if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+ msm_host->sdio_pending_processing = false;
+ ret = enable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+ if (!ret)
+ sdhci_msm_cfg_sdiowakeup_gpio_irq(host, true);
+ goto out;
+ } else {
+ pr_err("%s: sdiowakeup_irq(%d) invalid\n",
+ mmc_hostname(host->mmc),
+ msm_host->pdata->sdiowakeup_irq);
+ }
+ } else {
+ if (sdhci_is_valid_gpio_wakeup_int(msm_host)) {
+ ret = disable_irq_wake(msm_host->pdata->sdiowakeup_irq);
+ sdhci_msm_cfg_sdiowakeup_gpio_irq(host, false);
+ msm_host->sdio_pending_processing = false;
+ } else {
+ pr_err("%s: sdiowakeup_irq(%d) invalid\n",
+ mmc_hostname(host->mmc),
+ msm_host->pdata->sdiowakeup_irq);
+
+ }
+ }
+out:
+ if (ret)
+ pr_err("%s: %s: %sable wakeup: failed: %d gpio: %d\n",
+ mmc_hostname(host->mmc), __func__, enable ? "en" : "dis",
+ ret, msm_host->pdata->sdiowakeup_irq);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return ret;
+}
+
+static int sdhci_msm_runtime_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ ktime_t start = ktime_get();
+ int ret;
+
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+ goto defer_disable_host_irq;
+
+ sdhci_cfg_irq(host, false, true);
+
+defer_disable_host_irq:
+ disable_irq(msm_host->pwr_irq);
+
+ /*
+ * Remove the vote immediately only if the clocks are off: in that
+ * case, work to remove the vote may already be queued, but it may
+ * not complete before runtime suspend or system suspend.
+ */
+ if (!atomic_read(&msm_host->clks_on)) {
+ if (msm_host->msm_bus_vote.client_handle)
+ sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+ }
+
+ if (host->is_crypto_en) {
+ ret = sdhci_msm_ice_suspend(host);
+ if (ret < 0)
+ pr_err("%s: failed to suspend crypto engine %d\n",
+ mmc_hostname(host->mmc), ret);
+ }
+ trace_sdhci_msm_runtime_suspend(mmc_hostname(host->mmc), 0,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return 0;
+}
+
+static int sdhci_msm_runtime_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ ktime_t start = ktime_get();
+ int ret;
+
+ if (host->is_crypto_en) {
+ ret = sdhci_msm_enable_controller_clock(host);
+ if (ret) {
+ pr_err("%s: Failed to enable required clocks\n",
+ mmc_hostname(host->mmc));
+ goto skip_ice_resume;
+ }
+ ret = sdhci_msm_ice_resume(host);
+ if (ret)
+ pr_err("%s: failed to resume crypto engine %d\n",
+ mmc_hostname(host->mmc), ret);
+ }
+skip_ice_resume:
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+ goto defer_enable_host_irq;
+
+ sdhci_cfg_irq(host, true, true);
+
+defer_enable_host_irq:
+ enable_irq(msm_host->pwr_irq);
+
+ trace_sdhci_msm_runtime_resume(mmc_hostname(host->mmc), 0,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return 0;
+}
+
+static int sdhci_msm_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int ret = 0;
+ int sdio_cfg = 0;
+ ktime_t start = ktime_get();
+
+ if (gpio_is_valid(msm_host->pdata->status_gpio) &&
+ (msm_host->mmc->slot.cd_irq >= 0))
+ disable_irq(msm_host->mmc->slot.cd_irq);
+
+ if (pm_runtime_suspended(dev)) {
+ pr_debug("%s: %s: already runtime suspended\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+ ret = sdhci_msm_runtime_suspend(dev);
+out:
+ sdhci_msm_disable_controller_clock(host);
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+ sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, true);
+ if (sdio_cfg)
+ sdhci_cfg_irq(host, false, true);
+ }
+
+ trace_sdhci_msm_suspend(mmc_hostname(host->mmc), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return ret;
+}
+
+static int sdhci_msm_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int ret = 0;
+ int sdio_cfg = 0;
+ ktime_t start = ktime_get();
+
+ if (gpio_is_valid(msm_host->pdata->status_gpio) &&
+ (msm_host->mmc->slot.cd_irq >= 0))
+ enable_irq(msm_host->mmc->slot.cd_irq);
+
+ if (pm_runtime_suspended(dev)) {
+ pr_debug("%s: %s: runtime suspended, defer system resume\n",
+ mmc_hostname(host->mmc), __func__);
+ goto out;
+ }
+
+ ret = sdhci_msm_runtime_resume(dev);
+out:
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+ sdio_cfg = sdhci_msm_cfg_sdio_wakeup(host, false);
+ if (sdio_cfg)
+ sdhci_cfg_irq(host, true, true);
+ }
+
+ trace_sdhci_msm_resume(mmc_hostname(host->mmc), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)));
+ return ret;
+}
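The system sleep callbacks above reuse the runtime PM paths and skip the heavy work when the device is already runtime-suspended, which avoids suspending the hardware twice. The shape of that pattern (my_* names are illustrative):

	static int my_suspend(struct device *dev)
	{
		if (pm_runtime_suspended(dev))
			return 0;	/* already quiesced by runtime PM */
		return my_runtime_suspend(dev);
	}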
+
+static int sdhci_msm_suspend_noirq(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_msm_host *msm_host = pltfm_host->priv;
+ int ret = 0;
+
+ /*
+ * ksdioirqd may still be running, hence retry
+ * the suspend if the clocks are still ON
+ */
+ if (atomic_read(&msm_host->clks_on)) {
+ pr_warn("%s: %s: clock ON after suspend, aborting suspend\n",
+ mmc_hostname(host->mmc), __func__);
+ ret = -EAGAIN;
+ }
+
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card))
+ if (msm_host->sdio_pending_processing)
+ ret = -EBUSY;
+
+ return ret;
+}
+
+static const struct dev_pm_ops sdhci_msm_pmops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sdhci_msm_suspend, sdhci_msm_resume)
+ SET_RUNTIME_PM_OPS(sdhci_msm_runtime_suspend, sdhci_msm_runtime_resume,
+ NULL)
+ .suspend_noirq = sdhci_msm_suspend_noirq,
+};
+
+#define SDHCI_MSM_PMOPS (&sdhci_msm_pmops)
+
+#else
+#define SDHCI_MSM_PMOPS NULL
+#endif
+static const struct of_device_id sdhci_msm_dt_match[] = {
+ {.compatible = "qcom,sdhci-msm"},
+ {.compatible = "qcom,sdhci-msm-v5"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match);
+
static struct platform_driver sdhci_msm_driver = {
- .probe = sdhci_msm_probe,
- .remove = sdhci_msm_remove,
- .driver = {
- .name = "sdhci_msm",
- .of_match_table = sdhci_msm_dt_match,
+ .probe = sdhci_msm_probe,
+ .remove = sdhci_msm_remove,
+ .driver = {
+ .name = "sdhci_msm",
+ .owner = THIS_MODULE,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .of_match_table = sdhci_msm_dt_match,
+ .pm = SDHCI_MSM_PMOPS,
},
};
module_platform_driver(sdhci_msm_driver);
-MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Secure Digital Host Controller Interface driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-msm.h b/drivers/mmc/host/sdhci-msm.h
new file mode 100644
index 000000000000..79949c2c537f
--- /dev/null
+++ b/drivers/mmc/host/sdhci-msm.h
@@ -0,0 +1,245 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDHCI_MSM_H__
+#define __SDHCI_MSM_H__
+
+#include <linux/mmc/mmc.h>
+#include <linux/pm_qos.h>
+#include "sdhci-pltfm.h"
+
+/* This structure keeps information per regulator */
+struct sdhci_msm_reg_data {
+ /* voltage regulator handle */
+ struct regulator *reg;
+ /* regulator name */
+ const char *name;
+ /* voltage level to be set */
+ u32 low_vol_level;
+ u32 high_vol_level;
+ /* Load values for low power and high power mode */
+ u32 lpm_uA;
+ u32 hpm_uA;
+
+ /* is this regulator enabled? */
+ bool is_enabled;
+ /* does this regulator need to be always on? */
+ bool is_always_on;
+ /* is low power mode setting required for this regulator? */
+ bool lpm_sup;
+ bool set_voltage_sup;
+};
+
+/*
+ * This structure keeps information for all the
+ * regulators required for an SDCC slot.
+ */
+struct sdhci_msm_slot_reg_data {
+ /* keeps VDD/VCC regulator info */
+ struct sdhci_msm_reg_data *vdd_data;
+ /* keeps VDD IO regulator info */
+ struct sdhci_msm_reg_data *vdd_io_data;
+};
+
+struct sdhci_msm_gpio {
+ u32 no;
+ const char *name;
+ bool is_enabled;
+};
+
+struct sdhci_msm_gpio_data {
+ struct sdhci_msm_gpio *gpio;
+ u8 size;
+};
+
+struct sdhci_msm_pin_data {
+ /*
+ * = 1 if controller pins are using gpios
+ * = 0 if controller has dedicated MSM pads
+ */
+ u8 is_gpio;
+ struct sdhci_msm_gpio_data *gpio_data;
+};
+
+struct sdhci_pinctrl_data {
+ struct pinctrl *pctrl;
+ struct pinctrl_state *pins_active;
+ struct pinctrl_state *pins_sleep;
+};
+
+struct sdhci_msm_bus_voting_data {
+ struct msm_bus_scale_pdata *bus_pdata;
+ unsigned int *bw_vecs;
+ unsigned int bw_vecs_size;
+};
+
+struct sdhci_msm_cpu_group_map {
+ int nr_groups;
+ cpumask_t *mask;
+};
+
+struct sdhci_msm_pm_qos_latency {
+ s32 latency[SDHCI_POWER_POLICY_NUM];
+};
+
+struct sdhci_msm_pm_qos_data {
+ struct sdhci_msm_cpu_group_map cpu_group_map;
+ enum pm_qos_req_type irq_req_type;
+ int irq_cpu;
+ struct sdhci_msm_pm_qos_latency irq_latency;
+ struct sdhci_msm_pm_qos_latency *cmdq_latency;
+ struct sdhci_msm_pm_qos_latency *latency;
+ bool irq_valid;
+ bool cmdq_valid;
+ bool legacy_valid;
+};
+
+/*
+ * PM QoS for group voting management - each defined cpu group is associated
+ * with one instance of this structure.
+ */
+struct sdhci_msm_pm_qos_group {
+ struct pm_qos_request req;
+ struct delayed_work unvote_work;
+ atomic_t counter;
+ s32 latency;
+};
+
+/* PM QoS HW IRQ voting */
+struct sdhci_msm_pm_qos_irq {
+ struct pm_qos_request req;
+ struct delayed_work unvote_work;
+ struct device_attribute enable_attr;
+ struct device_attribute status_attr;
+ atomic_t counter;
+ s32 latency;
+ bool enabled;
+};
+
+struct sdhci_msm_pltfm_data {
+ /* Supported UHS-I Modes */
+ u32 caps;
+
+ /* More capabilities */
+ u32 caps2;
+
+ unsigned long mmc_bus_width;
+ struct sdhci_msm_slot_reg_data *vreg_data;
+ bool nonremovable;
+ bool nonhotplug;
+ bool largeaddressbus;
+ bool pin_cfg_sts;
+ struct sdhci_msm_pin_data *pin_data;
+ struct sdhci_pinctrl_data *pctrl_data;
+ int status_gpio; /* card detection GPIO that is configured as IRQ */
+ struct sdhci_msm_bus_voting_data *voting_data;
+ u32 *sup_clk_table;
+ unsigned char sup_clk_cnt;
+ int sdiowakeup_irq;
+ u32 *sup_ice_clk_table;
+ unsigned char sup_ice_clk_cnt;
+ u32 ice_clk_max;
+ u32 ice_clk_min;
+ struct sdhci_msm_pm_qos_data pm_qos_data;
+ bool sdr104_wa;
+};
+
+struct sdhci_msm_bus_vote {
+ uint32_t client_handle;
+ uint32_t curr_vote;
+ int min_bw_vote;
+ int max_bw_vote;
+ bool is_max_bw_needed;
+ struct delayed_work vote_work;
+ struct device_attribute max_bus_bw;
+};
+
+struct sdhci_msm_ice_data {
+ struct qcom_ice_variant_ops *vops;
+ struct platform_device *pdev;
+ int state;
+};
+
+struct sdhci_msm_debug_data {
+ struct mmc_host copy_mmc;
+ struct mmc_card copy_card;
+ struct sdhci_host copy_host;
+};
+
+struct sdhci_msm_host {
+ struct platform_device *pdev;
+ void __iomem *core_mem; /* MSM SDCC mapped address */
+ void __iomem *cryptoio; /* ICE HCI mapped address */
+ bool ice_hci_support;
+ int pwr_irq; /* power irq */
+ struct clk *clk; /* main SD/MMC bus clock */
+ struct clk *pclk; /* SDHC peripheral bus clock */
+ struct clk *bus_clk; /* SDHC bus voter clock */
+ struct clk *ff_clk; /* CDC calibration fixed feedback clock */
+ struct clk *sleep_clk; /* CDC calibration sleep clock */
+ struct clk *ice_clk; /* SDHC peripheral ICE clock */
+ atomic_t clks_on; /* Set if clocks are enabled */
+ struct sdhci_msm_pltfm_data *pdata;
+ struct mmc_host *mmc;
+ struct sdhci_msm_debug_data cached_data;
+ struct sdhci_pltfm_data sdhci_msm_pdata;
+ u32 curr_pwr_state;
+ u32 curr_io_level;
+ struct completion pwr_irq_completion;
+ struct sdhci_msm_bus_vote msm_bus_vote;
+ struct device_attribute polling;
+ u32 clk_rate; /* Keeps track of current clock rate that is set */
+ bool tuning_done;
+ bool calibration_done;
+ u8 saved_tuning_phase;
+ bool en_auto_cmd21;
+ struct device_attribute auto_cmd21_attr;
+ bool is_sdiowakeup_enabled;
+ bool sdio_pending_processing;
+ atomic_t controller_clock;
+ bool use_cdclp533;
+ bool use_updated_dll_reset;
+ bool use_14lpp_dll;
+ bool enhanced_strobe;
+ bool rclk_delay_fix;
+ u32 caps_0;
+ struct sdhci_msm_ice_data ice;
+ u32 ice_clk_rate;
+ struct sdhci_msm_pm_qos_group *pm_qos;
+ int pm_qos_prev_cpu;
+ struct device_attribute pm_qos_group_enable_attr;
+ struct device_attribute pm_qos_group_status_attr;
+ bool pm_qos_group_enable;
+ struct sdhci_msm_pm_qos_irq pm_qos_irq;
+ bool tuning_in_progress;
+ bool mci_removed;
+ const struct sdhci_msm_offset *offset;
+ bool core_3_0v_support;
+ bool pltfm_init_done;
+};
+
+extern char *saved_command_line;
+
+void sdhci_msm_pm_qos_irq_init(struct sdhci_host *host);
+void sdhci_msm_pm_qos_irq_vote(struct sdhci_host *host);
+void sdhci_msm_pm_qos_irq_unvote(struct sdhci_host *host, bool async);
+
+void sdhci_msm_pm_qos_cpu_init(struct sdhci_host *host,
+ struct sdhci_msm_pm_qos_latency *latency);
+void sdhci_msm_pm_qos_cpu_vote(struct sdhci_host *host,
+ struct sdhci_msm_pm_qos_latency *latency, int cpu);
+bool sdhci_msm_pm_qos_cpu_unvote(struct sdhci_host *host, int cpu, bool async);
+
+#endif /* __SDHCI_MSM_H__ */
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 62d37d2ac557..0033fea0a800 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -30,8 +30,12 @@
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>
+
+#include <trace/events/mmc.h>
#include "sdhci.h"
+#include "cmdq_hci.h"
#define DRIVER_NAME "sdhci"
@@ -45,6 +49,9 @@
#define MAX_TUNING_LOOP 40
+#define SDHCI_DBG_DUMP_RS_INTERVAL (10 * HZ)
+#define SDHCI_DBG_DUMP_RS_BURST 2
+
static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;
@@ -52,10 +59,13 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_finish_command(struct sdhci_host *);
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
+static int sdhci_enhanced_strobe(struct mmc_host *mmc);
static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
struct mmc_data *data);
static int sdhci_do_get_cd(struct sdhci_host *host);
+static bool sdhci_check_state(struct sdhci_host *);
+static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable);
#ifdef CONFIG_PM
static int sdhci_runtime_pm_get(struct sdhci_host *host);
@@ -79,60 +89,102 @@ static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
}
#endif
+static void sdhci_dump_state(struct sdhci_host *host)
+{
+ struct mmc_host *mmc = host->mmc;
+
+ #ifdef CONFIG_MMC_CLKGATE
+ pr_info("%s: clk: %d clk-gated: %d claimer: %s pwr: %d irq-status-flag: %d\n",
+ mmc_hostname(mmc), host->clock, mmc->clk_gated,
+ mmc->claimer->comm, host->pwr,
+ (host->flags & SDHCI_HOST_IRQ_STATUS));
+ #else
+ pr_info("%s: clk: %d claimer: %s pwr: %d\n",
+ mmc_hostname(mmc), host->clock,
+ mmc->claimer->comm, host->pwr);
+ #endif
+ pr_info("%s: rpmstatus[pltfm](runtime-suspend:usage_count:disable_depth)(%d:%d:%d)\n",
+ mmc_hostname(mmc), mmc->parent->power.runtime_status,
+ atomic_read(&mmc->parent->power.usage_count),
+ mmc->parent->power.disable_depth);
+}
+
static void sdhci_dumpregs(struct sdhci_host *host)
{
- pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+ MMC_TRACE(host->mmc,
+ "%s: 0x04=0x%08x 0x06=0x%08x 0x0E=0x%08x 0x30=0x%08x 0x34=0x%08x 0x38=0x%08x\n",
+ __func__,
+ sdhci_readw(host, SDHCI_BLOCK_SIZE),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT),
+ sdhci_readw(host, SDHCI_COMMAND),
+ sdhci_readl(host, SDHCI_INT_STATUS),
+ sdhci_readl(host, SDHCI_INT_ENABLE),
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+ mmc_stop_tracing(host->mmc);
+
+ pr_info(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
mmc_hostname(host->mmc));
- pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Sys addr: 0x%08x | Version: 0x%08x\n",
sdhci_readl(host, SDHCI_DMA_ADDRESS),
sdhci_readw(host, SDHCI_HOST_VERSION));
- pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt: 0x%08x\n",
sdhci_readw(host, SDHCI_BLOCK_SIZE),
sdhci_readw(host, SDHCI_BLOCK_COUNT));
- pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
sdhci_readl(host, SDHCI_ARGUMENT),
sdhci_readw(host, SDHCI_TRANSFER_MODE));
- pr_debug(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Present: 0x%08x | Host ctl: 0x%08x\n",
sdhci_readl(host, SDHCI_PRESENT_STATE),
sdhci_readb(host, SDHCI_HOST_CONTROL));
- pr_debug(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Power: 0x%08x | Blk gap: 0x%08x\n",
sdhci_readb(host, SDHCI_POWER_CONTROL),
sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
- pr_debug(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Wake-up: 0x%08x | Clock: 0x%08x\n",
sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
sdhci_readw(host, SDHCI_CLOCK_CONTROL));
- pr_debug(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Timeout: 0x%08x | Int stat: 0x%08x\n",
sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
sdhci_readl(host, SDHCI_INT_STATUS));
- pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
sdhci_readl(host, SDHCI_INT_ENABLE),
sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
- pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
- sdhci_readw(host, SDHCI_ACMD12_ERR),
+ pr_info(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+ host->auto_cmd_err_sts,
sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
- pr_debug(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Caps: 0x%08x | Caps_1: 0x%08x\n",
sdhci_readl(host, SDHCI_CAPABILITIES),
sdhci_readl(host, SDHCI_CAPABILITIES_1));
- pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n",
sdhci_readw(host, SDHCI_COMMAND),
sdhci_readl(host, SDHCI_MAX_CURRENT));
- pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+ pr_info(DRIVER_NAME ": Resp 1: 0x%08x | Resp 0: 0x%08x\n",
+ sdhci_readl(host, SDHCI_RESPONSE + 0x4),
+ sdhci_readl(host, SDHCI_RESPONSE));
+ pr_info(DRIVER_NAME ": Resp 3: 0x%08x | Resp 2: 0x%08x\n",
+ sdhci_readl(host, SDHCI_RESPONSE + 0xC),
+ sdhci_readl(host, SDHCI_RESPONSE + 0x8));
+ pr_info(DRIVER_NAME ": Host ctl2: 0x%08x\n",
sdhci_readw(host, SDHCI_HOST_CONTROL2));
if (host->flags & SDHCI_USE_ADMA) {
if (host->flags & SDHCI_USE_64_BIT_DMA)
- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
+ pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
readl(host->ioaddr + SDHCI_ADMA_ERROR),
readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
else
- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+ pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
readl(host->ioaddr + SDHCI_ADMA_ERROR),
readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
}
- pr_debug(DRIVER_NAME ": ===========================================\n");
+ host->mmc->err_occurred = true;
+
+ if (host->ops->dump_vendor_regs)
+ host->ops->dump_vendor_regs(host);
+ sdhci_dump_state(host);
+ pr_info(DRIVER_NAME ": ===========================================\n");
}
/*****************************************************************************\
@@ -177,6 +229,7 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
{
unsigned long timeout;
+retry_reset:
sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
if (mask & SDHCI_RESET_ALL) {
@@ -187,19 +240,60 @@ void sdhci_reset(struct sdhci_host *host, u8 mask)
}
/* Wait max 100 ms */
- timeout = 100;
+ timeout = 100000;
+
+ if (host->ops->check_power_status && host->pwr &&
+ (mask & SDHCI_RESET_ALL))
+ host->ops->check_power_status(host, REQ_BUS_OFF);
+
+ /* clear pending normal/error interrupt status */
+ sdhci_writel(host, sdhci_readl(host, SDHCI_INT_STATUS),
+ SDHCI_INT_STATUS);
/* hw clears the bit when it's done */
while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
if (timeout == 0) {
pr_err("%s: Reset 0x%x never completed.\n",
mmc_hostname(host->mmc), (int)mask);
+ MMC_TRACE(host->mmc, "%s: Reset 0x%x never completed\n",
+ __func__, (int)mask);
+ if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND)
+ && host->ops->reset_workaround) {
+ if (!host->reset_wa_applied) {
+ /*
+ * apply the workaround and issue
+ * reset again.
+ */
+ host->ops->reset_workaround(host, 1);
+ host->reset_wa_applied = 1;
+ host->reset_wa_cnt++;
+ goto retry_reset;
+ } else {
+ pr_err("%s: Reset 0x%x failed with workaround\n",
+ mmc_hostname(host->mmc),
+ (int)mask);
+ /* clear the workaround */
+ host->ops->reset_workaround(host, 0);
+ host->reset_wa_applied = 0;
+ }
+ }
+
sdhci_dumpregs(host);
return;
}
timeout--;
- mdelay(1);
+ udelay(1);
}
+
+ if ((host->quirks2 & SDHCI_QUIRK2_USE_RESET_WORKAROUND) &&
+ host->ops->reset_workaround && host->reset_wa_applied) {
+ pr_info("%s: Reset 0x%x successful with workaround\n",
+ mmc_hostname(host->mmc), (int)mask);
+ /* clear the workaround */
+ host->ops->reset_workaround(host, 0);
+ host->reset_wa_applied = 0;
+ }
+
}
EXPORT_SYMBOL_GPL(sdhci_reset);
@@ -221,6 +315,8 @@ static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
/* Resetting the controller clears many */
host->preset_enabled = false;
}
+ if (host->is_crypto_en)
+ host->crypto_reset_reqd = true;
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
@@ -236,7 +332,7 @@ static void sdhci_init(struct sdhci_host *host, int soft)
SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
- SDHCI_INT_RESPONSE;
+ SDHCI_INT_RESPONSE | SDHCI_INT_AUTO_CMD_ERR;
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
@@ -279,9 +375,12 @@ static void sdhci_led_control(struct led_classdev *led,
struct sdhci_host *host = container_of(led, struct sdhci_host, led);
unsigned long flags;
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ return;
+
spin_lock_irqsave(&host->lock, flags);
- if (host->runtime_suspended)
+ if (host->runtime_suspended || sdhci_check_state(host))
goto out;
if (brightness == LED_OFF)
@@ -598,7 +697,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host,
void *align;
char *buffer;
unsigned long flags;
- bool has_unaligned;
+ bool has_unaligned = false;
+ u32 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+
+ trace_mmc_adma_table_post(command, data->sg_len);
if (data->flags & MMC_DATA_READ)
direction = DMA_FROM_DEVICE;
@@ -648,6 +750,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
u8 count;
struct mmc_data *data = cmd->data;
unsigned target_timeout, current_timeout;
+ u32 curr_clk = 0; /* In KHz */
/*
* If the host controller provides us with an incorrect timeout
@@ -693,7 +796,14 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
* (1) / (2) > 2^6
*/
count = 0;
- current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+ if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK) {
+ curr_clk = host->clock / 1000;
+ if (host->quirks2 & SDHCI_QUIRK2_DIVIDE_TOUT_BY_4)
+ curr_clk /= 4;
+ current_timeout = (1 << 13) * 1000 / curr_clk;
+ } else {
+ current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+ }
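+
+ /*
+ * Worked example: with a 200 MHz base clock and
+ * SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 set, curr_clk = 200000 / 4 =
+ * 50000 KHz, so the starting current_timeout is
+ * (1 << 13) * 1000 / 50000 ~= 163 us. The loop below doubles
+ * current_timeout until it covers target_timeout, and the final
+ * count programs a timeout of 2^(13 + count) clock cycles into
+ * SDHCI_TIMEOUT_CONTROL.
+ */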
while (current_timeout < target_timeout) {
count++;
current_timeout <<= 1;
@@ -701,10 +811,12 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
break;
}
- if (count >= 0xF) {
- DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
- mmc_hostname(host->mmc), count, cmd->opcode);
- count = 0xE;
+ if (!(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT)) {
+ if (count >= 0xF) {
+ DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+ mmc_hostname(host->mmc), count, cmd->opcode);
+ count = 0xE;
+ }
}
return count;
@@ -736,6 +848,17 @@ static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
}
}
+static void sdhci_set_blk_size_reg(struct sdhci_host *host, unsigned int blksz,
+ unsigned int sdma_boundary)
+{
+ if (host->flags & SDHCI_USE_ADMA)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(0, blksz),
+ SDHCI_BLOCK_SIZE);
+ else
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(sdma_boundary, blksz),
+ SDHCI_BLOCK_SIZE);
+}
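+
+/*
+ * Illustration: SDHCI_MAKE_BLKSZ() packs the SDMA buffer boundary into
+ * bits 14:12 and the block size into bits 11:0 of SDHCI_BLOCK_SIZE.
+ * ADMA does its own scatter-gather and ignores the SDMA boundary, so it
+ * is always programmed as 0 there; e.g. a 512-byte block size in ADMA
+ * mode writes SDHCI_MAKE_BLKSZ(0, 512) == 0x0200.
+ */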
+
static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
u8 ctrl;
@@ -751,7 +874,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
return;
/* Sanity checks */
- BUG_ON(data->blksz * data->blocks > 524288);
+ BUG_ON(data->blksz * data->blocks > host->mmc->max_req_size);
BUG_ON(data->blksz > host->mmc->max_blk_size);
BUG_ON(data->blocks > 65535);
@@ -762,6 +885,10 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))
host->flags |= SDHCI_REQ_USE_DMA;
+ if ((host->quirks2 & SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING) &&
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
+ host->flags &= ~SDHCI_REQ_USE_DMA;
+
/*
* FIXME: This doesn't account for merging when mapping the
* scatterlist.
@@ -828,6 +955,7 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
if (host->flags & SDHCI_REQ_USE_DMA) {
if (host->flags & SDHCI_USE_ADMA) {
+ trace_mmc_adma_table_pre(cmd->opcode, data->sg_len);
ret = sdhci_adma_table_pre(host, data);
if (ret) {
/*
@@ -898,9 +1026,13 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
sdhci_set_transfer_irqs(host);
/* Set the DMA boundary value and block size */
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG,
- data->blksz), SDHCI_BLOCK_SIZE);
+ sdhci_set_blk_size_reg(host, data->blksz, SDHCI_DEFAULT_BOUNDARY_ARG);
sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ MMC_TRACE(host->mmc,
+ "%s: 0x28=0x%08x 0x3E=0x%08x 0x06=0x%08x\n", __func__,
+ sdhci_readb(host, SDHCI_HOST_CONTROL),
+ sdhci_readw(host, SDHCI_HOST_CONTROL2),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT));
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
@@ -942,12 +1074,26 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
}
}
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
mode |= SDHCI_TRNS_READ;
+ if (host->ops->toggle_cdr) {
+ if ((cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200) ||
+ (cmd->opcode == MMC_SEND_TUNING_BLOCK_HS400) ||
+ (cmd->opcode == MMC_SEND_TUNING_BLOCK))
+ host->ops->toggle_cdr(host, false);
+ else
+ host->ops->toggle_cdr(host, true);
+ }
+ }
+ if (host->ops->toggle_cdr && (data->flags & MMC_DATA_WRITE))
+ host->ops->toggle_cdr(host, false);
if (host->flags & SDHCI_REQ_USE_DMA)
mode |= SDHCI_TRNS_DMA;
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+ MMC_TRACE(host->mmc, "%s: 0x00=0x%08x 0x0C=0x%08x\n", __func__,
+ sdhci_readw(host, SDHCI_ARGUMENT2),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE));
}
static void sdhci_finish_data(struct sdhci_host *host)
@@ -959,6 +1105,8 @@ static void sdhci_finish_data(struct sdhci_host *host)
data = host->data;
host->data = NULL;
+ MMC_TRACE(host->mmc, "%s: 0x24=0x%08x\n", __func__,
+ sdhci_readl(host, SDHCI_PRESENT_STATE));
if (host->flags & SDHCI_REQ_USE_DMA) {
if (host->flags & SDHCI_USE_ADMA)
sdhci_adma_table_post(host, data);
@@ -1017,7 +1165,7 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
WARN_ON(host->cmd);
/* Wait max 10 ms */
- timeout = 10;
+ timeout = 10000;
mask = SDHCI_CMD_INHIBIT;
if ((cmd->data != NULL) || (cmd->flags & MMC_RSP_BUSY))
@@ -1032,13 +1180,16 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
if (timeout == 0) {
pr_err("%s: Controller never released "
"inhibit bit(s).\n", mmc_hostname(host->mmc));
+ MMC_TRACE(host->mmc,
+ "%s :Controller never released inhibit bit(s)\n",
+ __func__);
sdhci_dumpregs(host);
cmd->error = -EIO;
tasklet_schedule(&host->finish_tasklet);
return;
}
timeout--;
- mdelay(1);
+ udelay(1);
}
timeout = jiffies;
@@ -1084,7 +1235,15 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
flags |= SDHCI_CMD_DATA;
+ if (cmd->data)
+ host->data_start_time = ktime_get();
+ trace_mmc_cmd_rw_start(cmd->opcode, cmd->arg, cmd->flags);
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
+ MMC_TRACE(host->mmc,
+ "%s: updated 0x8=0x%08x 0xC=0x%08x 0xE=0x%08x\n", __func__,
+ sdhci_readl(host, SDHCI_ARGUMENT),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE),
+ sdhci_readw(host, SDHCI_COMMAND));
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1105,15 +1264,20 @@ static void sdhci_finish_command(struct sdhci_host *host)
sdhci_readb(host,
SDHCI_RESPONSE + (3-i)*4-1);
}
+ MMC_TRACE(host->mmc,
+ "%s: resp 0: 0x%08x resp 1: 0x%08x resp 2: 0x%08x resp 3: 0x%08x\n",
+ __func__, host->cmd->resp[0], host->cmd->resp[1],
+ host->cmd->resp[2], host->cmd->resp[3]);
} else {
host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+ MMC_TRACE(host->mmc, "%s: resp 0: 0x%08x\n",
+ __func__, host->cmd->resp[0]);
}
}
- host->cmd->error = 0;
-
/* Finished CMD23, now send actual command. */
if (host->cmd == host->mrq->sbc) {
+ host->cmd->error = 0;
host->cmd = NULL;
sdhci_send_command(host, host->mrq->cmd);
} else {
@@ -1173,7 +1337,8 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
host->mmc->actual_clock = 0;
- sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
+ if (host->clock)
+ sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST)
mdelay(1);
@@ -1257,6 +1422,10 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
clock_set:
if (real_div)
host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+
+ if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+ div = 0;
+
clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1264,19 +1433,19 @@ clock_set:
sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
- timeout = 20;
+ timeout = 20000;
while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
& SDHCI_CLOCK_INT_STABLE)) {
if (timeout == 0) {
pr_err("%s: Internal clock never "
"stabilised.\n", mmc_hostname(host->mmc));
+ MMC_TRACE(host->mmc,
+ "%s: Internal clock never stabilised.\n", __func__);
sdhci_dumpregs(host);
return;
}
timeout--;
- spin_unlock_irq(&host->lock);
- usleep_range(900, 1100);
- spin_lock_irq(&host->lock);
+ udelay(1);
}
clk |= SDHCI_CLOCK_CARD_EN;
@@ -1330,6 +1499,8 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
if (pwr == 0) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_BUS_OFF);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_off(host);
vdd = 0;
@@ -1338,20 +1509,27 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
* Spec says that we should clear the power reg before setting
* a new value. Some controllers don't seem to like this though.
*/
- if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+ if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
-
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_BUS_OFF);
+ }
/*
* At least the Marvell CaFe chip gets confused if we set the
* voltage and set turn on power at the same time, so set the
* voltage first.
*/
- if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+ if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) {
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_BUS_ON);
+ }
pwr |= SDHCI_POWER_ON;
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_BUS_ON);
if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
sdhci_runtime_pm_bus_on(host);
@@ -1371,6 +1549,148 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
* *
\*****************************************************************************/
+static int sdhci_enable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->platform_bus_voting)
+ host->ops->platform_bus_voting(host, 1);
+
+ return 0;
+}
+
+static int sdhci_disable(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->platform_bus_voting)
+ host->ops->platform_bus_voting(host, 0);
+
+ return 0;
+}
+
+static void sdhci_notify_halt(struct mmc_host *mmc, bool halt)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ pr_debug("%s: halt notification was sent, halt=%d\n",
+ mmc_hostname(mmc), halt);
+ if (host->flags & SDHCI_USE_64_BIT_DMA) {
+ if (halt)
+ host->desc_sz = 16;
+ else
+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ;
+ }
+}
+
+static inline void sdhci_update_power_policy(struct sdhci_host *host,
+ enum sdhci_power_policy policy)
+{
+ host->power_policy = policy;
+}
+
+static int sdhci_notify_load(struct mmc_host *mmc, enum mmc_load state)
+{
+ int err = 0;
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ switch (state) {
+ case MMC_LOAD_HIGH:
+ sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE);
+ break;
+ case MMC_LOAD_LOW:
+ sdhci_update_power_policy(host, SDHCI_POWER_SAVE_MODE);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (host->ops->notify_load)
+ err = host->ops->notify_load(host, state);
+
+ return err;
+}
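+
+/*
+ * Usage sketch (illustrative): the core invokes this hook on load
+ * changes, e.g.
+ *
+ * mmc->ops->notify_load(mmc, MMC_LOAD_HIGH);
+ *
+ * which selects SDHCI_PERFORMANCE_MODE locally and then forwards the
+ * state to the platform's ops->notify_load() hook, if one is set.
+ */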
+
+static bool sdhci_check_state(struct sdhci_host *host)
+{
+ return !host->clock || !host->pwr;
+}
+
+static bool sdhci_check_auto_tuning(struct sdhci_host *host,
+ struct mmc_command *cmd)
+{
+ if (((cmd->opcode != MMC_READ_SINGLE_BLOCK) &&
+ (cmd->opcode != MMC_READ_MULTIPLE_BLOCK) &&
+ (cmd->opcode != SD_IO_RW_EXTENDED)) || (host->clock < 100000000))
+ return false;
+ else if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
+ host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
+ return true;
+ else
+ return false;
+}
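+
+/*
+ * For example, MMC_READ_MULTIPLE_BLOCK at 200 MHz in HS200 (or SDR104)
+ * mode qualifies for auto-tuning, while the same read at 50 MHz, or
+ * any other opcode, does not.
+ */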
+
+static int sdhci_get_tuning_cmd(struct sdhci_host *host)
+{
+ if (!host->mmc || !host->mmc->card)
+ return 0;
+ /*
+ * If we get here, all the preceding checks have already passed,
+ * and the card is either an eMMC or an SD/SDIO card.
+ */
+ if (mmc_card_mmc(host->mmc->card))
+ return MMC_SEND_TUNING_BLOCK_HS200;
+ else
+ return MMC_SEND_TUNING_BLOCK;
+}
+
+static int sdhci_crypto_cfg(struct sdhci_host *host, struct mmc_request *mrq,
+ u32 slot)
+{
+ int err = 0;
+
+ if (host->crypto_reset_reqd && host->ops->crypto_engine_reset) {
+ err = host->ops->crypto_engine_reset(host);
+ if (err) {
+ pr_err("%s: crypto reset failed\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ host->crypto_reset_reqd = false;
+ }
+
+ if (host->ops->crypto_engine_cfg) {
+ err = host->ops->crypto_engine_cfg(host, mrq, slot);
+ if (err) {
+ pr_err("%s: failed to configure crypto\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ }
+out:
+ return err;
+}
+
+static int sdhci_crypto_cfg_end(struct sdhci_host *host,
+ struct mmc_request *mrq)
+{
+ int err = 0;
+
+ if (host->ops->crypto_engine_cfg_end) {
+ err = host->ops->crypto_engine_cfg_end(host, mrq);
+ if (err) {
+ pr_err("%s: failed to configure crypto\n",
+ mmc_hostname(host->mmc));
+ return err;
+ }
+ }
+ return 0;
+}
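+
+/*
+ * The crypto hooks are optional: sdhci_crypto_cfg() runs before a
+ * request is issued (resetting the engine first if a prior controller
+ * reset invalidated its state) and sdhci_crypto_cfg_end() runs once
+ * the request completes. A platform driver opts in with something like
+ * the following sketch, where the my_ice_* callbacks are hypothetical
+ * vendor implementations:
+ *
+ * static const struct sdhci_ops my_ops = {
+ * .crypto_engine_cfg = my_ice_cfg,
+ * .crypto_engine_cfg_end = my_ice_cfg_end,
+ * .crypto_engine_reset = my_ice_reset,
+ * };
+ */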
+
static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct sdhci_host *host;
@@ -1380,16 +1700,41 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host = mmc_priv(mmc);
sdhci_runtime_pm_get(host);
+ if (sdhci_check_state(host)) {
+ sdhci_dump_state(host);
+ WARN(1, "sdhci in bad state");
+ mrq->cmd->error = -EIO;
+ if (mrq->data)
+ mrq->data->error = -EIO;
+ mmc_request_done(host->mmc, mrq);
+ sdhci_runtime_pm_put(host);
+ return;
+ }
- /* Firstly check card presence */
- present = mmc->ops->get_cd(mmc);
+ /*
+ * Firstly check card presence from cd-gpio. The return could
+ * be one of the following possibilities:
+ * negative: cd-gpio is not available
+ * zero: cd-gpio is used, and card is removed
+ * one: cd-gpio is used, and card is present
+ */
+ present = sdhci_do_get_cd(host);
+ if (present < 0) {
+ /* If polling, assume that the card is always present. */
+ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+ present = 1;
+ else
+ present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+ SDHCI_CARD_PRESENT;
+ }
spin_lock_irqsave(&host->lock, flags);
WARN_ON(host->mrq != NULL);
#ifndef SDHCI_USE_LEDS_CLASS
- sdhci_activate_led(host);
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+ sdhci_activate_led(host);
#endif
/*
@@ -1409,6 +1754,22 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->mrq->cmd->error = -ENOMEDIUM;
tasklet_schedule(&host->finish_tasklet);
} else {
+ if (host->ops->config_auto_tuning_cmd) {
+ if (sdhci_check_auto_tuning(host, mrq->cmd))
+ host->ops->config_auto_tuning_cmd(host, true,
+ sdhci_get_tuning_cmd(host));
+ else
+ host->ops->config_auto_tuning_cmd(host, false,
+ sdhci_get_tuning_cmd(host));
+ }
+
+ if (host->is_crypto_en) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ if (sdhci_crypto_cfg(host, mrq, 0))
+ goto end_req;
+ spin_lock_irqsave(&host->lock, flags);
+ }
+
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
else
@@ -1417,6 +1778,16 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
+ return;
+end_req:
+ mrq->cmd->error = -EIO;
+ if (mrq->data)
+ mrq->data->error = -EIO;
+ host->mrq = NULL;
+ MMC_TRACE(host->mmc, "Request failed due to ice config\n");
+ sdhci_dumpregs(host);
+ mmc_request_done(host->mmc, mrq);
+ sdhci_runtime_pm_put(host);
}
void sdhci_set_bus_width(struct sdhci_host *host, int width)
@@ -1465,38 +1836,50 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
}
EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
+void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync)
+{
+ if (enable && !(host->flags & SDHCI_HOST_IRQ_STATUS)) {
+ enable_irq(host->irq);
+ host->flags |= SDHCI_HOST_IRQ_STATUS;
+ } else if (!enable && (host->flags & SDHCI_HOST_IRQ_STATUS)) {
+ if (sync)
+ disable_irq(host->irq);
+ else
+ disable_irq_nosync(host->irq);
+ host->flags &= ~SDHCI_HOST_IRQ_STATUS;
+ }
+}
+EXPORT_SYMBOL(sdhci_cfg_irq);
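+
+/*
+ * sdhci_cfg_irq() is effectively idempotent: SDHCI_HOST_IRQ_STATUS
+ * tracks whether host->irq is currently enabled, so repeated calls do
+ * not unbalance the enable_irq()/disable_irq() depth. A caller that
+ * must be sure no handler is still running disables synchronously,
+ * e.g. sdhci_cfg_irq(host, false, true), which uses disable_irq() and
+ * waits for a running handler to finish.
+ */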
+
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
unsigned long flags;
u8 ctrl;
struct mmc_host *mmc = host->mmc;
-
- spin_lock_irqsave(&host->lock, flags);
+ int ret;
if (host->flags & SDHCI_DEVICE_DEAD) {
- spin_unlock_irqrestore(&host->lock, flags);
if (!IS_ERR(mmc->supply.vmmc) &&
ios->power_mode == MMC_POWER_OFF)
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
return;
}
- /*
- * Reset the chip on each power off.
- * Should clear out any weird states.
- */
- if (ios->power_mode == MMC_POWER_OFF) {
- sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
- sdhci_reinit(host);
- }
-
if (host->version >= SDHCI_SPEC_300 &&
(ios->power_mode == MMC_POWER_UP) &&
!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
sdhci_enable_preset_value(host, false);
- if (!ios->clock || ios->clock != host->clock) {
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->mmc && host->mmc->card &&
+ mmc_card_sdio(host->mmc->card))
+ sdhci_cfg_irq(host, false, false);
+
+ if (ios->clock &&
+ ((ios->clock != host->clock) || (ios->timing != host->timing))) {
+ spin_unlock_irqrestore(&host->lock, flags);
host->ops->set_clock(host, ios->clock);
+ spin_lock_irqsave(&host->lock, flags);
host->clock = ios->clock;
if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
@@ -1511,8 +1894,45 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
host->mmc->max_busy_timeout /= host->timeout_clk;
}
}
+ if (ios->clock && host->sdio_irq_async_status)
+ sdhci_enable_sdio_irq_nolock(host, false);
+ spin_unlock_irqrestore(&host->lock, flags);
- sdhci_set_power(host, ios->power_mode, ios->vdd);
+ /*
+ * The controller clocks may be off during power-up, and we could end
+ * up enabling the card clock before power reaches the card. Hence,
+ * during MMC_POWER_UP, enable the controller clock and turn on the
+ * regulators; mmc_power_up() provides the necessary delay before the
+ * clocks to the card are turned on.
+ */
+ if (ios->power_mode & MMC_POWER_UP) {
+ if (host->ops->enable_controller_clock) {
+ ret = host->ops->enable_controller_clock(host);
+ if (ret) {
+ pr_err("%s: enabling controller clock: failed: %d\n",
+ mmc_hostname(host->mmc), ret);
+ } else {
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
+ }
+ }
+ }
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (!host->clock) {
+ if (host->mmc && host->mmc->card &&
+ mmc_card_sdio(host->mmc->card))
+ sdhci_cfg_irq(host, true, false);
+ spin_unlock_irqrestore(&host->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ if (!host->ops->enable_controller_clock && (ios->power_mode &
+ (MMC_POWER_UP |
+ MMC_POWER_ON)))
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
+
+ spin_lock_irqsave(&host->lock, flags);
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1580,7 +2000,11 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
/* Re-enable SD Clock */
- host->ops->set_clock(host, host->clock);
+ if (ios->clock) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ host->ops->set_clock(host, host->clock);
+ spin_lock_irqsave(&host->lock, flags);
+ }
}
/* Reset SD Clock Enable */
@@ -1607,10 +2031,15 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
}
/* Re-enable SD Clock */
- host->ops->set_clock(host, host->clock);
+ if (ios->clock) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ host->ops->set_clock(host, host->clock);
+ spin_lock_irqsave(&host->lock, flags);
+ }
} else
sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ spin_unlock_irqrestore(&host->lock, flags);
/*
* Some (ENE) controllers go apeshit on some ios operation,
* signalling timeout and CRC errors even on CMD0. Resetting
@@ -1619,8 +2048,25 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
- mmiowb();
+ /*
+ * Reset the chip on each power off.
+ * Should clear out any weird states.
+ */
+ if (ios->power_mode == MMC_POWER_OFF) {
+ sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
+ sdhci_reinit(host);
+ sdhci_set_power(host, ios->power_mode, ios->vdd);
+ }
+ if (!ios->clock)
+ host->ops->set_clock(host, ios->clock);
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->mmc && host->mmc->card &&
+ mmc_card_sdio(host->mmc->card))
+ sdhci_cfg_irq(host, true, false);
spin_unlock_irqrestore(&host->lock, flags);
+
+ mmiowb();
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -1732,16 +2178,28 @@ static int sdhci_get_ro(struct mmc_host *mmc)
static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
{
- if (!(host->flags & SDHCI_DEVICE_DEAD)) {
+ u16 ctrl = 0;
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ return;
+
+ if (mmc_card_and_host_support_async_int(host->mmc)) {
+ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
if (enable)
- host->ier |= SDHCI_INT_CARD_INT;
+ ctrl |= SDHCI_CTRL_ASYNC_INT_ENABLE;
else
- host->ier &= ~SDHCI_INT_CARD_INT;
-
- sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
- sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
- mmiowb();
+ ctrl &= ~SDHCI_CTRL_ASYNC_INT_ENABLE;
+ sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
}
+
+ if (enable)
+ host->ier |= SDHCI_INT_CARD_INT;
+ else
+ host->ier &= ~SDHCI_INT_CARD_INT;
+
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ mmiowb();
}
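+
+/*
+ * When both card and host support async interrupts (SDHCI_CAN_ASYNC_INT
+ * in the capabilities, surfaced as MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE),
+ * the card can raise an SDIO interrupt while the SD clock is gated;
+ * sdhci_irq() then parks host->irq until the threaded handler has
+ * serviced the card.
+ */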
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
@@ -1784,6 +2242,8 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
ctrl &= ~SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_IO_HIGH);
if (!IS_ERR(mmc->supply.vqmmc)) {
ret = regulator_set_voltage(mmc->supply.vqmmc, 2700000,
@@ -1823,6 +2283,8 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host,
*/
ctrl |= SDHCI_CTRL_VDD_180;
sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+ if (host->ops->check_power_status)
+ host->ops->check_power_status(host, REQ_IO_LOW);
/* Some controller need to do more when switching */
if (host->ops->voltage_switch)
@@ -1893,6 +2355,19 @@ static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
return 0;
}
+static int sdhci_enhanced_strobe(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err = 0;
+
+ sdhci_runtime_pm_get(host);
+ if (host->ops->enhanced_strobe)
+ err = host->ops->enhanced_strobe(host);
+ sdhci_runtime_pm_put(host);
+
+ return err;
+}
+
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1922,9 +2397,10 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
switch (host->timing) {
/* HS400 tuning is done in HS200 mode */
case MMC_TIMING_MMC_HS400:
- err = -EINVAL;
- goto out_unlock;
-
+ if (!(mmc->caps2 & MMC_CAP2_HS400_POST_TUNING)) {
+ err = -EINVAL;
+ goto out_unlock;
+ }
case MMC_TIMING_MMC_HS200:
/*
* Periodic re-tuning for HS400 is not expected to be needed, so
@@ -1950,7 +2426,13 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
if (host->ops->platform_execute_tuning) {
spin_unlock_irqrestore(&host->lock, flags);
+ /*
+ * Make sure re-tuning won't get triggered for the CRC errors
+ * occurred while executing tuning
+ */
+ mmc_retune_disable(mmc);
err = host->ops->platform_execute_tuning(host, opcode);
+ mmc_retune_enable(mmc);
sdhci_runtime_pm_put(host);
return err;
}
@@ -2002,14 +2484,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
*/
if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_blk_size_reg(host, 128, 7);
else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_blk_size_reg(host, 64, 7);
} else {
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
- SDHCI_BLOCK_SIZE);
+ sdhci_set_blk_size_reg(host, 64, 7);
}
/*
@@ -2132,6 +2611,9 @@ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
if (host->version < SDHCI_SPEC_300)
return;
+ if (host->quirks2 & SDHCI_QUIRK2_BROKEN_PRESET_VALUE)
+ return;
+
/*
* We only enable or disable Preset Value if they are not already
* enabled or disabled respectively. Otherwise, we bail out.
@@ -2169,6 +2651,8 @@ static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
DMA_TO_DEVICE : DMA_FROM_DEVICE);
data->host_cookie = COOKIE_UNMAPPED;
}
+ if (host->ops->post_req)
+ host->ops->post_req(host, mrq);
}
static int sdhci_pre_dma_transfer(struct sdhci_host *host,
@@ -2205,6 +2689,9 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
if (host->flags & SDHCI_REQ_USE_DMA)
sdhci_pre_dma_transfer(host, mrq->data);
+
+ if (host->ops->pre_req)
+ host->ops->pre_req(host, mrq);
}
static void sdhci_card_event(struct mmc_host *mmc)
@@ -2238,7 +2725,29 @@ static void sdhci_card_event(struct mmc_host *mmc)
spin_unlock_irqrestore(&host->lock, flags);
}
+static int sdhci_late_init(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->init)
+ host->ops->init(host);
+
+ return 0;
+}
+
+static void sdhci_force_err_irq(struct mmc_host *mmc, u64 errmask)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u16 mask = errmask & 0xFFFF;
+
+ pr_err("%s: Force raise error mask:0x%04x\n", __func__, mask);
+ sdhci_runtime_pm_get(host);
+ sdhci_writew(host, mask, SDHCI_SET_INT_ERROR);
+ sdhci_runtime_pm_put(host);
+}
+
static const struct mmc_host_ops sdhci_ops = {
+ .init = sdhci_late_init,
.request = sdhci_request,
.post_req = sdhci_post_req,
.pre_req = sdhci_pre_req,
@@ -2250,9 +2759,15 @@ static const struct mmc_host_ops sdhci_ops = {
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
+ .enhanced_strobe = sdhci_enhanced_strobe,
.select_drive_strength = sdhci_select_drive_strength,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
+ .enable = sdhci_enable,
+ .disable = sdhci_disable,
+ .notify_load = sdhci_notify_load,
+ .notify_halt = sdhci_notify_halt,
+ .force_err_irq = sdhci_force_err_irq,
};
/*****************************************************************************\
@@ -2304,19 +2819,25 @@ static void sdhci_tasklet_finish(unsigned long param)
controllers do not like that. */
sdhci_do_reset(host, SDHCI_RESET_CMD);
sdhci_do_reset(host, SDHCI_RESET_DATA);
+ } else {
+ if (host->quirks2 & SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT)
+ sdhci_reset(host, SDHCI_RESET_DATA);
}
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
+ host->auto_cmd_err_sts = 0;
#ifndef SDHCI_USE_LEDS_CLASS
- sdhci_deactivate_led(host);
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+ sdhci_deactivate_led(host);
#endif
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
+ sdhci_crypto_cfg_end(host, mrq);
mmc_request_done(host->mmc, mrq);
sdhci_runtime_pm_put(host);
}
@@ -2333,9 +2854,15 @@ static void sdhci_timeout_timer(unsigned long data)
if (host->mrq) {
pr_err("%s: Timeout waiting for hardware "
"interrupt.\n", mmc_hostname(host->mmc));
+ MMC_TRACE(host->mmc, "Timeout waiting for h/w interrupt\n");
sdhci_dumpregs(host);
if (host->data) {
+ pr_info("%s: bytes to transfer: %d transferred: %d\n",
+ mmc_hostname(host->mmc),
+ (host->data->blksz * host->data->blocks),
+ (sdhci_readw(host, SDHCI_BLOCK_SIZE) & 0xFFF) *
+ sdhci_readw(host, SDHCI_BLOCK_COUNT));
host->data->error = -ETIMEDOUT;
sdhci_finish_data(host);
} else {
@@ -2360,23 +2887,63 @@ static void sdhci_timeout_timer(unsigned long data)
static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask)
{
+ u16 auto_cmd_status;
BUG_ON(intmask == 0);
if (!host->cmd) {
pr_err("%s: Got command interrupt 0x%08x even "
"though no command operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ MMC_TRACE(host->mmc,
+ "Got command interrupt 0x%08x even though no command operation was in progress.\n",
+ (unsigned)intmask);
sdhci_dumpregs(host);
return;
}
+ trace_mmc_cmd_rw_end(host->cmd->opcode, intmask,
+ sdhci_readl(host, SDHCI_RESPONSE));
+
if (intmask & SDHCI_INT_TIMEOUT)
host->cmd->error = -ETIMEDOUT;
else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
SDHCI_INT_INDEX))
host->cmd->error = -EILSEQ;
+ if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
+ auto_cmd_status = host->auto_cmd_err_sts;
+ pr_err_ratelimited("%s: %s: AUTO CMD err sts 0x%08x\n",
+ mmc_hostname(host->mmc), __func__, auto_cmd_status);
+ if (auto_cmd_status & (SDHCI_AUTO_CMD12_NOT_EXEC |
+ SDHCI_AUTO_CMD_INDEX_ERR |
+ SDHCI_AUTO_CMD_ENDBIT_ERR))
+ host->cmd->error = -EIO;
+ else if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT_ERR)
+ host->cmd->error = -ETIMEDOUT;
+ else if (auto_cmd_status & SDHCI_AUTO_CMD_CRC_ERR)
+ host->cmd->error = -EILSEQ;
+ }
+
if (host->cmd->error) {
+ /*
+ * If this command initiates a data phase and a response
+ * CRC error is signalled, the card can start transferring
+ * data - the card may have received the command without
+ * error. We must not terminate the mmc_request early.
+ *
+ * If the card did not receive the command or returned an
+ * error which prevented it sending data, the data phase
+ * will time out.
+ *
+ * Even in the case of a cmd INDEX or ENDBIT error, we
+ * handle it the same way.
+ */
+ if (host->cmd->data &&
+ (((intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
+ SDHCI_INT_CRC) || (host->cmd->error == -EILSEQ))) {
+ host->cmd = NULL;
+ return;
+ }
tasklet_schedule(&host->finish_tasklet);
return;
}
@@ -2450,13 +3017,17 @@ static void sdhci_adma_show_error(struct sdhci_host *host) { }
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
u32 command;
+ bool pr_msg = false;
BUG_ON(intmask == 0);
+ command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+ trace_mmc_data_rw_end(command, intmask);
+
/* CMD19 generates _only_ Buffer Read Ready interrupt */
if (intmask & SDHCI_INT_DATA_AVAIL) {
- command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
- if (command == MMC_SEND_TUNING_BLOCK ||
- command == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (!(host->quirks2 & SDHCI_QUIRK2_NON_STANDARD_TUNING) &&
+ (command == MMC_SEND_TUNING_BLOCK ||
+ command == MMC_SEND_TUNING_BLOCK_HS200)) {
host->tuning_done = 1;
wake_up(&host->buf_ready_int);
return;
@@ -2487,11 +3058,17 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
host->busy_handle = 1;
return;
}
+ if (host->quirks2 &
+ SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD)
+ return;
}
pr_err("%s: Got data interrupt 0x%08x even "
"though no data operation was in progress.\n",
mmc_hostname(host->mmc), (unsigned)intmask);
+ MMC_TRACE(host->mmc,
+ "Got data interrupt 0x%08x even though no data operation was in progress.\n",
+ (unsigned)intmask);
sdhci_dumpregs(host);
return;
@@ -2502,8 +3079,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
else if (intmask & SDHCI_INT_DATA_END_BIT)
host->data->error = -EILSEQ;
else if ((intmask & SDHCI_INT_DATA_CRC) &&
- SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
- != MMC_BUS_TEST_R)
+ (command != MMC_BUS_TEST_R))
host->data->error = -EILSEQ;
else if (intmask & SDHCI_INT_ADMA_ERROR) {
pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));
@@ -2512,10 +3088,34 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
if (host->ops->adma_workaround)
host->ops->adma_workaround(host, intmask);
}
-
- if (host->data->error)
+ if (host->data->error) {
+ if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT
+ | SDHCI_INT_DATA_END_BIT)) {
+ command = SDHCI_GET_CMD(sdhci_readw(host,
+ SDHCI_COMMAND));
+ if ((command != MMC_SEND_TUNING_BLOCK_HS200) &&
+ (command != MMC_SEND_TUNING_BLOCK))
+ pr_msg = true;
+ } else {
+ pr_msg = true;
+ }
+ if (pr_msg && __ratelimit(&host->dbg_dump_rs)) {
+ pr_err("%s: data txfr (0x%08x) error: %d after %lld ms\n",
+ mmc_hostname(host->mmc), intmask,
+ host->data->error, ktime_to_ms(ktime_sub(
+ ktime_get(), host->data_start_time)));
+ MMC_TRACE(host->mmc,
+ "data txfr (0x%08x) error: %d after %lld ms\n",
+ intmask, host->data->error,
+ ktime_to_ms(ktime_sub(ktime_get(),
+ host->data_start_time)));
+
+ if (!host->mmc->sdr104_wa ||
+ (host->mmc->ios.timing != MMC_TIMING_UHS_SDR104))
+ sdhci_dumpregs(host);
+ }
sdhci_finish_data(host);
- else {
+ } else {
if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
sdhci_transfer_pio(host);
@@ -2561,6 +3161,58 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
}
}
+#ifdef CONFIG_MMC_CQ_HCI
+static int sdhci_get_cmd_err(u32 intmask)
+{
+ if (intmask & SDHCI_INT_TIMEOUT)
+ return -ETIMEDOUT;
+ else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+ SDHCI_INT_INDEX))
+ return -EILSEQ;
+ return 0;
+}
+
+static int sdhci_get_data_err(u32 intmask)
+{
+ if (intmask & SDHCI_INT_DATA_TIMEOUT)
+ return -ETIMEDOUT;
+ else if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
+ return -EILSEQ;
+ else if (intmask & SDHCI_INT_ADMA_ERROR)
+ return -EIO;
+ return 0;
+}
+
+static irqreturn_t sdhci_cmdq_irq(struct sdhci_host *host, u32 intmask)
+{
+ int err = 0;
+ u32 mask = 0;
+ irqreturn_t ret;
+
+ if (intmask & SDHCI_INT_CMD_MASK)
+ err = sdhci_get_cmd_err(intmask);
+ else if (intmask & SDHCI_INT_DATA_MASK)
+ err = sdhci_get_data_err(intmask);
+
+ ret = cmdq_irq(host->mmc, err);
+ if (err) {
+ /* Clear the error interrupts */
+ mask = intmask & SDHCI_INT_ERROR_MASK;
+ sdhci_writel(host, mask, SDHCI_INT_STATUS);
+ }
+ return ret;
+
+}
+
+#else
+static irqreturn_t sdhci_cmdq_irq(struct sdhci_host *host, u32 intmask)
+{
+ pr_err("%s: Received cmdq-irq when disabled !!!!\n",
+ mmc_hostname(host->mmc));
+ return IRQ_NONE;
+}
+#endif
+
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
irqreturn_t result = IRQ_NONE;
@@ -2575,6 +3227,31 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
return IRQ_NONE;
}
+ if (!host->clock && host->mmc->card &&
+ mmc_card_sdio(host->mmc->card)) {
+ if (!mmc_card_and_host_support_async_int(host->mmc)) {
+ spin_unlock(&host->lock);
+ return IRQ_NONE;
+ }
+ /*
+ * The async card interrupt is level sensitive and can be
+ * received while the clocks are off. If the SDIO card has
+ * asserted an async interrupt, disable host->irq here; the
+ * card interrupt can be disabled later and host->irq
+ * re-enabled.
+ */
+
+ pr_debug("%s: %s: sdio_async intr. received\n",
+ mmc_hostname(host->mmc), __func__);
+ sdhci_cfg_irq(host, false, false);
+ host->sdio_irq_async_status = true;
+ host->thread_isr |= SDHCI_INT_CARD_INT;
+ result = IRQ_WAKE_THREAD;
+ spin_unlock(&host->lock);
+ return result;
+ }
+
intmask = sdhci_readl(host, SDHCI_INT_STATUS);
if (!intmask || intmask == 0xffffffff) {
result = IRQ_NONE;
@@ -2582,6 +3259,22 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
}
do {
+ if (host->mmc->card && mmc_card_cmdq(host->mmc->card) &&
+ !mmc_host_halt(host->mmc) && !mmc_host_cq_disable(host->mmc)) {
+ pr_debug("*** %s: cmdq intr: 0x%08x\n",
+ mmc_hostname(host->mmc),
+ intmask);
+ result = sdhci_cmdq_irq(host, intmask);
+ if (result == IRQ_HANDLED)
+ goto out;
+ }
+
+ MMC_TRACE(host->mmc,
+ "%s: intmask: 0x%x\n", __func__, intmask);
+
+ if (intmask & SDHCI_INT_AUTO_CMD_ERR)
+ host->auto_cmd_err_sts = sdhci_readw(host,
+ SDHCI_AUTO_CMD_ERR);
/* Clear selected interrupts. */
mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
SDHCI_INT_BUS_POWER);
@@ -2620,12 +3313,20 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
result = IRQ_WAKE_THREAD;
}
- if (intmask & SDHCI_INT_CMD_MASK)
+ if (intmask & SDHCI_INT_CMD_MASK) {
+ if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+ (host->clock <= 400000))
+ udelay(40);
sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK,
&intmask);
+ }
- if (intmask & SDHCI_INT_DATA_MASK)
+ if (intmask & SDHCI_INT_DATA_MASK) {
+ if ((host->quirks2 & SDHCI_QUIRK2_SLOW_INT_CLR) &&
+ (host->clock <= 400000))
+ udelay(40);
sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
+ }
if (intmask & SDHCI_INT_BUS_POWER)
pr_err("%s: Card is consuming too much power!\n",
@@ -2659,6 +3360,8 @@ out:
if (unexpected) {
pr_err("%s: Unexpected interrupt 0x%08x.\n",
mmc_hostname(host->mmc), unexpected);
+ MMC_TRACE(host->mmc, "Unexpected interrupt 0x%08x.\n",
+ unexpected);
sdhci_dumpregs(host);
}
@@ -2685,8 +3388,11 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
sdio_run_irqs(host->mmc);
spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_SDIO_IRQ_ENABLED)
+ if (host->flags & SDHCI_SDIO_IRQ_ENABLED) {
+ if (host->sdio_irq_async_status)
+ host->sdio_irq_async_status = false;
sdhci_enable_sdio_irq_nolock(host, true);
+ }
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -2903,11 +3609,255 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
host->mmc_host_ops = sdhci_ops;
mmc->ops = &host->mmc_host_ops;
+ spin_lock_init(&host->lock);
+ ratelimit_state_init(&host->dbg_dump_rs, SDHCI_DBG_DUMP_RS_INTERVAL,
+ SDHCI_DBG_DUMP_RS_BURST);
+
return host;
}
EXPORT_SYMBOL_GPL(sdhci_alloc_host);
+#ifdef CONFIG_MMC_CQ_HCI
+static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u8 ctrl;
+
+ if (host->version >= SDHCI_SPEC_200) {
+ ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
+ ctrl &= ~SDHCI_CTRL_DMA_MASK;
+ if (host->flags & SDHCI_USE_64_BIT_DMA)
+ ctrl |= SDHCI_CTRL_ADMA64;
+ else
+ ctrl |= SDHCI_CTRL_ADMA32;
+ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
+ }
+ if (host->ops->toggle_cdr && !host->cdr_support)
+ host->ops->toggle_cdr(host, false);
+}
+
+static void sdhci_cmdq_clear_set_irqs(struct mmc_host *mmc, bool clear)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ u32 ier = 0;
+
+ if (clear) {
+ ier = SDHCI_INT_CMDQ_EN | SDHCI_INT_ERROR_MASK;
+ sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+ } else {
+ ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
+ SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
+ SDHCI_INT_INDEX | SDHCI_INT_END_BIT |
+ SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
+ SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE |
+ SDHCI_INT_AUTO_CMD_ERR;
+ sdhci_writel(host, ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE);
+ }
+}
+
+static void sdhci_cmdq_set_data_timeout(struct mmc_host *mmc, u32 val)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_writeb(host, val, SDHCI_TIMEOUT_CONTROL);
+}
+
+static void sdhci_cmdq_dump_vendor_regs(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_dumpregs(host);
+}
+
+static int sdhci_cmdq_init(struct sdhci_host *host, struct mmc_host *mmc,
+ bool dma64)
+{
+ return cmdq_init(host->cq_host, mmc, dma64);
+}
+
+static void sdhci_cmdq_set_block_size(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_set_blk_size_reg(host, 512, 0);
+}
+
+static void sdhci_enhanced_strobe_mask(struct mmc_host *mmc, bool set)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->enhanced_strobe_mask)
+ host->ops->enhanced_strobe_mask(host, set);
+}
+
+static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops->clear_set_dumpregs)
+ host->ops->clear_set_dumpregs(host, set);
+}
+
+static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ int err = 0;
+
+ if (!host->is_crypto_en)
+ return 0;
+
+ if (host->crypto_reset_reqd && host->ops->crypto_engine_reset) {
+ err = host->ops->crypto_engine_reset(host);
+ if (err) {
+ pr_err("%s: crypto reset failed\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ host->crypto_reset_reqd = false;
+ }
+
+ if (host->ops->crypto_engine_cmdq_cfg) {
+ err = host->ops->crypto_engine_cmdq_cfg(host, mrq,
+ slot, ice_ctx);
+ if (err) {
+ pr_err("%s: failed to configure crypto\n",
+ mmc_hostname(host->mmc));
+ goto out;
+ }
+ }
+out:
+ return err;
+}
+
+static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!host->is_crypto_en)
+ return 0;
+
+ return sdhci_crypto_cfg_end(host, mrq);
+}
+
+static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (!host->is_crypto_en)
+ return;
+
+ if (host->ops->crypto_cfg_reset)
+ host->ops->crypto_cfg_reset(host, slot);
+}
+
+static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ sdhci_writel(host, sdhci_readl(host, SDHCI_INT_ENABLE) |
+ SDHCI_INT_RESPONSE, SDHCI_INT_ENABLE);
+ sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS);
+}
+#else
+static void sdhci_cmdq_set_transfer_params(struct mmc_host *mmc)
+{
+}
+
+static void sdhci_cmdq_clear_set_irqs(struct mmc_host *mmc, bool clear)
+{
+}
+
+static void sdhci_cmdq_set_data_timeout(struct mmc_host *mmc, u32 val)
+{
+}
+
+static void sdhci_cmdq_dump_vendor_regs(struct mmc_host *mmc)
+{
+}
+
+static int sdhci_cmdq_init(struct sdhci_host *host, struct mmc_host *mmc,
+ bool dma64)
+{
+ return -ENOSYS;
+}
+
+static void sdhci_cmdq_set_block_size(struct mmc_host *mmc)
+{
+}
+
+static void sdhci_enhanced_strobe_mask(struct mmc_host *mmc, bool set)
+{
+}
+
+static void sdhci_cmdq_clear_set_dumpregs(struct mmc_host *mmc, bool set)
+{
+}
+
+static int sdhci_cmdq_crypto_cfg(struct mmc_host *mmc,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx)
+{
+ return 0;
+}
+
+static int sdhci_cmdq_crypto_cfg_end(struct mmc_host *mmc,
+ struct mmc_request *mrq)
+{
+ return 0;
+}
+
+static void sdhci_cmdq_crypto_cfg_reset(struct mmc_host *mmc, unsigned int slot)
+{
+}
+
+static void sdhci_cmdq_post_cqe_halt(struct mmc_host *mmc)
+{
+}
+#endif
+
+static const struct cmdq_host_ops sdhci_cmdq_ops = {
+ .clear_set_irqs = sdhci_cmdq_clear_set_irqs,
+ .set_data_timeout = sdhci_cmdq_set_data_timeout,
+ .dump_vendor_regs = sdhci_cmdq_dump_vendor_regs,
+ .set_block_size = sdhci_cmdq_set_block_size,
+ .clear_set_dumpregs = sdhci_cmdq_clear_set_dumpregs,
+ .enhanced_strobe_mask = sdhci_enhanced_strobe_mask,
+ .crypto_cfg = sdhci_cmdq_crypto_cfg,
+ .crypto_cfg_end = sdhci_cmdq_crypto_cfg_end,
+ .crypto_cfg_reset = sdhci_cmdq_crypto_cfg_reset,
+ .post_cqe_halt = sdhci_cmdq_post_cqe_halt,
+ .set_transfer_params = sdhci_cmdq_set_transfer_params,
+};
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static int sdhci_is_adma2_64bit(struct sdhci_host *host)
+{
+ u32 caps;
+
+ caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :
+ sdhci_readl(host, SDHCI_CAPABILITIES);
+
+ if (caps & SDHCI_CAN_64BIT)
+ return 1;
+ return 0;
+}
+#else
+static int sdhci_is_adma2_64bit(struct sdhci_host *host)
+{
+ return 0;
+}
+#endif
+
int sdhci_add_host(struct sdhci_host *host)
{
struct mmc_host *mmc;
@@ -2980,7 +3930,7 @@ int sdhci_add_host(struct sdhci_host *host)
* SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to
* implement.
*/
- if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT)
+ if (sdhci_is_adma2_64bit(host))
host->flags |= SDHCI_USE_64_BIT_DMA;
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
@@ -3135,6 +4085,9 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
+ if (caps[0] & SDHCI_CAN_ASYNC_INT)
+ mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
+
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
host->flags |= SDHCI_AUTO_CMD12;
@@ -3167,7 +4120,8 @@ int sdhci_add_host(struct sdhci_host *host)
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
- IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ (IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)) &&
+ !(mmc->caps2 & MMC_CAP2_NONHOTPLUG)))
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If there are external regulators, get them */
@@ -3264,10 +4218,15 @@ int sdhci_add_host(struct sdhci_host *host)
* value.
*/
max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
- if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
- int curr = regulator_get_current_limit(mmc->supply.vmmc);
- if (curr > 0) {
+ if (!max_current_caps) {
+ u32 curr = 0;
+
+ if (!IS_ERR(mmc->supply.vmmc))
+ curr = regulator_get_current_limit(mmc->supply.vmmc);
+ else if (host->ops->get_current_limit)
+ curr = host->ops->get_current_limit(host);
+ if (curr > 0) {
/* convert to SDHCI_MAX_CURRENT format */
curr = curr/1000; /* convert to mA */
curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
@@ -3332,8 +4291,6 @@ int sdhci_add_host(struct sdhci_host *host)
return -ENODEV;
}
- spin_lock_init(&host->lock);
-
/*
* Maximum number of segments. Depends on if the hardware
* can do scatter/gather or not.
@@ -3399,6 +4356,8 @@ int sdhci_add_host(struct sdhci_host *host)
init_waitqueue_head(&host->buf_ready_int);
+ host->flags |= SDHCI_HOST_IRQ_STATUS;
+
sdhci_init(host, 0);
ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
@@ -3414,33 +4373,54 @@ int sdhci_add_host(struct sdhci_host *host)
#endif
#ifdef SDHCI_USE_LEDS_CLASS
- snprintf(host->led_name, sizeof(host->led_name),
- "%s::", mmc_hostname(mmc));
- host->led.name = host->led_name;
- host->led.brightness = LED_OFF;
- host->led.default_trigger = mmc_hostname(mmc);
- host->led.brightness_set = sdhci_led_control;
-
- ret = led_classdev_register(mmc_dev(mmc), &host->led);
- if (ret) {
- pr_err("%s: Failed to register LED device: %d\n",
- mmc_hostname(mmc), ret);
- goto reset;
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL)) {
+ snprintf(host->led_name, sizeof(host->led_name),
+ "%s::", mmc_hostname(mmc));
+ host->led.name = host->led_name;
+ host->led.brightness = LED_OFF;
+ host->led.default_trigger = mmc_hostname(mmc);
+ host->led.brightness_set = sdhci_led_control;
+
+ ret = led_classdev_register(mmc_dev(mmc), &host->led);
+ if (ret) {
+ pr_err("%s: Failed to register LED device: %d\n",
+ mmc_hostname(mmc), ret);
+ goto reset;
+ }
}
#endif
mmiowb();
- mmc_add_host(mmc);
+ if (host->quirks2 & SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR) {
+ host->ier = (host->ier & ~SDHCI_INT_DATA_END_BIT);
+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+ }
- pr_info("%s: SDHCI controller on %s [%s] using %s\n",
- mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+ if (mmc->caps2 & MMC_CAP2_CMD_QUEUE) {
+ bool dma64 = (host->flags & SDHCI_USE_64_BIT_DMA) ?
+ true : false;
+ ret = sdhci_cmdq_init(host, mmc, dma64);
+ if (ret)
+ pr_err("%s: CMDQ init: failed (%d)\n",
+ mmc_hostname(host->mmc), ret);
+ else
+ host->cq_host->ops = &sdhci_cmdq_ops;
+ }
+
+ pr_info("%s: SDHCI controller on %s [%s] using %s in %s mode\n",
+ mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
(host->flags & SDHCI_USE_ADMA) ?
- (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
- (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
+ ((host->flags & SDHCI_USE_64_BIT_DMA) ?
+ "64-bit ADMA" : "32-bit ADMA") :
+ ((host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"),
+ ((mmc->caps2 & MMC_CAP2_CMD_QUEUE) && !ret) ?
+ "CMDQ" : "legacy");
sdhci_enable_card_detection(host);
+ mmc_add_host(mmc);
return 0;
#ifdef SDHCI_USE_LEDS_CLASS
@@ -3481,10 +4461,11 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
sdhci_disable_card_detection(host);
- mmc_remove_host(mmc);
+ mmc_remove_host(host->mmc);
#ifdef SDHCI_USE_LEDS_CLASS
- led_classdev_unregister(&host->led);
+ if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_LED_CONTROL))
+ led_classdev_unregister(&host->led);
#endif
if (!dead)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 0115e9907bf8..300be7fd0f24 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -17,7 +17,7 @@
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/io.h>
-
+#include <linux/ratelimit.h>
#include <linux/mmc/host.h>
/*
@@ -137,22 +137,32 @@
#define SDHCI_INT_DATA_CRC 0x00200000
#define SDHCI_INT_DATA_END_BIT 0x00400000
#define SDHCI_INT_BUS_POWER 0x00800000
-#define SDHCI_INT_ACMD12ERR 0x01000000
+#define SDHCI_INT_AUTO_CMD_ERR 0x01000000
#define SDHCI_INT_ADMA_ERROR 0x02000000
#define SDHCI_INT_NORMAL_MASK 0x00007FFF
#define SDHCI_INT_ERROR_MASK 0xFFFF8000
#define SDHCI_INT_CMD_MASK (SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
- SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
+ SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
+ SDHCI_INT_AUTO_CMD_ERR)
+
#define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \
SDHCI_INT_BLK_GAP)
+
+#define SDHCI_INT_CMDQ_EN (0x1 << 14)
#define SDHCI_INT_ALL_MASK ((unsigned int)-1)
-#define SDHCI_ACMD12_ERR 0x3C
+#define SDHCI_AUTO_CMD_ERR 0x3C
+#define SDHCI_AUTO_CMD12_NOT_EXEC 0x0001
+#define SDHCI_AUTO_CMD_TIMEOUT_ERR 0x0002
+#define SDHCI_AUTO_CMD_CRC_ERR 0x0004
+#define SDHCI_AUTO_CMD_ENDBIT_ERR 0x0008
+#define SDHCI_AUTO_CMD_INDEX_ERR 0x0010
+#define SDHCI_AUTO_CMD12_NOT_ISSUED 0x0080
#define SDHCI_HOST_CONTROL2 0x3E
#define SDHCI_CTRL_UHS_MASK 0x0007
@@ -170,6 +180,7 @@
#define SDHCI_CTRL_DRV_TYPE_D 0x0030
#define SDHCI_CTRL_EXEC_TUNING 0x0040
#define SDHCI_CTRL_TUNED_CLK 0x0080
+#define SDHCI_CTRL_ASYNC_INT_ENABLE 0x4000
#define SDHCI_CTRL_PRESET_VAL_ENABLE 0x8000
#define SDHCI_CAPABILITIES 0x40
@@ -190,6 +201,7 @@
#define SDHCI_CAN_VDD_300 0x02000000
#define SDHCI_CAN_VDD_180 0x04000000
#define SDHCI_CAN_64BIT 0x10000000
+#define SDHCI_CAN_ASYNC_INT 0x20000000
#define SDHCI_SUPPORT_SDR50 0x00000001
#define SDHCI_SUPPORT_SDR104 0x00000002
@@ -320,6 +332,12 @@ enum sdhci_cookie {
COOKIE_GIVEN,
};
+enum sdhci_power_policy {
+ SDHCI_PERFORMANCE_MODE,
+ SDHCI_POWER_SAVE_MODE,
+ SDHCI_POWER_POLICY_NUM /* Always keep this one last */
+};
+
struct sdhci_host {
/* Data set by hardware interface driver */
const char *hw_name; /* Hardware bus name */
@@ -423,6 +441,84 @@ struct sdhci_host {
*/
#define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16)
+/*
+ * Read Transfer Active/Write Transfer Active may not be de-asserted
+ * after the end of a transaction. Issue a reset for the DAT line.
+ */
+#define SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT (1<<17)
+/*
+ * Slow interrupt clearance at 400 KHz may cause the host controller
+ * driver's interrupt handler to be called twice.
+ */
+#define SDHCI_QUIRK2_SLOW_INT_CLR (1<<18)
+
+/*
+ * If the base clock can be scalable, then there should be no further
+ * clock dividing as the input clock itself will be scaled down to
+ * required frequency.
+ */
+#define SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK (1<<19)
+
+/*
+ * Ignore data timeout errors for R1B commands, as there is no data
+ * associated and the busy timeout value for these commands could be
+ * larger than the maximum timeout value the controller can handle.
+ */
+#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD (1<<20)
+
+/*
+ * The preset value registers are not properly initialized by
+ * some hardware and hence preset value must not be enabled for
+ * such controllers.
+ */
+#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE (1<<21)
+/*
+ * Some controllers define a usage for 0xF in the data timeout counter
+ * register (0x2E), even though that value is reserved per the
+ * specification.
+ */
+#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT (1<<22)
+/*
+ * This applies to controllers that advertise a timeout clock of just
+ * 50 MHz in the capabilities register (bits 5-0) while the base clock
+ * frequency is 200 MHz. Such a controller internally multiplies the
+ * value in the timeout control register by 4, assuming the driver
+ * always calculates the timeout from the fixed timeout clock in the
+ * capabilities register. But with SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK
+ * the base clock frequency is controlled directly by the driver and
+ * its rate varies up to a maximum of 200 MHz. This quirk avoids the
+ * controller's multiplication when the timeout is calculated from the
+ * base clock.
+ */
+#define SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 (1 << 23)
+
+/*
+ * Some SDHC controllers are unable to handle a data-end bit error in
+ * the 1-bit mode of SDIO.
+ */
+#define SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR (1<<24)
+
+/* Controller has nonstandard clock management */
+#define SDHCI_QUIRK2_NONSTANDARD_CLOCK (1<<25)
+/* Use reset workaround in case sdhci reset timeouts */
+#define SDHCI_QUIRK2_USE_RESET_WORKAROUND (1<<26)
+/* Some controllers don't have any LED control */
+#define SDHCI_QUIRK2_BROKEN_LED_CONTROL (1<<27)
+/*
+ * Some controllers don't follow the tuning procedure as defined in the spec.
+ * The tuning data has to be compared by the SW driver to validate the correct
+ * phase.
+ */
+#define SDHCI_QUIRK2_NON_STANDARD_TUNING (1 << 28)
+/*
+ * Some controllers may use PIO mode to work around HW issues in ADMA for
+ * eMMC tuning commands.
+ */
+#define SDHCI_QUIRK2_USE_PIO_FOR_EMMC_TUNING (1 << 29)
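
Because the SDHCI data timeout count is an exponent (the controller waits roughly 2^(count+13) timeout-clock cycles), compensating for an internal multiply-by-4 amounts to subtracting two from the programmed count. A hedged sketch of how SDHCI_QUIRK2_DIVIDE_TOUT_BY_4 might be honoured (the helper name is illustrative):

static u8 sdhci_adjust_timeout_count(struct sdhci_host *host, u8 count)
{
	/* the controller multiplies the timeout by 4 internally, so
	 * divide by 4 here, i.e. drop the exponent by two */
	if (host->quirks2 & SDHCI_QUIRK2_DIVIDE_TOUT_BY_4)
		count = count > 1 ? count - 2 : 0;

	return count;
}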
+
+
int irq; /* Device IRQ */
void __iomem *ioaddr; /* Mapped address */
@@ -432,6 +528,7 @@ struct sdhci_host {
struct mmc_host *mmc; /* MMC structure */
struct mmc_host_ops mmc_host_ops; /* MMC host ops */
u64 dma_mask; /* custom DMA mask */
+ u64 coherent_dma_mask;
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
struct led_classdev led; /* LED control */
@@ -453,6 +550,7 @@ struct sdhci_host {
#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
+#define SDHCI_HOST_IRQ_STATUS (1<<14) /* host->irq status */
unsigned int version; /* SDHCI spec. version */
@@ -466,6 +564,7 @@ struct sdhci_host {
bool runtime_suspended; /* Host is runtime suspended */
bool bus_on; /* Bus power prevents runtime suspend */
bool preset_enabled; /* Preset is enabled */
+ bool cdr_support;
struct mmc_request *mrq; /* Current request */
struct mmc_command *cmd; /* Current command */
@@ -514,6 +613,20 @@ struct sdhci_host {
unsigned int tuning_count; /* Timer count for re-tuning */
unsigned int tuning_mode; /* Re-tuning mode supported by host */
#define SDHCI_TUNING_MODE_1 0
+ ktime_t data_start_time;
+
+ enum sdhci_power_policy power_policy;
+
+ bool is_crypto_en;
+ bool crypto_reset_reqd;
+ bool sdio_irq_async_status;
+
+ u32 auto_cmd_err_sts;
+ struct ratelimit_state dbg_dump_rs;
+ struct cmdq_host *cq_host;
+ int reset_wa_applied; /* reset workaround status */
+ ktime_t reset_wa_t; /* time when the reset workaround is applied */
+ int reset_wa_cnt; /* total number of times workaround is used */
unsigned long private[0] ____cacheline_aligned;
};
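
The new dbg_dump_rs state would typically rate-limit register dumps on the error path so a flood of failures doesn't spam the log. A hedged sketch, assuming the stock <linux/ratelimit.h> helpers and the existing sdhci_dumpregs() routine:

	/* at init time: allow at most one dump every 5 seconds */
	ratelimit_state_init(&host->dbg_dump_rs, 5 * HZ, 1);

	/* on the error path: */
	if (__ratelimit(&host->dbg_dump_rs))
		sdhci_dumpregs(host);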
@@ -543,16 +656,46 @@ struct sdhci_ops {
unsigned int (*get_ro)(struct sdhci_host *host);
void (*reset)(struct sdhci_host *host, u8 mask);
int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode);
+ int (*crypto_engine_cfg)(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot);
+ int (*crypto_engine_cmdq_cfg)(struct sdhci_host *host,
+ struct mmc_request *mrq, u32 slot, u64 *ice_ctx);
+ int (*crypto_engine_cfg_end)(struct sdhci_host *host,
+ struct mmc_request *mrq);
+ int (*crypto_engine_reset)(struct sdhci_host *host);
+ void (*crypto_cfg_reset)(struct sdhci_host *host, unsigned int slot);
void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
void (*hw_reset)(struct sdhci_host *host);
void (*adma_workaround)(struct sdhci_host *host, u32 intmask);
+ unsigned int (*get_max_segments)(void);
void (*platform_init)(struct sdhci_host *host);
+#define REQ_BUS_OFF (1 << 0)
+#define REQ_BUS_ON (1 << 1)
+#define REQ_IO_LOW (1 << 2)
+#define REQ_IO_HIGH (1 << 3)
void (*card_event)(struct sdhci_host *host);
+ int (*enhanced_strobe)(struct sdhci_host *host);
+ void (*platform_bus_voting)(struct sdhci_host *host, u32 enable);
+ void (*check_power_status)(struct sdhci_host *host, u32 req_type);
+ int (*config_auto_tuning_cmd)(struct sdhci_host *host,
+ bool enable,
+ u32 type);
+ int (*enable_controller_clock)(struct sdhci_host *host);
+ void (*clear_set_dumpregs)(struct sdhci_host *host, bool set);
+ void (*enhanced_strobe_mask)(struct sdhci_host *host, bool set);
+ void (*dump_vendor_regs)(struct sdhci_host *host);
+ void (*toggle_cdr)(struct sdhci_host *host, bool enable);
void (*voltage_switch)(struct sdhci_host *host);
int (*select_drive_strength)(struct sdhci_host *host,
struct mmc_card *card,
unsigned int max_dtr, int host_drv,
int card_drv, int *drv_type);
+ int (*notify_load)(struct sdhci_host *host, enum mmc_load state);
+ void (*reset_workaround)(struct sdhci_host *host, u32 enable);
+ void (*init)(struct sdhci_host *host);
+ void (*pre_req)(struct sdhci_host *host, struct mmc_request *req);
+ void (*post_req)(struct sdhci_host *host, struct mmc_request *req);
+ unsigned int (*get_current_limit)(struct sdhci_host *host);
};
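
A vendor host driver opts into the new hooks by filling in just the ones it implements and leaving the rest NULL. A minimal, hypothetical wiring (the my_* names are placeholders, not a real driver):

static int my_sdhci_notify_load(struct sdhci_host *host,
				enum mmc_load state)
{
	/* e.g. vote for bus bandwidth or rescale clocks based on load */
	return 0;
}

static void my_sdhci_dump_vendor_regs(struct sdhci_host *host)
{
	/* print controller-specific debug registers here */
}

static const struct sdhci_ops my_sdhci_ops = {
	.notify_load		= my_sdhci_notify_load,
	.dump_vendor_regs	= my_sdhci_dump_vendor_regs,
};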
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS
@@ -672,4 +815,5 @@ extern int sdhci_runtime_suspend_host(struct sdhci_host *host);
extern int sdhci_runtime_resume_host(struct sdhci_host *host);
#endif
+void sdhci_cfg_irq(struct sdhci_host *host, bool enable, bool sync);
#endif /* __SDHCI_HW_H */
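
sdhci_cfg_irq() is exported to let callers mask and unmask the host IRQ line itself (its state is tracked via the SDHCI_HOST_IRQ_STATUS flag above); the sync flag presumably selects disable_irq() versus disable_irq_nosync() semantics. A hedged usage sketch:

	/* quiesce the controller IRQ synchronously before a risky
	 * reconfiguration, then re-enable it afterwards */
	sdhci_cfg_irq(host, false, true);
	/* ... reset / reconfigure the controller ... */
	sdhci_cfg_irq(host, true, false);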