Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/Kconfig            |   11
-rw-r--r--  drivers/mmc/card/Makefile           |    1
-rw-r--r--  drivers/mmc/card/block.c            | 1996
-rw-r--r--  drivers/mmc/card/mmc_block_test.c   | 2038
-rw-r--r--  drivers/mmc/card/mmc_test.c         |    3
-rw-r--r--  drivers/mmc/card/queue.c            |  394
-rw-r--r--  drivers/mmc/card/queue.h            |   36
7 files changed, 4385 insertions, 94 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig index 6142ec1b9dfb..91f2445b6ac8 100644 --- a/drivers/mmc/card/Kconfig +++ b/drivers/mmc/card/Kconfig @@ -50,6 +50,17 @@ config MMC_BLOCK_BOUNCE If unsure, say Y here. +config MMC_BLOCK_DEFERRED_RESUME + bool "Defer MMC layer resume until I/O is requested" + depends on MMC_BLOCK + default n + help + Say Y here to enable deferred MMC resume until I/O + is requested. + + This will reduce overall resume latency and + save power when there is an SD card inserted but not being used. + config SDIO_UART tristate "SDIO UART/GPS class support" depends on TTY diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile index c73b406a06cd..d55107fb4551 100644 --- a/drivers/mmc/card/Makefile +++ b/drivers/mmc/card/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_MMC_TEST) += mmc_test.o obj-$(CONFIG_SDIO_UART) += sdio_uart.o +obj-$(CONFIG_MMC_BLOCK_TEST) += mmc_block_test.o diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index f0268624c44d..b849811aaf33 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -30,16 +30,19 @@ #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/scatterlist.h> +#include <linux/bitops.h> #include <linux/string_helpers.h> #include <linux/delay.h> #include <linux/capability.h> #include <linux/compat.h> #include <linux/pm_runtime.h> +#include <linux/ioprio.h> #include <trace/events/mmc.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> +#include <linux/mmc/core.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> @@ -60,15 +63,33 @@ MODULE_ALIAS("mmc:block"); #define INAND_CMD38_ARG_SECERASE 0x80 #define INAND_CMD38_ARG_SECTRIM1 0x81 #define INAND_CMD38_ARG_SECTRIM2 0x88 -#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +#define MMC_BLK_TIMEOUT_MS (30 * 1000) /* 30 sec timeout */ #define MMC_SANITIZE_REQ_TIMEOUT 240000 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) +#define MMC_CMDQ_STOP_TIMEOUT_MS 100 #define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ (rq_data_dir(req) == WRITE)) #define PACKED_CMD_VER 0x01 #define PACKED_CMD_WR 0x02 - +#define PACKED_TRIGGER_MAX_ELEMENTS 5000 + +#define MMC_BLK_MAX_RETRIES 5 /* max # of retries before aborting a command */ +#define MMC_BLK_UPDATE_STOP_REASON(stats, reason) \ + do { \ + if (stats->enabled) \ + stats->pack_stop_reason[reason]++; \ + } while (0) + +#define MAX_RETRIES 5 +#define PCKD_TRGR_INIT_MEAN_POTEN 17 +#define PCKD_TRGR_POTEN_LOWER_BOUND 5 +#define PCKD_TRGR_URGENT_PENALTY 2 +#define PCKD_TRGR_LOWER_BOUND 5 +#define PCKD_TRGR_PRECISION_MULTIPLIER 100 + +static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd( + struct mmc_queue_req *mqrq, struct mmc_queue *mq); static DEFINE_MUTEX(block_mutex); /* @@ -103,6 +124,7 @@ struct mmc_blk_data { #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */ +#define MMC_BLK_CMD_QUEUE (1 << 3) /* MMC command queue support */ unsigned int usage; unsigned int read_only; @@ -113,6 +135,8 @@ struct mmc_blk_data { #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) +#define MMC_BLK_FLUSH BIT(4) +#define MMC_BLK_PARTSWITCH BIT(5) /* * Only set in main mmc_blk_data associated @@ -122,6 +146,8 @@ struct mmc_blk_data { unsigned int part_curr; struct device_attribute force_ro; struct device_attribute power_ro_lock; + struct 
device_attribute num_wr_reqs_to_start_packing; + struct device_attribute no_pack_for_random; int area_type; }; @@ -139,6 +165,8 @@ MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); static inline int mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md); static int get_card_status(struct mmc_card *card, u32 *status, int retries); +static int mmc_blk_cmdq_switch(struct mmc_card *card, + struct mmc_blk_data *md, bool enable); static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) { @@ -195,9 +223,13 @@ static ssize_t power_ro_lock_show(struct device *dev, { int ret; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); - struct mmc_card *card = md->queue.card; + struct mmc_card *card; int locked = 0; + if (!md) + return -EINVAL; + + card = md->queue.card; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) locked = 2; else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) @@ -225,6 +257,8 @@ static ssize_t power_ro_lock_store(struct device *dev, return count; md = mmc_blk_get(dev_to_disk(dev)); + if (!md) + return -EINVAL; card = md->queue.card; mmc_get_card(card); @@ -262,6 +296,9 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, int ret; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + if (!md) + return -EINVAL; + ret = snprintf(buf, PAGE_SIZE, "%d\n", get_disk_ro(dev_to_disk(dev)) ^ md->read_only); @@ -276,6 +313,10 @@ static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, char *end; struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); unsigned long set = simple_strtoul(buf, &end, 0); + + if (!md) + return -EINVAL; + if (end == buf) { ret = -EINVAL; goto out; @@ -531,6 +572,118 @@ static void mmc_blk_simulate_delay( #define mmc_blk_simulate_delay(mq, req, waitfor) #endif +static ssize_t +num_wr_reqs_to_start_packing_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + int num_wr_reqs_to_start_packing; + int ret; + + if (!md) + return -EINVAL; + num_wr_reqs_to_start_packing = md->queue.num_wr_reqs_to_start_packing; + + ret = snprintf(buf, PAGE_SIZE, "%d\n", num_wr_reqs_to_start_packing); + + mmc_blk_put(md); + return ret; +} + +static ssize_t +num_wr_reqs_to_start_packing_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int value; + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + struct mmc_card *card; + int ret = count; + + if (!md) + return -EINVAL; + + card = md->queue.card; + if (!card) { + ret = -EINVAL; + goto exit; + } + + sscanf(buf, "%d", &value); + + if (value >= 0) { + md->queue.num_wr_reqs_to_start_packing = + min_t(int, value, (int)card->ext_csd.max_packed_writes); + + pr_debug("%s: trigger to pack: new value = %d", + mmc_hostname(card->host), + md->queue.num_wr_reqs_to_start_packing); + } else { + pr_err("%s: value %d is not valid. 
old value remains = %d", + mmc_hostname(card->host), value, + md->queue.num_wr_reqs_to_start_packing); + ret = -EINVAL; + } + +exit: + mmc_blk_put(md); + return ret; +} + +static ssize_t +no_pack_for_random_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + int ret; + + if (!md) + return -EINVAL; + ret = snprintf(buf, PAGE_SIZE, "%d\n", md->queue.no_pack_for_random); + + mmc_blk_put(md); + return ret; +} + +static ssize_t +no_pack_for_random_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int value; + struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + struct mmc_card *card; + int ret = count; + + if (!md) + return -EINVAL; + + card = md->queue.card; + if (!card) { + ret = -EINVAL; + goto exit; + } + + sscanf(buf, "%d", &value); + + if (value < 0) { + pr_err("%s: value %d is not valid. old value remains = %d", + mmc_hostname(card->host), value, + md->queue.no_pack_for_random); + ret = -EINVAL; + goto exit; + } + + md->queue.no_pack_for_random = (value > 0) ? true : false; + + pr_debug("%s: no_pack_for_random: new value = %d", + mmc_hostname(card->host), + md->queue.no_pack_for_random); + +exit: + mmc_blk_put(md); + return ret; +} static int mmc_blk_open(struct block_device *bdev, fmode_t mode) { @@ -679,11 +832,12 @@ static int ioctl_do_sanitize(struct mmc_card *card) { int err; - if (!mmc_can_sanitize(card)) { - pr_warn("%s: %s - SANITIZE is not supported\n", + if (!mmc_can_sanitize(card) && + (card->host->caps2 & MMC_CAP2_SANITIZE)) { + pr_warn("%s: %s - SANITIZE is not supported\n", mmc_hostname(card->host), __func__); - err = -EOPNOTSUPP; - goto out; + err = -EOPNOTSUPP; + goto out; } pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", @@ -713,19 +867,22 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, struct mmc_request mrq = {NULL}; struct scatterlist sg; int err; - int is_rpmb = false; - u32 status = 0; if (!card || !md || !idata) return -EINVAL; - if (md->area_type & MMC_BLK_DATA_AREA_RPMB) - is_rpmb = true; - cmd.opcode = idata->ic.opcode; cmd.arg = idata->ic.arg; cmd.flags = idata->ic.flags; + if (idata->ic.postsleep_max_us < idata->ic.postsleep_min_us) { + pr_err("%s: min value: %u must not be greater than max value: %u\n", + __func__, idata->ic.postsleep_min_us, + idata->ic.postsleep_max_us); + WARN_ON(1); + return -EPERM; + } + if (idata->buf_bytes) { data.sg = &sg; data.sg_len = 1; @@ -764,6 +921,15 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, mrq.cmd = &cmd; + if (mmc_card_doing_bkops(card)) { + err = mmc_stop_bkops(card); + if (err) { + dev_err(mmc_dev(card->host), + "%s: stop_bkops failed %d\n", __func__, err); + return err; + } + } + err = mmc_blk_part_switch(card, md); if (err) return err; @@ -774,13 +940,6 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return err; } - if (is_rpmb) { - err = mmc_set_blockcount(card, data.blocks, - idata->ic.write_flag & (1 << 31)); - if (err) - return err; - } - if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && (cmd.opcode == MMC_SWITCH)) { err = ioctl_do_sanitize(card); @@ -814,7 +973,189 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (is_rpmb) { + return err; +} + +struct mmc_blk_ioc_rpmb_data { + struct mmc_blk_ioc_data *data[MMC_IOC_MAX_RPMB_CMD]; +}; + +static struct 
mmc_blk_ioc_rpmb_data *mmc_blk_ioctl_rpmb_copy_from_user( + struct mmc_ioc_rpmb __user *user) +{ + struct mmc_blk_ioc_rpmb_data *idata; + int err, i; + + idata = kzalloc(sizeof(*idata), GFP_KERNEL); + if (!idata) { + err = -ENOMEM; + goto out; + } + + for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) { + idata->data[i] = mmc_blk_ioctl_copy_from_user(&(user->cmds[i])); + if (IS_ERR(idata->data[i])) { + err = PTR_ERR(idata->data[i]); + goto copy_err; + } + } + + return idata; + +copy_err: + while (--i >= 0) { + kfree(idata->data[i]->buf); + kfree(idata->data[i]); + } + kfree(idata); +out: + return ERR_PTR(err); +} + +static int mmc_blk_ioctl_rpmb_cmd(struct block_device *bdev, + struct mmc_ioc_rpmb __user *ic_ptr) +{ + struct mmc_blk_ioc_rpmb_data *idata; + struct mmc_blk_data *md; + struct mmc_card *card = NULL; + struct mmc_command cmd = {0}; + struct mmc_data data = {0}; + struct mmc_request mrq = {NULL}; + struct scatterlist sg; + int err = 0, i = 0; + u32 status = 0; + + /* The caller must have CAP_SYS_RAWIO */ + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + + md = mmc_blk_get(bdev->bd_disk); + /* make sure this is a rpmb partition */ + if ((!md) || (!(md->area_type & MMC_BLK_DATA_AREA_RPMB))) { + err = -EINVAL; + return err; + } + + idata = mmc_blk_ioctl_rpmb_copy_from_user(ic_ptr); + if (IS_ERR(idata)) { + err = PTR_ERR(idata); + goto cmd_done; + } + + card = md->queue.card; + if (IS_ERR(card)) { + err = PTR_ERR(card); + goto idata_free; + } + + /* + * Ensure rpmb_req_pending flag is synchronized between multiple + * entities which may use rpmb ioclts with a lock. + */ + mutex_lock(&card->host->rpmb_req_mutex); + atomic_set(&card->host->rpmb_req_pending, 1); + mmc_get_card(card); + + if (mmc_card_doing_bkops(card)) { + if (mmc_card_cmdq(card)) { + err = mmc_cmdq_halt(card->host, true); + if (err) + goto cmd_rel_host; + } + err = mmc_stop_bkops(card); + if (err) { + dev_err(mmc_dev(card->host), + "%s: stop_bkops failed %d\n", __func__, err); + goto cmd_rel_host; + } + if (mmc_card_cmdq(card)) { + err = mmc_cmdq_halt(card->host, false); + if (err) + goto cmd_rel_host; + } + } + + err = mmc_blk_part_switch(card, md); + if (err) + goto cmd_rel_host; + + for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) { + struct mmc_blk_ioc_data *curr_data; + struct mmc_ioc_cmd *curr_cmd; + + curr_data = idata->data[i]; + curr_cmd = &curr_data->ic; + if (!curr_cmd->opcode) + break; + + cmd.opcode = curr_cmd->opcode; + cmd.arg = curr_cmd->arg; + cmd.flags = curr_cmd->flags; + + if (curr_data->buf_bytes) { + data.sg = &sg; + data.sg_len = 1; + data.blksz = curr_cmd->blksz; + data.blocks = curr_cmd->blocks; + + sg_init_one(data.sg, curr_data->buf, + curr_data->buf_bytes); + + if (curr_cmd->write_flag) + data.flags = MMC_DATA_WRITE; + else + data.flags = MMC_DATA_READ; + + /* data.flags must already be set before doing this. */ + mmc_set_data_timeout(&data, card); + + /* + * Allow overriding the timeout_ns for empirical tuning. 
+ */ + if (curr_cmd->data_timeout_ns) + data.timeout_ns = curr_cmd->data_timeout_ns; + + mrq.data = &data; + } + + mrq.cmd = &cmd; + + err = mmc_set_blockcount(card, data.blocks, + curr_cmd->write_flag & (1 << 31)); + if (err) + goto cmd_rel_host; + + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error) { + dev_err(mmc_dev(card->host), "%s: cmd error %d\n", + __func__, cmd.error); + err = cmd.error; + goto cmd_rel_host; + } + if (data.error) { + dev_err(mmc_dev(card->host), "%s: data error %d\n", + __func__, data.error); + err = data.error; + goto cmd_rel_host; + } + + if (copy_to_user(&(ic_ptr->cmds[i].response), cmd.resp, + sizeof(cmd.resp))) { + err = -EFAULT; + goto cmd_rel_host; + } + + if (!curr_cmd->write_flag) { + if (copy_to_user((void __user *)(unsigned long) + curr_cmd->data_ptr, + curr_data->buf, + curr_data->buf_bytes)) { + err = -EFAULT; + goto cmd_rel_host; + } + } + /* * Ensure RPMB command has completed by polling CMD13 * "Send Status". @@ -826,6 +1167,23 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, __func__, status, err); } +cmd_rel_host: + mmc_put_card(card); + atomic_set(&card->host->rpmb_req_pending, 0); + mutex_unlock(&card->host->rpmb_req_mutex); + + +idata_free: + for (i = 0; i < MMC_IOC_MAX_RPMB_CMD; i++) { + kfree(idata->data[i]->buf); + kfree(idata->data[i]); + } + kfree(idata); + +cmd_done: + mmc_blk_put(md); + if (card && card->cmdq_init) + wake_up(&card->host->cmdq_ctx.wait); return err; } @@ -846,9 +1204,8 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, return -EPERM; idata = mmc_blk_ioctl_copy_from_user(ic_ptr); - if (IS_ERR(idata)) + if (IS_ERR_OR_NULL(idata)) return PTR_ERR(idata); - md = mmc_blk_get(bdev->bd_disk); if (!md) { err = -EINVAL; @@ -856,15 +1213,32 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, } card = md->queue.card; - if (IS_ERR(card)) { + if (IS_ERR_OR_NULL(card)) { err = PTR_ERR(card); goto cmd_done; } mmc_get_card(card); + if (mmc_card_cmdq(card)) { + err = mmc_cmdq_halt_on_empty_queue(card->host); + if (err) { + pr_err("%s: halt failed while doing %s err (%d)\n", + mmc_hostname(card->host), + __func__, err); + mmc_put_card(card); + goto cmd_done; + } + } + ioc_err = __mmc_blk_ioctl_cmd(card, md, idata); + if (mmc_card_cmdq(card)) { + if (mmc_cmdq_halt(card->host, false)) + pr_err("%s: %s: cmdq unhalt failed\n", + mmc_hostname(card->host), __func__); + } + mmc_put_card(card); err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata); @@ -929,9 +1303,26 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, mmc_get_card(card); + if (mmc_card_cmdq(card)) { + err = mmc_cmdq_halt(card->host, true); + if (err) { + pr_err("%s: halt failed while doing %s err (%d)\n", + mmc_hostname(card->host), + __func__, err); + mmc_put_card(card); + goto cmd_done; + } + } + for (i = 0; i < num_of_cmds && !ioc_err; i++) ioc_err = __mmc_blk_ioctl_cmd(card, md, idata[i]); + if (mmc_card_cmdq(card)) { + if (mmc_cmdq_halt(card->host, false)) + pr_err("%s: %s: cmdq unhalt failed\n", + mmc_hostname(card->host), __func__); + } + mmc_put_card(card); /* copy to user if data and response */ @@ -956,6 +1347,9 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, case MMC_IOC_CMD: return mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); + case MMC_IOC_RPMB_CMD: + return mmc_blk_ioctl_rpmb_cmd(bdev, + (struct mmc_ioc_rpmb __user *)arg); case MMC_IOC_MULTI_CMD: return mmc_blk_ioctl_multi_cmd(bdev, (struct mmc_ioc_multi_cmd __user *)arg); @@ -983,28 +1377,92 @@ static const struct 
block_device_operations mmc_bdops = { #endif }; +static int mmc_blk_cmdq_switch(struct mmc_card *card, + struct mmc_blk_data *md, bool enable) +{ + int ret = 0; + bool cmdq_mode = !!mmc_card_cmdq(card); + struct mmc_host *host = card->host; + struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx; + + if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE) || + !card->ext_csd.cmdq_support || + (enable && !(md->flags & MMC_BLK_CMD_QUEUE)) || + (cmdq_mode == enable)) + return 0; + + if (enable) { + ret = mmc_set_blocklen(card, MMC_CARD_CMDQ_BLK_SIZE); + if (ret) { + pr_err("%s: failed (%d) to set block-size to %d\n", + __func__, ret, MMC_CARD_CMDQ_BLK_SIZE); + goto out; + } + + } else { + if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) { + ret = mmc_cmdq_halt(host, true); + if (ret) { + pr_err("%s: halt: failed: %d\n", + mmc_hostname(host), ret); + goto out; + } + } + } + + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_CMDQ, enable, + card->ext_csd.generic_cmd6_time); + if (ret) { + pr_err("%s: cmdq mode %sable failed %d\n", + md->disk->disk_name, enable ? "en" : "dis", ret); + goto out; + } + + if (enable) + mmc_card_set_cmdq(card); + else + mmc_card_clr_cmdq(card); +out: + return ret; +} + static inline int mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md) { int ret; struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); - if (main_md->part_curr == md->part_type) + if ((main_md->part_curr == md->part_type) && + (card->part_curr == md->part_type)) return 0; if (mmc_card_mmc(card)) { u8 part_config = card->ext_csd.part_config; + if (md->part_type) { + /* disable CQ mode for non-user data partitions */ + ret = mmc_blk_cmdq_switch(card, md, false); + if (ret) + return ret; + } + part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; part_config |= md->part_type; ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, part_config, card->ext_csd.part_time); - if (ret) + + if (ret) { + pr_err("%s: mmc_blk_part_switch failure, %d -> %d\n", + mmc_hostname(card->host), main_md->part_curr, + md->part_type); return ret; + } card->ext_csd.part_config = part_config; + card->part_curr = md->part_type; } main_md->part_curr = md->part_type; @@ -1185,18 +1643,21 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error, switch (error) { case -EILSEQ: /* response crc error, retry the r/w cmd */ - pr_err("%s: %s sending %s command, card status %#x\n", - req->rq_disk->disk_name, "response CRC error", + pr_err_ratelimited( + "%s: response CRC error sending %s command, card status %#x\n", + req->rq_disk->disk_name, name, status); return ERR_RETRY; case -ETIMEDOUT: - pr_err("%s: %s sending %s command, card status %#x\n", - req->rq_disk->disk_name, "timed out", name, status); + pr_err_ratelimited( + "%s: timed out sending %s command, card status %#x\n", + req->rq_disk->disk_name, name, status); /* If the status cmd initially failed, retry the r/w cmd */ if (!status_valid) { - pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name); + pr_err_ratelimited("%s: status not valid, retrying timeout\n", + req->rq_disk->disk_name); return ERR_RETRY; } /* @@ -1205,17 +1666,22 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error, * have corrected the state problem above. 
*/ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) { - pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name); + pr_err_ratelimited( + "%s: command error, retrying timeout\n", + req->rq_disk->disk_name); return ERR_RETRY; } /* Otherwise abort the command */ - pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name); + pr_err_ratelimited( + "%s: not retrying timeout\n", + req->rq_disk->disk_name); return ERR_ABORT; default: /* We don't understand the error code the driver gave us */ - pr_err("%s: unknown error %d sending read/write command, card status %#x\n", + pr_err_ratelimited( + "%s: unknown error %d sending read/write command, card status %#x\n", req->rq_disk->disk_name, error, status); return ERR_ABORT; } @@ -1263,12 +1729,14 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, mmc_retune_recheck(card->host); prev_cmd_status_valid = false; - pr_err("%s: error %d sending status command, %sing\n", + pr_err_ratelimited("%s: error %d sending status command, %sing\n", req->rq_disk->disk_name, err, retry ? "retry" : "abort"); } /* We couldn't get a response from the card. Give up. */ if (err) { + if (card->err_in_sdr104) + return ERR_RETRY; /* Check if the card is removed */ if (mmc_detect_card_removed(card->host)) return ERR_NOMEDIUM; @@ -1354,8 +1822,15 @@ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, md->reset_done |= type; err = mmc_hw_reset(host); + if (err && err != -EOPNOTSUPP) { + /* We failed to reset so we need to abort the request */ + pr_err("%s: %s: failed to reset %d\n", mmc_hostname(host), + __func__, err); + return -ENODEV; + } + /* Ensure we switch back to the correct partition */ - if (err != -EOPNOTSUPP) { + if (host->card) { struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev); int part_err; @@ -1390,6 +1865,77 @@ int mmc_access_rpmb(struct mmc_queue *mq) return false; } +static struct mmc_cmdq_req *mmc_blk_cmdq_prep_discard_req(struct mmc_queue *mq, + struct request *req) +{ + struct mmc_blk_data *md = mq->data; + struct mmc_card *card = md->queue.card; + struct mmc_host *host = card->host; + struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx; + struct mmc_cmdq_req *cmdq_req; + struct mmc_queue_req *active_mqrq; + + BUG_ON(req->tag > card->ext_csd.cmdq_depth); + BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs)); + + set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state); + + active_mqrq = &mq->mqrq_cmdq[req->tag]; + active_mqrq->req = req; + + cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq); + cmdq_req->cmdq_req_flags |= QBR; + cmdq_req->mrq.cmd = &cmdq_req->cmd; + cmdq_req->tag = req->tag; + return cmdq_req; +} + +static int mmc_blk_cmdq_issue_discard_rq(struct mmc_queue *mq, + struct request *req) +{ + struct mmc_blk_data *md = mq->data; + struct mmc_card *card = md->queue.card; + struct mmc_cmdq_req *cmdq_req = NULL; + unsigned int from, nr, arg; + int err = 0; + + if (!mmc_can_erase(card)) { + err = -EOPNOTSUPP; + blk_end_request(req, err, blk_rq_bytes(req)); + goto out; + } + + from = blk_rq_pos(req); + nr = blk_rq_sectors(req); + + if (mmc_can_discard(card)) + arg = MMC_DISCARD_ARG; + else if (mmc_can_trim(card)) + arg = MMC_TRIM_ARG; + else + arg = MMC_ERASE_ARG; + + cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req); + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd, + EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + arg == MMC_TRIM_ARG ? 
+ INAND_CMD38_ARG_TRIM : + INAND_CMD38_ARG_ERASE, + 0, true, false); + err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req); + if (err) + goto clear_dcmd; + } + err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg); +clear_dcmd: + mmc_host_clk_hold(card->host); + blk_complete_request(req); +out: + return err ? 1 : 0; +} + static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; @@ -1433,6 +1979,69 @@ out: return err ? 0 : 1; } +static int mmc_blk_cmdq_issue_secdiscard_rq(struct mmc_queue *mq, + struct request *req) +{ + struct mmc_blk_data *md = mq->data; + struct mmc_card *card = md->queue.card; + struct mmc_cmdq_req *cmdq_req = NULL; + unsigned int from, nr, arg; + int err = 0; + + if (!(mmc_can_secure_erase_trim(card))) { + err = -EOPNOTSUPP; + blk_end_request(req, err, blk_rq_bytes(req)); + goto out; + } + + from = blk_rq_pos(req); + nr = blk_rq_sectors(req); + + if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) + arg = MMC_SECURE_TRIM1_ARG; + else + arg = MMC_SECURE_ERASE_ARG; + + cmdq_req = mmc_blk_cmdq_prep_discard_req(mq, req); + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd, + EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + arg == MMC_SECURE_TRIM1_ARG ? + INAND_CMD38_ARG_SECTRIM1 : + INAND_CMD38_ARG_SECERASE, + 0, true, false); + err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req); + if (err) + goto clear_dcmd; + } + + err = mmc_cmdq_erase(cmdq_req, card, from, nr, arg); + if (err) + goto clear_dcmd; + + if (arg == MMC_SECURE_TRIM1_ARG) { + if (card->quirks & MMC_QUIRK_INAND_CMD38) { + __mmc_switch_cmdq_mode(cmdq_req->mrq.cmd, + EXT_CSD_CMD_SET_NORMAL, + INAND_CMD38_ARG_EXT_CSD, + INAND_CMD38_ARG_SECTRIM2, + 0, true, false); + err = mmc_cmdq_wait_for_dcmd(card->host, cmdq_req); + if (err) + goto clear_dcmd; + } + + err = mmc_cmdq_erase(cmdq_req, card, from, nr, + MMC_SECURE_TRIM2_ARG); + } +clear_dcmd: + mmc_host_clk_hold(card->host); + blk_complete_request(req); +out: + return err ? 1 : 0; +} + static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, struct request *req) { @@ -1506,10 +2115,47 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) struct mmc_card *card = md->queue.card; int ret = 0; - ret = mmc_flush_cache(card); - if (ret) + if (!req) + return 0; + + if (req->cmd_flags & REQ_BARRIER) { + /* + * If eMMC cache flush policy is set to 1, then the device + * shall flush the requests in First-In-First-Out (FIFO) order. + * In this case, as per spec, the host must not send any cache + * barrier requests as they are redundant and add unnecessary + * overhead to both device and host. + */ + if (card->ext_csd.cache_flush_policy & 1) + goto end_req; + + /* + * In case barrier is not supported or enabled in the device, + * use flush as a fallback option. 
+ */ + ret = mmc_cache_barrier(card); + if (ret) + ret = mmc_flush_cache(card); + } else if (req->cmd_flags & REQ_FLUSH) { + ret = mmc_flush_cache(card); + } + if (ret == -ENODEV) { + pr_err("%s: %s: restart mmc card", + req->rq_disk->disk_name, __func__); + if (mmc_blk_reset(md, card->host, MMC_BLK_FLUSH)) + pr_err("%s: %s: fail to restart mmc", + req->rq_disk->disk_name, __func__); + else + mmc_blk_reset_success(md, MMC_BLK_FLUSH); + } + + if (ret) { + pr_err("%s: %s: notify flush error to upper layers", + req->rq_disk->disk_name, __func__); ret = -EIO; + } +end_req: #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED else if (atomic_read(&mq->cache_size)) { long used = mmc_blk_cache_used(mq, jiffies); @@ -1573,6 +2219,18 @@ static int mmc_blk_err_check(struct mmc_card *card, int need_retune = card->host->need_retune; int ecc_err = 0, gen_err = 0; + if (card->host->sdr104_wa && mmc_card_sd(card) && + (card->host->ios.timing == MMC_TIMING_UHS_SDR104) && + !card->sdr104_blocked && + (brq->data.error == -EILSEQ || + brq->data.error == -EIO || + brq->data.error == -ETIMEDOUT || + brq->cmd.error == -EILSEQ || + brq->cmd.error == -EIO || + brq->cmd.error == -ETIMEDOUT || + brq->sbc.error)) + card->err_in_sdr104 = true; + /* * sbc.error indicates a problem with the set block count * command. No data will have been transferred. @@ -1757,6 +2415,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, brq->stop.arg = 0; brq->data.blocks = blk_rq_sectors(req); + brq->data.fault_injected = false; /* * The block layer doesn't support all sector count * restrictions, so we need to be prepared for too big @@ -1880,6 +2539,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, } mqrq->mmc_active.mrq = &brq->mrq; + mqrq->mmc_active.mrq->req = mqrq->req; mqrq->mmc_active.err_check = mmc_blk_err_check; mmc_queue_bounce_pre(mqrq); @@ -1901,6 +2561,178 @@ static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q, return nr_segs; } +/** + * mmc_blk_disable_wr_packing() - disables packing mode + * @mq: MMC queue. + * + */ +void mmc_blk_disable_wr_packing(struct mmc_queue *mq) +{ + if (mq) { + mq->wr_packing_enabled = false; + mq->num_of_potential_packed_wr_reqs = 0; + } +} +EXPORT_SYMBOL(mmc_blk_disable_wr_packing); + +static int get_packed_trigger(int potential, struct mmc_card *card, + struct request *req, int curr_trigger) +{ + static int num_mean_elements = 1; + static unsigned long mean_potential = PCKD_TRGR_INIT_MEAN_POTEN; + unsigned int trigger = curr_trigger; + unsigned int pckd_trgr_upper_bound = card->ext_csd.max_packed_writes; + + /* scale down the upper bound to 75% */ + pckd_trgr_upper_bound = (pckd_trgr_upper_bound * 3) / 4; + + /* + * since the most common calls for this function are with small + * potential write values and since we don't want these calls to affect + * the packed trigger, set a lower bound and ignore calls with + * potential lower than that bound + */ + if (potential <= PCKD_TRGR_POTEN_LOWER_BOUND) + return trigger; + + /* + * this is to prevent integer overflow in the following calculation: + * once every PACKED_TRIGGER_MAX_ELEMENTS reset the algorithm + */ + if (num_mean_elements > PACKED_TRIGGER_MAX_ELEMENTS) { + num_mean_elements = 1; + mean_potential = PCKD_TRGR_INIT_MEAN_POTEN; + } + + /* + * get next mean value based on previous mean value and current + * potential packed writes. 
Calculation is as follows: + * mean_pot[i+1] = + * ((mean_pot[i] * num_mean_elem) + potential)/(num_mean_elem + 1) + */ + mean_potential *= num_mean_elements; + /* + * add num_mean_elements so that the division of two integers doesn't + * lower mean_potential too much + */ + if (potential > mean_potential) + mean_potential += num_mean_elements; + mean_potential += potential; + /* this is for gaining more precision when dividing two integers */ + mean_potential *= PCKD_TRGR_PRECISION_MULTIPLIER; + /* this completes the mean calculation */ + mean_potential /= ++num_mean_elements; + mean_potential /= PCKD_TRGR_PRECISION_MULTIPLIER; + + /* + * if current potential packed writes is greater than the mean potential + * then the heuristic is that the following workload will contain many + * write requests, therefore we lower the packed trigger. In the + * opposite case we want to increase the trigger in order to get less + * packing events. + */ + if (potential >= mean_potential) + trigger = (trigger <= PCKD_TRGR_LOWER_BOUND) ? + PCKD_TRGR_LOWER_BOUND : trigger - 1; + else + trigger = (trigger >= pckd_trgr_upper_bound) ? + pckd_trgr_upper_bound : trigger + 1; + + /* + * an urgent read request indicates a packed list being interrupted + * by this read, therefore we aim for less packing, hence the trigger + * gets increased + */ + if (req && (req->cmd_flags & REQ_URGENT) && (rq_data_dir(req) == READ)) + trigger += PCKD_TRGR_URGENT_PENALTY; + + return trigger; +} + +static void mmc_blk_write_packing_control(struct mmc_queue *mq, + struct request *req) +{ + struct mmc_host *host = mq->card->host; + int data_dir; + + if (!(host->caps2 & MMC_CAP2_PACKED_WR)) + return; + + /* Support for the write packing on eMMC 4.5 or later */ + if (mq->card->ext_csd.rev <= 5) + return; + + /* + * In case the packing control is not supported by the host, it should + * not have an effect on the write packing. 
Therefore we have to enable + * the write packing + */ + if (!(host->caps2 & MMC_CAP2_PACKED_WR_CONTROL)) { + mq->wr_packing_enabled = true; + return; + } + + if (!req || (req && (req->cmd_flags & REQ_FLUSH))) { + if (mq->num_of_potential_packed_wr_reqs > + mq->num_wr_reqs_to_start_packing) + mq->wr_packing_enabled = true; + mq->num_wr_reqs_to_start_packing = + get_packed_trigger(mq->num_of_potential_packed_wr_reqs, + mq->card, req, + mq->num_wr_reqs_to_start_packing); + mq->num_of_potential_packed_wr_reqs = 0; + return; + } + + data_dir = rq_data_dir(req); + + if (data_dir == READ) { + mmc_blk_disable_wr_packing(mq); + mq->num_wr_reqs_to_start_packing = + get_packed_trigger(mq->num_of_potential_packed_wr_reqs, + mq->card, req, + mq->num_wr_reqs_to_start_packing); + mq->num_of_potential_packed_wr_reqs = 0; + mq->wr_packing_enabled = false; + return; + } else if (data_dir == WRITE) { + mq->num_of_potential_packed_wr_reqs++; + } + + if (mq->num_of_potential_packed_wr_reqs > + mq->num_wr_reqs_to_start_packing) + mq->wr_packing_enabled = true; +} + +struct mmc_wr_pack_stats *mmc_blk_get_packed_statistics(struct mmc_card *card) +{ + if (!card) + return NULL; + + return &card->wr_pack_stats; +} +EXPORT_SYMBOL(mmc_blk_get_packed_statistics); + +void mmc_blk_init_packed_statistics(struct mmc_card *card) +{ + int max_num_of_packed_reqs = 0; + + if (!card || !card->wr_pack_stats.packing_events) + return; + + max_num_of_packed_reqs = card->ext_csd.max_packed_writes; + + spin_lock(&card->wr_pack_stats.lock); + memset(card->wr_pack_stats.packing_events, 0, + (max_num_of_packed_reqs + 1) * + sizeof(*card->wr_pack_stats.packing_events)); + memset(&card->wr_pack_stats.pack_stop_reason, 0, + sizeof(card->wr_pack_stats.pack_stop_reason)); + card->wr_pack_stats.enabled = true; + spin_unlock(&card->wr_pack_stats.lock); +} +EXPORT_SYMBOL(mmc_blk_init_packed_statistics); + static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) { struct request_queue *q = mq->queue; @@ -1914,10 +2746,14 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) bool put_back = true; u8 max_packed_rw = 0; u8 reqs = 0; + struct mmc_wr_pack_stats *stats = &card->wr_pack_stats; if (!(md->flags & MMC_BLK_PACKED_CMD)) goto no_packed; + if (!mq->wr_packing_enabled) + goto no_packed; + if ((rq_data_dir(cur) == WRITE) && mmc_host_packed_wr(card->host)) max_packed_rw = card->ext_csd.max_packed_writes; @@ -1933,6 +2769,9 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) !IS_ALIGNED(blk_rq_sectors(cur), 8)) goto no_packed; + if (cur->cmd_flags & REQ_FUA) + goto no_packed; + mmc_blk_clear_packed(mqrq); max_blk_count = min(card->host->max_blk_count, @@ -1949,6 +2788,7 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) phys_segments += mmc_calc_packed_hdr_segs(q, card); } + spin_lock(&stats->lock); do { if (reqs >= max_packed_rw - 1) { put_back = false; @@ -1959,33 +2799,63 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) next = blk_fetch_request(q); spin_unlock_irq(q->queue_lock); if (!next) { + MMC_BLK_UPDATE_STOP_REASON(stats, EMPTY_QUEUE); put_back = false; break; } if (mmc_large_sector(card) && - !IS_ALIGNED(blk_rq_sectors(next), 8)) + !IS_ALIGNED(blk_rq_sectors(next), 8)) { + MMC_BLK_UPDATE_STOP_REASON(stats, LARGE_SEC_ALIGN); break; + } if (next->cmd_flags & REQ_DISCARD || - next->cmd_flags & REQ_FLUSH) + next->cmd_flags & REQ_FLUSH) { + MMC_BLK_UPDATE_STOP_REASON(stats, FLUSH_OR_DISCARD); break; + } 
- if (rq_data_dir(cur) != rq_data_dir(next)) + if (next->cmd_flags & REQ_FUA) { + MMC_BLK_UPDATE_STOP_REASON(stats, FUA); break; + } + + if (rq_data_dir(cur) != rq_data_dir(next)) { + MMC_BLK_UPDATE_STOP_REASON(stats, WRONG_DATA_DIR); + break; + } if (mmc_req_rel_wr(next) && - (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) + (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) { + MMC_BLK_UPDATE_STOP_REASON(stats, REL_WRITE); break; + } req_sectors += blk_rq_sectors(next); - if (req_sectors > max_blk_count) + if (req_sectors > max_blk_count) { + if (stats->enabled) + stats->pack_stop_reason[EXCEEDS_SECTORS]++; break; + } phys_segments += next->nr_phys_segments; - if (phys_segments > max_phys_segs) + if (phys_segments > max_phys_segs) { + MMC_BLK_UPDATE_STOP_REASON(stats, EXCEEDS_SEGMENTS); break; + } + if (mq->no_pack_for_random) { + if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) != + blk_rq_pos(next)) { + MMC_BLK_UPDATE_STOP_REASON(stats, RANDOM); + put_back = 1; + break; + } + } + + if (rq_data_dir(next) == WRITE) + mq->num_of_potential_packed_wr_reqs++; list_add_tail(&next->queuelist, &mqrq->packed->list); cur = next; reqs++; @@ -1997,6 +2867,15 @@ static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) spin_unlock_irq(q->queue_lock); } + if (stats->enabled) { + if (reqs + 1 <= card->ext_csd.max_packed_writes) + stats->packing_events[reqs + 1]++; + if (reqs + 1 == max_packed_rw) + MMC_BLK_UPDATE_STOP_REASON(stats, THRESHOLD); + } + + spin_unlock(&stats->lock); + if (reqs > 0) { list_add(&req->queuelist, &mqrq->packed->list); mqrq->packed->nr_entries = ++reqs; @@ -2076,6 +2955,7 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, brq->data.blksz = 512; brq->data.blocks = packed->blocks + hdr_blocks; brq->data.flags |= MMC_DATA_WRITE; + brq->data.fault_injected = false; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; @@ -2087,7 +2967,18 @@ static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); mqrq->mmc_active.mrq = &brq->mrq; - mqrq->mmc_active.err_check = mmc_blk_packed_err_check; + + /* + * This is intended for packed commands tests usage - in case these + * functions are not in use the respective pointers are NULL + */ + if (mq->err_check_fn) + mqrq->mmc_active.err_check = mq->err_check_fn; + else + mqrq->mmc_active.err_check = mmc_blk_packed_err_check; + + if (mq->packed_test_fn) + mq->packed_test_fn(mq->queue, mqrq); mmc_queue_bounce_pre(mqrq); } @@ -2109,11 +3000,12 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, */ if (mmc_card_sd(card)) { u32 blocks; - - blocks = mmc_sd_num_wr_blocks(card); - if (blocks != (u32)-1) { - ret = blk_end_request(req, 0, blocks << 9); - } + if (!brq->data.fault_injected) { + blocks = mmc_sd_num_wr_blocks(card); + if (blocks != (u32)-1) + ret = blk_end_request(req, 0, blocks << 9); + } else + ret = blk_end_request(req, 0, brq->data.bytes_xfered); } else { if (!mmc_packed_cmd(mq_rq->cmd_type)) ret = blk_end_request(req, 0, brq->data.bytes_xfered); @@ -2193,6 +3085,614 @@ static void mmc_blk_revert_packed_req(struct mmc_queue *mq, mmc_blk_clear_packed(mq_rq); } +static int mmc_blk_cmdq_start_req(struct mmc_host *host, + struct mmc_cmdq_req *cmdq_req) +{ + struct mmc_request *mrq = &cmdq_req->mrq; + + mrq->done = mmc_blk_cmdq_req_done; + return mmc_cmdq_start_req(host, cmdq_req); +} + +/* prepare for non-data commands */ +static struct mmc_cmdq_req *mmc_cmdq_prep_dcmd( + struct mmc_queue_req *mqrq, struct mmc_queue *mq) +{ + 
struct request *req = mqrq->req; + struct mmc_cmdq_req *cmdq_req = &mqrq->cmdq_req; + + memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req)); + + cmdq_req->mrq.data = NULL; + cmdq_req->cmd_flags = req->cmd_flags; + cmdq_req->mrq.req = mqrq->req; + req->special = mqrq; + cmdq_req->cmdq_req_flags |= DCMD; + cmdq_req->mrq.cmdq_req = cmdq_req; + + return &mqrq->cmdq_req; +} + + +#define IS_RT_CLASS_REQ(x) \ + (IOPRIO_PRIO_CLASS(req_get_ioprio(x)) == IOPRIO_CLASS_RT) + +static struct mmc_cmdq_req *mmc_blk_cmdq_rw_prep( + struct mmc_queue_req *mqrq, struct mmc_queue *mq) +{ + struct mmc_card *card = mq->card; + struct request *req = mqrq->req; + struct mmc_blk_data *md = mq->data; + bool do_rel_wr = mmc_req_rel_wr(req) && (md->flags & MMC_BLK_REL_WR); + bool do_data_tag; + bool read_dir = (rq_data_dir(req) == READ); + bool prio = IS_RT_CLASS_REQ(req); + struct mmc_cmdq_req *cmdq_rq = &mqrq->cmdq_req; + + memset(&mqrq->cmdq_req, 0, sizeof(struct mmc_cmdq_req)); + + cmdq_rq->tag = req->tag; + if (read_dir) { + cmdq_rq->cmdq_req_flags |= DIR; + cmdq_rq->data.flags = MMC_DATA_READ; + } else { + cmdq_rq->data.flags = MMC_DATA_WRITE; + } + if (prio) + cmdq_rq->cmdq_req_flags |= PRIO; + + if (do_rel_wr) + cmdq_rq->cmdq_req_flags |= REL_WR; + + cmdq_rq->data.blocks = blk_rq_sectors(req); + cmdq_rq->blk_addr = blk_rq_pos(req); + cmdq_rq->data.blksz = MMC_CARD_CMDQ_BLK_SIZE; + + mmc_set_data_timeout(&cmdq_rq->data, card); + + do_data_tag = (card->ext_csd.data_tag_unit_size) && + (req->cmd_flags & REQ_META) && + (rq_data_dir(req) == WRITE) && + ((cmdq_rq->data.blocks * cmdq_rq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + if (do_data_tag) + cmdq_rq->cmdq_req_flags |= DAT_TAG; + cmdq_rq->data.sg = mqrq->sg; + cmdq_rq->data.sg_len = mmc_queue_map_sg(mq, mqrq); + + /* + * Adjust the sg list so it is the same size as the + * request. + */ + if (cmdq_rq->data.blocks > card->host->max_blk_count) + cmdq_rq->data.blocks = card->host->max_blk_count; + + if (cmdq_rq->data.blocks != blk_rq_sectors(req)) { + int i, data_size = cmdq_rq->data.blocks << 9; + struct scatterlist *sg; + + for_each_sg(cmdq_rq->data.sg, sg, cmdq_rq->data.sg_len, i) { + data_size -= sg->length; + if (data_size <= 0) { + sg->length += data_size; + i++; + break; + } + } + cmdq_rq->data.sg_len = i; + } + + mqrq->cmdq_req.cmd_flags = req->cmd_flags; + mqrq->cmdq_req.mrq.req = mqrq->req; + mqrq->cmdq_req.mrq.cmdq_req = &mqrq->cmdq_req; + mqrq->cmdq_req.mrq.data = &mqrq->cmdq_req.data; + mqrq->req->special = mqrq; + + pr_debug("%s: %s: mrq: 0x%p req: 0x%p mqrq: 0x%p bytes to xf: %d mmc_cmdq_req: 0x%p card-addr: 0x%08x dir(r-1/w-0): %d\n", + mmc_hostname(card->host), __func__, &mqrq->cmdq_req.mrq, + mqrq->req, mqrq, (cmdq_rq->data.blocks * cmdq_rq->data.blksz), + cmdq_rq, cmdq_rq->blk_addr, + (cmdq_rq->cmdq_req_flags & DIR) ? 
1 : 0); + + return &mqrq->cmdq_req; +} + +static void mmc_blk_cmdq_requeue_rw_rq(struct mmc_queue *mq, + struct request *req) +{ + struct request_queue *q = req->q; + + spin_lock_irq(q->queue_lock); + blk_requeue_request(q, req); + spin_unlock_irq(q->queue_lock); +} + +static int mmc_blk_cmdq_issue_rw_rq(struct mmc_queue *mq, struct request *req) +{ + struct mmc_queue_req *active_mqrq; + struct mmc_card *card = mq->card; + struct mmc_host *host = card->host; + struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx; + struct mmc_cmdq_req *mc_rq; + u8 active_small_sector_read = 0; + int ret = 0; + + mmc_deferred_scaling(host); + mmc_cmdq_clk_scaling_start_busy(host, true); + + BUG_ON((req->tag < 0) || (req->tag > card->ext_csd.cmdq_depth)); + BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.data_active_reqs)); + BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs)); + + active_mqrq = &mq->mqrq_cmdq[req->tag]; + active_mqrq->req = req; + + mc_rq = mmc_blk_cmdq_rw_prep(active_mqrq, mq); + + if (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) { + unsigned int sectors = blk_rq_sectors(req); + + if (((sectors > 0) && (sectors < 8)) + && (rq_data_dir(req) == READ)) + active_small_sector_read = 1; + } + ret = mmc_blk_cmdq_start_req(card->host, mc_rq); + if (!ret && active_small_sector_read) + host->cmdq_ctx.active_small_sector_read_reqs++; + /* + * When in SVS2 on low load scenario and there are lots of requests + * queued for CMDQ we need to wait till the queue is empty to scale + * back up to Nominal even if there is a sudden increase in load. + * This impacts performance where lots of IO get executed in SVS2 + * frequency since the queue is full. As SVS2 is a low load use case + * we can serialize the requests and not queue them in parallel + * without impacting other use cases. This makes sure the queue gets + * empty faster and we will be able to scale up to Nominal frequency + * when needed. 
+ */ + if (!ret && (host->clk_scaling.state == MMC_LOAD_LOW)) + wait_event_interruptible(ctx->queue_empty_wq, + (!ctx->active_reqs)); + + if (ret) { + /* clear pending request */ + WARN_ON(!test_and_clear_bit(req->tag, + &host->cmdq_ctx.data_active_reqs)); + WARN_ON(!test_and_clear_bit(req->tag, + &host->cmdq_ctx.active_reqs)); + mmc_cmdq_clk_scaling_stop_busy(host, true, false); + } + + return ret; +} + +/* + * Issues a flush (dcmd) request + */ +int mmc_blk_cmdq_issue_flush_rq(struct mmc_queue *mq, struct request *req) +{ + int err; + struct mmc_queue_req *active_mqrq; + struct mmc_card *card = mq->card; + struct mmc_host *host; + struct mmc_cmdq_req *cmdq_req; + struct mmc_cmdq_context_info *ctx_info; + + BUG_ON(!card); + host = card->host; + BUG_ON(!host); + BUG_ON(req->tag > card->ext_csd.cmdq_depth); + BUG_ON(test_and_set_bit(req->tag, &host->cmdq_ctx.active_reqs)); + + ctx_info = &host->cmdq_ctx; + + set_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state); + + active_mqrq = &mq->mqrq_cmdq[req->tag]; + active_mqrq->req = req; + + cmdq_req = mmc_cmdq_prep_dcmd(active_mqrq, mq); + cmdq_req->cmdq_req_flags |= QBR; + cmdq_req->mrq.cmd = &cmdq_req->cmd; + cmdq_req->tag = req->tag; + + err = mmc_cmdq_prepare_flush(cmdq_req->mrq.cmd); + if (err) { + pr_err("%s: failed (%d) preparing flush req\n", + mmc_hostname(host), err); + return err; + } + err = mmc_blk_cmdq_start_req(card->host, cmdq_req); + return err; +} +EXPORT_SYMBOL(mmc_blk_cmdq_issue_flush_rq); + +static void mmc_blk_cmdq_reset(struct mmc_host *host, bool clear_all) +{ + int err = 0; + + if (mmc_cmdq_halt(host, true)) { + pr_err("%s: halt failed\n", mmc_hostname(host)); + goto reset; + } + + if (clear_all) + mmc_cmdq_discard_queue(host, 0); +reset: + mmc_host_clk_hold(host); + host->cmdq_ops->disable(host, true); + mmc_host_clk_release(host); + err = mmc_cmdq_hw_reset(host); + if (err && err != -EOPNOTSUPP) { + pr_err("%s: failed to cmdq_hw_reset err = %d\n", + mmc_hostname(host), err); + mmc_host_clk_hold(host); + host->cmdq_ops->enable(host); + mmc_host_clk_release(host); + mmc_cmdq_halt(host, false); + goto out; + } + /* + * CMDQ HW reset would have already made CQE + * in unhalted state, but reflect the same + * in software state of cmdq_ctx. + */ + mmc_host_clr_halt(host); +out: + return; +} + +/** + * is_cmdq_dcmd_req - Checks if tag belongs to DCMD request. + * @q: request_queue pointer. + * @tag: tag number of request to check. + * + * This function checks if the request with tag number "tag" + * is a DCMD request or not based on cmdq_req_flags set. + * + * returns true if DCMD req, otherwise false. + */ +static bool is_cmdq_dcmd_req(struct request_queue *q, int tag) +{ + struct request *req; + struct mmc_queue_req *mq_rq; + struct mmc_cmdq_req *cmdq_req; + + req = blk_queue_find_tag(q, tag); + if (WARN_ON(!req)) + goto out; + mq_rq = req->special; + if (WARN_ON(!mq_rq)) + goto out; + cmdq_req = &(mq_rq->cmdq_req); + return (cmdq_req->cmdq_req_flags & DCMD); +out: + return -ENOENT; +} + +/** + * mmc_blk_cmdq_reset_all - Reset everything for CMDQ block request. + * @host: mmc_host pointer. + * @err: error for which reset is performed. + * + * This function implements reset_all functionality for + * cmdq. It resets the controller, power cycle the card, + * and invalidate all busy tags(requeue all request back to + * elevator). 
+ */ +static void mmc_blk_cmdq_reset_all(struct mmc_host *host, int err) +{ + struct mmc_request *mrq = host->err_mrq; + struct mmc_card *card = host->card; + struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx; + struct request_queue *q; + int itag = 0; + int ret = 0; + + if (WARN_ON(!mrq)) + return; + + q = mrq->req->q; + WARN_ON(!test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)); + + #ifdef CONFIG_MMC_CLKGATE + pr_debug("%s: %s: active_reqs = %lu, clk_requests = %d\n", + mmc_hostname(host), __func__, + ctx_info->active_reqs, host->clk_requests); + #endif + + mmc_blk_cmdq_reset(host, false); + + for_each_set_bit(itag, &ctx_info->active_reqs, + host->num_cq_slots) { + ret = is_cmdq_dcmd_req(q, itag); + if (WARN_ON(ret == -ENOENT)) + continue; + if (!ret) { + WARN_ON(!test_and_clear_bit(itag, + &ctx_info->data_active_reqs)); + mmc_cmdq_post_req(host, itag, err); + } else { + clear_bit(CMDQ_STATE_DCMD_ACTIVE, + &ctx_info->curr_state); + } + WARN_ON(!test_and_clear_bit(itag, + &ctx_info->active_reqs)); + mmc_host_clk_release(host); + mmc_put_card(card); + } + + spin_lock_irq(q->queue_lock); + blk_queue_invalidate_tags(q); + spin_unlock_irq(q->queue_lock); +} + +static void mmc_blk_cmdq_shutdown(struct mmc_queue *mq) +{ + int err; + struct mmc_card *card = mq->card; + struct mmc_host *host = card->host; + + mmc_get_card(card); + mmc_host_clk_hold(host); + err = mmc_cmdq_halt(host, true); + if (err) { + pr_err("%s: halt: failed: %d\n", __func__, err); + goto out; + } + + /* disable CQ mode in card */ + if (mmc_card_cmdq(card)) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_CMDQ, 0, + card->ext_csd.generic_cmd6_time); + if (err) { + pr_err("%s: failed to switch card to legacy mode: %d\n", + __func__, err); + goto out; + } + mmc_card_clr_cmdq(card); + } + host->cmdq_ops->disable(host, false); + host->card->cmdq_init = false; +out: + mmc_host_clk_release(host); + mmc_put_card(card); +} + +static enum blk_eh_timer_return mmc_blk_cmdq_req_timed_out(struct request *req) +{ + struct mmc_queue *mq = req->q->queuedata; + struct mmc_host *host = mq->card->host; + struct mmc_queue_req *mq_rq = req->special; + struct mmc_request *mrq; + struct mmc_cmdq_req *cmdq_req; + struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx; + + BUG_ON(!host); + + /* + * The mmc_queue_req will be present only if the request + * is issued to the LLD. The request could be fetched from + * block layer queue but could be waiting to be issued + * (for e.g. clock scaling is waiting for an empty cmdq queue) + * Reset the timer in such cases to give LLD more time + */ + if (!mq_rq) { + pr_warn("%s: restart timer for tag: %d\n", __func__, req->tag); + return BLK_EH_RESET_TIMER; + } + + mrq = &mq_rq->cmdq_req.mrq; + cmdq_req = &mq_rq->cmdq_req; + + BUG_ON(!mrq || !cmdq_req); + + if (cmdq_req->cmdq_req_flags & DCMD) + mrq->cmd->error = -ETIMEDOUT; + else + mrq->data->error = -ETIMEDOUT; + + if (mrq->cmd && mrq->cmd->error) { + if (!(mrq->req->cmd_flags & REQ_FLUSH)) { + /* + * Notify completion for non flush commands like + * discard that wait for DCMD finish. + */ + set_bit(CMDQ_STATE_REQ_TIMED_OUT, + &ctx_info->curr_state); + complete(&mrq->completion); + return BLK_EH_NOT_HANDLED; + } + } + + if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state) || + test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) + return BLK_EH_NOT_HANDLED; + + set_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state); + return BLK_EH_HANDLED; +} + +/* + * mmc_blk_cmdq_err: error handling of cmdq error requests. 
+ * Function should be called in context of error out request + * which has claim_host and rpm acquired. + * This may be called with CQ engine halted. Make sure to + * unhalt it after error recovery. + * + * TODO: Currently cmdq error handler does reset_all in case + * of any erorr. Need to optimize error handling. + */ +static void mmc_blk_cmdq_err(struct mmc_queue *mq) +{ + struct mmc_host *host = mq->card->host; + struct mmc_request *mrq = host->err_mrq; + struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx; + struct request_queue *q; + int err, ret; + u32 status = 0; + + mmc_host_clk_hold(host); + host->cmdq_ops->dumpstate(host); + mmc_host_clk_release(host); + + if (WARN_ON(!mrq)) + return; + + q = mrq->req->q; + err = mmc_cmdq_halt(host, true); + if (err) { + pr_err("halt: failed: %d\n", err); + goto reset; + } + + /* RED error - Fatal: requires reset */ + if (mrq->cmdq_req->resp_err) { + err = mrq->cmdq_req->resp_err; + goto reset; + } + + /* + * TIMEOUT errrors can happen because of execution error + * in the last command. So send cmd 13 to get device status + */ + if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) || + (mrq->data && (mrq->data->error == -ETIMEDOUT))) { + if (mmc_host_halt(host) || mmc_host_cq_disable(host)) { + ret = get_card_status(host->card, &status, 0); + if (ret) + pr_err("%s: CMD13 failed with err %d\n", + mmc_hostname(host), ret); + } + pr_err("%s: Timeout error detected with device status 0x%08x\n", + mmc_hostname(host), status); + } + + /* + * In case of software request time-out, we schedule err work only for + * the first error out request and handles all other request in flight + * here. + */ + if (test_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state)) { + err = -ETIMEDOUT; + } else if (mrq->data && mrq->data->error) { + err = mrq->data->error; + } else if (mrq->cmd && mrq->cmd->error) { + /* DCMD commands */ + err = mrq->cmd->error; + } + +reset: + mmc_blk_cmdq_reset_all(host, err); + if (mrq->cmdq_req->resp_err) + mrq->cmdq_req->resp_err = false; + mmc_cmdq_halt(host, false); + + host->err_mrq = NULL; + clear_bit(CMDQ_STATE_REQ_TIMED_OUT, &ctx_info->curr_state); + WARN_ON(!test_and_clear_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)); + wake_up(&ctx_info->wait); +} + +/* invoked by block layer in softirq context */ +void mmc_blk_cmdq_complete_rq(struct request *rq) +{ + struct mmc_queue_req *mq_rq = rq->special; + struct mmc_request *mrq = &mq_rq->cmdq_req.mrq; + struct mmc_host *host = mrq->host; + struct mmc_cmdq_context_info *ctx_info = &host->cmdq_ctx; + struct mmc_cmdq_req *cmdq_req = &mq_rq->cmdq_req; + struct mmc_queue *mq = (struct mmc_queue *)rq->q->queuedata; + int err = 0; + bool is_dcmd = false; + + if (mrq->cmd && mrq->cmd->error) + err = mrq->cmd->error; + else if (mrq->data && mrq->data->error) + err = mrq->data->error; + + if ((err || cmdq_req->resp_err) && !cmdq_req->skip_err_handling) { + pr_err("%s: %s: txfr error(%d)/resp_err(%d)\n", + mmc_hostname(mrq->host), __func__, err, + cmdq_req->resp_err); + if (test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)) { + pr_err("%s: CQ in error state, ending current req: %d\n", + __func__, err); + } else { + set_bit(CMDQ_STATE_ERR, &ctx_info->curr_state); + BUG_ON(host->err_mrq != NULL); + host->err_mrq = mrq; + schedule_work(&mq->cmdq_err_work); + } + goto out; + } + /* + * In case of error CMDQ is expected to be either in halted + * or disable state so cannot receive any completion of + * other requests. 
+ */ + WARN_ON(test_bit(CMDQ_STATE_ERR, &ctx_info->curr_state)); + + /* clear pending request */ + BUG_ON(!test_and_clear_bit(cmdq_req->tag, + &ctx_info->active_reqs)); + if (cmdq_req->cmdq_req_flags & DCMD) + is_dcmd = true; + else + BUG_ON(!test_and_clear_bit(cmdq_req->tag, + &ctx_info->data_active_reqs)); + if (!is_dcmd) + mmc_cmdq_post_req(host, cmdq_req->tag, err); + if (cmdq_req->cmdq_req_flags & DCMD) { + clear_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx_info->curr_state); + blk_end_request_all(rq, err); + goto out; + } + /* + * In case of error, cmdq_req->data.bytes_xfered is set to 0. + * If we call blk_end_request() with nr_bytes as 0 then the request + * never gets completed. So in case of error, to complete a request + * with error we should use blk_end_request_all(). + */ + if (err && cmdq_req->skip_err_handling) { + cmdq_req->skip_err_handling = false; + blk_end_request_all(rq, err); + goto out; + } + + blk_end_request(rq, err, cmdq_req->data.bytes_xfered); + +out: + + mmc_cmdq_clk_scaling_stop_busy(host, true, is_dcmd); + if (!(err || cmdq_req->resp_err)) { + mmc_host_clk_release(host); + wake_up(&ctx_info->wait); + mmc_put_card(host->card); + } + + if (!ctx_info->active_reqs) + wake_up_interruptible(&host->cmdq_ctx.queue_empty_wq); + + if (blk_queue_stopped(mq->queue) && !ctx_info->active_reqs) + complete(&mq->cmdq_shutdown_complete); + + return; +} + +/* + * Complete reqs from block layer softirq context + * Invoked in irq context + */ +void mmc_blk_cmdq_req_done(struct mmc_request *mrq) +{ + struct request *req = mrq->req; + + blk_complete_request(req); +} +EXPORT_SYMBOL(mmc_blk_cmdq_req_done); + static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; @@ -2205,6 +3705,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) struct mmc_async_req *areq; const u8 packed_nr = 2; u8 reqs = 0; + bool reset = false; #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED unsigned long waitfor = jiffies; #endif @@ -2240,7 +3741,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) areq = mmc_start_req(card->host, areq, (int *) &status); if (!areq) { if (status == MMC_BLK_NEW_REQUEST) - mq->flags |= MMC_QUEUE_NEW_REQUEST; + set_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags); return 0; } @@ -2250,6 +3751,26 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; mmc_queue_bounce_post(mq_rq); + if (card->err_in_sdr104) { + /* + * Data CRC/timeout errors will manifest as CMD/DATA + * ERR. But we'd like to retry these too. + * Moreover, no harm done if this fails too for multiple + * times, we anyway reduce the bus-speed and retry the + * same request. + * If that fails too, we don't override this status. 
+ */ + if (status == MMC_BLK_ABORT || + status == MMC_BLK_CMD_ERR || + status == MMC_BLK_DATA_ERR || + status == MMC_BLK_RETRY) + /* reset on all of these errors and retry */ + reset = true; + + status = MMC_BLK_RETRY; + card->err_in_sdr104 = false; + } + switch (status) { case MMC_BLK_SUCCESS: case MMC_BLK_PARTIAL: @@ -2290,11 +3811,36 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) break; case MMC_BLK_RETRY: retune_retry_done = brq->retune_retry_done; - if (retry++ < 5) + if (retry++ < MMC_BLK_MAX_RETRIES) { break; + } else if (reset) { + reset = false; + /* + * If we exhaust all the retries due to + * CRC/timeout errors in SDR140 mode with UHS SD + * cards, re-configure the card in SDR50 + * bus-speed mode. + * All subsequent re-init of this card will be + * in SDR50 mode, unless it is removed and + * re-inserted. When new UHS SD cards are + * inserted, it may start at SDR104 mode if + * supported by the card. + */ + pr_err("%s: blocked SDR104, lower the bus-speed (SDR50 / DDR50)\n", + req->rq_disk->disk_name); + mmc_host_clear_sdr104(card->host); + mmc_suspend_clk_scaling(card->host); + mmc_blk_reset(md, card->host, type); + /* SDR104 mode is blocked from now on */ + card->sdr104_blocked = true; + /* retry 5 times again */ + retry = 0; + break; + } /* Fall through */ case MMC_BLK_ABORT: - if (!mmc_blk_reset(md, card->host, type)) + if (!mmc_blk_reset(md, card->host, type) && + (retry++ < (MMC_BLK_MAX_RETRIES + 1))) break; goto cmd_abort; case MMC_BLK_DATA_ERR: { @@ -2303,10 +3849,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) err = mmc_blk_reset(md, card->host, type); if (!err) break; - if (err == -ENODEV || - mmc_packed_cmd(mq_rq->cmd_type)) - goto cmd_abort; - /* Fall through */ + goto cmd_abort; } case MMC_BLK_ECC_ERR: if (brq->data.blocks > 1) { @@ -2390,6 +3933,189 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) return 0; } +static inline int mmc_blk_cmdq_part_switch(struct mmc_card *card, + struct mmc_blk_data *md) +{ + struct mmc_blk_data *main_md = mmc_get_drvdata(card); + struct mmc_host *host = card->host; + struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx; + u8 part_config = card->ext_csd.part_config; + int ret = 0, err = 0; + + if ((main_md->part_curr == md->part_type) && + (card->part_curr == md->part_type)) + return 0; + + WARN_ON(!((card->host->caps2 & MMC_CAP2_CMD_QUEUE) && + card->ext_csd.cmdq_support && + (md->flags & MMC_BLK_CMD_QUEUE))); + + if (!test_bit(CMDQ_STATE_HALT, &ctx->curr_state)) { + ret = mmc_cmdq_halt(host, true); + if (ret) { + pr_err("%s: %s: halt: failed: %d\n", + mmc_hostname(host), __func__, ret); + goto out; + } + } + + /* disable CQ mode in card */ + if (mmc_card_cmdq(card)) { + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_CMDQ, 0, + card->ext_csd.generic_cmd6_time); + if (ret) { + pr_err("%s: %s: cmdq mode disable failed %d\n", + mmc_hostname(host), __func__, ret); + goto cmdq_unhalt; + } + mmc_card_clr_cmdq(card); + } + + part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; + part_config |= md->part_type; + + ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_PART_CONFIG, part_config, + card->ext_csd.part_time); + if (ret) { + pr_err("%s: %s: mmc_switch failure, %d -> %d , err = %d\n", + mmc_hostname(host), __func__, main_md->part_curr, + md->part_type, ret); + goto cmdq_switch; + } + + card->ext_csd.part_config = part_config; + card->part_curr = md->part_type; + + main_md->part_curr = md->part_type; + +cmdq_switch: + err = 
mmc_blk_cmdq_switch(card, md, true); + if (err) { + pr_err("%s: %s: mmc_blk_cmdq_switch failed: %d\n", + mmc_hostname(host), __func__, err); + ret = err; + goto out; + } +cmdq_unhalt: + err = mmc_cmdq_halt(host, false); + if (err) { + pr_err("%s: %s: unhalt: failed: %d\n", + mmc_hostname(host), __func__, err); + ret = err; + } +out: + return ret; +} + +static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req) +{ + int ret, err = 0; + struct mmc_blk_data *md = mq->data; + struct mmc_card *card = md->queue.card; + struct mmc_host *host = card->host; + unsigned int cmd_flags = req ? req->cmd_flags : 0; + + mmc_get_card(card); + + if (!card->host->cmdq_ctx.active_reqs && mmc_card_doing_bkops(card)) { + ret = mmc_cmdq_halt(card->host, true); + if (ret) + goto out; + ret = mmc_stop_bkops(card); + if (ret) { + pr_err("%s: %s: mmc_stop_bkops failed %d\n", + md->disk->disk_name, __func__, ret); + goto out; + } + ret = mmc_cmdq_halt(card->host, false); + if (ret) + goto out; + } + + ret = mmc_blk_cmdq_part_switch(card, md); + if (ret) { + pr_err("%s: %s: partition switch failed %d, resetting cmdq\n", + md->disk->disk_name, __func__, ret); + + mmc_blk_cmdq_reset(host, false); + err = mmc_blk_cmdq_part_switch(card, md); + if (!err) { + pr_err("%s: %s: partition switch success err = %d\n", + md->disk->disk_name, __func__, err); + } else { + pr_err("%s: %s: partition switch failed err = %d\n", + md->disk->disk_name, __func__, err); + ret = err; + goto out; + } + } + + if (req) { + struct mmc_host *host = card->host; + struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx; + + if ((cmd_flags & (REQ_FLUSH | REQ_DISCARD)) && + (card->quirks & MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD) && + ctx->active_small_sector_read_reqs) { + ret = wait_event_interruptible(ctx->queue_empty_wq, + !ctx->active_reqs); + if (ret) { + pr_err("%s: failed while waiting for the CMDQ to be empty %s err (%d)\n", + mmc_hostname(host), + __func__, ret); + BUG_ON(1); + } + /* clear the counter now */ + ctx->active_small_sector_read_reqs = 0; + /* + * If there were small sector (less than 8 sectors) read + * operations in progress then we have to wait for the + * outstanding requests to finish and should also have + * atleast 6 microseconds delay before queuing the DCMD + * request. + */ + udelay(MMC_QUIRK_CMDQ_DELAY_BEFORE_DCMD); + } + + if (cmd_flags & REQ_DISCARD) { + if (cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) + ret = mmc_blk_cmdq_issue_secdiscard_rq(mq, req); + else + ret = mmc_blk_cmdq_issue_discard_rq(mq, req); + } else if (cmd_flags & REQ_FLUSH) { + ret = mmc_blk_cmdq_issue_flush_rq(mq, req); + } else { + ret = mmc_blk_cmdq_issue_rw_rq(mq, req); + /* + * If issuing of the request fails with eitehr EBUSY or + * EAGAIN error, re-queue the request. + * This case would occur with ICE calls. + * For request which gets completed successfully or + * errored out, we release host lock in completion or + * error handling softirq context. But here the request + * is neither completed nor erred-out, so release the + * host lock explicitly. 
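+ *
+ * A minimal sketch of that rule, assuming <errno.h>; dispatch(),
+ * issue(), requeue() and release() are hypothetical names, not the
+ * driver's API:
+ *
+ *   int dispatch(int (*issue)(void), void (*requeue)(void),
+ *                void (*release)(void))
+ *   {
+ *           int ret = issue();
+ *
+ *           if (ret == -EBUSY || ret == -EAGAIN) {
+ *                   requeue();      // try the same request again later
+ *                   release();      // completion/error paths will not run
+ *           }
+ *           return ret;             // 0 and hard errors finish elsewhere
+ *   }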
+ */ + if (ret == -EBUSY || ret == -EAGAIN) { + mmc_blk_cmdq_requeue_rw_rq(mq, req); + mmc_put_card(host->card); + } + } + } + + return ret; + +out: + if (req) + blk_end_request_all(req, ret); + mmc_put_card(card); + + return ret; +} + static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { int ret; @@ -2398,13 +4124,31 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) struct mmc_host *host = card->host; unsigned long flags; unsigned int cmd_flags = req ? req->cmd_flags : 0; + int err; - if (req && !mq->mqrq_prev->req) + if (req && !mq->mqrq_prev->req) { /* claim host only for the first request */ mmc_get_card(card); + if (mmc_card_doing_bkops(host->card)) { + ret = mmc_stop_bkops(host->card); + if (ret) + goto out; + } + } + ret = mmc_blk_part_switch(card, md); + if (ret) { + err = mmc_blk_reset(md, card->host, MMC_BLK_PARTSWITCH); + if (!err) { + pr_err("%s: mmc_blk_reset(MMC_BLK_PARTSWITCH) succeeded.\n", + mmc_hostname(host)); + mmc_blk_reset_success(md, MMC_BLK_PARTSWITCH); + } else + pr_err("%s: mmc_blk_reset(MMC_BLK_PARTSWITCH) failed.\n", + mmc_hostname(host)); + if (req) { blk_end_request_all(req, -EIO); } @@ -2412,16 +4156,19 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) goto out; } - mq->flags &= ~MMC_QUEUE_NEW_REQUEST; + mmc_blk_write_packing_control(mq, req); + + clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags); if (cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE) + if (cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); - } else if (cmd_flags & REQ_FLUSH) { + } else if (cmd_flags & (REQ_FLUSH | REQ_BARRIER)) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); @@ -2436,7 +4183,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) } out: - if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || + if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) || (cmd_flags & MMC_REQ_SPECIAL_MASK)) /* * Release host when there are no more requests @@ -2506,7 +4253,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, INIT_LIST_HEAD(&md->part); md->usage = 1; - ret = mmc_init_queue(&md->queue, card, &md->lock, subname); + ret = mmc_init_queue(&md->queue, card, NULL, subname, area_type); if (ret) goto err_putdisk; @@ -2563,7 +4310,16 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); } - if (mmc_card_mmc(card) && + if (card->cmdq_init) { + md->flags |= MMC_BLK_CMD_QUEUE; + md->queue.cmdq_complete_fn = mmc_blk_cmdq_complete_rq; + md->queue.cmdq_issue_fn = mmc_blk_cmdq_issue_rq; + md->queue.cmdq_error_fn = mmc_blk_cmdq_err; + md->queue.cmdq_req_timed_out = mmc_blk_cmdq_req_timed_out; + md->queue.cmdq_shutdown = mmc_blk_cmdq_shutdown; + } + + if (mmc_card_mmc(card) && !card->cmdq_init && (area_type == MMC_BLK_DATA_AREA_MAIN) && (md->flags & MMC_BLK_CMD23) && card->ext_csd.packed_event_en) { @@ -2576,8 +4332,11 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, err_putdisk: put_disk(md->disk); err_kfree: + if (!subname) + __clear_bit(md->name_idx, name_use); kfree(md); out: + __clear_bit(devidx, dev_use); return ERR_PTR(ret); } @@ -2673,6 +4432,10 @@ static void mmc_blk_remove_req(struct 
mmc_blk_data *md) mmc_cleanup_queue(&md->queue); if (md->flags & MMC_BLK_PACKED_CMD) mmc_packed_clean(&md->queue); + if (md->flags & MMC_BLK_CMD_QUEUE) + mmc_cmdq_clean(&md->queue, card); + device_remove_file(disk_to_dev(md->disk), + &md->num_wr_reqs_to_start_packing); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && @@ -2761,8 +4524,37 @@ static int mmc_add_disk(struct mmc_blk_data *md) if (ret) goto power_ro_lock_fail; } + + md->num_wr_reqs_to_start_packing.show = + num_wr_reqs_to_start_packing_show; + md->num_wr_reqs_to_start_packing.store = + num_wr_reqs_to_start_packing_store; + sysfs_attr_init(&md->num_wr_reqs_to_start_packing.attr); + md->num_wr_reqs_to_start_packing.attr.name = + "num_wr_reqs_to_start_packing"; + md->num_wr_reqs_to_start_packing.attr.mode = S_IRUGO | S_IWUSR; + ret = device_create_file(disk_to_dev(md->disk), + &md->num_wr_reqs_to_start_packing); + if (ret) + goto num_wr_reqs_to_start_packing_fail; + + md->no_pack_for_random.show = no_pack_for_random_show; + md->no_pack_for_random.store = no_pack_for_random_store; + sysfs_attr_init(&md->no_pack_for_random.attr); + md->no_pack_for_random.attr.name = "no_pack_for_random"; + md->no_pack_for_random.attr.mode = S_IRUGO | S_IWUSR; + ret = device_create_file(disk_to_dev(md->disk), + &md->no_pack_for_random); + if (ret) + goto no_pack_for_random_fails; + return ret; +no_pack_for_random_fails: + device_remove_file(disk_to_dev(md->disk), + &md->num_wr_reqs_to_start_packing); +num_wr_reqs_to_start_packing_fail: + device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); power_ro_lock_fail: #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size); @@ -2785,6 +4577,11 @@ force_ro_fail: #define CID_MANFID_SAMSUNG 0x15 #define CID_MANFID_KINGSTON 0x70 +#define CID_MANFID_SANDISK 0x2 +#define CID_MANFID_TOSHIBA 0x11 +#define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 + static const struct mmc_fixup blk_fixups[] = { MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, @@ -2816,6 +4613,8 @@ static const struct mmc_fixup blk_fixups[] = MMC_QUIRK_BLK_NO_CMD23), MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_BLK_NO_CMD23), + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_TOSHIBA, CID_OEMID_ANY, + add_quirk_mmc, MMC_QUIRK_CMDQ_EMPTY_BEFORE_DCMD), /* * Some MMC cards need longer data read timeout than indicated in CSD. @@ -2826,6 +4625,20 @@ static const struct mmc_fixup blk_fixups[] = MMC_QUIRK_LONG_READ_TIME), /* + * Some Samsung MMC cards need longer data read timeout than + * indicated in CSD. + */ + MMC_FIXUP("Q7XSAB", CID_MANFID_SAMSUNG, 0x100, add_quirk_mmc, + MMC_QUIRK_LONG_READ_TIME), + + /* + * Hynix eMMC cards need longer data read timeout than + * indicated in CSD. + */ + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_HYNIX, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_LONG_READ_TIME), + + /* * On these Samsung MoviNAND parts, performing secure erase or * secure trim can result in unrecoverable corruption due to a * firmware bug. 
@@ -2856,6 +4669,32 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc, MMC_QUIRK_TRIM_BROKEN), + /* Some INAND MCP devices advertise incorrect timeout values */ + MMC_FIXUP("SEM04G", 0x45, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_INAND_DATA_TIMEOUT), + + /* + * On these Samsung MoviNAND parts, performing secure erase or + * secure trim can result in unrecoverable corruption due to a + * firmware bug. + */ + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + END_FIXUP }; @@ -2887,6 +4726,8 @@ static int mmc_blk_probe(struct mmc_card *card) dev_set_drvdata(&card->dev, md); + mmc_set_bus_resume_policy(card->host, 1); + if (mmc_add_disk(md)) goto out; @@ -2895,7 +4736,8 @@ static int mmc_blk_probe(struct mmc_card *card) goto out; } - pm_runtime_set_autosuspend_delay(&card->dev, 3000); + pm_runtime_use_autosuspend(&card->dev); + pm_runtime_set_autosuspend_delay(&card->dev, MMC_AUTOSUSPEND_DELAY_MS); pm_runtime_use_autosuspend(&card->dev); /* @@ -2929,25 +4771,39 @@ static void mmc_blk_remove(struct mmc_card *card) pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); dev_set_drvdata(&card->dev, NULL); + mmc_set_bus_resume_policy(card->host, 0); } -static int _mmc_blk_suspend(struct mmc_card *card) +static int _mmc_blk_suspend(struct mmc_card *card, bool wait) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = dev_get_drvdata(&card->dev); + int rc = 0; if (md) { - mmc_queue_suspend(&md->queue); + rc = mmc_queue_suspend(&md->queue, wait); + if (rc) + goto out; list_for_each_entry(part_md, &md->part, part) { - mmc_queue_suspend(&part_md->queue); + rc = mmc_queue_suspend(&part_md->queue, wait); + if (rc) + goto out_resume; } } - return 0; + goto out; + + out_resume: + mmc_queue_resume(&md->queue); + list_for_each_entry(part_md, &md->part, part) { + mmc_queue_resume(&part_md->queue); + } + out: + return rc; } static void mmc_blk_shutdown(struct mmc_card *card) { - _mmc_blk_suspend(card); + _mmc_blk_suspend(card, 1); } #ifdef CONFIG_PM_SLEEP @@ -2955,7 +4811,7 @@ static int mmc_blk_suspend(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); - return _mmc_blk_suspend(card); + return _mmc_blk_suspend(card, 0); } static int mmc_blk_resume(struct device *dev) diff --git a/drivers/mmc/card/mmc_block_test.c b/drivers/mmc/card/mmc_block_test.c new file mode 100644 index 000000000000..967affa11d9e --- /dev/null +++ b/drivers/mmc/card/mmc_block_test.c @@ -0,0 +1,2038 @@ +/* Copyright (c) 2012, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* MMC block test */ + +#include <linux/module.h> +#include <linux/blkdev.h> +#include <linux/debugfs.h> +#include <linux/mmc/card.h> +#include <linux/mmc/host.h> +#include <linux/delay.h> +#include <linux/test-iosched.h> +#include "queue.h" + +#define MODULE_NAME "mmc_block_test" +#define TEST_MAX_SECTOR_RANGE (600*1024*1024) /* 600 MB */ +#define TEST_MAX_BIOS_PER_REQ 120 +#define CMD23_PACKED_BIT (1 << 30) +#define LARGE_PRIME_1 1103515367 +#define LARGE_PRIME_2 35757 +#define PACKED_HDR_VER_MASK 0x000000FF +#define PACKED_HDR_RW_MASK 0x0000FF00 +#define PACKED_HDR_NUM_REQS_MASK 0x00FF0000 +#define PACKED_HDR_BITS_16_TO_29_SET 0x3FFF0000 + +#define test_pr_debug(fmt, args...) pr_debug("%s: "fmt"\n", MODULE_NAME, args) +#define test_pr_info(fmt, args...) pr_info("%s: "fmt"\n", MODULE_NAME, args) +#define test_pr_err(fmt, args...) pr_err("%s: "fmt"\n", MODULE_NAME, args) + +enum is_random { + NON_RANDOM_TEST, + RANDOM_TEST, +}; + +enum mmc_block_test_testcases { + /* Start of send write packing test group */ + SEND_WRITE_PACKING_MIN_TESTCASE, + TEST_STOP_DUE_TO_READ = SEND_WRITE_PACKING_MIN_TESTCASE, + TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS, + TEST_STOP_DUE_TO_FLUSH, + TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS, + TEST_STOP_DUE_TO_EMPTY_QUEUE, + TEST_STOP_DUE_TO_MAX_REQ_NUM, + TEST_STOP_DUE_TO_THRESHOLD, + SEND_WRITE_PACKING_MAX_TESTCASE = TEST_STOP_DUE_TO_THRESHOLD, + + /* Start of err check test group */ + ERR_CHECK_MIN_TESTCASE, + TEST_RET_ABORT = ERR_CHECK_MIN_TESTCASE, + TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS, + TEST_RET_PARTIAL_FOLLOWED_BY_ABORT, + TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS, + TEST_RET_PARTIAL_MAX_FAIL_IDX, + TEST_RET_RETRY, + TEST_RET_CMD_ERR, + TEST_RET_DATA_ERR, + ERR_CHECK_MAX_TESTCASE = TEST_RET_DATA_ERR, + + /* Start of send invalid test group */ + INVALID_CMD_MIN_TESTCASE, + TEST_HDR_INVALID_VERSION = INVALID_CMD_MIN_TESTCASE, + TEST_HDR_WRONG_WRITE_CODE, + TEST_HDR_INVALID_RW_CODE, + TEST_HDR_DIFFERENT_ADDRESSES, + TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL, + TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL, + TEST_HDR_CMD23_PACKED_BIT_SET, + TEST_CMD23_MAX_PACKED_WRITES, + TEST_CMD23_ZERO_PACKED_WRITES, + TEST_CMD23_PACKED_BIT_UNSET, + TEST_CMD23_REL_WR_BIT_SET, + TEST_CMD23_BITS_16TO29_SET, + TEST_CMD23_HDR_BLK_NOT_IN_COUNT, + INVALID_CMD_MAX_TESTCASE = TEST_CMD23_HDR_BLK_NOT_IN_COUNT, + + /* + * Start of packing control test group. 
+ * in these next testcases the abbreviation FB = followed by + */ + PACKING_CONTROL_MIN_TESTCASE, + TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ = + PACKING_CONTROL_MIN_TESTCASE, + TEST_PACKING_EXP_N_OVER_TRIGGER, + TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ, + TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N, + TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER, + TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS, + TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS, + TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER, + TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER, + TEST_PACK_MIX_PACKED_NO_PACKED_PACKED, + TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED, + PACKING_CONTROL_MAX_TESTCASE = TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED, +}; + +enum mmc_block_test_group { + TEST_NO_GROUP, + TEST_GENERAL_GROUP, + TEST_SEND_WRITE_PACKING_GROUP, + TEST_ERR_CHECK_GROUP, + TEST_SEND_INVALID_GROUP, + TEST_PACKING_CONTROL_GROUP, +}; + +struct mmc_block_test_debug { + struct dentry *send_write_packing_test; + struct dentry *err_check_test; + struct dentry *send_invalid_packed_test; + struct dentry *random_test_seed; + struct dentry *packing_control_test; +}; + +struct mmc_block_test_data { + /* The number of write requests that the test will issue */ + int num_requests; + /* The expected write packing statistics for the current test */ + struct mmc_wr_pack_stats exp_packed_stats; + /* + * A user-defined seed for random choices of number of bios written in + * a request, and of number of requests issued in a test + * This field is randomly updated after each use + */ + unsigned int random_test_seed; + /* A retry counter used in err_check tests */ + int err_check_counter; + /* Can be one of the values of enum test_group */ + enum mmc_block_test_group test_group; + /* + * Indicates if the current testcase is running with random values of + * num_requests and num_bios (in each request) + */ + int is_random; + /* Data structure for debugfs dentrys */ + struct mmc_block_test_debug debug; + /* + * Data structure containing individual test information, including + * self-defined specific data + */ + struct test_info test_info; + /* mmc block device test */ + struct blk_dev_test_type bdt; +}; + +static struct mmc_block_test_data *mbtd; + +void print_mmc_packing_stats(struct mmc_card *card) +{ + int i; + int max_num_of_packed_reqs = 0; + + if ((!card) || (!card->wr_pack_stats.packing_events)) + return; + + max_num_of_packed_reqs = card->ext_csd.max_packed_writes; + + spin_lock(&card->wr_pack_stats.lock); + + pr_info("%s: write packing statistics:\n", + mmc_hostname(card->host)); + + for (i = 1 ; i <= max_num_of_packed_reqs ; ++i) { + if (card->wr_pack_stats.packing_events[i] != 0) + pr_info("%s: Packed %d reqs - %d times\n", + mmc_hostname(card->host), i, + card->wr_pack_stats.packing_events[i]); + } + + pr_info("%s: stopped packing due to the following reasons:\n", + mmc_hostname(card->host)); + + if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) + pr_info("%s: %d times: exceedmax num of segments\n", + mmc_hostname(card->host), + card->wr_pack_stats.pack_stop_reason[EXCEEDS_SEGMENTS]); + if (card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]) + pr_info("%s: %d times: exceeding the max num of sectors\n", + mmc_hostname(card->host), + card->wr_pack_stats.pack_stop_reason[EXCEEDS_SECTORS]); + if (card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]) + pr_info("%s: %d times: wrong data direction\n", + mmc_hostname(card->host), + card->wr_pack_stats.pack_stop_reason[WRONG_DATA_DIR]); + if (card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]) + pr_info("%s: 
%d times: flush or discard\n", + mmc_hostname(card->host), + card->wr_pack_stats.pack_stop_reason[FLUSH_OR_DISCARD]); + if (card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]) + pr_info("%s: %d times: empty queue\n", + mmc_hostname(card->host), + card->wr_pack_stats.pack_stop_reason[EMPTY_QUEUE]); + if (card->wr_pack_stats.pack_stop_reason[REL_WRITE]) + pr_info("%s: %d times: rel write\n", + mmc_hostname(card->host), + card->wr_pack_stats.pack_stop_reason[REL_WRITE]); + if (card->wr_pack_stats.pack_stop_reason[THRESHOLD]) + pr_info("%s: %d times: Threshold\n", + mmc_hostname(card->host), + card->wr_pack_stats.pack_stop_reason[THRESHOLD]); + + spin_unlock(&card->wr_pack_stats.lock); +} + +/* + * A callback assigned to the packed_test_fn field. + * Called from block layer in mmc_blk_packed_hdr_wrq_prep. + * Here we alter the packed header or CMD23 in order to send an invalid + * packed command to the card. + */ +static void test_invalid_packed_cmd(struct request_queue *q, + struct mmc_queue_req *mqrq) +{ + struct mmc_queue *mq = q->queuedata; + u32 *packed_cmd_hdr = mqrq->packed->cmd_hdr; + struct request *req = mqrq->req; + struct request *second_rq; + struct test_request *test_rq; + struct mmc_blk_request *brq = &mqrq->brq; + int num_requests; + int max_packed_reqs; + + if (!mq) { + test_pr_err("%s: NULL mq", __func__); + return; + } + + test_rq = (struct test_request *)req->elv.priv[0]; + if (!test_rq) { + test_pr_err("%s: NULL test_rq", __func__); + return; + } + max_packed_reqs = mq->card->ext_csd.max_packed_writes; + + switch (mbtd->test_info.testcase) { + case TEST_HDR_INVALID_VERSION: + test_pr_info("%s: set invalid header version", __func__); + /* Put 0 in header version field (1 byte, offset 0 in header) */ + packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_VER_MASK; + break; + case TEST_HDR_WRONG_WRITE_CODE: + test_pr_info("%s: wrong write code", __func__); + /* Set R/W field with R value (1 byte, offset 1 in header) */ + packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK; + packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000100; + break; + case TEST_HDR_INVALID_RW_CODE: + test_pr_info("%s: invalid r/w code", __func__); + /* Set R/W field with invalid value */ + packed_cmd_hdr[0] = packed_cmd_hdr[0] & ~PACKED_HDR_RW_MASK; + packed_cmd_hdr[0] = packed_cmd_hdr[0] | 0x00000400; + break; + case TEST_HDR_DIFFERENT_ADDRESSES: + test_pr_info("%s: different addresses", __func__); + second_rq = list_entry(req->queuelist.next, struct request, + queuelist); + test_pr_info("%s: test_rq->sector=%ld, second_rq->sector=%ld", + __func__, (long)req->__sector, + (long)second_rq->__sector); + /* + * Put start sector of second write request in the first write + * request's cmd25 argument in the packed header + */ + packed_cmd_hdr[3] = second_rq->__sector; + break; + case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL: + test_pr_info("%s: request num smaller than actual" , __func__); + num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK) + >> 16; + /* num of entries is decremented by 1 */ + num_requests = (num_requests - 1) << 16; + /* + * Set number of requests field in packed write header to be + * smaller than the actual number (1 byte, offset 2 in header) + */ + packed_cmd_hdr[0] = (packed_cmd_hdr[0] & + ~PACKED_HDR_NUM_REQS_MASK) + num_requests; + break; + case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL: + test_pr_info("%s: request num larger than actual" , __func__); + num_requests = (packed_cmd_hdr[0] & PACKED_HDR_NUM_REQS_MASK) + >> 16; + /* num of entries is incremented by 1 */ + num_requests 
= (num_requests + 1) << 16; + /* + * Set number of requests field in packed write header to be + * larger than the actual number (1 byte, offset 2 in header). + */ + packed_cmd_hdr[0] = (packed_cmd_hdr[0] & + ~PACKED_HDR_NUM_REQS_MASK) + num_requests; + break; + case TEST_HDR_CMD23_PACKED_BIT_SET: + test_pr_info("%s: header CMD23 packed bit set" , __func__); + /* + * Set packed bit (bit 30) in cmd23 argument of first and second + * write requests in packed write header. + * These are located at bytes 2 and 4 in packed write header + */ + packed_cmd_hdr[2] = packed_cmd_hdr[2] | CMD23_PACKED_BIT; + packed_cmd_hdr[4] = packed_cmd_hdr[4] | CMD23_PACKED_BIT; + break; + case TEST_CMD23_MAX_PACKED_WRITES: + test_pr_info("%s: CMD23 request num > max_packed_reqs", + __func__); + /* + * Set the individual packed cmd23 request num to + * max_packed_reqs + 1 + */ + brq->sbc.arg = MMC_CMD23_ARG_PACKED | (max_packed_reqs + 1); + break; + case TEST_CMD23_ZERO_PACKED_WRITES: + test_pr_info("%s: CMD23 request num = 0", __func__); + /* Set the individual packed cmd23 request num to zero */ + brq->sbc.arg = MMC_CMD23_ARG_PACKED; + break; + case TEST_CMD23_PACKED_BIT_UNSET: + test_pr_info("%s: CMD23 packed bit unset", __func__); + /* + * Set the individual packed cmd23 packed bit to 0, + * although there is a packed write request + */ + brq->sbc.arg &= ~CMD23_PACKED_BIT; + break; + case TEST_CMD23_REL_WR_BIT_SET: + test_pr_info("%s: CMD23 REL WR bit set", __func__); + /* Set the individual packed cmd23 reliable write bit */ + brq->sbc.arg = MMC_CMD23_ARG_PACKED | MMC_CMD23_ARG_REL_WR; + break; + case TEST_CMD23_BITS_16TO29_SET: + test_pr_info("%s: CMD23 bits [16-29] set", __func__); + brq->sbc.arg = MMC_CMD23_ARG_PACKED | + PACKED_HDR_BITS_16_TO_29_SET; + break; + case TEST_CMD23_HDR_BLK_NOT_IN_COUNT: + test_pr_info("%s: CMD23 hdr not in block count", __func__); + brq->sbc.arg = MMC_CMD23_ARG_PACKED | + ((rq_data_dir(req) == READ) ? 0 : mqrq->packed->blocks); + break; + default: + test_pr_err("%s: unexpected testcase %d", + __func__, mbtd->test_info.testcase); + break; + } +} + +/* + * A callback assigned to the err_check_fn field of the mmc_request by the + * MMC/card/block layer. + * Called upon request completion by the MMC/core layer. + * Here we emulate an error return value from the card. 
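+ *
+ * A minimal sketch of this kind of error injection: the test swaps in a
+ * stub behind a status-check hook and restores it afterwards. The names
+ * blk_status, err_check_hook, default_check and fake_abort are
+ * hypothetical, not the driver's API:
+ *
+ *   enum blk_status { BLK_SUCCESS, BLK_RETRY, BLK_ABORT };
+ *
+ *   static enum blk_status default_check(void) { return BLK_SUCCESS; }
+ *   static enum blk_status fake_abort(void)    { return BLK_ABORT; }
+ *
+ *   static enum blk_status (*err_check_hook)(void) = default_check;
+ *
+ *   // test setup:      err_check_hook = fake_abort;
+ *   // completion path: status = err_check_hook();
+ *   // test teardown:   err_check_hook = default_check;
+ *
+ * Here the real hook is mq->err_check_fn, set in prepare_test() and
+ * cleared again in post_test().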
+ */ +static int test_err_check(struct mmc_card *card, struct mmc_async_req *areq) +{ + struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, + mmc_active); + struct request_queue *req_q = test_iosched_get_req_queue(); + struct mmc_queue *mq; + int max_packed_reqs; + int ret = 0; + + if (req_q) + mq = req_q->queuedata; + else { + test_pr_err("%s: NULL request_queue", __func__); + return 0; + } + + if (!mq) { + test_pr_err("%s: %s: NULL mq", __func__, + mmc_hostname(card->host)); + return 0; + } + + max_packed_reqs = mq->card->ext_csd.max_packed_writes; + + if (!mq_rq) { + test_pr_err("%s: %s: NULL mq_rq", __func__, + mmc_hostname(card->host)); + return 0; + } + + switch (mbtd->test_info.testcase) { + case TEST_RET_ABORT: + test_pr_info("%s: return abort", __func__); + ret = MMC_BLK_ABORT; + break; + case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS: + test_pr_info("%s: return partial followed by success", + __func__); + /* + * Since in this testcase num_requests is always >= 2, + * we can be sure that packed_fail_idx is always >= 1 + */ + mq_rq->packed->idx_failure = (mbtd->num_requests / 2); + test_pr_info("%s: packed_fail_idx = %d" + , __func__, mq_rq->packed->idx_failure); + mq->err_check_fn = NULL; + ret = MMC_BLK_PARTIAL; + break; + case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT: + if (!mbtd->err_check_counter) { + test_pr_info("%s: return partial followed by abort", + __func__); + mbtd->err_check_counter++; + /* + * Since in this testcase num_requests is always >= 3, + * we have that packed_fail_idx is always >= 1 + */ + mq_rq->packed->idx_failure = (mbtd->num_requests / 2); + test_pr_info("%s: packed_fail_idx = %d" + , __func__, mq_rq->packed->idx_failure); + ret = MMC_BLK_PARTIAL; + break; + } + mbtd->err_check_counter = 0; + mq->err_check_fn = NULL; + ret = MMC_BLK_ABORT; + break; + case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS: + test_pr_info("%s: return partial multiple until success", + __func__); + if (++mbtd->err_check_counter >= (mbtd->num_requests)) { + mq->err_check_fn = NULL; + mbtd->err_check_counter = 0; + ret = MMC_BLK_PARTIAL; + break; + } + mq_rq->packed->idx_failure = 1; + ret = MMC_BLK_PARTIAL; + break; + case TEST_RET_PARTIAL_MAX_FAIL_IDX: + test_pr_info("%s: return partial max fail_idx", __func__); + mq_rq->packed->idx_failure = max_packed_reqs - 1; + mq->err_check_fn = NULL; + ret = MMC_BLK_PARTIAL; + break; + case TEST_RET_RETRY: + test_pr_info("%s: return retry", __func__); + ret = MMC_BLK_RETRY; + break; + case TEST_RET_CMD_ERR: + test_pr_info("%s: return cmd err", __func__); + ret = MMC_BLK_CMD_ERR; + break; + case TEST_RET_DATA_ERR: + test_pr_info("%s: return data err", __func__); + ret = MMC_BLK_DATA_ERR; + break; + default: + test_pr_err("%s: unexpected testcase %d", + __func__, mbtd->test_info.testcase); + } + + return ret; +} + +/* + * This is a specific implementation for the get_test_case_str_fn function + * pointer in the test_info data structure. 
Given a valid test_data instance, + * the function returns a string resembling the test name, based on the testcase + */ +static char *get_test_case_str(struct test_data *td) +{ + if (!td) { + test_pr_err("%s: NULL td", __func__); + return NULL; + } + + switch (td->test_info.testcase) { + case TEST_STOP_DUE_TO_FLUSH: + return " stop due to flush"; + case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS: + return " stop due to flush after max-1 reqs"; + case TEST_STOP_DUE_TO_READ: + return " stop due to read"; + case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS: + return "Test stop due to read after max-1 reqs"; + case TEST_STOP_DUE_TO_EMPTY_QUEUE: + return "Test stop due to empty queue"; + case TEST_STOP_DUE_TO_MAX_REQ_NUM: + return "Test stop due to max req num"; + case TEST_STOP_DUE_TO_THRESHOLD: + return "Test stop due to exceeding threshold"; + case TEST_RET_ABORT: + return "Test err_check return abort"; + case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS: + return "Test err_check return partial followed by success"; + case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT: + return "Test err_check return partial followed by abort"; + case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS: + return "Test err_check return partial multiple until success"; + case TEST_RET_PARTIAL_MAX_FAIL_IDX: + return "Test err_check return partial max fail index"; + case TEST_RET_RETRY: + return "Test err_check return retry"; + case TEST_RET_CMD_ERR: + return "Test err_check return cmd error"; + case TEST_RET_DATA_ERR: + return "Test err_check return data error"; + case TEST_HDR_INVALID_VERSION: + return "Test invalid - wrong header version"; + case TEST_HDR_WRONG_WRITE_CODE: + return "Test invalid - wrong write code"; + case TEST_HDR_INVALID_RW_CODE: + return "Test invalid - wrong R/W code"; + case TEST_HDR_DIFFERENT_ADDRESSES: + return "Test invalid - header different addresses"; + case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL: + return "Test invalid - header req num smaller than actual"; + case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL: + return "Test invalid - header req num larger than actual"; + case TEST_HDR_CMD23_PACKED_BIT_SET: + return "Test invalid - header cmd23 packed bit set"; + case TEST_CMD23_MAX_PACKED_WRITES: + return "Test invalid - cmd23 max packed writes"; + case TEST_CMD23_ZERO_PACKED_WRITES: + return "Test invalid - cmd23 zero packed writes"; + case TEST_CMD23_PACKED_BIT_UNSET: + return "Test invalid - cmd23 packed bit unset"; + case TEST_CMD23_REL_WR_BIT_SET: + return "Test invalid - cmd23 rel wr bit set"; + case TEST_CMD23_BITS_16TO29_SET: + return "Test invalid - cmd23 bits [16-29] set"; + case TEST_CMD23_HDR_BLK_NOT_IN_COUNT: + return "Test invalid - cmd23 header block not in count"; + case TEST_PACKING_EXP_N_OVER_TRIGGER: + return "\nTest packing control - pack n"; + case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ: + return "\nTest packing control - pack n followed by read"; + case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N: + return "\nTest packing control - pack n followed by flush"; + case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ: + return "\nTest packing control - pack one followed by read"; + case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER: + return "\nTest packing control - pack threshold"; + case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS: + return "\nTest packing control - no packing"; + case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS: + return "\nTest packing control - no packing, trigger requests"; + case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER: + return "\nTest packing control - no pack, trigger-read-trigger"; + case 
TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER: + return "\nTest packing control- no pack, trigger-flush-trigger"; + case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED: + return "\nTest packing control - mix: pack -> no pack -> pack"; + case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED: + return "\nTest packing control - mix: no pack->pack->no pack"; + default: + return "Unknown testcase"; + } + + return NULL; +} + +/* + * Compare individual testcase's statistics to the expected statistics: + * Compare stop reason and number of packing events + */ +static int check_wr_packing_statistics(struct test_data *td) +{ + struct mmc_wr_pack_stats *mmc_packed_stats; + struct mmc_queue *mq = td->req_q->queuedata; + int max_packed_reqs = mq->card->ext_csd.max_packed_writes; + int i; + struct mmc_card *card = mq->card; + struct mmc_wr_pack_stats expected_stats; + int *stop_reason; + int ret = 0; + + if (!mq) { + test_pr_err("%s: NULL mq", __func__); + return -EINVAL; + } + + expected_stats = mbtd->exp_packed_stats; + + mmc_packed_stats = mmc_blk_get_packed_statistics(card); + if (!mmc_packed_stats) { + test_pr_err("%s: NULL mmc_packed_stats", __func__); + return -EINVAL; + } + + if (!mmc_packed_stats->packing_events) { + test_pr_err("%s: NULL packing_events", __func__); + return -EINVAL; + } + + spin_lock(&mmc_packed_stats->lock); + + if (!mmc_packed_stats->enabled) { + test_pr_err("%s write packing statistics are not enabled", + __func__); + ret = -EINVAL; + goto exit_err; + } + + stop_reason = mmc_packed_stats->pack_stop_reason; + + for (i = 1; i <= max_packed_reqs; ++i) { + if (mmc_packed_stats->packing_events[i] != + expected_stats.packing_events[i]) { + test_pr_err( + "%s: Wrong pack stats in index %d, got %d, expected %d", + __func__, i, mmc_packed_stats->packing_events[i], + expected_stats.packing_events[i]); + if (td->fs_wr_reqs_during_test) + goto cancel_round; + ret = -EINVAL; + goto exit_err; + } + } + + if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SEGMENTS] != + expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]) { + test_pr_err( + "%s: Wrong pack stop reason EXCEEDS_SEGMENTS %d, expected %d", + __func__, stop_reason[EXCEEDS_SEGMENTS], + expected_stats.pack_stop_reason[EXCEEDS_SEGMENTS]); + if (td->fs_wr_reqs_during_test) + goto cancel_round; + ret = -EINVAL; + goto exit_err; + } + + if (mmc_packed_stats->pack_stop_reason[EXCEEDS_SECTORS] != + expected_stats.pack_stop_reason[EXCEEDS_SECTORS]) { + test_pr_err( + "%s: Wrong pack stop reason EXCEEDS_SECTORS %d, expected %d", + __func__, stop_reason[EXCEEDS_SECTORS], + expected_stats.pack_stop_reason[EXCEEDS_SECTORS]); + if (td->fs_wr_reqs_during_test) + goto cancel_round; + ret = -EINVAL; + goto exit_err; + } + + if (mmc_packed_stats->pack_stop_reason[WRONG_DATA_DIR] != + expected_stats.pack_stop_reason[WRONG_DATA_DIR]) { + test_pr_err( + "%s: Wrong pack stop reason WRONG_DATA_DIR %d, expected %d", + __func__, stop_reason[WRONG_DATA_DIR], + expected_stats.pack_stop_reason[WRONG_DATA_DIR]); + if (td->fs_wr_reqs_during_test) + goto cancel_round; + ret = -EINVAL; + goto exit_err; + } + + if (mmc_packed_stats->pack_stop_reason[FLUSH_OR_DISCARD] != + expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]) { + test_pr_err( + "%s: Wrong pack stop reason FLUSH_OR_DISCARD %d, expected %d", + __func__, stop_reason[FLUSH_OR_DISCARD], + expected_stats.pack_stop_reason[FLUSH_OR_DISCARD]); + if (td->fs_wr_reqs_during_test) + goto cancel_round; + ret = -EINVAL; + goto exit_err; + } + + if (mmc_packed_stats->pack_stop_reason[EMPTY_QUEUE] != + 
expected_stats.pack_stop_reason[EMPTY_QUEUE]) { + test_pr_err( + "%s: Wrong pack stop reason EMPTY_QUEUE %d, expected %d", + __func__, stop_reason[EMPTY_QUEUE], + expected_stats.pack_stop_reason[EMPTY_QUEUE]); + if (td->fs_wr_reqs_during_test) + goto cancel_round; + ret = -EINVAL; + goto exit_err; + } + + if (mmc_packed_stats->pack_stop_reason[REL_WRITE] != + expected_stats.pack_stop_reason[REL_WRITE]) { + test_pr_err( + "%s: Wrong pack stop reason REL_WRITE %d, expected %d", + __func__, stop_reason[REL_WRITE], + expected_stats.pack_stop_reason[REL_WRITE]); + if (td->fs_wr_reqs_during_test) + goto cancel_round; + ret = -EINVAL; + goto exit_err; + } + +exit_err: + spin_unlock(&mmc_packed_stats->lock); + if (ret && mmc_packed_stats->enabled) + print_mmc_packing_stats(card); + return ret; +cancel_round: + spin_unlock(&mmc_packed_stats->lock); + test_iosched_set_ignore_round(true); + return 0; +} + +/* + * Pseudo-randomly choose a seed based on the last seed, and update it in + * seed_number. then return seed_number (mod max_val), or min_val. + */ +static unsigned int pseudo_random_seed(unsigned int *seed_number, + unsigned int min_val, + unsigned int max_val) +{ + int ret = 0; + + if (!seed_number) + return 0; + + *seed_number = ((unsigned int)(((unsigned long)*seed_number * + (unsigned long)LARGE_PRIME_1) + LARGE_PRIME_2)); + ret = (unsigned int)((*seed_number) % max_val); + + return (ret > min_val ? ret : min_val); +} + +/* + * Given a pseudo-random seed, find a pseudo-random num_of_bios. + * Make sure that num_of_bios is not larger than TEST_MAX_SECTOR_RANGE + */ +static void pseudo_rnd_num_of_bios(unsigned int *num_bios_seed, + unsigned int *num_of_bios) +{ + do { + *num_of_bios = pseudo_random_seed(num_bios_seed, 1, + TEST_MAX_BIOS_PER_REQ); + if (!(*num_of_bios)) + *num_of_bios = 1; + } while ((*num_of_bios) * BIO_U32_SIZE * 4 > TEST_MAX_SECTOR_RANGE); +} + +/* Add a single read request to the given td's request queue */ +static int prepare_request_add_read(struct test_data *td) +{ + int ret; + int start_sec; + + if (td) + start_sec = td->start_sector; + else { + test_pr_err("%s: NULL td", __func__); + return 0; + } + + test_pr_info("%s: Adding a read request, first req_id=%d", __func__, + td->wr_rd_next_req_id); + + ret = test_iosched_add_wr_rd_test_req(0, READ, start_sec, 2, + TEST_PATTERN_5A, NULL); + if (ret) { + test_pr_err("%s: failed to add a read request", __func__); + return ret; + } + + return 0; +} + +/* Add a single flush request to the given td's request queue */ +static int prepare_request_add_flush(struct test_data *td) +{ + int ret; + + if (!td) { + test_pr_err("%s: NULL td", __func__); + return 0; + } + + test_pr_info("%s: Adding a flush request, first req_id=%d", __func__, + td->unique_next_req_id); + ret = test_iosched_add_unique_test_req(0, REQ_UNIQUE_FLUSH, + 0, 0, NULL); + if (ret) { + test_pr_err("%s: failed to add a flush request", __func__); + return ret; + } + + return ret; +} + +/* + * Add num_requets amount of write requests to the given td's request queue. + * If random test mode is chosen we pseudo-randomly choose the number of bios + * for each write request, otherwise add between 1 to 5 bio per request. 
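+ *
+ * A condensed user-space sketch of how the bio count is chosen, using
+ * the LARGE_PRIME_1/LARGE_PRIME_2 generator from pseudo_random_seed()
+ * and TEST_MAX_BIOS_PER_REQ == 120 (unsigned-int arithmetic only);
+ * pick_num_bios() is a hypothetical name:
+ *
+ *   static unsigned int pick_num_bios(unsigned int *seed, int is_random,
+ *                                     int i)
+ *   {
+ *           unsigned int n;
+ *
+ *           if (!is_random)
+ *                   return (i % 5) + 1;             // 1..5 bios per request
+ *           *seed = *seed * 1103515367u + 35757u;   // LCG step
+ *           n = *seed % 120;                        // bound by the max
+ *           return n ? n : 1;                       // at least one bio
+ *   }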
+ */ +static int prepare_request_add_write_reqs(struct test_data *td, + int num_requests, int is_err_expected, + int is_random) +{ + int i; + unsigned int start_sec; + int num_bios; + int ret = 0; + unsigned int *bio_seed = &mbtd->random_test_seed; + + if (td) + start_sec = td->start_sector; + else { + test_pr_err("%s: NULL td", __func__); + return ret; + } + + test_pr_info("%s: Adding %d write requests, first req_id=%d", __func__, + num_requests, td->wr_rd_next_req_id); + + for (i = 1; i <= num_requests; i++) { + start_sec = td->start_sector + 4096 * td->num_of_write_bios; + if (is_random) + pseudo_rnd_num_of_bios(bio_seed, &num_bios); + else + /* + * For the non-random case, give num_bios a value + * between 1 and 5, to keep a small number of BIOs + */ + num_bios = (i%5)+1; + + ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE, + start_sec, num_bios, TEST_PATTERN_5A, NULL); + + if (ret) { + test_pr_err("%s: failed to add a write request", + __func__); + return ret; + } + } + return 0; +} + +/* + * Prepare the write, read and flush requests for a generic packed commands + * testcase + */ +static int prepare_packed_requests(struct test_data *td, int is_err_expected, + int num_requests, int is_random) +{ + int ret = 0; + struct mmc_queue *mq; + int max_packed_reqs; + struct request_queue *req_q; + + if (!td) { + pr_err("%s: NULL td", __func__); + return -EINVAL; + } + + req_q = td->req_q; + + if (!req_q) { + pr_err("%s: NULL request queue", __func__); + return -EINVAL; + } + + mq = req_q->queuedata; + if (!mq) { + test_pr_err("%s: NULL mq", __func__); + return -EINVAL; + } + + max_packed_reqs = mq->card->ext_csd.max_packed_writes; + + if (mbtd->random_test_seed <= 0) { + mbtd->random_test_seed = + (unsigned int)(get_jiffies_64() & 0xFFFF); + test_pr_info("%s: got seed from jiffies %d", + __func__, mbtd->random_test_seed); + } + + ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected, + is_random); + if (ret) + return ret; + + /* Avoid memory corruption in upcoming stats set */ + if (td->test_info.testcase == TEST_STOP_DUE_TO_THRESHOLD) + num_requests--; + + memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0, + sizeof(mbtd->exp_packed_stats.pack_stop_reason)); + memset(mbtd->exp_packed_stats.packing_events, 0, + (max_packed_reqs + 1) * sizeof(u32)); + if (num_requests <= max_packed_reqs) + mbtd->exp_packed_stats.packing_events[num_requests] = 1; + + switch (td->test_info.testcase) { + case TEST_STOP_DUE_TO_FLUSH: + case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS: + ret = prepare_request_add_flush(td); + if (ret) + return ret; + + mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1; + break; + case TEST_STOP_DUE_TO_READ: + case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS: + ret = prepare_request_add_read(td); + if (ret) + return ret; + + mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1; + break; + case TEST_STOP_DUE_TO_THRESHOLD: + mbtd->exp_packed_stats.packing_events[num_requests] = 1; + mbtd->exp_packed_stats.packing_events[1] = 1; + mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1; + mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1; + break; + case TEST_STOP_DUE_TO_MAX_REQ_NUM: + case TEST_RET_PARTIAL_MAX_FAIL_IDX: + mbtd->exp_packed_stats.pack_stop_reason[THRESHOLD] = 1; + break; + default: + mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1; + } + mbtd->num_requests = num_requests; + + return 0; +} + +/* + * Prepare the write, read and flush requests for the packing control + * testcases + */ +static int 
prepare_packed_control_tests_requests(struct test_data *td, + int is_err_expected, int num_requests, int is_random) +{ + int ret = 0; + struct mmc_queue *mq; + int max_packed_reqs; + int temp_num_req = num_requests; + struct request_queue *req_q; + int test_packed_trigger; + int num_packed_reqs; + + if (!td) { + test_pr_err("%s: NULL td\n", __func__); + return -EINVAL; + } + + req_q = td->req_q; + + if (!req_q) { + test_pr_err("%s: NULL request queue\n", __func__); + return -EINVAL; + } + + mq = req_q->queuedata; + if (!mq) { + test_pr_err("%s: NULL mq", __func__); + return -EINVAL; + } + + max_packed_reqs = mq->card->ext_csd.max_packed_writes; + test_packed_trigger = mq->num_wr_reqs_to_start_packing; + num_packed_reqs = num_requests - test_packed_trigger; + + if (mbtd->random_test_seed == 0) { + mbtd->random_test_seed = + (unsigned int)(get_jiffies_64() & 0xFFFF); + test_pr_info("%s: got seed from jiffies %d", + __func__, mbtd->random_test_seed); + } + + if (td->test_info.testcase == + TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) { + temp_num_req = num_requests; + num_requests = test_packed_trigger - 1; + } + + /* Verify that the packing is disabled before starting the test */ + mq->wr_packing_enabled = false; + mq->num_of_potential_packed_wr_reqs = 0; + + if (td->test_info.testcase == TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) { + mq->num_of_potential_packed_wr_reqs = test_packed_trigger + 1; + mq->wr_packing_enabled = true; + num_requests = test_packed_trigger + 2; + } + + ret = prepare_request_add_write_reqs(td, num_requests, is_err_expected, + is_random); + if (ret) + goto exit; + + if (td->test_info.testcase == TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED) + num_requests = temp_num_req; + + memset((void *)mbtd->exp_packed_stats.pack_stop_reason, 0, + sizeof(mbtd->exp_packed_stats.pack_stop_reason)); + memset(mbtd->exp_packed_stats.packing_events, 0, + (max_packed_reqs + 1) * sizeof(u32)); + + switch (td->test_info.testcase) { + case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ: + case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ: + ret = prepare_request_add_read(td); + if (ret) + goto exit; + + mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1; + mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1; + break; + case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N: + ret = prepare_request_add_flush(td); + if (ret) + goto exit; + + ret = prepare_request_add_write_reqs(td, num_packed_reqs, + is_err_expected, is_random); + if (ret) + goto exit; + + mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1; + mbtd->exp_packed_stats.pack_stop_reason[FLUSH_OR_DISCARD] = 1; + mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 2; + break; + case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER: + ret = prepare_request_add_read(td); + if (ret) + goto exit; + + ret = prepare_request_add_write_reqs(td, test_packed_trigger, + is_err_expected, is_random); + if (ret) + goto exit; + + mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1; + break; + case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER: + ret = prepare_request_add_flush(td); + if (ret) + goto exit; + + ret = prepare_request_add_write_reqs(td, test_packed_trigger, + is_err_expected, is_random); + if (ret) + goto exit; + + mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1; + break; + case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED: + ret = prepare_request_add_read(td); + if (ret) + goto exit; + + ret = prepare_request_add_write_reqs(td, test_packed_trigger-1, + is_err_expected, is_random); + if (ret) + goto exit; + + ret = 
prepare_request_add_write_reqs(td, num_requests, + is_err_expected, is_random); + if (ret) + goto exit; + + mbtd->exp_packed_stats.packing_events[num_requests] = 1; + mbtd->exp_packed_stats.packing_events[num_requests-1] = 1; + mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1; + mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1; + break; + case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED: + ret = prepare_request_add_read(td); + if (ret) + goto exit; + + ret = prepare_request_add_write_reqs(td, num_requests, + is_err_expected, is_random); + if (ret) + goto exit; + + ret = prepare_request_add_read(td); + if (ret) + goto exit; + + ret = prepare_request_add_write_reqs(td, test_packed_trigger-1, + is_err_expected, is_random); + if (ret) + goto exit; + + mbtd->exp_packed_stats.pack_stop_reason[WRONG_DATA_DIR] = 1; + mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1; + break; + case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS: + case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS: + mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1; + break; + default: + mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1; + mbtd->exp_packed_stats.packing_events[num_packed_reqs] = 1; + } + mbtd->num_requests = num_requests; + +exit: + return ret; +} + +/* + * Prepare requests for the TEST_RET_PARTIAL_FOLLOWED_BY_ABORT testcase. + * In this testcase we have mixed error expectations from different + * write requests, hence the special prepare function. + */ +static int prepare_partial_followed_by_abort(struct test_data *td, + int num_requests) +{ + int i, start_address; + int is_err_expected = 0; + int ret = 0; + struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata; + int max_packed_reqs; + + if (!mq) { + test_pr_err("%s: NULL mq", __func__); + return -EINVAL; + } + + max_packed_reqs = mq->card->ext_csd.max_packed_writes; + + for (i = 1; i <= num_requests; i++) { + if (i > (num_requests / 2)) + is_err_expected = 1; + + start_address = td->start_sector + 4096 * td->num_of_write_bios; + ret = test_iosched_add_wr_rd_test_req(is_err_expected, WRITE, + start_address, (i % 5) + 1, TEST_PATTERN_5A, + NULL); + if (ret) { + test_pr_err("%s: failed to add a write request", + __func__); + return ret; + } + } + + memset((void *)&mbtd->exp_packed_stats.pack_stop_reason, 0, + sizeof(mbtd->exp_packed_stats.pack_stop_reason)); + memset(mbtd->exp_packed_stats.packing_events, 0, + (max_packed_reqs + 1) * sizeof(u32)); + mbtd->exp_packed_stats.packing_events[num_requests] = 1; + mbtd->exp_packed_stats.pack_stop_reason[EMPTY_QUEUE] = 1; + + mbtd->num_requests = num_requests; + + return ret; +} + +/* + * Get number of write requests for current testcase. If random test mode was + * chosen, pseudo-randomly choose the number of requests, otherwise set to + * two less than the packing threshold. 
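+ *
+ * A minimal sketch of the default (non-random) choice, before the
+ * testcase-specific overrides applied in the function body;
+ * default_num_requests() is a hypothetical name:
+ *
+ *   static int default_num_requests(int max_packed_writes, int trigger,
+ *                                   int packing_control_group)
+ *   {
+ *           int n = max_packed_writes - 2;  // stay below the packed max
+ *
+ *           if (packing_control_group)
+ *                   n += trigger;           // first reach the packing trigger
+ *           return n;
+ *   }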
+ */ +static int get_num_requests(struct test_data *td) +{ + int *seed = &mbtd->random_test_seed; + struct request_queue *req_q; + struct mmc_queue *mq; + int max_num_requests; + int num_requests; + int min_num_requests = 2; + int is_random = mbtd->is_random; + int max_for_double; + int test_packed_trigger; + + req_q = test_iosched_get_req_queue(); + if (req_q) + mq = req_q->queuedata; + else { + test_pr_err("%s: NULL request queue", __func__); + return 0; + } + + if (!mq) { + test_pr_err("%s: NULL mq", __func__); + return -EINVAL; + } + + max_num_requests = mq->card->ext_csd.max_packed_writes; + num_requests = max_num_requests - 2; + test_packed_trigger = mq->num_wr_reqs_to_start_packing; + + /* + * Here max_for_double is intended for packed control testcases + * in which we issue many write requests. It's purpose is to prevent + * exceeding max number of req_queue requests. + */ + max_for_double = max_num_requests - 10; + + if (td->test_info.testcase == + TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS) + /* Don't expect packing, so issue up to trigger-1 reqs */ + num_requests = test_packed_trigger - 1; + + if (is_random) { + if (td->test_info.testcase == + TEST_RET_PARTIAL_FOLLOWED_BY_ABORT) + /* + * Here we don't want num_requests to be less than 1 + * as a consequence of division by 2. + */ + min_num_requests = 3; + + if (td->test_info.testcase == + TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS) + /* Don't expect packing, so issue up to trigger reqs */ + max_num_requests = test_packed_trigger; + + num_requests = pseudo_random_seed(seed, min_num_requests, + max_num_requests - 1); + } + + if (td->test_info.testcase == + TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS) + num_requests -= test_packed_trigger; + + if (td->test_info.testcase == TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N) + num_requests = + num_requests > max_for_double ? max_for_double : num_requests; + + if (mbtd->test_group == TEST_PACKING_CONTROL_GROUP) + num_requests += test_packed_trigger; + + if (td->test_info.testcase == TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS) + num_requests = test_packed_trigger; + + return num_requests; +} + +/* + * An implementation for the prepare_test_fn pointer in the test_info + * data structure. According to the testcase we add the right number of requests + * and decide if an error is expected or not. 
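+ *
+ * Conceptually this is a dispatch from testcase to "how many requests"
+ * and "is an error expected". A minimal sketch with the hypothetical
+ * names prepare_args and get_prepare_args (the case values are
+ * placeholders for testcases such as TEST_STOP_DUE_TO_THRESHOLD and the
+ * send-invalid group):
+ *
+ *   struct prepare_args { int num_requests; int err_expected; };
+ *
+ *   static struct prepare_args get_prepare_args(int testcase,
+ *                                               int num_requests,
+ *                                               int max_packed_writes)
+ *   {
+ *           struct prepare_args a = { num_requests, 0 };
+ *
+ *           switch (testcase) {
+ *           case 0:         // e.g. TEST_STOP_DUE_TO_THRESHOLD
+ *                   a.num_requests = max_packed_writes + 1;
+ *                   break;
+ *           case 1:         // e.g. any send-invalid testcase
+ *                   a.err_expected = 1;
+ *                   break;
+ *           }
+ *           return a;
+ *   }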
+ */ +static int prepare_test(struct test_data *td) +{ + struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata; + int max_num_requests; + int num_requests = 0; + int ret = 0; + int is_random = mbtd->is_random; + int test_packed_trigger = mq->num_wr_reqs_to_start_packing; + + if (!mq) { + test_pr_err("%s: NULL mq", __func__); + return -EINVAL; + } + + max_num_requests = mq->card->ext_csd.max_packed_writes; + + if (is_random && mbtd->random_test_seed == 0) { + mbtd->random_test_seed = + (unsigned int)(get_jiffies_64() & 0xFFFF); + test_pr_info("%s: got seed from jiffies %d", + __func__, mbtd->random_test_seed); + } + + num_requests = get_num_requests(td); + + if (mbtd->test_group == TEST_SEND_INVALID_GROUP) + mq->packed_test_fn = + test_invalid_packed_cmd; + + if (mbtd->test_group == TEST_ERR_CHECK_GROUP) + mq->err_check_fn = test_err_check; + + switch (td->test_info.testcase) { + case TEST_STOP_DUE_TO_FLUSH: + case TEST_STOP_DUE_TO_READ: + case TEST_RET_PARTIAL_FOLLOWED_BY_SUCCESS: + case TEST_RET_PARTIAL_MULTIPLE_UNTIL_SUCCESS: + case TEST_STOP_DUE_TO_EMPTY_QUEUE: + case TEST_CMD23_PACKED_BIT_UNSET: + ret = prepare_packed_requests(td, 0, num_requests, is_random); + break; + case TEST_STOP_DUE_TO_FLUSH_AFTER_MAX_REQS: + case TEST_STOP_DUE_TO_READ_AFTER_MAX_REQS: + ret = prepare_packed_requests(td, 0, max_num_requests - 1, + is_random); + break; + case TEST_RET_PARTIAL_FOLLOWED_BY_ABORT: + ret = prepare_partial_followed_by_abort(td, num_requests); + break; + case TEST_STOP_DUE_TO_MAX_REQ_NUM: + case TEST_RET_PARTIAL_MAX_FAIL_IDX: + ret = prepare_packed_requests(td, 0, max_num_requests, + is_random); + break; + case TEST_STOP_DUE_TO_THRESHOLD: + ret = prepare_packed_requests(td, 0, max_num_requests + 1, + is_random); + break; + case TEST_RET_ABORT: + case TEST_RET_RETRY: + case TEST_RET_CMD_ERR: + case TEST_RET_DATA_ERR: + case TEST_HDR_INVALID_VERSION: + case TEST_HDR_WRONG_WRITE_CODE: + case TEST_HDR_INVALID_RW_CODE: + case TEST_HDR_DIFFERENT_ADDRESSES: + case TEST_HDR_REQ_NUM_SMALLER_THAN_ACTUAL: + case TEST_HDR_REQ_NUM_LARGER_THAN_ACTUAL: + case TEST_CMD23_MAX_PACKED_WRITES: + case TEST_CMD23_ZERO_PACKED_WRITES: + case TEST_CMD23_REL_WR_BIT_SET: + case TEST_CMD23_BITS_16TO29_SET: + case TEST_CMD23_HDR_BLK_NOT_IN_COUNT: + case TEST_HDR_CMD23_PACKED_BIT_SET: + ret = prepare_packed_requests(td, 1, num_requests, is_random); + break; + case TEST_PACKING_EXP_N_OVER_TRIGGER: + case TEST_PACKING_EXP_N_OVER_TRIGGER_FB_READ: + case TEST_PACKING_NOT_EXP_TRIGGER_REQUESTS: + case TEST_PACKING_NOT_EXP_LESS_THAN_TRIGGER_REQUESTS: + case TEST_PACK_MIX_PACKED_NO_PACKED_PACKED: + case TEST_PACK_MIX_NO_PACKED_PACKED_NO_PACKED: + ret = prepare_packed_control_tests_requests(td, 0, num_requests, + is_random); + break; + case TEST_PACKING_EXP_THRESHOLD_OVER_TRIGGER: + ret = prepare_packed_control_tests_requests(td, 0, + max_num_requests, is_random); + break; + case TEST_PACKING_EXP_ONE_OVER_TRIGGER_FB_READ: + ret = prepare_packed_control_tests_requests(td, 0, + test_packed_trigger + 1, + is_random); + break; + case TEST_PACKING_EXP_N_OVER_TRIGGER_FLUSH_N: + ret = prepare_packed_control_tests_requests(td, 0, num_requests, + is_random); + break; + case TEST_PACKING_NOT_EXP_TRIGGER_READ_TRIGGER: + case TEST_PACKING_NOT_EXP_TRIGGER_FLUSH_TRIGGER: + ret = prepare_packed_control_tests_requests(td, 0, + test_packed_trigger, is_random); + break; + default: + test_pr_info("%s: Invalid test case...", __func__); + return -EINVAL; + } + + return ret; +} + +static int run_packed_test(struct test_data *td) +{ + 
+	struct mmc_queue *mq;
+	struct request_queue *req_q;
+
+	if (!td) {
+		pr_err("%s: NULL td", __func__);
+		return -EINVAL;
+	}
+
+	req_q = td->req_q;
+	if (!req_q) {
+		pr_err("%s: NULL request queue", __func__);
+		return -EINVAL;
+	}
+
+	mq = req_q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	mmc_blk_init_packed_statistics(mq->card);
+
+	if (td->test_info.testcase != TEST_PACK_MIX_PACKED_NO_PACKED_PACKED) {
+		/*
+		 * Verify that packing is disabled before starting the test.
+		 */
+		mq->wr_packing_enabled = false;
+		mq->num_of_potential_packed_wr_reqs = 0;
+	}
+
+	__blk_run_queue(td->req_q);
+
+	return 0;
+}
+
+/*
+ * An implementation for the post_test_fn pointer in the test_info data
+ * structure. We reset the function pointers in the mmc_queue so that the FS
+ * is able to dispatch its requests correctly after the test is finished.
+ */
+static int post_test(struct test_data *td)
+{
+	struct mmc_queue *mq;
+
+	if (!td)
+		return -EINVAL;
+
+	mq = td->req_q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	mq->packed_test_fn = NULL;
+	mq->err_check_fn = NULL;
+
+	return 0;
+}
+
+/*
+ * This function checks, based on the current test's test_group, that the
+ * packed commands capability and control are set right. In addition, we check
+ * whether the card supports the packed command feature.
+ */
+static int validate_packed_commands_settings(void)
+{
+	struct request_queue *req_q;
+	struct mmc_queue *mq;
+	int max_num_requests;
+	struct mmc_host *host;
+
+	req_q = test_iosched_get_req_queue();
+	if (!req_q) {
+		test_pr_err("%s: test_iosched_get_req_queue failed", __func__);
+		test_iosched_set_test_result(TEST_FAILED);
+		return -EINVAL;
+	}
+
+	mq = req_q->queuedata;
+	if (!mq) {
+		test_pr_err("%s: NULL mq", __func__);
+		return -EINVAL;
+	}
+
+	max_num_requests = mq->card->ext_csd.max_packed_writes;
+	host = mq->card->host;
+
+	if (!(host->caps2 & MMC_CAP2_PACKED_WR)) {
+		test_pr_err("%s: Packed Write capability disabled, exit test",
+			    __func__);
+		test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+		return -EINVAL;
+	}
+
+	if (max_num_requests == 0) {
+		test_pr_err(
+		"%s: no write packing support, ext_csd.max_packed_writes=%d",
+		__func__, mq->card->ext_csd.max_packed_writes);
+		test_iosched_set_test_result(TEST_NOT_SUPPORTED);
+		return -EINVAL;
+	}
+
+	test_pr_info("%s: max number of packed requests supported is %d",
+		     __func__, max_num_requests);
+
+	switch (mbtd->test_group) {
+	case TEST_SEND_WRITE_PACKING_GROUP:
+	case TEST_ERR_CHECK_GROUP:
+	case TEST_SEND_INVALID_GROUP:
+		/* disable the packing control */
+		host->caps2 &= ~MMC_CAP2_PACKED_WR_CONTROL;
+		break;
+	case TEST_PACKING_CONTROL_GROUP:
+		host->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static bool message_repeat;
+static int test_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	message_repeat = 1;
+	return 0;
+}
+
+/* send_write_packing TEST */
+static ssize_t send_write_packing_test_write(struct file *file,
+				const char __user *buf,
+				size_t count,
+				loff_t *ppos)
+{
+	int ret = 0;
+	int i = 0;
+	int number = -1;
+	int j = 0;
+
+	test_pr_info("%s: -- send_write_packing TEST --", __func__);
+
+	sscanf(buf, "%d", &number);
+
+	if (number <= 0)
+		number = 1;
+
+	mbtd->test_group = TEST_SEND_WRITE_PACKING_GROUP;
+
+	if (validate_packed_commands_settings())
+		return count;
+
+	if (mbtd->random_test_seed > 0)
+		test_pr_info("%s: Test 
seed: %d", __func__, + mbtd->random_test_seed); + + memset(&mbtd->test_info, 0, sizeof(struct test_info)); + + mbtd->test_info.data = mbtd; + mbtd->test_info.prepare_test_fn = prepare_test; + mbtd->test_info.run_test_fn = run_packed_test; + mbtd->test_info.check_test_result_fn = check_wr_packing_statistics; + mbtd->test_info.get_test_case_str_fn = get_test_case_str; + mbtd->test_info.post_test_fn = post_test; + + for (i = 0; i < number; ++i) { + test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number); + test_pr_info("%s: ====================", __func__); + + for (j = SEND_WRITE_PACKING_MIN_TESTCASE; + j <= SEND_WRITE_PACKING_MAX_TESTCASE; j++) { + + mbtd->test_info.testcase = j; + mbtd->is_random = RANDOM_TEST; + ret = test_iosched_start_test(&mbtd->test_info); + if (ret) + break; + /* Allow FS requests to be dispatched */ + msleep(1000); + mbtd->test_info.testcase = j; + mbtd->is_random = NON_RANDOM_TEST; + ret = test_iosched_start_test(&mbtd->test_info); + if (ret) + break; + /* Allow FS requests to be dispatched */ + msleep(1000); + } + } + + test_pr_info("%s: Completed all the test cases.", __func__); + + return count; +} + +static ssize_t send_write_packing_test_read(struct file *file, + char __user *buffer, + size_t count, + loff_t *offset) +{ + memset((void *)buffer, 0, count); + + snprintf(buffer, count, + "\nsend_write_packing_test\n" + "=========\n" + "Description:\n" + "This test checks the following scenarios\n" + "- Pack due to FLUSH message\n" + "- Pack due to FLUSH after threshold writes\n" + "- Pack due to READ message\n" + "- Pack due to READ after threshold writes\n" + "- Pack due to empty queue\n" + "- Pack due to threshold writes\n" + "- Pack due to one over threshold writes\n"); + + if (message_repeat == 1) { + message_repeat = 0; + return strnlen(buffer, count); + } else { + return 0; + } +} + +const struct file_operations send_write_packing_test_ops = { + .open = test_open, + .write = send_write_packing_test_write, + .read = send_write_packing_test_read, +}; + +/* err_check TEST */ +static ssize_t err_check_test_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *ppos) +{ + int ret = 0; + int i = 0; + int number = -1; + int j = 0; + + test_pr_info("%s: -- err_check TEST --", __func__); + + sscanf(buf, "%d", &number); + + if (number <= 0) + number = 1; + + mbtd->test_group = TEST_ERR_CHECK_GROUP; + + if (validate_packed_commands_settings()) + return count; + + if (mbtd->random_test_seed > 0) + test_pr_info("%s: Test seed: %d", __func__, + mbtd->random_test_seed); + + memset(&mbtd->test_info, 0, sizeof(struct test_info)); + + mbtd->test_info.data = mbtd; + mbtd->test_info.prepare_test_fn = prepare_test; + mbtd->test_info.run_test_fn = run_packed_test; + mbtd->test_info.check_test_result_fn = check_wr_packing_statistics; + mbtd->test_info.get_test_case_str_fn = get_test_case_str; + mbtd->test_info.post_test_fn = post_test; + + for (i = 0; i < number; ++i) { + test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number); + test_pr_info("%s: ====================", __func__); + + for (j = ERR_CHECK_MIN_TESTCASE; + j <= ERR_CHECK_MAX_TESTCASE ; j++) { + mbtd->test_info.testcase = j; + mbtd->is_random = RANDOM_TEST; + ret = test_iosched_start_test(&mbtd->test_info); + if (ret) + break; + /* Allow FS requests to be dispatched */ + msleep(1000); + mbtd->test_info.testcase = j; + mbtd->is_random = NON_RANDOM_TEST; + ret = test_iosched_start_test(&mbtd->test_info); + if (ret) + break; + /* Allow FS requests to be dispatched */ + msleep(1000); + } + } + 
+ test_pr_info("%s: Completed all the test cases.", __func__); + + return count; +} + +static ssize_t err_check_test_read(struct file *file, + char __user *buffer, + size_t count, + loff_t *offset) +{ + memset((void *)buffer, 0, count); + + snprintf(buffer, count, + "\nerr_check_TEST\n" + "=========\n" + "Description:\n" + "This test checks the following scenarios\n" + "- Return ABORT\n" + "- Return PARTIAL followed by success\n" + "- Return PARTIAL followed by abort\n" + "- Return PARTIAL multiple times until success\n" + "- Return PARTIAL with fail index = threshold\n" + "- Return RETRY\n" + "- Return CMD_ERR\n" + "- Return DATA_ERR\n"); + + if (message_repeat == 1) { + message_repeat = 0; + return strnlen(buffer, count); + } else { + return 0; + } +} + +const struct file_operations err_check_test_ops = { + .open = test_open, + .write = err_check_test_write, + .read = err_check_test_read, +}; + +/* send_invalid_packed TEST */ +static ssize_t send_invalid_packed_test_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *ppos) +{ + int ret = 0; + int i = 0; + int number = -1; + int j = 0; + int num_of_failures = 0; + + test_pr_info("%s: -- send_invalid_packed TEST --", __func__); + + sscanf(buf, "%d", &number); + + if (number <= 0) + number = 1; + + mbtd->test_group = TEST_SEND_INVALID_GROUP; + + if (validate_packed_commands_settings()) + return count; + + if (mbtd->random_test_seed > 0) + test_pr_info("%s: Test seed: %d", __func__, + mbtd->random_test_seed); + + memset(&mbtd->test_info, 0, sizeof(struct test_info)); + + mbtd->test_info.data = mbtd; + mbtd->test_info.prepare_test_fn = prepare_test; + mbtd->test_info.run_test_fn = run_packed_test; + mbtd->test_info.check_test_result_fn = check_wr_packing_statistics; + mbtd->test_info.get_test_case_str_fn = get_test_case_str; + mbtd->test_info.post_test_fn = post_test; + + for (i = 0; i < number; ++i) { + test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number); + test_pr_info("%s: ====================", __func__); + + for (j = INVALID_CMD_MIN_TESTCASE; + j <= INVALID_CMD_MAX_TESTCASE ; j++) { + + mbtd->test_info.testcase = j; + mbtd->is_random = RANDOM_TEST; + ret = test_iosched_start_test(&mbtd->test_info); + if (ret) + num_of_failures++; + /* Allow FS requests to be dispatched */ + msleep(1000); + + mbtd->test_info.testcase = j; + mbtd->is_random = NON_RANDOM_TEST; + ret = test_iosched_start_test(&mbtd->test_info); + if (ret) + num_of_failures++; + /* Allow FS requests to be dispatched */ + msleep(1000); + } + } + + test_pr_info("%s: Completed all the test cases.", __func__); + + if (num_of_failures > 0) { + test_iosched_set_test_result(TEST_FAILED); + test_pr_err( + "There were %d failures during the test, TEST FAILED", + num_of_failures); + } + return count; +} + +static ssize_t send_invalid_packed_test_read(struct file *file, + char __user *buffer, + size_t count, + loff_t *offset) +{ + memset((void *)buffer, 0, count); + + snprintf(buffer, count, + "\nsend_invalid_packed_TEST\n" + "=========\n" + "Description:\n" + "This test checks the following scenarios\n" + "- Send an invalid header version\n" + "- Send the wrong write code\n" + "- Send an invalid R/W code\n" + "- Send wrong start address in header\n" + "- Send header with block_count smaller than actual\n" + "- Send header with block_count larger than actual\n" + "- Send header CMD23 packed bit set\n" + "- Send CMD23 with block count over threshold\n" + "- Send CMD23 with block_count equals zero\n" + "- Send CMD23 packed bit unset\n" + "- Send CMD23 
reliable write bit set\n" + "- Send CMD23 bits [16-29] set\n" + "- Send CMD23 header block not in block_count\n"); + + if (message_repeat == 1) { + message_repeat = 0; + return strnlen(buffer, count); + } else { + return 0; + } +} + +const struct file_operations send_invalid_packed_test_ops = { + .open = test_open, + .write = send_invalid_packed_test_write, + .read = send_invalid_packed_test_read, +}; + +/* packing_control TEST */ +static ssize_t write_packing_control_test_write(struct file *file, + const char __user *buf, + size_t count, + loff_t *ppos) +{ + int ret = 0; + int i = 0; + int number = -1; + int j = 0; + struct mmc_queue *mq = test_iosched_get_req_queue()->queuedata; + int max_num_requests = mq->card->ext_csd.max_packed_writes; + int test_successful = 1; + + test_pr_info("%s: -- write_packing_control TEST --", __func__); + + sscanf(buf, "%d", &number); + + if (number <= 0) + number = 1; + + test_pr_info("%s: max_num_requests = %d ", __func__, + max_num_requests); + + memset(&mbtd->test_info, 0, sizeof(struct test_info)); + mbtd->test_group = TEST_PACKING_CONTROL_GROUP; + + if (validate_packed_commands_settings()) + return count; + + mbtd->test_info.data = mbtd; + mbtd->test_info.prepare_test_fn = prepare_test; + mbtd->test_info.run_test_fn = run_packed_test; + mbtd->test_info.check_test_result_fn = check_wr_packing_statistics; + mbtd->test_info.get_test_case_str_fn = get_test_case_str; + + for (i = 0; i < number; ++i) { + test_pr_info("%s: Cycle # %d / %d", __func__, i+1, number); + test_pr_info("%s: ====================", __func__); + + for (j = PACKING_CONTROL_MIN_TESTCASE; + j <= PACKING_CONTROL_MAX_TESTCASE; j++) { + + test_successful = 1; + mbtd->test_info.testcase = j; + mbtd->is_random = RANDOM_TEST; + ret = test_iosched_start_test(&mbtd->test_info); + if (ret) { + test_successful = 0; + break; + } + /* Allow FS requests to be dispatched */ + msleep(1000); + + mbtd->test_info.testcase = j; + mbtd->is_random = NON_RANDOM_TEST; + ret = test_iosched_start_test(&mbtd->test_info); + if (ret) { + test_successful = 0; + break; + } + /* Allow FS requests to be dispatched */ + msleep(1000); + } + + if (!test_successful) + break; + } + + test_pr_info("%s: Completed all the test cases.", __func__); + + return count; +} + +static ssize_t write_packing_control_test_read(struct file *file, + char __user *buffer, + size_t count, + loff_t *offset) +{ + memset((void *)buffer, 0, count); + + snprintf(buffer, count, + "\nwrite_packing_control_test\n" + "=========\n" + "Description:\n" + "This test checks the following scenarios\n" + "- Packing expected - one over trigger\n" + "- Packing expected - N over trigger\n" + "- Packing expected - N over trigger followed by read\n" + "- Packing expected - N over trigger followed by flush\n" + "- Packing expected - threshold over trigger FB by flush\n" + "- Packing not expected - less than trigger\n" + "- Packing not expected - trigger requests\n" + "- Packing not expected - trigger, read, trigger\n" + "- Mixed state - packing -> no packing -> packing\n" + "- Mixed state - no packing -> packing -> no packing\n"); + + if (message_repeat == 1) { + message_repeat = 0; + return strnlen(buffer, count); + } else { + return 0; + } +} + +const struct file_operations write_packing_control_test_ops = { + .open = test_open, + .write = write_packing_control_test_write, + .read = write_packing_control_test_read, +}; + +static void mmc_block_test_debugfs_cleanup(void) +{ + debugfs_remove(mbtd->debug.random_test_seed); + 
debugfs_remove(mbtd->debug.send_write_packing_test); + debugfs_remove(mbtd->debug.err_check_test); + debugfs_remove(mbtd->debug.send_invalid_packed_test); + debugfs_remove(mbtd->debug.packing_control_test); +} + +static int mmc_block_test_debugfs_init(void) +{ + struct dentry *utils_root, *tests_root; + + utils_root = test_iosched_get_debugfs_utils_root(); + tests_root = test_iosched_get_debugfs_tests_root(); + + if (!utils_root || !tests_root) + return -EINVAL; + + mbtd->debug.random_test_seed = debugfs_create_u32( + "random_test_seed", + S_IRUGO | S_IWUGO, + utils_root, + &mbtd->random_test_seed); + + if (!mbtd->debug.random_test_seed) + goto err_nomem; + + mbtd->debug.send_write_packing_test = + debugfs_create_file("send_write_packing_test", + S_IRUGO | S_IWUGO, + tests_root, + NULL, + &send_write_packing_test_ops); + + if (!mbtd->debug.send_write_packing_test) + goto err_nomem; + + mbtd->debug.err_check_test = + debugfs_create_file("err_check_test", + S_IRUGO | S_IWUGO, + tests_root, + NULL, + &err_check_test_ops); + + if (!mbtd->debug.err_check_test) + goto err_nomem; + + mbtd->debug.send_invalid_packed_test = + debugfs_create_file("send_invalid_packed_test", + S_IRUGO | S_IWUGO, + tests_root, + NULL, + &send_invalid_packed_test_ops); + + if (!mbtd->debug.send_invalid_packed_test) + goto err_nomem; + + mbtd->debug.packing_control_test = debugfs_create_file( + "packing_control_test", + S_IRUGO | S_IWUGO, + tests_root, + NULL, + &write_packing_control_test_ops); + + if (!mbtd->debug.packing_control_test) + goto err_nomem; + + return 0; + +err_nomem: + mmc_block_test_debugfs_cleanup(); + return -ENOMEM; +} + +static void mmc_block_test_probe(void) +{ + struct request_queue *q = test_iosched_get_req_queue(); + struct mmc_queue *mq; + int max_packed_reqs; + + if (!q) { + test_pr_err("%s: NULL request queue", __func__); + return; + } + + mq = q->queuedata; + if (!mq) { + test_pr_err("%s: NULL mq", __func__); + return; + } + + max_packed_reqs = mq->card->ext_csd.max_packed_writes; + mbtd->exp_packed_stats.packing_events = + kzalloc((max_packed_reqs + 1) * + sizeof(*mbtd->exp_packed_stats.packing_events), + GFP_KERNEL); + + mmc_block_test_debugfs_init(); +} + +static void mmc_block_test_remove(void) +{ + mmc_block_test_debugfs_cleanup(); +} + +static int __init mmc_block_test_init(void) +{ + mbtd = kzalloc(sizeof(struct mmc_block_test_data), GFP_KERNEL); + if (!mbtd) { + test_pr_err("%s: failed to allocate mmc_block_test_data", + __func__); + return -ENODEV; + } + + mbtd->bdt.init_fn = mmc_block_test_probe; + mbtd->bdt.exit_fn = mmc_block_test_remove; + INIT_LIST_HEAD(&mbtd->bdt.list); + test_iosched_register(&mbtd->bdt); + + return 0; +} + +static void __exit mmc_block_test_exit(void) +{ + test_iosched_unregister(&mbtd->bdt); + kfree(mbtd); +} + +module_init(mmc_block_test_init); +module_exit(mmc_block_test_exit); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MMC block test"); diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 9a11aaa6e985..12c66919f06f 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -2807,7 +2807,8 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf, } #ifdef CONFIG_HIGHMEM - __free_pages(test->highmem, BUFFER_ORDER); + if (test->highmem) + __free_pages(test->highmem, BUFFER_ORDER); #endif kfree(test->buffer); kfree(test); diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 6a4cd2bb4629..8385e5706b07 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ 
-16,6 +16,8 @@
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
 #include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>

 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -25,6 +27,13 @@
 #define MMC_QUEUE_BOUNCESZ 65536

 /*
+ * The default number of requests that triggers write packing was determined
+ * based on benchmark tests; it keeps the read latency as low as possible
+ * while maintaining a high write throughput.
+ */
+#define DEFAULT_NUM_REQS_TO_START_PACK 17
+
+/*
  * Prepare a MMC request. This just filters out odd stuff.
  */
 static int mmc_prep_request(struct request_queue *q, struct request *req)
@@ -47,10 +56,102 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }

+static struct request *mmc_peek_request(struct mmc_queue *mq)
+{
+	struct request_queue *q = mq->queue;
+
+	mq->cmdq_req_peeked = NULL;
+
+	spin_lock_irq(q->queue_lock);
+	if (!blk_queue_stopped(q))
+		mq->cmdq_req_peeked = blk_peek_request(q);
+	spin_unlock_irq(q->queue_lock);
+
+	return mq->cmdq_req_peeked;
+}
+
+static bool mmc_check_blk_queue_start_tag(struct request_queue *q,
+					  struct request *req)
+{
+	int ret;
+
+	spin_lock_irq(q->queue_lock);
+	ret = blk_queue_start_tag(q, req);
+	spin_unlock_irq(q->queue_lock);
+
+	return !!ret;
+}
+
+static inline void mmc_cmdq_ready_wait(struct mmc_host *host,
+				       struct mmc_queue *mq)
+{
+	struct mmc_cmdq_context_info *ctx = &host->cmdq_ctx;
+	struct request_queue *q = mq->queue;
+
+	/*
+	 * Wait until all of the following conditions are true:
+	 * 1. There is a request pending in the block layer queue
+	 *    to be processed.
+	 * 2. If the peeked request is a flush/discard, there is no other
+	 *    direct command active.
+	 * 3. The cmdq state is not halted.
+	 * 4. The cmdq state is not in error state.
+	 * 5. There is no outstanding RPMB request pending.
+	 * 6. A free tag is available to process the new request.
+	 *    (This must be the last condition to check.)
+	 */
+	wait_event(ctx->wait, kthread_should_stop()
+		|| (mmc_peek_request(mq) &&
+		!((mq->cmdq_req_peeked->cmd_flags & (REQ_FLUSH | REQ_DISCARD))
+		  && test_bit(CMDQ_STATE_DCMD_ACTIVE, &ctx->curr_state))
+		&& !(!host->card->part_curr && !mmc_card_suspended(host->card)
+		  && mmc_host_halt(host))
+		&& !(!host->card->part_curr && mmc_host_cq_disable(host) &&
+		  !mmc_card_suspended(host->card))
+		&& !test_bit(CMDQ_STATE_ERR, &ctx->curr_state)
+		&& !atomic_read(&host->rpmb_req_pending)
+		&& !mmc_check_blk_queue_start_tag(q, mq->cmdq_req_peeked)));
+}
+
+static int mmc_cmdq_thread(void *d)
+{
+	struct mmc_queue *mq = d;
+	struct mmc_card *card = mq->card;
+	struct mmc_host *host = card->host;
+
+	struct sched_param scheduler_params = {0};
+
+	scheduler_params.sched_priority = 1;
+
+	sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
+
+	current->flags |= PF_MEMALLOC;
+	if (card->host->wakeup_on_idle)
+		set_wake_up_idle(true);
+
+	while (1) {
+		int ret = 0;
+
+		mmc_cmdq_ready_wait(host, mq);
+		if (kthread_should_stop())
+			break;
+
+		ret = mq->cmdq_issue_fn(mq, mq->cmdq_req_peeked);
+		/*
+		 * Don't requeue if issue_fn fails: recovery will come via the
+		 * completion softirq. We also end the request on a partition
+		 * switch error, so we should not requeue the request here.
+ */ + } /* loop */ + + return 0; +} + static int mmc_queue_thread(void *d) { struct mmc_queue *mq = d; struct request_queue *q = mq->queue; + struct mmc_card *card = mq->card; struct sched_param scheduler_params = {0}; scheduler_params.sched_priority = 1; @@ -58,6 +159,8 @@ static int mmc_queue_thread(void *d) sched_setscheduler(current, SCHED_FIFO, &scheduler_params); current->flags |= PF_MEMALLOC; + if (card->host->wakeup_on_idle) + set_wake_up_idle(true); down(&mq->thread_sem); do { @@ -75,8 +178,8 @@ static int mmc_queue_thread(void *d) cmd_flags = req ? req->cmd_flags : 0; mq->issue_fn(mq, req); cond_resched(); - if (mq->flags & MMC_QUEUE_NEW_REQUEST) { - mq->flags &= ~MMC_QUEUE_NEW_REQUEST; + if (test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags)) { + clear_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags); continue; /* fetch again */ } @@ -108,6 +211,13 @@ static int mmc_queue_thread(void *d) return 0; } +static void mmc_cmdq_dispatch_req(struct request_queue *q) +{ + struct mmc_queue *mq = q->queuedata; + + wake_up(&mq->card->host->cmdq_ctx.wait); +} + /* * Generic MMC request handler. This is called for any queue on a * particular host. When the host is not busy, we look for a request @@ -183,6 +293,32 @@ static void mmc_queue_setup_discard(struct request_queue *q, } /** + * mmc_blk_cmdq_setup_queue + * @mq: mmc queue + * @card: card to attach to this queue + * + * Setup queue for CMDQ supporting MMC card + */ +void mmc_cmdq_setup_queue(struct mmc_queue *mq, struct mmc_card *card) +{ + u64 limit = BLK_BOUNCE_HIGH; + struct mmc_host *host = card->host; + + if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) + limit = *mmc_dev(host)->dma_mask; + + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); + if (mmc_can_erase(card)) + mmc_queue_setup_discard(mq->queue, card); + + blk_queue_bounce_limit(mq->queue, limit); + blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, + host->max_req_size / 512)); + blk_queue_max_segment_size(mq->queue, host->max_seg_size); + blk_queue_max_segments(mq->queue, host->max_segs); +} + +/** * mmc_init_queue - initialise a queue structure. * @mq: mmc queue * @card: mmc card to attach this queue @@ -192,7 +328,7 @@ static void mmc_queue_setup_discard(struct request_queue *q, * Initialise a MMC card request queue. */ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, - spinlock_t *lock, const char *subname) + spinlock_t *lock, const char *subname, int area_type) { struct mmc_host *host = card->host; u64 limit = BLK_BOUNCE_HIGH; @@ -204,6 +340,37 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; mq->card = card; + if (card->ext_csd.cmdq_support && + (area_type == MMC_BLK_DATA_AREA_MAIN)) { + mq->queue = blk_init_queue(mmc_cmdq_dispatch_req, lock); + if (!mq->queue) + return -ENOMEM; + mmc_cmdq_setup_queue(mq, card); + ret = mmc_cmdq_init(mq, card); + if (ret) { + pr_err("%s: %d: cmdq: unable to set-up\n", + mmc_hostname(card->host), ret); + blk_cleanup_queue(mq->queue); + } else { + sema_init(&mq->thread_sem, 1); + /* hook for pm qos cmdq init */ + if (card->host->cmdq_ops->init) + card->host->cmdq_ops->init(card->host); + mq->queue->queuedata = mq; + mq->thread = kthread_run(mmc_cmdq_thread, mq, + "mmc-cmdqd/%d%s", + host->index, + subname ? 
subname : ""); + if (IS_ERR(mq->thread)) { + pr_err("%s: %d: cmdq: failed to start mmc-cmdqd thread\n", + mmc_hostname(card->host), ret); + ret = PTR_ERR(mq->thread); + } + + return ret; + } + } + mq->queue = blk_init_queue(mmc_request_fn, lock); if (!mq->queue) return -ENOMEM; @@ -211,6 +378,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, mq->mqrq_cur = mqrq_cur; mq->mqrq_prev = mqrq_prev; mq->queue->queuedata = mq; + mq->num_wr_reqs_to_start_packing = + min_t(int, (int)card->ext_csd.max_packed_writes, + DEFAULT_NUM_REQS_TO_START_PACK); blk_queue_prep_rq(mq->queue, mmc_prep_request); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); @@ -276,24 +446,49 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, #endif if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) { + unsigned int max_segs = host->max_segs; + blk_queue_bounce_limit(mq->queue, limit); blk_queue_max_hw_sectors(mq->queue, min(host->max_blk_count, host->max_req_size / 512)); - blk_queue_max_segments(mq->queue, host->max_segs); blk_queue_max_segment_size(mq->queue, host->max_seg_size); +retry: + blk_queue_max_segments(mq->queue, host->max_segs); mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret); - if (ret) + if (ret == -ENOMEM) + goto cur_sg_alloc_failed; + else if (ret) goto cleanup_queue; - mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret); - if (ret) + if (ret == -ENOMEM) + goto prev_sg_alloc_failed; + else if (ret) goto cleanup_queue; + + goto success; + +prev_sg_alloc_failed: + kfree(mqrq_cur->sg); + mqrq_cur->sg = NULL; +cur_sg_alloc_failed: + host->max_segs /= 2; + if (host->max_segs) { + goto retry; + } else { + host->max_segs = max_segs; + goto cleanup_queue; + } } +success: sema_init(&mq->thread_sem, 1); + /* hook for pm qos legacy init */ + if (card->host->ops->init) + card->host->ops->init(card->host); + mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", host->index, subname ? 
subname : ""); @@ -408,28 +603,192 @@ void mmc_packed_clean(struct mmc_queue *mq) mqrq_prev->packed = NULL; } +static void mmc_cmdq_softirq_done(struct request *rq) +{ + struct mmc_queue *mq = rq->q->queuedata; + mq->cmdq_complete_fn(rq); +} + +static void mmc_cmdq_error_work(struct work_struct *work) +{ + struct mmc_queue *mq = container_of(work, struct mmc_queue, + cmdq_err_work); + + mq->cmdq_error_fn(mq); +} + +enum blk_eh_timer_return mmc_cmdq_rq_timed_out(struct request *req) +{ + struct mmc_queue *mq = req->q->queuedata; + + pr_err("%s: request with tag: %d flags: 0x%llx timed out\n", + mmc_hostname(mq->card->host), req->tag, req->cmd_flags); + + return mq->cmdq_req_timed_out(req); +} + +int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card) +{ + int i, ret = 0; + /* one slot is reserved for dcmd requests */ + int q_depth = card->ext_csd.cmdq_depth - 1; + + card->cmdq_init = false; + if (!(card->host->caps2 & MMC_CAP2_CMD_QUEUE)) { + ret = -ENOTSUPP; + goto out; + } + + init_waitqueue_head(&card->host->cmdq_ctx.queue_empty_wq); + init_waitqueue_head(&card->host->cmdq_ctx.wait); + + mq->mqrq_cmdq = kzalloc( + sizeof(struct mmc_queue_req) * q_depth, GFP_KERNEL); + if (!mq->mqrq_cmdq) { + pr_warn("%s: unable to allocate mqrq's for q_depth %d\n", + mmc_card_name(card), q_depth); + ret = -ENOMEM; + goto out; + } + + /* sg is allocated for data request slots only */ + for (i = 0; i < q_depth; i++) { + mq->mqrq_cmdq[i].sg = mmc_alloc_sg(card->host->max_segs, &ret); + if (ret) { + pr_warn("%s: unable to allocate cmdq sg of size %d\n", + mmc_card_name(card), + card->host->max_segs); + goto free_mqrq_sg; + } + } + + ret = blk_queue_init_tags(mq->queue, q_depth, NULL, BLK_TAG_ALLOC_FIFO); + if (ret) { + pr_warn("%s: unable to allocate cmdq tags %d\n", + mmc_card_name(card), q_depth); + goto free_mqrq_sg; + } + + blk_queue_softirq_done(mq->queue, mmc_cmdq_softirq_done); + INIT_WORK(&mq->cmdq_err_work, mmc_cmdq_error_work); + init_completion(&mq->cmdq_shutdown_complete); + init_completion(&mq->cmdq_pending_req_done); + + blk_queue_rq_timed_out(mq->queue, mmc_cmdq_rq_timed_out); + blk_queue_rq_timeout(mq->queue, 120 * HZ); + card->cmdq_init = true; + + goto out; + +free_mqrq_sg: + for (i = 0; i < q_depth; i++) + kfree(mq->mqrq_cmdq[i].sg); + kfree(mq->mqrq_cmdq); + mq->mqrq_cmdq = NULL; +out: + return ret; +} + +void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card) +{ + int i; + int q_depth = card->ext_csd.cmdq_depth - 1; + + blk_free_tags(mq->queue->queue_tags); + mq->queue->queue_tags = NULL; + blk_queue_free_tags(mq->queue); + + for (i = 0; i < q_depth; i++) + kfree(mq->mqrq_cmdq[i].sg); + kfree(mq->mqrq_cmdq); + mq->mqrq_cmdq = NULL; +} + /** * mmc_queue_suspend - suspend a MMC request queue * @mq: MMC queue to suspend + * @wait: Wait till MMC request queue is empty * * Stop the block request queue, and wait for our thread to * complete any outstanding requests. This ensures that we * won't suspend while a request is being processed. 
*/ -void mmc_queue_suspend(struct mmc_queue *mq) +int mmc_queue_suspend(struct mmc_queue *mq, int wait) { struct request_queue *q = mq->queue; unsigned long flags; + int rc = 0; + struct mmc_card *card = mq->card; + struct request *req; - if (!(mq->flags & MMC_QUEUE_SUSPENDED)) { - mq->flags |= MMC_QUEUE_SUSPENDED; + if (card->cmdq_init && blk_queue_tagged(q)) { + struct mmc_host *host = card->host; - spin_lock_irqsave(q->queue_lock, flags); - blk_stop_queue(q); - spin_unlock_irqrestore(q->queue_lock, flags); + if (test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags)) + goto out; + + if (wait) { - down(&mq->thread_sem); + /* + * After blk_cleanup_queue is called, wait for all + * active_reqs to complete. + * Then wait for cmdq thread to exit before calling + * cmdq shutdown to avoid race between issuing + * requests and shutdown of cmdq. + */ + blk_cleanup_queue(q); + + if (host->cmdq_ctx.active_reqs) + wait_for_completion( + &mq->cmdq_shutdown_complete); + kthread_stop(mq->thread); + mq->cmdq_shutdown(mq); + } else { + spin_lock_irqsave(q->queue_lock, flags); + blk_stop_queue(q); + wake_up(&host->cmdq_ctx.wait); + req = blk_peek_request(q); + if (req || mq->cmdq_req_peeked || + host->cmdq_ctx.active_reqs) { + clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags); + blk_start_queue(q); + rc = -EBUSY; + } + spin_unlock_irqrestore(q->queue_lock, flags); + } + + goto out; } + + if (!(test_and_set_bit(MMC_QUEUE_SUSPENDED, &mq->flags))) { + if (!wait) { + /* suspend/stop the queue in case of suspend */ + spin_lock_irqsave(q->queue_lock, flags); + blk_stop_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + } else { + /* shutdown the queue in case of shutdown/reboot */ + blk_cleanup_queue(q); + } + + rc = down_trylock(&mq->thread_sem); + if (rc && !wait) { + /* + * Failed to take the lock so better to abort the + * suspend because mmcqd thread is processing requests. 
+ */ + clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags); + spin_lock_irqsave(q->queue_lock, flags); + blk_start_queue(q); + spin_unlock_irqrestore(q->queue_lock, flags); + rc = -EBUSY; + } else if (rc && wait) { + down(&mq->thread_sem); + rc = 0; + } + } +out: + return rc; } /** @@ -439,12 +798,13 @@ void mmc_queue_suspend(struct mmc_queue *mq) void mmc_queue_resume(struct mmc_queue *mq) { struct request_queue *q = mq->queue; + struct mmc_card *card = mq->card; unsigned long flags; - if (mq->flags & MMC_QUEUE_SUSPENDED) { - mq->flags &= ~MMC_QUEUE_SUSPENDED; + if (test_and_clear_bit(MMC_QUEUE_SUSPENDED, &mq->flags)) { - up(&mq->thread_sem); + if (!(card->cmdq_init && blk_queue_tagged(q))) + up(&mq->thread_sem); spin_lock_irqsave(q->queue_lock, flags); blk_start_queue(q); diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 1dc4c99f52a1..505712f0e1b0 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -42,22 +42,41 @@ struct mmc_queue_req { struct mmc_async_req mmc_active; enum mmc_packed_type cmd_type; struct mmc_packed *packed; + struct mmc_cmdq_req cmdq_req; }; struct mmc_queue { struct mmc_card *card; struct task_struct *thread; struct semaphore thread_sem; - unsigned int flags; -#define MMC_QUEUE_SUSPENDED (1 << 0) -#define MMC_QUEUE_NEW_REQUEST (1 << 1) + unsigned long flags; +#define MMC_QUEUE_SUSPENDED 0 +#define MMC_QUEUE_NEW_REQUEST 1 - int (*issue_fn)(struct mmc_queue *, struct request *); + int (*issue_fn)(struct mmc_queue *, struct request *); + int (*cmdq_issue_fn)(struct mmc_queue *, + struct request *); + void (*cmdq_complete_fn)(struct request *); + void (*cmdq_error_fn)(struct mmc_queue *); + enum blk_eh_timer_return (*cmdq_req_timed_out)(struct request *); void *data; struct request_queue *queue; struct mmc_queue_req mqrq[2]; struct mmc_queue_req *mqrq_cur; struct mmc_queue_req *mqrq_prev; + struct mmc_queue_req *mqrq_cmdq; + bool wr_packing_enabled; + int num_of_potential_packed_wr_reqs; + int num_wr_reqs_to_start_packing; + bool no_pack_for_random; + struct work_struct cmdq_err_work; + + struct completion cmdq_pending_req_done; + struct completion cmdq_shutdown_complete; + struct request *cmdq_req_peeked; + int (*err_check_fn) (struct mmc_card *, struct mmc_async_req *); + void (*packed_test_fn) (struct request_queue *, struct mmc_queue_req *); + void (*cmdq_shutdown)(struct mmc_queue *); #ifdef CONFIG_MMC_SIMULATE_MAX_SPEED atomic_t max_write_speed; atomic_t max_read_speed; @@ -69,9 +88,9 @@ struct mmc_queue { }; extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, - const char *); + const char *, int); extern void mmc_cleanup_queue(struct mmc_queue *); -extern void mmc_queue_suspend(struct mmc_queue *); +extern int mmc_queue_suspend(struct mmc_queue *, int); extern void mmc_queue_resume(struct mmc_queue *); extern unsigned int mmc_queue_map_sg(struct mmc_queue *, @@ -84,4 +103,9 @@ extern void mmc_packed_clean(struct mmc_queue *); extern int mmc_access_rpmb(struct mmc_queue *); +extern void print_mmc_packing_stats(struct mmc_card *card); + +extern int mmc_cmdq_init(struct mmc_queue *mq, struct mmc_card *card); +extern void mmc_cmdq_clean(struct mmc_queue *mq, struct mmc_card *card); + #endif |
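For context, the hunks above change mmc_queue_suspend() to return an int and take a wait argument, so a caller can either attempt a non-blocking suspend (and back off on -EBUSY) or block until the queue is drained. The fragment below is an illustrative sketch only and is not part of the patch; the wrapper function names are hypothetical, and it assumes the queue.h declarations shown above.

/*
 * Hypothetical callers of the new mmc_queue_suspend(mq, wait) signature.
 * Not part of the patch; shown only to clarify the wait semantics.
 */
static int example_mmc_blk_suspend(struct mmc_queue *mq)
{
	/*
	 * Non-blocking attempt (wait == 0): if requests are still being
	 * processed, mmc_queue_suspend() restarts the queue and returns
	 * -EBUSY, and the caller should abort the suspend.
	 */
	return mmc_queue_suspend(mq, 0);
}

static void example_mmc_blk_shutdown(struct mmc_queue *mq)
{
	/*
	 * Blocking variant (wait == 1): the queue is cleaned up and the
	 * thread is allowed to finish outstanding requests before
	 * power-off, so the return value can be ignored here.
	 */
	(void)mmc_queue_suspend(mq, 1);
}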
