Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/scsi_pm.c              |   42
-rw-r--r--  drivers/scsi/scsi_scan.c            |   11
-rw-r--r--  drivers/scsi/scsi_sysfs.c           |    3
-rw-r--r--  drivers/scsi/sd.c                   |   90
-rw-r--r--  drivers/scsi/sd.h                   |    5
-rw-r--r--  drivers/scsi/sg.c                   |   29
-rw-r--r--  drivers/scsi/ufs/Kconfig            |   35
-rw-r--r--  drivers/scsi/ufs/Makefile           |    5
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.c      | 1671
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.h      |   69
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-debugfs.c |  389
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-debugfs.h |   24
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-ice.c     |  725
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-ice.h     |  132
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c         | 1717
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h         |  173
-rw-r--r--  drivers/scsi/ufs/ufs.h              |   98
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.c       |  120
-rw-r--r--  drivers/scsi/ufs/ufs_quirks.h       |  152
-rw-r--r--  drivers/scsi/ufs/ufs_test.c         | 1534
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.c    |   80
-rw-r--r--  drivers/scsi/ufs/ufshcd-pltfrm.h    |    2
-rw-r--r--  drivers/scsi/ufs/ufshcd.c           | 5444
-rw-r--r--  drivers/scsi/ufs/ufshcd.h           |  786
-rw-r--r--  drivers/scsi/ufs/ufshci.h           |   47
-rw-r--r--  drivers/scsi/ufs/unipro.h           |   40
26 files changed, 12473 insertions, 950 deletions
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index 459abe1dcc87..243b2d13fa0d 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -16,6 +16,9 @@
#include "scsi_priv.h"
+static int do_scsi_runtime_resume(struct device *dev,
+ const struct dev_pm_ops *pm);
+
#ifdef CONFIG_PM_SLEEP
static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
@@ -77,10 +80,22 @@ static int scsi_dev_type_resume(struct device *dev,
scsi_device_resume(to_scsi_device(dev));
dev_dbg(dev, "scsi resume: %d\n", err);
- if (err == 0) {
+ if (err == 0 && (cb != do_scsi_runtime_resume)) {
pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
+ err = pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ if (!err && scsi_is_sdev_device(dev)) {
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ /*
+ * If scsi device runtime PM is managed by block layer
+ * then we should update request queue's runtime status
+ * as well.
+ */
+ if (sdev->request_queue->dev)
+ blk_post_runtime_resume(sdev->request_queue, 0);
+ }
}
return err;
@@ -213,12 +228,32 @@ static int scsi_bus_restore(struct device *dev)
#endif /* CONFIG_PM_SLEEP */
+static int do_scsi_runtime_suspend(struct device *dev,
+ const struct dev_pm_ops *pm)
+{
+ return pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
+}
+
+static int do_scsi_runtime_resume(struct device *dev,
+ const struct dev_pm_ops *pm)
+{
+ return pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
+}
+
static int sdev_runtime_suspend(struct device *dev)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
struct scsi_device *sdev = to_scsi_device(dev);
int err = 0;
+ if (!sdev->request_queue->dev) {
+ err = scsi_dev_type_suspend(dev, do_scsi_runtime_suspend);
+ if (err == -EAGAIN)
+ pm_schedule_suspend(dev, jiffies_to_msecs(
+ round_jiffies_up_relative(HZ/10)));
+ return err;
+ }
+
err = blk_pre_runtime_suspend(sdev->request_queue);
if (err)
return err;
@@ -248,6 +283,9 @@ static int sdev_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
+ if (!sdev->request_queue->dev)
+ return scsi_dev_type_resume(dev, do_scsi_runtime_resume);
+
blk_pre_runtime_resume(sdev->request_queue);
if (pm && pm->runtime_resume)
err = pm->runtime_resume(dev);
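The scsi_pm.c changes above split runtime PM into two paths: when the request queue is not managed by block-layer runtime PM (sdev->request_queue->dev is NULL), the new do_scsi_runtime_suspend()/do_scsi_runtime_resume() wrappers are driven directly through scsi_dev_type_suspend()/scsi_dev_type_resume(); otherwise the existing blk_pre/post_runtime_* flow stays in charge. A minimal sketch of that dispatch, collapsing the hunks above into one function for readability (illustrative only, not the exact patched code):

static int sketch_sdev_runtime_suspend(struct device *dev)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	int err;

	if (!sdev->request_queue->dev) {
		/* queue not managed by block-layer runtime PM */
		err = scsi_dev_type_suspend(dev, do_scsi_runtime_suspend);
		if (err == -EAGAIN)	/* device busy: retry shortly */
			pm_schedule_suspend(dev, jiffies_to_msecs(
					round_jiffies_up_relative(HZ / 10)));
		return err;
	}

	/* block-layer managed: let the request queue arbitrate, as before */
	err = blk_pre_runtime_suspend(sdev->request_queue);
	if (err)
		return err;
	err = do_scsi_runtime_suspend(dev, dev->driver ? dev->driver->pm : NULL);
	blk_post_runtime_suspend(sdev->request_queue, err);
	return err;
}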
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 5e34c7ed483c..fd8ebb9e66e6 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -822,13 +822,8 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
* well-known logical units. Force well-known type
* to enumerate them correctly.
*/
- if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
- sdev_printk(KERN_WARNING, sdev,
- "%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
- __func__, sdev->type, (unsigned int)sdev->lun);
+ if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN)
sdev->type = TYPE_WLUN;
- }
-
}
if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
@@ -973,6 +968,10 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
transport_configure_device(&sdev->sdev_gendev);
+ /* The LLD can override auto suspend tunables in ->slave_configure() */
+ sdev->use_rpm_auto = 0;
+ sdev->autosuspend_delay = SCSI_DEFAULT_AUTOSUSPEND_DELAY;
+
if (sdev->host->hostt->slave_configure) {
ret = sdev->host->hostt->slave_configure(sdev);
if (ret) {
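scsi_scan.c now seeds two per-device runtime-PM tunables, use_rpm_auto and autosuspend_delay, immediately before calling ->slave_configure(), giving the LLD one clean place to override them (scsi_sysfs.c below then skips pm_runtime_forbid() when use_rpm_auto is set, and sd.c applies the delay via pm_runtime_set_autosuspend_delay()). A minimal sketch of such an override in a hypothetical LLD, assuming these new scsi_device fields:

/* Hypothetical LLD hook: opt the device into automatic runtime PM */
static int example_slave_configure(struct scsi_device *sdev)
{
	sdev->use_rpm_auto = 1;		/* don't forbid runtime PM in scsi_sysfs_add_sdev() */
	sdev->autosuspend_delay = 100;	/* autosuspend after 100 ms idle; negative = leave default */
	return 0;
}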
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index b89af3841e44..1290c542f6d6 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1065,7 +1065,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
device_enable_async_suspend(&sdev->sdev_gendev);
scsi_autopm_get_target(starget);
pm_runtime_set_active(&sdev->sdev_gendev);
- pm_runtime_forbid(&sdev->sdev_gendev);
+ if (!sdev->use_rpm_auto)
+ pm_runtime_forbid(&sdev->sdev_gendev);
pm_runtime_enable(&sdev->sdev_gendev);
scsi_autopm_put_target(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9176fb1b1615..71aa6a646a28 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -607,6 +607,31 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
mutex_unlock(&sd_ref_mutex);
}
+struct gendisk *scsi_gendisk_get_from_dev(struct device *dev)
+{
+ struct scsi_disk *sdkp;
+
+ mutex_lock(&sd_ref_mutex);
+ sdkp = dev_get_drvdata(dev);
+ if (sdkp)
+ sdkp = scsi_disk_get(sdkp->disk);
+ mutex_unlock(&sd_ref_mutex);
+ return !sdkp ? NULL : sdkp->disk;
+}
+EXPORT_SYMBOL(scsi_gendisk_get_from_dev);
+
+void scsi_gendisk_put(struct device *dev)
+{
+ struct scsi_disk *sdkp = dev_get_drvdata(dev);
+ struct scsi_device *sdev = sdkp->device;
+
+ mutex_lock(&sd_ref_mutex);
+ put_device(&sdkp->dev);
+ scsi_device_put(sdev);
+ mutex_unlock(&sd_ref_mutex);
+}
+EXPORT_SYMBOL(scsi_gendisk_put);
+
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
unsigned int dix, unsigned int dif)
{
@@ -1407,17 +1432,17 @@ static int media_not_present(struct scsi_disk *sdkp,
**/
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
{
- struct scsi_disk *sdkp = scsi_disk_get(disk);
- struct scsi_device *sdp;
+ struct scsi_disk *sdkp = scsi_disk(disk);
+ struct scsi_device *sdp = sdkp->device;
struct scsi_sense_hdr *sshdr = NULL;
int retval;
- if (!sdkp)
- return 0;
-
- sdp = sdkp->device;
SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
+ /* Simply return for embedded storage media such as UFS */
+ if (!sdp->removable)
+ goto out;
+
/*
* If the device is offline, don't send any commands - just pretend as
* if the command failed. If the device ever comes back online, we
@@ -1472,7 +1497,6 @@ out:
kfree(sshdr);
retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
sdp->changed = 0;
- scsi_disk_put(sdkp);
return retval;
}
@@ -2357,11 +2381,6 @@ got_data:
sizeof(cap_str_10));
if (sdkp->first_scan || old_capacity != sdkp->capacity) {
- sd_printk(KERN_NOTICE, sdkp,
- "%llu %d-byte logical blocks: (%s/%s)\n",
- (unsigned long long)sdkp->capacity,
- sector_size, cap_str_10, cap_str_2);
-
if (sdkp->physical_block_size != sector_size)
sd_printk(KERN_NOTICE, sdkp,
"%u-byte physical blocks\n",
@@ -2398,7 +2417,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
int res;
struct scsi_device *sdp = sdkp->device;
struct scsi_mode_data data;
- int old_wp = sdkp->write_prot;
set_disk_ro(sdkp->disk, 0);
if (sdp->skip_ms_page_3f) {
@@ -2439,13 +2457,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
} else {
sdkp->write_prot = ((data.device_specific & 0x80) != 0);
set_disk_ro(sdkp->disk, sdkp->write_prot);
- if (sdkp->first_scan || old_wp != sdkp->write_prot) {
- sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
- sdkp->write_prot ? "on" : "off");
- sd_printk(KERN_DEBUG, sdkp,
- "Mode Sense: %02x %02x %02x %02x\n",
- buffer[0], buffer[1], buffer[2], buffer[3]);
- }
}
}
@@ -2458,16 +2469,13 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
int len = 0, res;
struct scsi_device *sdp = sdkp->device;
+ struct Scsi_Host *host = sdp->host;
int dbd;
int modepage;
int first_len;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
- int old_wce = sdkp->WCE;
- int old_rcd = sdkp->RCD;
- int old_dpofua = sdkp->DPOFUA;
-
if (sdkp->cache_override)
return;
@@ -2489,7 +2497,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
dbd = 8;
} else {
modepage = 8;
- dbd = 0;
+ if (host->set_dbd_for_caching)
+ dbd = 8;
+ else
+ dbd = 0;
}
/* cautiously ask */
@@ -2590,15 +2601,6 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdkp->WCE && sdkp->write_prot)
sdkp->WCE = 0;
- if (sdkp->first_scan || old_wce != sdkp->WCE ||
- old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
- sd_printk(KERN_NOTICE, sdkp,
- "Write cache: %s, read cache: %s, %s\n",
- sdkp->WCE ? "enabled" : "disabled",
- sdkp->RCD ? "disabled" : "enabled",
- sdkp->DPOFUA ? "supports DPO and FUA"
- : "doesn't support DPO or FUA");
-
return;
}
@@ -2912,14 +2914,12 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->opt_xfer_blocks &&
sdkp->opt_xfer_blocks <= dev_max &&
sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
- logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_CACHE_SIZE) {
- q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
- rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
- } else {
- q->limits.io_opt = 0;
+ sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
+ rw_max = q->limits.io_opt =
+ sdkp->opt_xfer_blocks * sdp->sector_size;
+ else
rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
(sector_t)BLK_DEF_MAX_SECTORS);
- }
/* Do not exceed controller limit */
rw_max = min(rw_max, queue_max_hw_sectors(q));
@@ -3055,14 +3055,15 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
}
blk_pm_runtime_init(sdp->request_queue, dev);
+ if (sdp->autosuspend_delay >= 0)
+ pm_runtime_set_autosuspend_delay(dev, sdp->autosuspend_delay);
+
add_disk(gd);
if (sdkp->capacity)
sd_dif_config_host(sdkp);
sd_revalidate_disk(gd);
- sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
- sdp->removable ? "removable " : "");
scsi_autopm_put_device(sdp);
put_device(&sdkp->dev);
}
@@ -3319,7 +3320,6 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
return 0;
if (sdkp->WCE && sdkp->media_present) {
- sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
ret = sd_sync_cache(sdkp);
if (ret) {
/* ignore OFFLINE device */
@@ -3330,7 +3330,7 @@ static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
}
if (sdkp->device->manage_start_stop) {
- sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+ sd_printk(KERN_DEBUG, sdkp, "Stopping disk\n");
/* an error is not worth aborting a system sleep */
ret = sd_start_stop_device(sdkp, 0);
if (ignore_stop_errors)
@@ -3361,7 +3361,7 @@ static int sd_resume(struct device *dev)
if (!sdkp->device->manage_start_stop)
return 0;
- sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+ sd_printk(KERN_DEBUG, sdkp, "Starting disk\n");
return sd_start_stop_device(sdkp, 1);
}
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 765a6f1ac1b7..654630bb7d0e 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,11 +151,6 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
return blocks << (ilog2(sdev->sector_size) - 9);
}
-static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
-{
- return blocks * sdev->sector_size;
-}
-
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index f5c66caad56b..237628742ecf 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -545,7 +545,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
old_hdr->result = EIO;
break;
case DID_ERROR:
- old_hdr->result = (srp->sense_b[0] == 0 &&
+ old_hdr->result = (srp->sense_b[0] == 0 &&
hp->masked_status == GOOD) ? 0 : EIO;
break;
default:
@@ -942,8 +942,10 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
return -ENXIO;
if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR))
return -EFAULT;
+ mutex_lock(&sfp->parentdp->open_rel_lock);
result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR,
1, read_only, 1, &srp);
+ mutex_unlock(&sfp->parentdp->open_rel_lock);
if (result < 0)
return result;
result = wait_event_interruptible(sfp->read_wait,
@@ -1043,8 +1045,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
result = get_user(val, ip);
if (result)
return result;
- if (val < 0)
- return -EINVAL;
+ if (val < 0)
+ return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
mutex_lock(&sfp->f_mutex);
@@ -1054,9 +1056,10 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
mutex_unlock(&sfp->f_mutex);
return -EBUSY;
}
-
+ mutex_lock(&sfp->parentdp->open_rel_lock);
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
+ mutex_unlock(&sfp->parentdp->open_rel_lock);
}
mutex_unlock(&sfp->f_mutex);
return 0;
@@ -1183,14 +1186,14 @@ static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned lon
return -ENXIO;
sdev = sdp->device;
- if (sdev->host->hostt->compat_ioctl) {
+ if (sdev->host->hostt->compat_ioctl) {
int ret;
ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg);
return ret;
}
-
+
return -ENOIOCTLCMD;
}
#endif
@@ -1577,9 +1580,6 @@ sg_add_device(struct device *cl_dev, struct class_interface *cl_intf)
} else
pr_warn("%s: sg_sys Invalid\n", __func__);
- sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
- "type %d\n", sdp->index, scsidp->type);
-
dev_set_drvdata(cl_dev, sdp);
return 0;
@@ -1684,7 +1684,7 @@ init_sg(void)
else
def_reserved_size = sg_big_buff;
- rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
+ rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0),
SG_MAX_DEVS, "sg");
if (rc)
return rc;
@@ -2346,7 +2346,7 @@ static const struct file_operations adio_fops = {
};
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
-static ssize_t sg_proc_write_dressz(struct file *filp,
+static ssize_t sg_proc_write_dressz(struct file *filp,
const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
.owner = THIS_MODULE,
@@ -2486,7 +2486,7 @@ static int sg_proc_single_open_adio(struct inode *inode, struct file *file)
return single_open(file, sg_proc_seq_show_int, &sg_allow_dio);
}
-static ssize_t
+static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
@@ -2507,7 +2507,7 @@ static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}
-static ssize_t
+static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
@@ -2682,6 +2682,9 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
seq_puts(s, srp->done ?
((1 == srp->done) ? "rcv:" : "fin:")
: "act:");
+ seq_printf(s, srp->done ?
+ ((1 == srp->done) ? "rcv:" : "fin:")
+ : "act:");
seq_printf(s, " id=%d blen=%d",
srp->header.pack_id, blen);
if (srp->done)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 5f4530744e0a..fb2f2159c0e1 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -83,3 +83,38 @@ config SCSI_UFS_QCOM
Select this if you have UFS controller on QCOM chipset.
If unsure, say N.
+
+config SCSI_UFS_QCOM_ICE
+ bool "QCOM specific hooks to Inline Crypto Engine for UFS driver"
+ depends on SCSI_UFS_QCOM && CRYPTO_DEV_QCOM_ICE
+ help
+ This selects the QCOM-specific additions that support the Inline
+ Crypto Engine (ICE).
+ ICE accelerates crypto operations while maintaining high UFS
+ performance.
+
+ Select this if you have ICE support for UFS on a QCOM chipset.
+ If unsure, say N.
+
+
+config SCSI_UFS_TEST
+ tristate "Universal Flash Storage host controller driver unit-tests"
+ depends on SCSI_UFSHCD && IOSCHED_TEST
+ default m
+ ---help---
+ This adds the UFS host controller unit-test framework.
+ The UFS unit-tests register as a block device test utility with
+ test-iosched and are initiated when test-iosched is selected as
+ the active I/O scheduler.
+
+config SCSI_UFSHCD_CMD_LOGGING
+ bool "Universal Flash Storage host controller driver layer command logging support"
+ depends on SCSI_UFSHCD
+ help
+ This selects command logging in the UFS host controller driver
+ layer. When enabled, it records all command information sent from
+ the UFS host controller, for debugging purposes.
+
+ Select this if you want the above-mentioned debug information
+ captured.
+ If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 8303bcce7a23..ce98c095245c 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,5 +1,8 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFS_QCOM_ICE) += ufs-qcom-ice.o
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o ufs_quirks.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o
+obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
new file mode 100644
index 000000000000..51eef0d5e95c
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -0,0 +1,1671 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * UFS debugfs - add debugfs interface to the ufshcd.
+ * This is currently used for collecting and exporting statistics
+ * from the UFS driver.
+ * This infrastructure can also be used for debugging or directly
+ * tweaking the driver from userspace.
+ *
+ */
+
+#include <linux/random.h>
+#include "ufs-debugfs.h"
+#include "unipro.h"
+#include "ufshci.h"
+
+enum field_width {
+ BYTE = 1,
+ WORD = 2,
+};
+
+struct desc_field_offset {
+ char *name;
+ int offset;
+ enum field_width width_byte;
+};
+
+#define UFS_ERR_STATS_PRINT(file, error_index, string, error_seen) \
+ do { \
+ if (err_stats[error_index]) { \
+ seq_printf(file, string, \
+ err_stats[error_index]); \
+ error_seen = true; \
+ } \
+ } while (0)
+
+#define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
+
+#ifdef CONFIG_UFS_FAULT_INJECTION
+
+#define INJECT_COMMAND_HANG (0x0)
+
+static DECLARE_FAULT_ATTR(fail_default_attr);
+static char *fail_request;
+module_param(fail_request, charp, 0);
+
+/**
+ * struct ufsdbg_err_scenario - error scenario use case
+ * @name: the name of the error scenario
+ * @err_code_arr: error codes array for this error scenario
+ * @num_err_codes: number of error codes in err_code_arr
+ */
+struct ufsdbg_err_scenario {
+ const char *name;
+ const int *err_code_arr;
+ u32 num_err_codes;
+ u32 num_err_injected;
+};
+
+/*
+ * the following static arrays are aggregations of the possible errors
+ * that might occur during the relevant error scenarios
+ */
+static const int err_inject_intr_err_codes[] = {
+ CONTROLLER_FATAL_ERROR,
+ SYSTEM_BUS_FATAL_ERROR,
+ INJECT_COMMAND_HANG,
+};
+
+static const int err_inject_pwr_change_err_codes[] = {
+ -EIO,
+ -ETIMEDOUT,
+ -1,
+ PWR_REMOTE,
+ PWR_BUSY,
+ PWR_ERROR_CAP,
+ PWR_FATAL_ERROR,
+};
+
+static const int err_inject_uic_err_codes[] = {
+ -EIO,
+ -ETIMEDOUT,
+};
+
+static const int err_inject_dme_attr_err_codes[] = {
+ /* an invalid DME attribute for host and device */
+ 0x1600,
+};
+
+static const int err_inject_query_err_codes[] = {
+ /* an invalid idn for flag/attribute/descriptor query request */
+ 0xFF,
+};
+
+static struct ufsdbg_err_scenario err_scen_arr[] = {
+ {
+ "ERR_INJECT_INTR",
+ err_inject_intr_err_codes,
+ ARRAY_SIZE(err_inject_intr_err_codes),
+ },
+ {
+ "ERR_INJECT_PWR_CHANGE",
+ err_inject_pwr_change_err_codes,
+ ARRAY_SIZE(err_inject_pwr_change_err_codes),
+ },
+ {
+ "ERR_INJECT_UIC",
+ err_inject_uic_err_codes,
+ ARRAY_SIZE(err_inject_uic_err_codes),
+ },
+ {
+ "ERR_INJECT_DME_ATTR",
+ err_inject_dme_attr_err_codes,
+ ARRAY_SIZE(err_inject_dme_attr_err_codes),
+ },
+ {
+ "ERR_INJECT_QUERY",
+ err_inject_query_err_codes,
+ ARRAY_SIZE(err_inject_query_err_codes),
+ },
+};
+
+static bool inject_fatal_err_tr(struct ufs_hba *hba, u8 ocs_err)
+{
+ int tag;
+
+ tag = find_first_bit(&hba->outstanding_reqs, hba->nutrs);
+ if (tag == hba->nutrs)
+ return 0;
+
+ ufshcd_writel(hba, ~(1 << tag), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+ (&hba->lrb[tag])->utr_descriptor_ptr->header.dword_2 =
+ cpu_to_be32(ocs_err);
+
+ /* fatal error injected */
+ return 1;
+}
+
+static bool inject_fatal_err_tm(struct ufs_hba *hba, u8 ocs_err)
+{
+ int tag;
+
+ tag = find_first_bit(&hba->outstanding_tasks, hba->nutmrs);
+ if (tag == hba->nutmrs)
+ return 0;
+
+ ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+ (&hba->utmrdl_base_addr[tag])->header.dword_2 =
+ cpu_to_be32(ocs_err);
+
+ /* fatal error injected */
+ return 1;
+}
+
+static bool inject_cmd_hang_tr(struct ufs_hba *hba)
+{
+ int tag;
+
+ tag = find_first_bit(&hba->outstanding_reqs, hba->nutrs);
+ if (tag == hba->nutrs)
+ return 0;
+
+ __clear_bit(tag, &hba->outstanding_reqs);
+ hba->lrb[tag].cmd = NULL;
+ __clear_bit(tag, &hba->lrb_in_use);
+
+ /* command hang injected */
+ return 1;
+}
+
+static int inject_cmd_hang_tm(struct ufs_hba *hba)
+{
+ int tag;
+
+ tag = find_first_bit(&hba->outstanding_tasks, hba->nutmrs);
+ if (tag == hba->nutmrs)
+ return 0;
+
+ __clear_bit(tag, &hba->outstanding_tasks);
+ __clear_bit(tag, &hba->tm_slots_in_use);
+
+ /* command hang injected */
+ return 1;
+}
+
+static void
+ufsdbg_intr_fail_request(struct ufs_hba *hba, u32 *intr_status)
+{
+ u8 ocs_err;
+
+ dev_info(hba->dev, "%s: fault-inject error: 0x%x\n",
+ __func__, *intr_status);
+
+ switch (*intr_status) {
+ case CONTROLLER_FATAL_ERROR: /* fall through */
+ ocs_err = OCS_FATAL_ERROR;
+ goto set_ocs;
+ case SYSTEM_BUS_FATAL_ERROR:
+ ocs_err = OCS_INVALID_CMD_TABLE_ATTR;
+set_ocs:
+ if (!inject_fatal_err_tr(hba, ocs_err))
+ if (!inject_fatal_err_tm(hba, ocs_err))
+ goto out;
+ break;
+ case INJECT_COMMAND_HANG:
+ if (!inject_cmd_hang_tr(hba))
+ inject_cmd_hang_tm(hba);
+ break;
+ default:
+ BUG();
+ /* some configurations ignore panics caused by BUG() */
+ break;
+ }
+out:
+ return;
+}
+
+static bool
+ufsdbg_find_err_code(enum ufsdbg_err_inject_scenario usecase,
+ int *ret, u32 *index)
+{
+ struct ufsdbg_err_scenario *err_scen = &err_scen_arr[usecase];
+ u32 err_code_index;
+
+ if (!err_scen->num_err_codes)
+ return false;
+
+ err_code_index = prandom_u32() % err_scen->num_err_codes;
+
+ *index = err_code_index;
+ *ret = err_scen->err_code_arr[err_code_index];
+ return true;
+}
+
+void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+ enum ufsdbg_err_inject_scenario usecase,
+ int success_value, int *ret_value)
+{
+ int opt_ret = 0;
+ u32 err_code_index = 0;
+
+ /* sanity check and verify error scenario bit */
+ if ((unlikely(!hba || !ret_value)) ||
+ (likely(!(hba->debugfs_files.err_inj_scenario_mask &
+ BIT(usecase)))))
+ goto out;
+
+ if (usecase < 0 || usecase >= ERR_INJECT_MAX_ERR_SCENARIOS) {
+ dev_err(hba->dev, "%s: invalid usecase value (%d)\n",
+ __func__, usecase);
+ goto out;
+ }
+
+ if (!ufsdbg_find_err_code(usecase, &opt_ret, &err_code_index))
+ goto out;
+
+ if (!should_fail(&hba->debugfs_files.fail_attr, 1))
+ goto out;
+
+ /* if an error already occurred/injected */
+ if (*ret_value != success_value)
+ goto out;
+
+ switch (usecase) {
+ case ERR_INJECT_INTR:
+ /* an error already occurred */
+ if (*ret_value & UFSHCD_ERROR_MASK)
+ goto out;
+
+ ufsdbg_intr_fail_request(hba, (u32 *)&opt_ret);
+ /* fall through */
+ case ERR_INJECT_PWR_CHANGE:
+ case ERR_INJECT_UIC:
+ case ERR_INJECT_DME_ATTR:
+ case ERR_INJECT_QUERY:
+ goto should_fail;
+ default:
+ dev_err(hba->dev, "%s: unsupported error scenario\n",
+ __func__);
+ goto out;
+ }
+
+should_fail:
+ *ret_value = opt_ret;
+ err_scen_arr[usecase].num_err_injected++;
+ pr_debug("%s: error code index [%d], error code %d (0x%x) is injected for scenario \"%s\"\n",
+ __func__, err_code_index, *ret_value, *ret_value,
+ err_scen_arr[usecase].name);
+out:
+ /*
+ * At this point ret_value is guaranteed to hold the correct value,
+ * whether it was assigned a new (injected) value or kept its
+ * original incoming value.
+ */
+ return;
+}
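ufsdbg_error_inject_dispatcher() is meant to sit right after a call whose result should occasionally be corrupted: when the scenario's bit is set in err_inj_scenario_mask and the fault attribute fires, *ret_value (if still equal to success_value) is replaced with a randomly chosen error code from that scenario's table. A minimal sketch of a call site, with a hypothetical wrapper name (the ufshcd internals referenced are assumptions based on this file's context):

/* Hypothetical call site: inject UIC errors after the real command */
static int example_issue_uic_cmd(struct ufs_hba *hba, struct uic_command *cmd)
{
	int ret = ufshcd_send_uic_cmd(hba, cmd);	/* real work */

	/* success_value is 0, so a successful result may become -EIO/-ETIMEDOUT */
	ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_UIC, 0, &ret);
	return ret;
}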
+
+static int ufsdbg_err_inj_scenario_read(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+ enum ufsdbg_err_inject_scenario err_case;
+
+ if (!hba)
+ return -EINVAL;
+
+ seq_printf(file, "%-40s %-17s %-15s\n",
+ "Error Scenario:", "Bit[#]", "STATUS");
+
+ for (err_case = ERR_INJECT_INTR;
+ err_case < ERR_INJECT_MAX_ERR_SCENARIOS; err_case++) {
+ seq_printf(file, "%-40s 0x%-15lx %-15s\n",
+ err_scen_arr[err_case].name,
+ UFS_BIT(err_case),
+ hba->debugfs_files.err_inj_scenario_mask &
+ UFS_BIT(err_case) ? "ENABLE" : "DISABLE");
+ }
+
+ seq_printf(file, "bitwise of error scenario is 0x%x\n\n",
+ hba->debugfs_files.err_inj_scenario_mask);
+
+ seq_puts(file, "usage example:\n");
+ seq_puts(file, "echo 0x4 > /sys/kernel/debug/.../err_inj_scenario\n");
+ seq_puts(file, "in order to enable ERR_INJECT_INTR\n");
+
+ return 0;
+}
+
+static
+int ufsdbg_err_inj_scenario_open(struct inode *inode, struct file *file)
+{
+ return single_open(file,
+ ufsdbg_err_inj_scenario_read, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_inj_scenario_write(struct file *file,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ufs_hba *hba = file->f_mapping->host->i_private;
+ int ret;
+ int err_scen = 0;
+
+ if (!hba)
+ return -EINVAL;
+
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &err_scen);
+ if (ret) {
+ dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+ return ret;
+ }
+
+ hba->debugfs_files.err_inj_scenario_mask = err_scen;
+
+ return cnt;
+}
+
+static const struct file_operations ufsdbg_err_inj_scenario_ops = {
+ .open = ufsdbg_err_inj_scenario_open,
+ .read = seq_read,
+ .write = ufsdbg_err_inj_scenario_write,
+};
+
+static int ufsdbg_err_inj_stats_read(struct seq_file *file, void *data)
+{
+ enum ufsdbg_err_inject_scenario err;
+
+ seq_printf(file, "%-40s %-20s\n",
+ "Error Scenario:", "Num of Errors Injected");
+
+ for (err = 0; err < ERR_INJECT_MAX_ERR_SCENARIOS; err++) {
+ seq_printf(file, "%-40s %-20d\n",
+ err_scen_arr[err].name,
+ err_scen_arr[err].num_err_injected);
+ }
+
+ return 0;
+}
+
+static
+int ufsdbg_err_inj_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file,
+ ufsdbg_err_inj_stats_read, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_inj_stats_write(struct file *file,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ enum ufsdbg_err_inject_scenario err;
+
+ for (err = 0; err < ERR_INJECT_MAX_ERR_SCENARIOS; err++)
+ err_scen_arr[err].num_err_injected = 0;
+
+ return cnt;
+}
+
+static const struct file_operations ufsdbg_err_inj_stats_ops = {
+ .open = ufsdbg_err_inj_stats_open,
+ .read = seq_read,
+ .write = ufsdbg_err_inj_stats_write,
+};
+
+static void ufsdbg_setup_fault_injection(struct ufs_hba *hba)
+{
+ struct dentry *fault_dir;
+
+ hba->debugfs_files.fail_attr = fail_default_attr;
+
+ if (fail_request)
+ setup_fault_attr(&hba->debugfs_files.fail_attr, fail_request);
+
+ /* suppress dump stack every time failure is injected */
+ hba->debugfs_files.fail_attr.verbose = 0;
+
+ fault_dir = fault_create_debugfs_attr("inject_fault",
+ hba->debugfs_files.debugfs_root,
+ &hba->debugfs_files.fail_attr);
+
+ if (IS_ERR(fault_dir)) {
+ dev_err(hba->dev, "%s: failed to create debugfs entry for fault injection\n",
+ __func__);
+ return;
+ }
+
+ hba->debugfs_files.err_inj_scenario =
+ debugfs_create_file("err_inj_scenario",
+ S_IRUGO | S_IWUGO,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_err_inj_scenario_ops);
+
+ if (!hba->debugfs_files.err_inj_scenario) {
+ dev_err(hba->dev,
+ "%s: Could not create debugfs entry for err_scenario",
+ __func__);
+ goto fail_err_inj_scenario;
+ }
+
+ hba->debugfs_files.err_inj_stats =
+ debugfs_create_file("err_inj_stats", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_err_inj_stats_ops);
+ if (!hba->debugfs_files.err_inj_stats) {
+ dev_err(hba->dev,
+ "%s: failed create err_inj_stats debugfs entry\n",
+ __func__);
+ goto fail_err_inj_stats;
+ }
+
+ return;
+
+fail_err_inj_stats:
+ debugfs_remove(hba->debugfs_files.err_inj_scenario);
+fail_err_inj_scenario:
+ debugfs_remove_recursive(fault_dir);
+}
+#else
+static void ufsdbg_setup_fault_injection(struct ufs_hba *hba)
+{
+}
+#endif /* CONFIG_UFS_FAULT_INJECTION */
+
+#define BUFF_LINE_SIZE 16 /* Must be a multiple of sizeof(u32) */
+#define TAB_CHARS 8
+
+static int ufsdbg_tag_stats_show(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+ struct ufs_stats *ufs_stats;
+ int i, j;
+ int max_depth;
+ bool is_tag_empty = true;
+ unsigned long flags;
+ char *sep = " | * | ";
+
+ if (!hba)
+ goto exit;
+
+ ufs_stats = &hba->ufs_stats;
+
+ if (!ufs_stats->enabled) {
+ pr_debug("%s: ufs statistics are disabled\n", __func__);
+ seq_puts(file, "ufs statistics are disabled");
+ goto exit;
+ }
+
+ max_depth = hba->nutrs;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /* Header */
+ seq_printf(file, " Tag Stat\t\t%s Number of pending reqs upon issue (Q fullness)\n",
+ sep);
+ for (i = 0; i < TAB_CHARS * (TS_NUM_STATS + 4); i++) {
+ seq_puts(file, "-");
+ if (i == (TAB_CHARS * 3 - 1))
+ seq_puts(file, sep);
+ }
+ seq_printf(file,
+ "\n #\tnum uses\t%s\t #\tAll\tRead\tWrite\tUrg.R\tUrg.W\tFlush\n",
+ sep);
+
+ /* values */
+ for (i = 0; i < max_depth; i++) {
+ if (ufs_stats->tag_stats[i][TS_TAG] <= 0 &&
+ ufs_stats->tag_stats[i][TS_READ] <= 0 &&
+ ufs_stats->tag_stats[i][TS_WRITE] <= 0 &&
+ ufs_stats->tag_stats[i][TS_URGENT_READ] <= 0 &&
+ ufs_stats->tag_stats[i][TS_URGENT_WRITE] <= 0 &&
+ ufs_stats->tag_stats[i][TS_FLUSH] <= 0)
+ continue;
+
+ is_tag_empty = false;
+ seq_printf(file, " %d\t ", i);
+ for (j = 0; j < TS_NUM_STATS; j++) {
+ seq_printf(file, "%llu\t", ufs_stats->tag_stats[i][j]);
+ if (j != 0)
+ continue;
+ seq_printf(file, "\t%s\t %d\t%llu\t", sep, i,
+ ufs_stats->tag_stats[i][TS_READ] +
+ ufs_stats->tag_stats[i][TS_WRITE] +
+ ufs_stats->tag_stats[i][TS_URGENT_READ] +
+ ufs_stats->tag_stats[i][TS_URGENT_WRITE] +
+ ufs_stats->tag_stats[i][TS_FLUSH]);
+ }
+ seq_puts(file, "\n");
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (is_tag_empty)
+ pr_debug("%s: All tags statistics are empty", __func__);
+
+exit:
+ return 0;
+}
+
+static int ufsdbg_tag_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_tag_stats_show, inode->i_private);
+}
+
+static ssize_t ufsdbg_tag_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ufs_hba *hba = filp->f_mapping->host->i_private;
+ struct ufs_stats *ufs_stats;
+ int val = 0;
+ int ret, bit = 0;
+ unsigned long flags;
+
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret) {
+ dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+ return ret;
+ }
+
+ ufs_stats = &hba->ufs_stats;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ if (!val) {
+ ufs_stats->enabled = false;
+ pr_debug("%s: Disabling UFS tag statistics", __func__);
+ } else {
+ ufs_stats->enabled = true;
+ pr_debug("%s: Enabling & Resetting UFS tag statistics",
+ __func__);
+ memset(hba->ufs_stats.tag_stats[0], 0,
+ sizeof(**hba->ufs_stats.tag_stats) *
+ TS_NUM_STATS * hba->nutrs);
+
+ /* initialize current queue depth */
+ ufs_stats->q_depth = 0;
+ for_each_set_bit_from(bit, &hba->outstanding_reqs, hba->nutrs)
+ ufs_stats->q_depth++;
+ pr_debug("%s: Enabled UFS tag statistics", __func__);
+ }
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return cnt;
+}
+
+static const struct file_operations ufsdbg_tag_stats_fops = {
+ .open = ufsdbg_tag_stats_open,
+ .read = seq_read,
+ .write = ufsdbg_tag_stats_write,
+};
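From userspace, tag_stats is enabled (and reset) by writing a non-zero value and then read back as a per-tag table. A tiny self-contained sketch of that sequence; the path assumes debugfs is mounted at /sys/kernel/debug, host number 0, and root privileges:

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/ufshcd0/stats/tag_stats";
	char line[256];
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	fputs("1\n", f);		/* enable and reset tag statistics */
	fclose(f);

	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* dump the per-tag table */
	fclose(f);
	return 0;
}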
+
+static int ufsdbg_query_stats_show(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+ struct ufs_stats *ufs_stats = &hba->ufs_stats;
+ int i, j;
+ static const char *opcode_name[UPIU_QUERY_OPCODE_MAX] = {
+ "QUERY_OPCODE_NOP:",
+ "QUERY_OPCODE_READ_DESC:",
+ "QUERY_OPCODE_WRITE_DESC:",
+ "QUERY_OPCODE_READ_ATTR:",
+ "QUERY_OPCODE_WRITE_ATTR:",
+ "QUERY_OPCODE_READ_FLAG:",
+ "QUERY_OPCODE_SET_FLAG:",
+ "QUERY_OPCODE_CLEAR_FLAG:",
+ "QUERY_OPCODE_TOGGLE_FLAG:",
+ };
+
+ seq_puts(file, "\n");
+ seq_puts(file, "The following table shows how many TIMES each IDN was sent to device for each QUERY OPCODE:\n");
+ seq_puts(file, "\n");
+
+ for (i = 0; i < UPIU_QUERY_OPCODE_MAX; i++) {
+ seq_printf(file, "%-30s", opcode_name[i]);
+
+ for (j = 0; j < MAX_QUERY_IDN; j++) {
+ /*
+ * we only print the non-zero entries, i.e. IDNs that
+ * were sent to the device at least once for this
+ * opcode. The "table structure" of the output is
+ * not important.
+ */
+ if (ufs_stats->query_stats_arr[i][j])
+ seq_printf(file, "IDN 0x%02X: %d,\t", j,
+ ufs_stats->query_stats_arr[i][j]);
+ }
+ seq_puts(file, "\n");
+ }
+
+ return 0;
+}
+
+static int ufsdbg_query_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_query_stats_show, inode->i_private);
+}
+
+static ssize_t ufsdbg_query_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ufs_hba *hba = filp->f_mapping->host->i_private;
+ struct ufs_stats *ufs_stats = &hba->ufs_stats;
+ int i, j;
+
+ mutex_lock(&hba->dev_cmd.lock);
+
+ for (i = 0; i < UPIU_QUERY_OPCODE_MAX; i++)
+ for (j = 0; j < MAX_QUERY_IDN; j++)
+ ufs_stats->query_stats_arr[i][j] = 0;
+
+ mutex_unlock(&hba->dev_cmd.lock);
+
+ return cnt;
+}
+
+static const struct file_operations ufsdbg_query_stats_fops = {
+ .open = ufsdbg_query_stats_open,
+ .read = seq_read,
+ .write = ufsdbg_query_stats_write,
+};
+
+static int ufsdbg_err_stats_show(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+ int *err_stats;
+ unsigned long flags;
+ bool error_seen = false;
+
+ if (!hba)
+ goto exit;
+
+ err_stats = hba->ufs_stats.err_stats;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ seq_puts(file, "\n==UFS errors that caused controller reset==\n");
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_HIBERN8_EXIT,
+ "controller reset due to hibern8 exit error:\t %d\n",
+ error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_VOPS_SUSPEND,
+ "controller reset due to vops suspend error:\t\t %d\n",
+ error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_EH,
+ "controller reset due to error handling:\t\t %d\n",
+ error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_CLEAR_PEND_XFER_TM,
+ "controller reset due to clear xfer/tm regs:\t\t %d\n",
+ error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_INT_FATAL_ERRORS,
+ "controller reset due to fatal interrupt:\t %d\n",
+ error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_INT_UIC_ERROR,
+ "controller reset due to uic interrupt error:\t %d\n",
+ error_seen);
+
+ if (error_seen)
+ error_seen = false;
+ else
+ seq_puts(file,
+ "so far, no errors that caused controller reset\n\n");
+
+ seq_puts(file, "\n\n==UFS other errors==\n");
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_HIBERN8_ENTER,
+ "hibern8 enter:\t\t %d\n", error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_RESUME,
+ "resume error:\t\t %d\n", error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_SUSPEND,
+ "suspend error:\t\t %d\n", error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_LINKSTARTUP,
+ "linkstartup error:\t\t %d\n", error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_POWER_MODE_CHANGE,
+ "power change error:\t %d\n", error_seen);
+
+ UFS_ERR_STATS_PRINT(file, UFS_ERR_TASK_ABORT,
+ "abort callback:\t\t %d\n\n", error_seen);
+
+ if (!error_seen)
+ seq_puts(file,
+ "so far, no other UFS related errors\n\n");
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+exit:
+ return 0;
+}
+
+static int ufsdbg_err_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_err_stats_show, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ufs_hba *hba = filp->f_mapping->host->i_private;
+ struct ufs_stats *ufs_stats;
+ unsigned long flags;
+
+ ufs_stats = &hba->ufs_stats;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ pr_debug("%s: Resetting UFS error statistics", __func__);
+ memset(ufs_stats->err_stats, 0, sizeof(hba->ufs_stats.err_stats));
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return cnt;
+}
+
+static const struct file_operations ufsdbg_err_stats_fops = {
+ .open = ufsdbg_err_stats_open,
+ .read = seq_read,
+ .write = ufsdbg_err_stats_write,
+};
+
+static int ufshcd_init_statistics(struct ufs_hba *hba)
+{
+ struct ufs_stats *stats = &hba->ufs_stats;
+ int ret = 0;
+ int i;
+
+ stats->enabled = false;
+ stats->tag_stats = kzalloc(sizeof(*stats->tag_stats) * hba->nutrs,
+ GFP_KERNEL);
+ if (!hba->ufs_stats.tag_stats)
+ goto no_mem;
+
+ stats->tag_stats[0] = kzalloc(sizeof(**stats->tag_stats) *
+ TS_NUM_STATS * hba->nutrs, GFP_KERNEL);
+ if (!stats->tag_stats[0])
+ goto no_mem;
+
+ for (i = 1; i < hba->nutrs; i++)
+ stats->tag_stats[i] = &stats->tag_stats[0][i * TS_NUM_STATS];
+
+ memset(stats->err_stats, 0, sizeof(hba->ufs_stats.err_stats));
+
+ goto exit;
+
+no_mem:
+ dev_err(hba->dev, "%s: Unable to allocate UFS tag_stats", __func__);
+ ret = -ENOMEM;
+exit:
+ return ret;
+}
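ufshcd_init_statistics() lays tag_stats out as a row-pointer array over one contiguous block, so it can be indexed as tag_stats[tag][stat] with only two allocations. A small self-contained userspace sketch of the same idiom (names are illustrative, not from the driver):

#include <stdio.h>
#include <stdlib.h>

static unsigned long long **alloc_stats(int rows, int cols)
{
	unsigned long long **stats = calloc(rows, sizeof(*stats));
	int i;

	if (!stats)
		return NULL;
	stats[0] = calloc((size_t)rows * cols, sizeof(**stats));
	if (!stats[0]) {
		free(stats);
		return NULL;
	}
	for (i = 1; i < rows; i++)
		stats[i] = &stats[0][i * cols];	/* each row points into the flat block */
	return stats;
}

int main(void)
{
	unsigned long long **stats = alloc_stats(32, 6);	/* e.g. 32 tags x 6 counters */

	if (!stats)
		return 1;
	stats[5][2]++;						/* tag 5, third counter */
	printf("%llu\n", stats[5][2]);
	free(stats[0]);
	free(stats);
	return 0;
}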
+
+void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset, int num_regs,
+ char *str, void *priv)
+{
+ int i;
+ char linebuf[38];
+ int size = num_regs * sizeof(u32);
+ int lines = size / BUFF_LINE_SIZE +
+ (size % BUFF_LINE_SIZE ? 1 : 0);
+ struct seq_file *file = priv;
+
+ if (!hba || !file) {
+ pr_err("%s called with NULL pointer\n", __func__);
+ return;
+ }
+
+ for (i = 0; i < lines; i++) {
+ hex_dump_to_buffer(hba->mmio_base + offset + i * BUFF_LINE_SIZE,
+ min(BUFF_LINE_SIZE, size), BUFF_LINE_SIZE, 4,
+ linebuf, sizeof(linebuf), false);
+ seq_printf(file, "%s [%x]: %s\n", str, i * BUFF_LINE_SIZE,
+ linebuf);
+ size -= BUFF_LINE_SIZE/sizeof(u32);
+ }
+}
+
+static int ufsdbg_host_regs_show(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ ufsdbg_pr_buf_to_std(hba, 0, UFSHCI_REG_SPACE_SIZE / sizeof(u32),
+ "host regs", file);
+ ufshcd_release(hba, false);
+ pm_runtime_put_sync(hba->dev);
+ return 0;
+}
+
+static int ufsdbg_host_regs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_host_regs_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_host_regs_fops = {
+ .open = ufsdbg_host_regs_open,
+ .read = seq_read,
+};
+
+static int ufsdbg_dump_device_desc_show(struct seq_file *file, void *data)
+{
+ int err = 0;
+ int buff_len = QUERY_DESC_DEVICE_MAX_SIZE;
+ u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+ struct desc_field_offset device_desc_field_name[] = {
+ {"bLength", 0x00, BYTE},
+ {"bDescriptorType", 0x01, BYTE},
+ {"bDevice", 0x02, BYTE},
+ {"bDeviceClass", 0x03, BYTE},
+ {"bDeviceSubClass", 0x04, BYTE},
+ {"bProtocol", 0x05, BYTE},
+ {"bNumberLU", 0x06, BYTE},
+ {"bNumberWLU", 0x07, BYTE},
+ {"bBootEnable", 0x08, BYTE},
+ {"bDescrAccessEn", 0x09, BYTE},
+ {"bInitPowerMode", 0x0A, BYTE},
+ {"bHighPriorityLUN", 0x0B, BYTE},
+ {"bSecureRemovalType", 0x0C, BYTE},
+ {"bSecurityLU", 0x0D, BYTE},
+ {"Reserved", 0x0E, BYTE},
+ {"bInitActiveICCLevel", 0x0F, BYTE},
+ {"wSpecVersion", 0x10, WORD},
+ {"wManufactureDate", 0x12, WORD},
+ {"iManufactureName", 0x14, BYTE},
+ {"iProductName", 0x15, BYTE},
+ {"iSerialNumber", 0x16, BYTE},
+ {"iOemID", 0x17, BYTE},
+ {"wManufactureID", 0x18, WORD},
+ {"bUD0BaseOffset", 0x1A, BYTE},
+ {"bUDConfigPLength", 0x1B, BYTE},
+ {"bDeviceRTTCap", 0x1C, BYTE},
+ {"wPeriodicRTCUpdate", 0x1D, WORD}
+ };
+
+ pm_runtime_get_sync(hba->dev);
+ err = ufshcd_read_device_desc(hba, desc_buf, buff_len);
+ pm_runtime_put_sync(hba->dev);
+
+ if (!err) {
+ int i;
+ struct desc_field_offset *tmp;
+ for (i = 0; i < ARRAY_SIZE(device_desc_field_name); ++i) {
+ tmp = &device_desc_field_name[i];
+
+ if (tmp->width_byte == BYTE) {
+ seq_printf(file,
+ "Device Descriptor[Byte offset 0x%x]: %s = 0x%x\n",
+ tmp->offset,
+ tmp->name,
+ (u8)desc_buf[tmp->offset]);
+ } else if (tmp->width_byte == WORD) {
+ seq_printf(file,
+ "Device Descriptor[Byte offset 0x%x]: %s = 0x%x\n",
+ tmp->offset,
+ tmp->name,
+ *(u16 *)&desc_buf[tmp->offset]);
+ } else {
+ seq_printf(file,
+ "Device Descriptor[offset 0x%x]: %s. Wrong Width = %d",
+ tmp->offset, tmp->name, tmp->width_byte);
+ }
+ }
+ } else {
+ seq_printf(file, "Reading Device Descriptor failed. err = %d\n",
+ err);
+ }
+
+ return err;
+}
+
+static int ufsdbg_show_hba_show(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+ seq_printf(file, "hba->outstanding_tasks = 0x%x\n",
+ (u32)hba->outstanding_tasks);
+ seq_printf(file, "hba->outstanding_reqs = 0x%x\n",
+ (u32)hba->outstanding_reqs);
+
+ seq_printf(file, "hba->capabilities = 0x%x\n", hba->capabilities);
+ seq_printf(file, "hba->nutrs = %d\n", hba->nutrs);
+ seq_printf(file, "hba->nutmrs = %d\n", hba->nutmrs);
+ seq_printf(file, "hba->ufs_version = 0x%x\n", hba->ufs_version);
+ seq_printf(file, "hba->irq = 0x%x\n", hba->irq);
+ seq_printf(file, "hba->auto_bkops_enabled = %d\n",
+ hba->auto_bkops_enabled);
+
+ seq_printf(file, "hba->ufshcd_state = 0x%x\n", hba->ufshcd_state);
+ seq_printf(file, "hba->clk_gating.state = 0x%x\n",
+ hba->clk_gating.state);
+ seq_printf(file, "hba->eh_flags = 0x%x\n", hba->eh_flags);
+ seq_printf(file, "hba->intr_mask = 0x%x\n", hba->intr_mask);
+ seq_printf(file, "hba->ee_ctrl_mask = 0x%x\n", hba->ee_ctrl_mask);
+
+ /* HBA Errors */
+ seq_printf(file, "hba->errors = 0x%x\n", hba->errors);
+ seq_printf(file, "hba->uic_error = 0x%x\n", hba->uic_error);
+ seq_printf(file, "hba->saved_err = 0x%x\n", hba->saved_err);
+ seq_printf(file, "hba->saved_uic_err = 0x%x\n", hba->saved_uic_err);
+
+ seq_printf(file, "power_mode_change_cnt = %d\n",
+ hba->ufs_stats.power_mode_change_cnt);
+ seq_printf(file, "hibern8_exit_cnt = %d\n",
+ hba->ufs_stats.hibern8_exit_cnt);
+ return 0;
+}
+
+static int ufsdbg_show_hba_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_show_hba_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_show_hba_fops = {
+ .open = ufsdbg_show_hba_open,
+ .read = seq_read,
+};
+
+static int ufsdbg_dump_device_desc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file,
+ ufsdbg_dump_device_desc_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_dump_device_desc = {
+ .open = ufsdbg_dump_device_desc_open,
+ .read = seq_read,
+};
+
+static int ufsdbg_power_mode_show(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+ char *names[] = {
+ "INVALID MODE",
+ "FAST MODE",
+ "SLOW MODE",
+ "INVALID MODE",
+ "FASTAUTO MODE",
+ "SLOWAUTO MODE",
+ "INVALID MODE",
+ };
+
+ /* Print current status */
+ seq_puts(file, "UFS current power mode [RX, TX]:");
+ seq_printf(file, "gear=[%d,%d], lane=[%d,%d], pwr=[%s,%s], rate = %c",
+ hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+ hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+ names[hba->pwr_info.pwr_rx],
+ names[hba->pwr_info.pwr_tx],
+ hba->pwr_info.hs_rate == PA_HS_MODE_B ? 'B' : 'A');
+ seq_puts(file, "\n\n");
+
+ /* Print usage */
+ seq_puts(file,
+ "To change power mode write 'GGLLMM' where:\n"
+ "G - selected gear\n"
+ "L - number of lanes\n"
+ "M - power mode:\n"
+ "\t1 = fast mode\n"
+ "\t2 = slow mode\n"
+ "\t4 = fast-auto mode\n"
+ "\t5 = slow-auto mode\n"
+ "first letter is for RX, second letter is for TX.\n\n");
+
+ return 0;
+}
+
+static bool ufsdbg_power_mode_validate(struct ufs_pa_layer_attr *pwr_mode)
+{
+ if (pwr_mode->gear_rx < UFS_PWM_G1 || pwr_mode->gear_rx > UFS_PWM_G7 ||
+ pwr_mode->gear_tx < UFS_PWM_G1 || pwr_mode->gear_tx > UFS_PWM_G7 ||
+ pwr_mode->lane_rx < 1 || pwr_mode->lane_rx > 2 ||
+ pwr_mode->lane_tx < 1 || pwr_mode->lane_tx > 2 ||
+ (pwr_mode->pwr_rx != FAST_MODE && pwr_mode->pwr_rx != SLOW_MODE &&
+ pwr_mode->pwr_rx != FASTAUTO_MODE &&
+ pwr_mode->pwr_rx != SLOWAUTO_MODE) ||
+ (pwr_mode->pwr_tx != FAST_MODE && pwr_mode->pwr_tx != SLOW_MODE &&
+ pwr_mode->pwr_tx != FASTAUTO_MODE &&
+ pwr_mode->pwr_tx != SLOWAUTO_MODE)) {
+ pr_err("%s: power parameters are not valid\n", __func__);
+ return false;
+ }
+
+ return true;
+}
+
+static int ufsdbg_cfg_pwr_param(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *new_pwr,
+ struct ufs_pa_layer_attr *final_pwr)
+{
+ int ret = 0;
+ bool is_dev_sup_hs = false;
+ bool is_new_pwr_hs = false;
+ int dev_pwm_max_rx_gear;
+ int dev_pwm_max_tx_gear;
+
+ if (!hba->max_pwr_info.is_valid) {
+ dev_err(hba->dev, "%s: device max power is not valid. can't configure power\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (hba->max_pwr_info.info.pwr_rx == FAST_MODE)
+ is_dev_sup_hs = true;
+
+ if (new_pwr->pwr_rx == FAST_MODE || new_pwr->pwr_rx == FASTAUTO_MODE)
+ is_new_pwr_hs = true;
+
+ final_pwr->lane_rx = hba->max_pwr_info.info.lane_rx;
+ final_pwr->lane_tx = hba->max_pwr_info.info.lane_tx;
+
+ /* device doesn't support HS but requested power is HS */
+ if (!is_dev_sup_hs && is_new_pwr_hs) {
+ pr_err("%s: device doesn't support HS. requested power is HS\n",
+ __func__);
+ return -ENOTSUPP;
+ } else if ((is_dev_sup_hs && is_new_pwr_hs) ||
+ (!is_dev_sup_hs && !is_new_pwr_hs)) {
+ /*
+ * If device and requested power mode are both HS or both PWM
+ * then dev_max->gear_xx are the gears to be assigned to
+ * final_pwr->gear_xx
+ */
+ final_pwr->gear_rx = hba->max_pwr_info.info.gear_rx;
+ final_pwr->gear_tx = hba->max_pwr_info.info.gear_tx;
+ } else if (is_dev_sup_hs && !is_new_pwr_hs) {
+ /*
+ * If device supports HS but requested power is PWM, then we
+ * need to find out what is the max gear in PWM the device
+ * supports
+ */
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &dev_pwm_max_rx_gear);
+
+ if (!dev_pwm_max_rx_gear) {
+ pr_err("%s: couldn't get device max pwm rx gear\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+ &dev_pwm_max_tx_gear);
+
+ if (!dev_pwm_max_tx_gear) {
+ pr_err("%s: couldn't get device max pwm tx gear\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ final_pwr->gear_rx = dev_pwm_max_rx_gear;
+ final_pwr->gear_tx = dev_pwm_max_tx_gear;
+ }
+
+ if ((new_pwr->gear_rx > final_pwr->gear_rx) ||
+ (new_pwr->gear_tx > final_pwr->gear_tx) ||
+ (new_pwr->lane_rx > final_pwr->lane_rx) ||
+ (new_pwr->lane_tx > final_pwr->lane_tx)) {
+ pr_err("%s: (RX,TX) GG,LL: in PWM/HS new pwr [%d%d,%d%d] exceeds device limitation [%d%d,%d%d]\n",
+ __func__,
+ new_pwr->gear_rx, new_pwr->gear_tx,
+ new_pwr->lane_rx, new_pwr->lane_tx,
+ final_pwr->gear_rx, final_pwr->gear_tx,
+ final_pwr->lane_rx, final_pwr->lane_tx);
+ return -ENOTSUPP;
+ }
+
+ final_pwr->gear_rx = new_pwr->gear_rx;
+ final_pwr->gear_tx = new_pwr->gear_tx;
+ final_pwr->lane_rx = new_pwr->lane_rx;
+ final_pwr->lane_tx = new_pwr->lane_tx;
+ final_pwr->pwr_rx = new_pwr->pwr_rx;
+ final_pwr->pwr_tx = new_pwr->pwr_tx;
+ final_pwr->hs_rate = new_pwr->hs_rate;
+
+out:
+ return ret;
+}
+
+static int ufsdbg_config_pwr_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+ int ret;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_scsi_block_requests(hba);
+ ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
+ if (!ret)
+ ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
+ ufshcd_scsi_unblock_requests(hba);
+ pm_runtime_put_sync(hba->dev);
+
+ return ret;
+}
+
+static ssize_t ufsdbg_power_mode_write(struct file *file,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ufs_hba *hba = file->f_mapping->host->i_private;
+ struct ufs_pa_layer_attr pwr_mode;
+ struct ufs_pa_layer_attr final_pwr_mode;
+ char pwr_mode_str[BUFF_LINE_SIZE] = {0};
+ loff_t buff_pos = 0;
+ int ret;
+ int idx = 0;
+
+ ret = simple_write_to_buffer(pwr_mode_str, BUFF_LINE_SIZE,
+ &buff_pos, ubuf, cnt);
+
+ pwr_mode.gear_rx = pwr_mode_str[idx++] - '0';
+ pwr_mode.gear_tx = pwr_mode_str[idx++] - '0';
+ pwr_mode.lane_rx = pwr_mode_str[idx++] - '0';
+ pwr_mode.lane_tx = pwr_mode_str[idx++] - '0';
+ pwr_mode.pwr_rx = pwr_mode_str[idx++] - '0';
+ pwr_mode.pwr_tx = pwr_mode_str[idx++] - '0';
+
+ /*
+ * Switching between rates is not currently supported so use the
+ * current rate.
+ * TODO: add rate switching if and when it is supported in the future
+ */
+ pwr_mode.hs_rate = hba->pwr_info.hs_rate;
+
+ /* Validate user input */
+ if (!ufsdbg_power_mode_validate(&pwr_mode))
+ return -EINVAL;
+
+ pr_debug("%s: new power mode requested [RX,TX]: Gear=[%d,%d], Lane=[%d,%d], Mode=[%d,%d]\n",
+ __func__,
+ pwr_mode.gear_rx, pwr_mode.gear_tx, pwr_mode.lane_rx,
+ pwr_mode.lane_tx, pwr_mode.pwr_rx, pwr_mode.pwr_tx);
+
+ ret = ufsdbg_cfg_pwr_param(hba, &pwr_mode, &final_pwr_mode);
+ if (ret) {
+ dev_err(hba->dev,
+ "%s: failed to configure new power parameters, ret = %d\n",
+ __func__, ret);
+ return cnt;
+ }
+
+ ret = ufsdbg_config_pwr_mode(hba, &final_pwr_mode);
+ if (ret == -EBUSY)
+ dev_err(hba->dev,
+ "%s: ufshcd_config_pwr_mode failed: system is busy, try again\n",
+ __func__);
+ else if (ret)
+ dev_err(hba->dev,
+ "%s: ufshcd_config_pwr_mode failed, ret=%d\n",
+ __func__, ret);
+
+ return cnt;
+}
+
+static int ufsdbg_power_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_power_mode_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_power_mode_desc = {
+ .open = ufsdbg_power_mode_open,
+ .read = seq_read,
+ .write = ufsdbg_power_mode_write,
+};
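The power_mode file expects a six-digit string 'GGLLMM' in the order gear RX/TX, lanes RX/TX, power mode RX/TX (1 fast, 2 slow, 4 fast-auto, 5 slow-auto). A tiny self-contained sketch of that decoding for an example input of "332211" (gear 3, 2 lanes, fast mode, in both directions); the sample value is illustrative only:

#include <stdio.h>

int main(void)
{
	const char *s = "332211";	/* GGLLMM, as parsed by ufsdbg_power_mode_write() */
	int gear_rx = s[0] - '0', gear_tx = s[1] - '0';
	int lane_rx = s[2] - '0', lane_tx = s[3] - '0';
	int pwr_rx  = s[4] - '0', pwr_tx  = s[5] - '0';

	printf("gear=[%d,%d] lane=[%d,%d] pwr=[%d,%d]\n",
	       gear_rx, gear_tx, lane_rx, lane_tx, pwr_rx, pwr_tx);
	return 0;
}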
+
+static int ufsdbg_dme_read(void *data, u64 *attr_val, bool peer)
+{
+ int ret;
+ struct ufs_hba *hba = data;
+ u32 attr_id, read_val = 0;
+ int (*read_func)(struct ufs_hba *, u32, u32 *);
+ u32 attr_sel;
+
+ if (!hba)
+ return -EINVAL;
+
+ read_func = peer ? ufshcd_dme_peer_get : ufshcd_dme_get;
+ attr_id = peer ? hba->debugfs_files.dme_peer_attr_id :
+ hba->debugfs_files.dme_local_attr_id;
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_scsi_block_requests(hba);
+ ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
+ if (!ret) {
+ if ((attr_id >= MPHY_RX_ATTR_ADDR_START)
+ && (attr_id <= MPHY_RX_ATTR_ADDR_END))
+ attr_sel = UIC_ARG_MIB_SEL(attr_id,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0));
+ else
+ attr_sel = UIC_ARG_MIB(attr_id);
+
+ ret = read_func(hba, attr_sel, &read_val);
+ }
+ ufshcd_scsi_unblock_requests(hba);
+ pm_runtime_put_sync(hba->dev);
+
+ if (!ret)
+ *attr_val = (u64)read_val;
+
+ return ret;
+}
+
+static int ufsdbg_dme_local_set_attr_id(void *data, u64 attr_id)
+{
+ struct ufs_hba *hba = data;
+
+ if (!hba)
+ return -EINVAL;
+
+ hba->debugfs_files.dme_local_attr_id = (u32)attr_id;
+
+ return 0;
+}
+
+static int ufsdbg_dme_local_read(void *data, u64 *attr_val)
+{
+ return ufsdbg_dme_read(data, attr_val, false);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dme_local_read_ops,
+ ufsdbg_dme_local_read,
+ ufsdbg_dme_local_set_attr_id,
+ "%llu\n");
+
+static int ufsdbg_dme_peer_read(void *data, u64 *attr_val)
+{
+ struct ufs_hba *hba = data;
+
+ if (!hba)
+ return -EINVAL;
+ else
+ return ufsdbg_dme_read(data, attr_val, true);
+}
+
+static int ufsdbg_dme_peer_set_attr_id(void *data, u64 attr_id)
+{
+ struct ufs_hba *hba = data;
+
+ if (!hba)
+ return -EINVAL;
+
+ hba->debugfs_files.dme_peer_attr_id = (u32)attr_id;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dme_peer_read_ops,
+ ufsdbg_dme_peer_read,
+ ufsdbg_dme_peer_set_attr_id,
+ "%llu\n");
+
+static int ufsdbg_dbg_print_en_read(void *data, u64 *attr_val)
+{
+ struct ufs_hba *hba = data;
+
+ if (!hba)
+ return -EINVAL;
+
+ *attr_val = (u64)hba->ufshcd_dbg_print;
+ return 0;
+}
+
+static int ufsdbg_dbg_print_en_set(void *data, u64 attr_id)
+{
+ struct ufs_hba *hba = data;
+
+ if (!hba)
+ return -EINVAL;
+
+ if (attr_id & ~UFSHCD_DBG_PRINT_ALL)
+ return -EINVAL;
+
+ hba->ufshcd_dbg_print = (u32)attr_id;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dbg_print_en_ops,
+ ufsdbg_dbg_print_en_read,
+ ufsdbg_dbg_print_en_set,
+ "%llu\n");
+
+static ssize_t ufsdbg_req_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct ufs_hba *hba = filp->f_mapping->host->i_private;
+ int val;
+ int ret;
+ unsigned long flags;
+
+ ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+ if (ret) {
+ dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+ return ret;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_init_req_stats(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return cnt;
+}
+
+static int ufsdbg_req_stats_show(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+ int i;
+ unsigned long flags;
+
+ /* Header */
+ seq_printf(file, "\t%-10s %-10s %-10s %-10s %-10s %-10s",
+ "All", "Write", "Read", "Read(urg)", "Write(urg)", "Flush");
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ seq_printf(file, "\n%s:\t", "Min");
+ for (i = 0; i < TS_NUM_STATS; i++)
+ seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].min);
+ seq_printf(file, "\n%s:\t", "Max");
+ for (i = 0; i < TS_NUM_STATS; i++)
+ seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].max);
+ seq_printf(file, "\n%s:\t", "Avg.");
+ for (i = 0; i < TS_NUM_STATS; i++)
+ seq_printf(file, "%-10llu ",
+ div64_u64(hba->ufs_stats.req_stats[i].sum,
+ hba->ufs_stats.req_stats[i].count));
+ seq_printf(file, "\n%s:\t", "Count");
+ for (i = 0; i < TS_NUM_STATS; i++)
+ seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].count);
+ seq_puts(file, "\n");
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return 0;
+}
+
+static int ufsdbg_req_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_req_stats_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_req_stats_desc = {
+ .open = ufsdbg_req_stats_open,
+ .read = seq_read,
+ .write = ufsdbg_req_stats_write,
+};
+
+
+static int ufsdbg_reset_controller_show(struct seq_file *file, void *data)
+{
+ seq_puts(file, "echo 1 > /sys/kernel/debug/.../reset_controller\n");
+ seq_puts(file, "resets the UFS controller and restores its operational state\n\n");
+
+ return 0;
+}
+
+static int ufsdbg_reset_controller_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_reset_controller_show,
+ inode->i_private);
+}
+
+static ssize_t ufsdbg_reset_controller_write(struct file *filp,
+ const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct ufs_hba *hba = filp->f_mapping->host->i_private;
+ unsigned long flags;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * simulating a dummy error in order to "convince"
+ * eh_work to actually reset the controller
+ */
+ hba->saved_err |= INT_FATAL_ERRORS;
+ hba->silence_err_logs = true;
+ schedule_work(&hba->eh_work);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ flush_work(&hba->eh_work);
+
+ ufshcd_release(hba, false);
+ pm_runtime_put_sync(hba->dev);
+
+ return cnt;
+}
+
+static const struct file_operations ufsdbg_reset_controller = {
+ .open = ufsdbg_reset_controller_open,
+ .read = seq_read,
+ .write = ufsdbg_reset_controller_write,
+};
+
+static int ufsdbg_clear_err_state(void *data, u64 val)
+{
+ struct ufs_hba *hba = data;
+
+ if (!hba)
+ return -EINVAL;
+
+ /* clear the error state on any write attempt */
+ hba->debugfs_files.err_occurred = false;
+
+ return 0;
+}
+
+static int ufsdbg_read_err_state(void *data, u64 *val)
+{
+ struct ufs_hba *hba = data;
+
+ if (!hba)
+ return -EINVAL;
+
+ *val = hba->debugfs_files.err_occurred ? 1 : 0;
+
+ return 0;
+}
+
+void ufsdbg_set_err_state(struct ufs_hba *hba)
+{
+ hba->debugfs_files.err_occurred = true;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_err_state,
+ ufsdbg_read_err_state,
+ ufsdbg_clear_err_state,
+ "%llu\n");
+
+void ufsdbg_add_debugfs(struct ufs_hba *hba)
+{
+ char root_name[sizeof("ufshcd00")];
+
+ if (!hba) {
+ pr_err("%s: NULL hba, exiting", __func__);
+ return;
+ }
+
+ snprintf(root_name, ARRAY_SIZE(root_name), "%s%d", UFSHCD,
+ hba->host->host_no);
+
+ hba->debugfs_files.debugfs_root = debugfs_create_dir(root_name, NULL);
+ if (IS_ERR(hba->debugfs_files.debugfs_root))
+ /* Don't complain -- debugfs just isn't enabled */
+ goto err_no_root;
+ if (!hba->debugfs_files.debugfs_root) {
+ /*
+ * Complain -- debugfs is enabled, but it failed to
+ * create the directory
+ */
+ dev_err(hba->dev,
+ "%s: NULL debugfs root directory, exiting", __func__);
+ goto err_no_root;
+ }
+
+ hba->debugfs_files.stats_folder = debugfs_create_dir("stats",
+ hba->debugfs_files.debugfs_root);
+ if (!hba->debugfs_files.stats_folder) {
+ dev_err(hba->dev,
+ "%s: NULL stats_folder, exiting", __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.tag_stats =
+ debugfs_create_file("tag_stats", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.stats_folder, hba,
+ &ufsdbg_tag_stats_fops);
+ if (!hba->debugfs_files.tag_stats) {
+ dev_err(hba->dev, "%s: NULL tag_stats file, exiting",
+ __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.query_stats =
+ debugfs_create_file("query_stats", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.stats_folder, hba,
+ &ufsdbg_query_stats_fops);
+ if (!hba->debugfs_files.query_stats) {
+ dev_err(hba->dev, "%s: NULL query_stats file, exiting",
+ __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.err_stats =
+ debugfs_create_file("err_stats", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.stats_folder, hba,
+ &ufsdbg_err_stats_fops);
+ if (!hba->debugfs_files.err_stats) {
+ dev_err(hba->dev, "%s: NULL err_stats file, exiting",
+ __func__);
+ goto err;
+ }
+
+ if (ufshcd_init_statistics(hba)) {
+ dev_err(hba->dev, "%s: Error initializing statistics",
+ __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.host_regs = debugfs_create_file("host_regs", S_IRUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_host_regs_fops);
+ if (!hba->debugfs_files.host_regs) {
+ dev_err(hba->dev, "%s: NULL hcd regs file, exiting", __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.show_hba = debugfs_create_file("show_hba", S_IRUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_show_hba_fops);
+ if (!hba->debugfs_files.show_hba) {
+ dev_err(hba->dev, "%s: NULL hba file, exiting", __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.dump_dev_desc =
+ debugfs_create_file("dump_device_desc", S_IRUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_dump_device_desc);
+ if (!hba->debugfs_files.dump_dev_desc) {
+ dev_err(hba->dev,
+ "%s: NULL dump_device_desc file, exiting", __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.power_mode =
+ debugfs_create_file("power_mode", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_power_mode_desc);
+ if (!hba->debugfs_files.power_mode) {
+ dev_err(hba->dev,
+ "%s: NULL power_mode_desc file, exiting", __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.dme_local_read =
+ debugfs_create_file("dme_local_read", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_dme_local_read_ops);
+ if (!hba->debugfs_files.dme_local_read) {
+ dev_err(hba->dev,
+ "%s: failed create dme_local_read debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.dme_peer_read =
+ debugfs_create_file("dme_peer_read", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_dme_peer_read_ops);
+ if (!hba->debugfs_files.dme_peer_read) {
+ dev_err(hba->dev,
+ "%s: failed create dme_peer_read debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.dbg_print_en =
+ debugfs_create_file("dbg_print_en", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_dbg_print_en_ops);
+ if (!hba->debugfs_files.dbg_print_en) {
+ dev_err(hba->dev,
+ "%s: failed create dbg_print_en debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.req_stats =
+ debugfs_create_file("req_stats", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.stats_folder, hba,
+ &ufsdbg_req_stats_desc);
+ if (!hba->debugfs_files.req_stats) {
+ dev_err(hba->dev,
+ "%s: failed create req_stats debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.reset_controller =
+ debugfs_create_file("reset_controller", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_reset_controller);
+ if (!hba->debugfs_files.reset_controller) {
+ dev_err(hba->dev,
+ "%s: failed create reset_controller debugfs entry",
+ __func__);
+ goto err;
+ }
+
+ hba->debugfs_files.err_state =
+ debugfs_create_file("err_state", S_IRUSR | S_IWUSR,
+ hba->debugfs_files.debugfs_root, hba,
+ &ufsdbg_err_state);
+ if (!hba->debugfs_files.err_state) {
+ dev_err(hba->dev,
+ "%s: failed create err_state debugfs entry", __func__);
+ goto err;
+ }
+
+ ufsdbg_setup_fault_injection(hba);
+
+ ufshcd_vops_add_debugfs(hba, hba->debugfs_files.debugfs_root);
+
+ return;
+
+err:
+ debugfs_remove_recursive(hba->debugfs_files.debugfs_root);
+ hba->debugfs_files.debugfs_root = NULL;
+err_no_root:
+ dev_err(hba->dev, "%s: failed to initialize debugfs\n", __func__);
+}
+
+void ufsdbg_remove_debugfs(struct ufs_hba *hba)
+{
+ ufshcd_vops_remove_debugfs(hba);
+ debugfs_remove_recursive(hba->debugfs_files.debugfs_root);
+ kfree(hba->ufs_stats.tag_stats);
+}
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
new file mode 100644
index 000000000000..13848e8b72e0
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * UFS debugfs - add debugfs interface to the ufshcd.
+ * This is currently used for statistics collection and exporting from the
+ * UFS driver.
+ * This infrastructure can be used for debugging or direct tweaking
+ * of the driver from userspace.
+ *
+ */
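+
+/*
+ * Example of the resulting layout (assuming debugfs is mounted at
+ * /sys/kernel/debug and the SCSI host number is 0):
+ *   /sys/kernel/debug/ufshcd0/
+ *     host_regs, show_hba, dump_device_desc, power_mode,
+ *     dme_local_read, dme_peer_read, dbg_print_en,
+ *     reset_controller, err_state
+ *     stats/{tag_stats,query_stats,err_stats,req_stats}
+ * plus any vendor-specific entries added via ufshcd_vops_add_debugfs().
+ */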
+
+#ifndef _UFS_DEBUGFS_H
+#define _UFS_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include "ufshcd.h"
+
+enum ufsdbg_err_inject_scenario {
+ ERR_INJECT_INTR,
+ ERR_INJECT_PWR_CHANGE,
+ ERR_INJECT_UIC,
+ ERR_INJECT_DME_ATTR,
+ ERR_INJECT_QUERY,
+ ERR_INJECT_MAX_ERR_SCENARIOS,
+};
+
+#ifdef CONFIG_DEBUG_FS
+void ufsdbg_add_debugfs(struct ufs_hba *hba);
+void ufsdbg_remove_debugfs(struct ufs_hba *hba);
+void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset, int num_regs,
+ char *str, void *priv);
+void ufsdbg_set_err_state(struct ufs_hba *hba);
+#else
+static inline void ufsdbg_add_debugfs(struct ufs_hba *hba)
+{
+}
+static inline void ufsdbg_remove_debugfs(struct ufs_hba *hba)
+{
+}
+static inline void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset,
+ int num_regs, char *str, void *priv)
+{
+}
+static inline void ufsdbg_set_err_state(struct ufs_hba *hba)
+{
+}
+#endif
+
+#ifdef CONFIG_UFS_FAULT_INJECTION
+void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+ enum ufsdbg_err_inject_scenario err_scenario,
+ int success_value, int *ret_value);
+#else
+static inline void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+ enum ufsdbg_err_inject_scenario err_scenario,
+ int success_value, int *ret_value)
+{
+}
+#endif
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
new file mode 100644
index 000000000000..db4ecec6cf2f
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2015,2017, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include "ufs-qcom.h"
+#include "ufs-qcom-debugfs.h"
+#include "ufs-debugfs.h"
+
+#define TESTBUS_CFG_BUFF_LINE_SIZE sizeof("0xXY, 0xXY")
+
+static void ufs_qcom_dbg_remove_debugfs(struct ufs_qcom_host *host);
+
+static int ufs_qcom_dbg_print_en_read(void *data, u64 *attr_val)
+{
+ struct ufs_qcom_host *host = data;
+
+ if (!host)
+ return -EINVAL;
+
+ *attr_val = (u64)host->dbg_print_en;
+ return 0;
+}
+
+static int ufs_qcom_dbg_print_en_set(void *data, u64 attr_id)
+{
+ struct ufs_qcom_host *host = data;
+
+ if (!host)
+ return -EINVAL;
+
+ if (attr_id & ~UFS_QCOM_DBG_PRINT_ALL)
+ return -EINVAL;
+
+ host->dbg_print_en = (u32)attr_id;
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_print_en_ops,
+ ufs_qcom_dbg_print_en_read,
+ ufs_qcom_dbg_print_en_set,
+ "%llu\n");
+
+static int ufs_qcom_dbg_testbus_en_read(void *data, u64 *attr_val)
+{
+ struct ufs_qcom_host *host = data;
+ bool enabled;
+
+ if (!host)
+ return -EINVAL;
+
+ enabled = !!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN);
+ *attr_val = (u64)enabled;
+ return 0;
+}
+
+static int ufs_qcom_dbg_testbus_en_set(void *data, u64 attr_id)
+{
+ struct ufs_qcom_host *host = data;
+ int ret = 0;
+
+ if (!host)
+ return -EINVAL;
+
+ if (!!attr_id)
+ host->dbg_print_en |= UFS_QCOM_DBG_PRINT_TEST_BUS_EN;
+ else
+ host->dbg_print_en &= ~UFS_QCOM_DBG_PRINT_TEST_BUS_EN;
+
+ pm_runtime_get_sync(host->hba->dev);
+ ufshcd_hold(host->hba, false);
+ ret = ufs_qcom_testbus_config(host);
+ ufshcd_release(host->hba, false);
+ pm_runtime_put_sync(host->hba->dev);
+
+ return ret;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_testbus_en_ops,
+ ufs_qcom_dbg_testbus_en_read,
+ ufs_qcom_dbg_testbus_en_set,
+ "%llu\n");
+
+static int ufs_qcom_dbg_testbus_cfg_show(struct seq_file *file, void *data)
+{
+ struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+
+ seq_printf(file, "Current configuration: major=%d, minor=%d\n\n",
+ host->testbus.select_major, host->testbus.select_minor);
+
+ /* Print usage */
+ seq_puts(file,
+ "To change the test-bus configuration, write 'MAJ,MIN' where:\n"
+ "MAJ - major select\n"
+ "MIN - minor select\n\n");
+ return 0;
+}
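+
+/*
+ * For example (illustrative path, assuming debugfs is mounted at
+ * /sys/kernel/debug and this is host 0), selecting major bus 2 and
+ * minor bus 14:
+ *   echo "2,14" > /sys/kernel/debug/ufshcd0/qcom/testbus/configuration
+ *   cat /sys/kernel/debug/ufshcd0/qcom/testbus/configuration
+ * The values are only applied if ufs_qcom_testbus_cfg_is_ok() accepts them.
+ */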
+
+static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
+ const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ struct ufs_qcom_host *host = file->f_mapping->host->i_private;
+ char configuration[TESTBUS_CFG_BUFF_LINE_SIZE] = {'\0'};
+ loff_t buff_pos = 0;
+ char *comma;
+ int ret = 0;
+ int major;
+ int minor;
+ unsigned long flags;
+ struct ufs_hba *hba = host->hba;
+
+ ret = simple_write_to_buffer(configuration,
+ TESTBUS_CFG_BUFF_LINE_SIZE - 1,
+ &buff_pos, ubuf, cnt);
+ if (ret < 0) {
+ dev_err(host->hba->dev, "%s: failed to read user data\n",
+ __func__);
+ goto out;
+ }
+ configuration[ret] = '\0';
+
+ comma = strnchr(configuration, TESTBUS_CFG_BUFF_LINE_SIZE, ',');
+ if (!comma || comma == configuration) {
+ dev_err(host->hba->dev,
+ "%s: error in configuration of testbus\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sscanf(configuration, "%i,%i", &major, &minor) != 2) {
+ dev_err(host->hba->dev,
+ "%s: couldn't parse input to 2 numeric values\n",
+ __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!ufs_qcom_testbus_cfg_is_ok(host, major, minor)) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ host->testbus.select_major = (u8)major;
+ host->testbus.select_minor = (u8)minor;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * Sanity check of the {major, minor} tuple is done in the
+ * config function
+ */
+ pm_runtime_get_sync(host->hba->dev);
+ ufshcd_hold(host->hba, false);
+ ret = ufs_qcom_testbus_config(host);
+ ufshcd_release(host->hba, false);
+ pm_runtime_put_sync(host->hba->dev);
+ if (!ret)
+ dev_dbg(host->hba->dev,
+ "%s: New configuration: major=%d, minor=%d\n",
+ __func__, host->testbus.select_major,
+ host->testbus.select_minor);
+
+out:
+ return ret ? ret : cnt;
+}
+
+static int ufs_qcom_dbg_testbus_cfg_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufs_qcom_dbg_testbus_cfg_show,
+ inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_testbus_cfg_desc = {
+ .open = ufs_qcom_dbg_testbus_cfg_open,
+ .read = seq_read,
+ .write = ufs_qcom_dbg_testbus_cfg_write,
+};
+
+static int ufs_qcom_dbg_testbus_bus_read(void *data, u64 *attr_val)
+{
+ struct ufs_qcom_host *host = data;
+
+ if (!host)
+ return -EINVAL;
+
+ pm_runtime_get_sync(host->hba->dev);
+ ufshcd_hold(host->hba, false);
+ *attr_val = (u64)ufshcd_readl(host->hba, UFS_TEST_BUS);
+ ufshcd_release(host->hba, false);
+ pm_runtime_put_sync(host->hba->dev);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_testbus_bus_ops,
+ ufs_qcom_dbg_testbus_bus_read,
+ NULL,
+ "%llu\n");
+
+static int ufs_qcom_dbg_dbg_regs_show(struct seq_file *file, void *data)
+{
+ struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+ bool dbg_print_reg = !!(host->dbg_print_en &
+ UFS_QCOM_DBG_PRINT_REGS_EN);
+
+ pm_runtime_get_sync(host->hba->dev);
+ ufshcd_hold(host->hba, false);
+
+ /* Temporarily override the debug print enable */
+ host->dbg_print_en |= UFS_QCOM_DBG_PRINT_REGS_EN;
+ ufs_qcom_print_hw_debug_reg_all(host->hba, file, ufsdbg_pr_buf_to_std);
+ /* Restore previous debug print enable value */
+ if (!dbg_print_reg)
+ host->dbg_print_en &= ~UFS_QCOM_DBG_PRINT_REGS_EN;
+
+ ufshcd_release(host->hba, false);
+ pm_runtime_put_sync(host->hba->dev);
+
+ return 0;
+}
+
+static int ufs_qcom_dbg_dbg_regs_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ufs_qcom_dbg_dbg_regs_show,
+ inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_dbg_regs_desc = {
+ .open = ufs_qcom_dbg_dbg_regs_open,
+ .read = seq_read,
+};
+
+static int ufs_qcom_dbg_pm_qos_show(struct seq_file *file, void *data)
+{
+ struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+ seq_printf(file, "enabled: %d\n", host->pm_qos.is_enabled);
+ for (i = 0; i < host->pm_qos.num_groups && host->pm_qos.groups; i++)
+ seq_printf(file,
+ "CPU Group #%d(mask=0x%lx): active_reqs=%d, state=%d, latency=%d\n",
+ i, host->pm_qos.groups[i].mask.bits[0],
+ host->pm_qos.groups[i].active_reqs,
+ host->pm_qos.groups[i].state,
+ host->pm_qos.groups[i].latency_us);
+
+ spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+ return 0;
+}
+
+static int ufs_qcom_dbg_pm_qos_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, ufs_qcom_dbg_pm_qos_show, inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_pm_qos_desc = {
+ .open = ufs_qcom_dbg_pm_qos_open,
+ .read = seq_read,
+};
+
+void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root)
+{
+ struct ufs_qcom_host *host;
+
+ if (!hba || !hba->priv) {
+ pr_err("%s: NULL host, exiting\n", __func__);
+ return;
+ }
+
+ host = hba->priv;
+ host->debugfs_files.debugfs_root = debugfs_create_dir("qcom", root);
+ if (IS_ERR(host->debugfs_files.debugfs_root))
+ /* Don't complain -- debugfs just isn't enabled */
+ goto err_no_root;
+ if (!host->debugfs_files.debugfs_root) {
+ /*
+ * Complain -- debugfs is enabled, but it failed to
+ * create the directory
+ */
+ dev_err(host->hba->dev,
+ "%s: NULL debugfs root directory, exiting", __func__);
+ goto err_no_root;
+ }
+
+ host->debugfs_files.dbg_print_en =
+ debugfs_create_file("dbg_print_en", S_IRUSR | S_IWUSR,
+ host->debugfs_files.debugfs_root, host,
+ &ufs_qcom_dbg_print_en_ops);
+ if (!host->debugfs_files.dbg_print_en) {
+ dev_err(host->hba->dev,
+ "%s: failed to create dbg_print_en debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ host->debugfs_files.testbus = debugfs_create_dir("testbus",
+ host->debugfs_files.debugfs_root);
+ if (!host->debugfs_files.testbus) {
+ dev_err(host->hba->dev,
+ "%s: failed create testbus directory\n",
+ __func__);
+ goto err;
+ }
+
+ host->debugfs_files.testbus_en =
+ debugfs_create_file("enable", S_IRUSR | S_IWUSR,
+ host->debugfs_files.testbus, host,
+ &ufs_qcom_dbg_testbus_en_ops);
+ if (!host->debugfs_files.testbus_en) {
+ dev_err(host->hba->dev,
+ "%s: failed create testbus_en debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ host->debugfs_files.testbus_cfg =
+ debugfs_create_file("configuration", S_IRUSR | S_IWUSR,
+ host->debugfs_files.testbus, host,
+ &ufs_qcom_dbg_testbus_cfg_desc);
+ if (!host->debugfs_files.testbus_cfg) {
+ dev_err(host->hba->dev,
+ "%s: failed create testbus_cfg debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ host->debugfs_files.testbus_bus =
+ debugfs_create_file("TEST_BUS", S_IRUSR,
+ host->debugfs_files.testbus, host,
+ &ufs_qcom_dbg_testbus_bus_ops);
+ if (!host->debugfs_files.testbus_bus) {
+ dev_err(host->hba->dev,
+ "%s: failed create testbus_bus debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ host->debugfs_files.dbg_regs =
+ debugfs_create_file("debug-regs", S_IRUSR,
+ host->debugfs_files.debugfs_root, host,
+ &ufs_qcom_dbg_dbg_regs_desc);
+ if (!host->debugfs_files.dbg_regs) {
+ dev_err(host->hba->dev,
+ "%s: failed create dbg_regs debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ host->debugfs_files.pm_qos =
+ debugfs_create_file("pm_qos", S_IRUSR,
+ host->debugfs_files.debugfs_root, host,
+ &ufs_qcom_dbg_pm_qos_desc);
+ if (!host->debugfs_files.pm_qos) {
+ dev_err(host->hba->dev,
+ "%s: failed create pm_qos debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
+ return;
+
+err:
+ ufs_qcom_dbg_remove_debugfs(host);
+err_no_root:
+ dev_err(host->hba->dev, "%s: failed to initialize debugfs\n", __func__);
+}
+
+static void ufs_qcom_dbg_remove_debugfs(struct ufs_qcom_host *host)
+{
+ debugfs_remove_recursive(host->debugfs_files.debugfs_root);
+ host->debugfs_files.debugfs_root = NULL;
+}
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.h b/drivers/scsi/ufs/ufs-qcom-debugfs.h
new file mode 100644
index 000000000000..b693bfa84a71
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef QCOM_DEBUGFS_H_
+#define QCOM_DEBUGFS_H_
+
+#include "ufshcd.h"
+
+#ifdef CONFIG_DEBUG_FS
+void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root);
+#else
+static inline void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba,
+ struct dentry *root)
+{
+}
+#endif
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
new file mode 100644
index 000000000000..d288e83ec9d7
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -0,0 +1,725 @@
+/*
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+
+#include "ufs-qcom-ice.h"
+#include "ufs-qcom-debugfs.h"
+#include "ufshcd.h"
+
+#define UFS_QCOM_CRYPTO_LABEL "ufs-qcom-crypto"
+/* Timeout for ICE initialization, which requires TZ access */
+#define UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS 500
+
+#define UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN 0
+
+static struct workqueue_struct *ice_workqueue;
+
+static void ufs_qcom_ice_dump_regs(struct ufs_qcom_host *qcom_host, int offset,
+ int len, char *prefix)
+{
+ print_hex_dump(KERN_ERR, prefix,
+ len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+ 16, 4, qcom_host->hba->mmio_base + offset, len * 4,
+ false);
+}
+
+void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
+{
+ int i;
+
+ if (!(qcom_host->dbg_print_en & UFS_QCOM_DBG_PRINT_ICE_REGS_EN))
+ return;
+
+ ufs_qcom_ice_dump_regs(qcom_host, REG_UFS_QCOM_ICE_CFG, 1,
+ "REG_UFS_QCOM_ICE_CFG ");
+ for (i = 0; i < NUM_QCOM_ICE_CTRL_INFO_n_REGS; i++) {
+ pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_1_%d = 0x%08X\n", i,
+ ufshcd_readl(qcom_host->hba,
+ (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * i)));
+
+ pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_2_%d = 0x%08X\n", i,
+ ufshcd_readl(qcom_host->hba,
+ (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * i)));
+ }
+
+ if (qcom_host->ice.pdev && qcom_host->ice.vops &&
+ qcom_host->ice.vops->debug)
+ qcom_host->ice.vops->debug(qcom_host->ice.pdev);
+}
+
+static void ufs_qcom_ice_error_cb(void *host_ctrl, u32 error)
+{
+ struct ufs_qcom_host *qcom_host = (struct ufs_qcom_host *)host_ctrl;
+
+ dev_err(qcom_host->hba->dev, "%s: Error in ice operation 0x%x",
+ __func__, error);
+
+ if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE)
+ qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED;
+}
+
+static struct platform_device *ufs_qcom_ice_get_pdevice(struct device *ufs_dev)
+{
+ struct device_node *node;
+ struct platform_device *ice_pdev = NULL;
+
+ node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0);
+
+ if (!node) {
+ dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n",
+ __func__);
+ goto out;
+ }
+
+ ice_pdev = qcom_ice_get_pdevice(node);
+out:
+ return ice_pdev;
+}
+
+static
+struct qcom_ice_variant_ops *ufs_qcom_ice_get_vops(struct device *ufs_dev)
+{
+ struct qcom_ice_variant_ops *ice_vops = NULL;
+ struct device_node *node;
+
+ node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0);
+
+ if (!node) {
+ dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n",
+ __func__);
+ goto out;
+ }
+
+ ice_vops = qcom_ice_get_variant_ops(node);
+
+ if (!ice_vops)
+ dev_err(ufs_dev, "%s: invalid ice_vops\n", __func__);
+
+ of_node_put(node);
+out:
+ return ice_vops;
+}
+
+/**
+ * ufs_qcom_ice_get_dev() - sets pointers to ICE data structs in UFS QCom host
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ *
+ * Sets ICE platform device pointer and ICE vops structure
+ * corresponding to the current UFS device.
+ *
+ * Return: -EINVAL in-case of invalid input parameters:
+ * qcom_host, qcom_host->hba or qcom_host->hba->dev
+ * -ENODEV in-case ICE device is not required
+ * -EPROBE_DEFER in-case ICE is required and hasn't been probed yet
+ * 0 otherwise
+ */
+int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
+{
+ struct device *ufs_dev;
+ int err = 0;
+
+ if (!qcom_host || !qcom_host->hba || !qcom_host->hba->dev) {
+ pr_err("%s: invalid qcom_host %p or qcom_host->hba or qcom_host->hba->dev\n",
+ __func__, qcom_host);
+ err = -EINVAL;
+ goto out;
+ }
+
+ ufs_dev = qcom_host->hba->dev;
+
+ qcom_host->ice.vops = ufs_qcom_ice_get_vops(ufs_dev);
+ qcom_host->ice.pdev = ufs_qcom_ice_get_pdevice(ufs_dev);
+
+ if (qcom_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) {
+ dev_err(ufs_dev, "%s: ICE device not probed yet\n",
+ __func__);
+ qcom_host->ice.pdev = NULL;
+ qcom_host->ice.vops = NULL;
+ err = -EPROBE_DEFER;
+ goto out;
+ }
+
+ if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
+ dev_err(ufs_dev, "%s: invalid platform device %p or vops %p\n",
+ __func__, qcom_host->ice.pdev, qcom_host->ice.vops);
+ qcom_host->ice.pdev = NULL;
+ qcom_host->ice.vops = NULL;
+ err = -ENODEV;
+ goto out;
+ }
+
+ qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED;
+
+out:
+ return err;
+}
+
+static void ufs_qcom_ice_cfg_work(struct work_struct *work)
+{
+ unsigned long flags;
+ struct ufs_qcom_host *qcom_host =
+ container_of(work, struct ufs_qcom_host, ice_cfg_work);
+
+ if (!qcom_host->ice.vops->config_start)
+ return;
+
+ spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
+ if (!qcom_host->req_pending) {
+ qcom_host->work_pending = false;
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
+ /*
+ * config_start() is called again because the previous attempt
+ * returned -EAGAIN; this call now takes care of the necessary
+ * key setup.
+ */
+ qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
+ qcom_host->req_pending, NULL, false);
+
+ spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
+ qcom_host->req_pending = NULL;
+ qcom_host->work_pending = false;
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+}
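+
+/*
+ * Key programming flow (summary): ice_vops->config_start() may return
+ * -EAGAIN when called from atomic context because a key slot still needs
+ * to be programmed. In that case the error is propagated so the request
+ * gets re-queued, and ice_cfg_work re-invokes config_start() from process
+ * context to perform the actual key setup.
+ */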
+
+/**
+ * ufs_qcom_ice_init() - initializes the ICE-UFS interface and ICE device
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ * qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ * be valid pointers.
+ *
+ * Return: -EINVAL in-case of an error
+ * 0 otherwise
+ */
+int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
+{
+ struct device *ufs_dev = qcom_host->hba->dev;
+ int err;
+
+ err = qcom_host->ice.vops->init(qcom_host->ice.pdev,
+ qcom_host,
+ ufs_qcom_ice_error_cb);
+ if (err) {
+ dev_err(ufs_dev, "%s: ice init failed. err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE;
+
+ qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
+ ice_workqueue = alloc_workqueue("ice-set-key",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!ice_workqueue) {
+ dev_err(ufs_dev, "%s: workqueue allocation failed.\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+ INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work);
+
+out:
+ return err;
+}
+
+static inline bool ufs_qcom_is_data_cmd(char cmd_op, bool is_write)
+{
+ if (is_write) {
+ if (cmd_op == WRITE_6 || cmd_op == WRITE_10 ||
+ cmd_op == WRITE_16)
+ return true;
+ } else {
+ if (cmd_op == READ_6 || cmd_op == READ_10 ||
+ cmd_op == READ_16)
+ return true;
+ }
+
+ return false;
+}
+
+int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
+ struct scsi_cmnd *cmd, u8 *cc_index, bool *enable)
+{
+ struct ice_data_setting ice_set;
+ char cmd_op = cmd->cmnd[0];
+ int err;
+ unsigned long flags;
+
+ if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
+ dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n",
+ __func__);
+ return 0;
+ }
+
+ if (qcom_host->ice.vops->config_start) {
+ memset(&ice_set, 0, sizeof(ice_set));
+
+ spin_lock_irqsave(
+ &qcom_host->ice_work_lock, flags);
+
+ err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
+ cmd->request, &ice_set, true);
+ if (err) {
+ /*
+ * config_start() returns -EAGAIN when a key slot is
+ * available but still not configured. As configuration
+ * requires a non-atomic context, this means we should
+ * call the function again from the worker thread to do
+ * the configuration. For this request the error will
+ * propagate so it will be re-queued.
+ */
+ if (err == -EAGAIN) {
+ dev_dbg(qcom_host->hba->dev,
+ "%s: scheduling task for ice setup\n",
+ __func__);
+
+ if (!qcom_host->work_pending) {
+ qcom_host->req_pending = cmd->request;
+
+ if (!queue_work(ice_workqueue,
+ &qcom_host->ice_cfg_work)) {
+ qcom_host->req_pending = NULL;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock,
+ flags);
+
+ return err;
+ }
+ qcom_host->work_pending = true;
+ }
+
+ } else {
+ if (err != -EBUSY)
+ dev_err(qcom_host->hba->dev,
+ "%s: error in ice_vops->config %d\n",
+ __func__, err);
+ }
+
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock,
+ flags);
+
+ return err;
+ }
+
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
+ if (ufs_qcom_is_data_cmd(cmd_op, true))
+ *enable = !ice_set.encr_bypass;
+ else if (ufs_qcom_is_data_cmd(cmd_op, false))
+ *enable = !ice_set.decr_bypass;
+
+ if (ice_set.crypto_data.key_index >= 0)
+ *cc_index = (u8)ice_set.crypto_data.key_index;
+ }
+ return 0;
+}
+
+/**
+ * ufs_qcom_ice_cfg_start() - starts configuring UFS's ICE registers
+ * for an ICE transaction
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ * qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ * be valid pointers.
+ * @cmd: Pointer to a valid scsi command. cmd->request should also be
+ * a valid pointer.
+ *
+ * Return: -EINVAL in-case of an error
+ * 0 otherwise
+ */
+int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
+ struct scsi_cmnd *cmd)
+{
+ struct device *dev = qcom_host->hba->dev;
+ int err = 0;
+ struct ice_data_setting ice_set;
+ unsigned int slot = 0;
+ sector_t lba = 0;
+ unsigned int ctrl_info_val = 0;
+ unsigned int bypass = 0;
+ struct request *req;
+ char cmd_op;
+ unsigned long flags;
+
+ if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
+ dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+ goto out;
+ }
+
+ if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
+ dev_err(dev, "%s: ice state (%d) is not active\n",
+ __func__, qcom_host->ice.state);
+ return -EINVAL;
+ }
+
+ if (qcom_host->hw_ver.major == 0x3) {
+ /* nothing to do here for version 0x3, exit silently */
+ return 0;
+ }
+
+ req = cmd->request;
+ if (req->bio)
+ lba = (req->bio->bi_iter.bi_sector) >>
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+
+ slot = req->tag;
+ if (slot < 0 || slot > qcom_host->hba->nutrs) {
+ dev_err(dev, "%s: slot (%d) is out of boundaries (0...%d)\n",
+ __func__, slot, qcom_host->hba->nutrs);
+ return -EINVAL;
+ }
+
+ memset(&ice_set, 0, sizeof(ice_set));
+ if (qcom_host->ice.vops->config_start) {
+
+ spin_lock_irqsave(
+ &qcom_host->ice_work_lock, flags);
+
+ err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
+ req, &ice_set, true);
+ if (err) {
+ /*
+ * config_start() returns -EAGAIN when a key slot is
+ * available but still not configured. As configuration
+ * requires a non-atomic context, this means we should
+ * call the function again from the worker thread to do
+ * the configuration. For this request the error will
+ * propagate so it will be re-queued.
+ */
+ if (err == -EAGAIN) {
+
+ dev_dbg(qcom_host->hba->dev,
+ "%s: scheduling task for ice setup\n",
+ __func__);
+
+ if (!qcom_host->work_pending) {
+
+ qcom_host->req_pending = cmd->request;
+ if (!queue_work(ice_workqueue,
+ &qcom_host->ice_cfg_work)) {
+ qcom_host->req_pending = NULL;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock,
+ flags);
+
+ return err;
+ }
+ qcom_host->work_pending = true;
+ }
+
+ } else {
+ if (err != -EBUSY)
+ dev_err(qcom_host->hba->dev,
+ "%s: error in ice_vops->config %d\n",
+ __func__, err);
+ }
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock, flags);
+
+ return err;
+ }
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock, flags);
+ }
+
+ cmd_op = cmd->cmnd[0];
+
+#define UFS_QCOM_DIR_WRITE true
+#define UFS_QCOM_DIR_READ false
+ /* if non data command, bypass shall be enabled */
+ if (!ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE) &&
+ !ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ))
+ bypass = UFS_QCOM_ICE_ENABLE_BYPASS;
+ /* if writing data command */
+ else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE))
+ bypass = ice_set.encr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
+ UFS_QCOM_ICE_DISABLE_BYPASS;
+ /* if reading data command */
+ else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ))
+ bypass = ice_set.decr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
+ UFS_QCOM_ICE_DISABLE_BYPASS;
+
+ /* Configure ICE index */
+ ctrl_info_val =
+ (ice_set.crypto_data.key_index &
+ MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX)
+ << OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX;
+
+ /* Configure data unit size of transfer request */
+ ctrl_info_val |=
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB
+ << OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU;
+
+ /* Configure ICE bypass mode */
+ ctrl_info_val |=
+ (bypass & MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS)
+ << OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS;
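+
+ /*
+ * Illustrative example: key index 3, 4KB data units and bypass
+ * disabled pack into (3 << 1) | (3 << 6) | (0 << 0) = 0xC6.
+ */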
+
+ if (qcom_host->hw_ver.major == 0x1) {
+ ufshcd_writel(qcom_host->hba, lba,
+ (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * slot));
+
+ ufshcd_writel(qcom_host->hba, ctrl_info_val,
+ (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * slot));
+ }
+ if (qcom_host->hw_ver.major == 0x2) {
+ ufshcd_writel(qcom_host->hba, (lba & 0xFFFFFFFF),
+ (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 16 * slot));
+
+ ufshcd_writel(qcom_host->hba, ((lba >> 32) & 0xFFFFFFFF),
+ (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 16 * slot));
+
+ ufshcd_writel(qcom_host->hba, ctrl_info_val,
+ (REG_UFS_QCOM_ICE_CTRL_INFO_3_n + 16 * slot));
+ }
+
+ /*
+ * Ensure the UFS-ICE registers are configured before the next
+ * operation, otherwise the UFS host controller might raise errors.
+ */
+ mb();
+out:
+ return err;
+}
+
+/**
+ * ufs_qcom_ice_cfg_end() - finishes configuring UFS's ICE registers
+ * for an ICE transaction
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ * qcom_host, qcom_host->hba and
+ * qcom_host->hba->dev should all
+ * be valid pointers.
+ * @req: Pointer to a valid request.
+ *
+ * Return: -EINVAL in-case of an error
+ * 0 otherwise
+ */
+int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, struct request *req)
+{
+ int err = 0;
+ struct device *dev = qcom_host->hba->dev;
+
+ if (qcom_host->ice.vops->config_end) {
+ err = qcom_host->ice.vops->config_end(req);
+ if (err) {
+ dev_err(dev, "%s: error in ice_vops->config_end %d\n",
+ __func__, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ufs_qcom_ice_reset() - resets UFS-ICE interface and ICE device
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ * qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ * be valid pointers.
+ *
+ * Return: -EINVAL in-case of an error
+ * 0 otherwise
+ */
+int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
+{
+ struct device *dev = qcom_host->hba->dev;
+ int err = 0;
+
+ if (!qcom_host->ice.pdev) {
+ dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+ goto out;
+ }
+
+ if (!qcom_host->ice.vops) {
+ dev_err(dev, "%s: invalid ice_vops\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE)
+ goto out;
+
+ if (qcom_host->ice.vops->reset) {
+ err = qcom_host->ice.vops->reset(qcom_host->ice.pdev);
+ if (err) {
+ dev_err(dev, "%s: ice_vops->reset failed. err %d\n",
+ __func__, err);
+ goto out;
+ }
+ }
+
+ if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
+ dev_err(qcom_host->hba->dev,
+ "%s: error. ice.state (%d) is not in active state\n",
+ __func__, qcom_host->ice.state);
+ err = -EINVAL;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ufs_qcom_ice_resume() - resumes UFS-ICE interface and ICE device from power
+ * collapse
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ * qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ * be valid pointers.
+ *
+ * Return: -EINVAL in-case of an error
+ * 0 otherwise
+ */
+int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
+{
+ struct device *dev = qcom_host->hba->dev;
+ int err = 0;
+
+ if (!qcom_host->ice.pdev) {
+ dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+ goto out;
+ }
+
+ if (qcom_host->ice.state !=
+ UFS_QCOM_ICE_STATE_SUSPENDED) {
+ goto out;
+ }
+
+ if (!qcom_host->ice.vops) {
+ dev_err(dev, "%s: invalid ice_vops\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qcom_host->ice.vops->resume) {
+ err = qcom_host->ice.vops->resume(qcom_host->ice.pdev);
+ if (err) {
+ dev_err(dev, "%s: ice_vops->resume failed. err %d\n",
+ __func__, err);
+ return err;
+ }
+ }
+ qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE;
+out:
+ return err;
+}
+
+/**
+ * ufs_qcom_ice_suspend() - suspends UFS-ICE interface and ICE device
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ * qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ * be valid pointers.
+ *
+ * Return: -EINVAL in-case of an error
+ * 0 otherwise
+ */
+int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
+{
+ struct device *dev = qcom_host->hba->dev;
+ int err = 0;
+
+ if (!qcom_host->ice.pdev) {
+ dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+ goto out;
+ }
+
+ if (qcom_host->ice.vops->suspend) {
+ err = qcom_host->ice.vops->suspend(qcom_host->ice.pdev);
+ if (err) {
+ dev_err(qcom_host->hba->dev,
+ "%s: ice_vops->suspend failed. err %d\n",
+ __func__, err);
+ return -EINVAL;
+ }
+ }
+
+ if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE) {
+ qcom_host->ice.state = UFS_QCOM_ICE_STATE_SUSPENDED;
+ } else if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_DISABLED) {
+ dev_err(qcom_host->hba->dev,
+ "%s: ice state is invalid: disabled\n",
+ __func__);
+ err = -EINVAL;
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ufs_qcom_ice_get_status() - returns the status of an ICE transaction
+ * @qcom_host: Pointer to a UFS QCom internal host structure.
+ * qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ * be valid pointers.
+ * @ice_status: Pointer to a valid output parameter.
+ * < 0 in case of ICE transaction failure.
+ * 0 otherwise.
+ *
+ * Return: -EINVAL in-case of an error
+ * 0 otherwise
+ */
+int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status)
+{
+ struct device *dev = NULL;
+ int err = 0;
+ int stat = -EINVAL;
+
+ *ice_status = 0;
+
+ dev = qcom_host->hba->dev;
+ if (!dev) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!qcom_host->ice.pdev) {
+ dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+ goto out;
+ }
+
+ if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!qcom_host->ice.vops) {
+ dev_err(dev, "%s: invalid ice_vops\n", __func__);
+ return -EINVAL;
+ }
+
+ if (qcom_host->ice.vops->status) {
+ stat = qcom_host->ice.vops->status(qcom_host->ice.pdev);
+ if (stat < 0) {
+ dev_err(dev, "%s: ice_vops->status failed. stat %d\n",
+ __func__, stat);
+ err = -EINVAL;
+ goto out;
+ }
+
+ *ice_status = stat;
+ }
+
+out:
+ return err;
+}
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.h b/drivers/scsi/ufs/ufs-qcom-ice.h
new file mode 100644
index 000000000000..eb0291612049
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom-ice.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UFS_QCOM_ICE_H_
+#define _UFS_QCOM_ICE_H_
+
+#include <scsi/scsi_cmnd.h>
+
+#include "ufs-qcom.h"
+
+/*
+ * UFS host controller ICE registers. There are n [0..31]
+ * of each of these registers
+ */
+enum {
+ REG_UFS_QCOM_ICE_CFG = 0x2200,
+ REG_UFS_QCOM_ICE_CTRL_INFO_1_n = 0x2204,
+ REG_UFS_QCOM_ICE_CTRL_INFO_2_n = 0x2208,
+ REG_UFS_QCOM_ICE_CTRL_INFO_3_n = 0x220C,
+};
+#define NUM_QCOM_ICE_CTRL_INFO_n_REGS 32
+
+/* UFS QCOM ICE CTRL Info register offset */
+enum {
+ OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS = 0,
+ OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX = 0x1,
+ OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU = 0x6,
+};
+
+/* UFS QCOM ICE CTRL Info register masks */
+enum {
+ MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS = 0x1,
+ MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX = 0x1F,
+ MASK_UFS_QCOM_ICE_CTRL_INFO_CDU = 0x8,
+};
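+
+/*
+ * Resulting CTRL_INFO layout (as used by ufs_qcom_ice_cfg_start()):
+ * bit 0 holds the bypass flag, bits [5:1] hold the key index and the
+ * crypto data unit size field starts at bit 6.
+ */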
+
+/* UFS QCOM ICE encryption/decryption bypass state */
+enum {
+ UFS_QCOM_ICE_DISABLE_BYPASS = 0,
+ UFS_QCOM_ICE_ENABLE_BYPASS = 1,
+};
+
+/* UFS QCOM ICE Crypto Data Unit of target DUN of Transfer Request */
+enum {
+ UFS_QCOM_ICE_TR_DATA_UNIT_512_B = 0,
+ UFS_QCOM_ICE_TR_DATA_UNIT_1_KB = 1,
+ UFS_QCOM_ICE_TR_DATA_UNIT_2_KB = 2,
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB = 3,
+ UFS_QCOM_ICE_TR_DATA_UNIT_8_KB = 4,
+ UFS_QCOM_ICE_TR_DATA_UNIT_16_KB = 5,
+ UFS_QCOM_ICE_TR_DATA_UNIT_32_KB = 6,
+};
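+
+/*
+ * Note: the data unit encoding is log2(size / 512 bytes), e.g. a 4KB
+ * data unit maps to 3 (UFS_QCOM_ICE_TR_DATA_UNIT_4_KB above).
+ */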
+
+/* UFS QCOM ICE internal state */
+enum {
+ UFS_QCOM_ICE_STATE_DISABLED = 0,
+ UFS_QCOM_ICE_STATE_ACTIVE = 1,
+ UFS_QCOM_ICE_STATE_SUSPENDED = 2,
+};
+
+#ifdef CONFIG_SCSI_UFS_QCOM_ICE
+int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
+ struct scsi_cmnd *cmd, u8 *cc_index, bool *enable);
+int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
+ struct scsi_cmnd *cmd);
+int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host,
+ struct request *req);
+int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status);
+void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host);
+#else
+static inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
+{
+ if (qcom_host) {
+ qcom_host->ice.pdev = NULL;
+ qcom_host->ice.vops = NULL;
+ }
+ return -ENODEV;
+}
+static inline int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
+ struct scsi_cmnd *cmd, u8 *cc_index, bool *enable)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
+ struct scsi_cmnd *cmd)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host,
+ struct request *req)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
+{
+ return 0;
+}
+static inline int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host,
+ int *ice_status)
+{
+ return 0;
+}
+static inline void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
+{
+}
+#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
+
+#endif /* UFS_QCOM_ICE_H_ */
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2b779a55f699..e1509b2bad19 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,15 +14,33 @@
#include <linux/time.h>
#include <linux/of.h>
+#include <linux/iopoll.h>
#include <linux/platform_device.h>
-#include <linux/phy/phy.h>
+#ifdef CONFIG_QCOM_BUS_SCALING
+#include <linux/msm-bus.h>
+#endif
+
+#include <soc/qcom/scm.h>
+#include <linux/phy/phy.h>
#include <linux/phy/phy-qcom-ufs.h>
+
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
#include "unipro.h"
#include "ufs-qcom.h"
#include "ufshci.h"
+#include "ufs_quirks.h"
+#include "ufs-qcom-ice.h"
+#include "ufs-qcom-debugfs.h"
+#include <linux/clk/msm-clk.h>
+
+#define MAX_PROP_SIZE 32
+#define VDDP_REF_CLK_MIN_UV 1200000
+#define VDDP_REF_CLK_MAX_UV 1200000
+/* TODO: further tuning for this parameter may be required */
+#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */
+
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
@@ -44,18 +62,24 @@ enum {
static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
+static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_cycles);
+static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);
static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
char *prefix)
{
print_hex_dump(KERN_ERR, prefix,
len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
- 16, 4, (void __force *)hba->mmio_base + offset,
- len * 4, false);
+ 16, 4, hba->mmio_base + offset, len * 4, false);
+}
+
+static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
+ char *prefix, void *priv)
+{
+ ufs_qcom_dump_regs(hba, offset, len, prefix);
}
static int ufs_qcom_get_connected_tx_lanes(struct ufs_hba *hba, u32 *tx_lanes)
@@ -78,13 +102,10 @@ static int ufs_qcom_host_clk_get(struct device *dev,
int err = 0;
clk = devm_clk_get(dev, name);
- if (IS_ERR(clk)) {
+ if (IS_ERR(clk))
err = PTR_ERR(clk);
- dev_err(dev, "%s: failed to get %s err %d",
- __func__, name, err);
- } else {
+ else
*clk_out = clk;
- }
return err;
}
@@ -106,9 +127,11 @@ static void ufs_qcom_disable_lane_clks(struct ufs_qcom_host *host)
if (!host->is_lane_clks_enabled)
return;
- clk_disable_unprepare(host->tx_l1_sync_clk);
+ if (host->tx_l1_sync_clk)
+ clk_disable_unprepare(host->tx_l1_sync_clk);
clk_disable_unprepare(host->tx_l0_sync_clk);
- clk_disable_unprepare(host->rx_l1_sync_clk);
+ if (host->rx_l1_sync_clk)
+ clk_disable_unprepare(host->rx_l1_sync_clk);
clk_disable_unprepare(host->rx_l0_sync_clk);
host->is_lane_clks_enabled = false;
@@ -132,21 +155,20 @@ static int ufs_qcom_enable_lane_clks(struct ufs_qcom_host *host)
if (err)
goto disable_rx_l0;
- err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
- host->rx_l1_sync_clk);
- if (err)
- goto disable_tx_l0;
-
- err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
- host->tx_l1_sync_clk);
- if (err)
- goto disable_rx_l1;
+ if (host->hba->lanes_per_direction > 1) {
+ err = ufs_qcom_host_clk_enable(dev, "rx_lane1_sync_clk",
+ host->rx_l1_sync_clk);
+ if (err)
+ goto disable_tx_l0;
+ /* The tx lane1 clk could be muxed, hence keep this optional */
+ if (host->tx_l1_sync_clk)
+ ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+ host->tx_l1_sync_clk);
+ }
host->is_lane_clks_enabled = true;
goto out;
-disable_rx_l1:
- clk_disable_unprepare(host->rx_l1_sync_clk);
disable_tx_l0:
clk_disable_unprepare(host->tx_l0_sync_clk);
disable_rx_l0:
@@ -162,42 +184,34 @@ static int ufs_qcom_init_lane_clks(struct ufs_qcom_host *host)
err = ufs_qcom_host_clk_get(dev,
"rx_lane0_sync_clk", &host->rx_l0_sync_clk);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: failed to get rx_lane0_sync_clk, err %d",
+ __func__, err);
goto out;
+ }
err = ufs_qcom_host_clk_get(dev,
"tx_lane0_sync_clk", &host->tx_l0_sync_clk);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
- &host->rx_l1_sync_clk);
- if (err)
- goto out;
-
- err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
- &host->tx_l1_sync_clk);
-
-out:
- return err;
-}
-
-static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
- u32 tx_lanes;
- int err = 0;
-
- err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
- if (err)
+ if (err) {
+ dev_err(dev, "%s: failed to get tx_lane0_sync_clk, err %d",
+ __func__, err);
goto out;
+ }
- err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
- if (err)
- dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
- __func__);
+ /* In case of single lane per direction, don't read lane1 clocks */
+ if (host->hba->lanes_per_direction > 1) {
+ err = ufs_qcom_host_clk_get(dev, "rx_lane1_sync_clk",
+ &host->rx_l1_sync_clk);
+ if (err) {
+ dev_err(dev, "%s: failed to get rx_lane1_sync_clk, err %d",
+ __func__, err);
+ goto out;
+ }
+ /* The tx lane1 clk could be muxed, hence keep this optional */
+ ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+ &host->tx_l1_sync_clk);
+ }
out:
return err;
}
@@ -267,9 +281,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
ret = ufs_qcom_phy_calibrate_phy(phy, is_rate_B);
if (ret) {
- dev_err(hba->dev,
- "%s: ufs_qcom_phy_calibrate_phy()failed, ret = %d\n",
- __func__, ret);
+ dev_err(hba->dev, "%s: ufs_qcom_phy_calibrate_phy() failed, ret = %d\n",
+ __func__, ret);
goto out;
}
@@ -290,8 +303,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
ret = ufs_qcom_phy_is_pcs_ready(phy);
if (ret)
- dev_err(hba->dev,
- "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
+ dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
__func__, ret);
ufs_qcom_select_unipro_mode(host);
@@ -307,15 +319,65 @@ out:
* in a specific operation, UTP controller CGCs are by default disabled and
* this function enables them (after every UFS link startup) to save some power
* leakage.
+ *
+ * UFS host controller v3.0.0 onwards has internal clock gating mechanism
+ * in Qunipro, enable them to save additional power.
*/
-static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int err = 0;
+
+ /* Enable UTP internal clock gating */
ufshcd_writel(hba,
ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
REG_UFS_CFG2);
/* Ensure that HW clock gating is enabled before next operations */
mb();
+
+ /* Enable Qunipro internal clock gating if supported */
+ if (!ufs_qcom_cap_qunipro_clk_gating(host))
+ goto out;
+
+ /* Enable all the mask bits */
+ err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
+ DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
+ PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+ DME_VS_CORE_CLK_CTRL);
+out:
+ return err;
+}
+
+static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+
+ /*
+ * Configure the behavior of ufs clocks core and peripheral
+ * memory state when they are turned off.
+ * This configuration is required to allow retaining
+ * ICE crypto configuration (including keys) when
+ * core_clk_ice is turned off, and powering down
+ * non-ICE RAMs of host controller.
+ */
+ list_for_each_entry(clki, &hba->clk_list_head, list) {
+ if (!strcmp(clki->name, "core_clk_ice"))
+ clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
+ else
+ clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
+ clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
+ }
}
static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -326,6 +388,7 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
+ ufs_qcom_force_mem_config(hba);
ufs_qcom_power_up_sequence(hba);
/*
* The PHY PLL output is the source of tx/rx lane symbol
@@ -333,12 +396,19 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
* is initialized.
*/
err = ufs_qcom_enable_lane_clks(host);
+ if (!err && host->ice.pdev) {
+ err = ufs_qcom_ice_init(host);
+ if (err) {
+ dev_err(hba->dev, "%s: ICE init failed (%d)\n",
+ __func__, err);
+ err = -EINVAL;
+ }
+ }
+
break;
case POST_CHANGE:
/* check if UFS PHY moved from DISABLED to HIBERN8 */
err = ufs_qcom_check_hibern8(hba);
- ufs_qcom_enable_hw_clk_gating(hba);
-
break;
default:
dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
@@ -351,8 +421,9 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
/**
* Returns zero for success and non-zero in case of a failure
*/
-static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
- u32 hs, u32 rate, bool update_link_startup_timer)
+static int __ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ u32 hs, u32 rate, bool update_link_startup_timer,
+ bool is_pre_scale_up)
{
int ret = 0;
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -386,9 +457,11 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
* SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
* UFS_REG_PA_LINK_STARTUP_TIMER
* But UTP controller uses SYS1CLK_1US_REG register for Interrupt
- * Aggregation logic.
+ * Aggregation / Auto hibern8 logic.
*/
- if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
+ if (ufs_qcom_cap_qunipro(host) &&
+ (!(ufshcd_is_intr_aggr_allowed(hba) ||
+ ufshcd_is_auto_hibern8_supported(hba))))
goto out;
if (gear == 0) {
@@ -397,8 +470,12 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
}
list_for_each_entry(clki, &hba->clk_list_head, list) {
- if (!strcmp(clki->name, "core_clk"))
- core_clk_rate = clk_get_rate(clki->clk);
+ if (!strcmp(clki->name, "core_clk")) {
+ if (is_pre_scale_up)
+ core_clk_rate = clki->max_freq;
+ else
+ core_clk_rate = clk_get_rate(clki->clk);
+ }
}
/* If frequency is smaller than 1MHz, set to 1MHz */
@@ -495,70 +572,247 @@ out:
return ret;
}
+static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
+ u32 hs, u32 rate, bool update_link_startup_timer)
+{
+ return __ufs_qcom_cfg_timers(hba, gear, hs, rate,
+ update_link_startup_timer, false);
+}
+
+static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+ u32 unipro_ver;
+ int err = 0;
+
+ if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
+ dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* make sure RX LineCfg is enabled before link startup */
+ err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
+ if (err)
+ goto out;
+
+ if (ufs_qcom_cap_qunipro(host)) {
+ /*
+ * set unipro core clock cycles to 150 & clear clock divider
+ */
+ err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+ if (err)
+ goto out;
+ }
+
+ err = ufs_qcom_enable_hw_clk_gating(hba);
+ if (err)
+ goto out;
+
+ /*
+ * Some UFS devices (and may be host) have issues if LCC is
+ * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
+ * before link startup which will make sure that both host
+ * and device TX LCC are disabled once link startup is
+ * completed.
+ */
+ unipro_ver = ufshcd_get_local_unipro_ver(hba);
+ if (unipro_ver != UFS_UNIPRO_VER_1_41)
+ err = ufshcd_dme_set(hba,
+ UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
+ 0);
+ if (err)
+ goto out;
+
+ if (!ufs_qcom_cap_qunipro_clk_gating(host))
+ goto out;
+
+ /* Enable all the mask bits */
+ err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
+ SAVECONFIGTIME_MODE_MASK,
+ PA_VS_CONFIG_REG1);
+out:
+ return err;
+}
+
+static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+ u32 tx_lanes;
+ int err = 0;
+
+ err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
+ if (err) {
+ dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
+ __func__);
+ goto out;
+ }
+
+ /*
+ * Some UFS devices send incorrect LineCfg data as part of power mode
+ * change sequence which may cause host PHY to go into bad state.
+ * Disabling Rx LineCfg of host PHY should help avoid this.
+ */
+ if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
+ err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
+ if (err) {
+ dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
+ __func__);
+ goto out;
+ }
+
+ /*
+ * The UFS controller has a *clk_req output to GCC for each of the
+ * clocks entering it. When *clk_req for a specific clock is de-asserted,
+ * a corresponding clock from GCC is stopped. UFS controller de-asserts
+ * *clk_req outputs when it is in Auto Hibernate state only if the
+ * Clock request feature is enabled.
+ * Enable the Clock request feature:
+ * - Enable HW clock control for UFS clocks in GCC (handled by the
+ * clock driver as part of clk_prepare_enable).
+ * - Set the AH8_CFG.*CLK_REQ register bits to 1.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
+ UFS_HW_CLK_CTRL_EN,
+ UFS_AH8_CFG);
+ /*
+ * Make sure clock request feature gets enabled for HW clk gating
+ * before further operations.
+ */
+ mb();
+
+out:
+ return err;
+}
+
static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
int err = 0;
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
switch (status) {
case PRE_CHANGE:
- if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
- 0, true)) {
- dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
- __func__);
- err = -EINVAL;
- goto out;
- }
-
- if (ufs_qcom_cap_qunipro(host))
- /*
- * set unipro core clock cycles to 150 & clear clock
- * divider
- */
- err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
- 150);
-
+ err = ufs_qcom_link_startup_pre_change(hba);
break;
case POST_CHANGE:
- ufs_qcom_link_startup_post_change(hba);
+ err = ufs_qcom_link_startup_post_change(hba);
break;
default:
break;
}
-out:
return err;
}
-static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+
+static int ufs_qcom_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
int ret = 0;
+ struct regulator *reg;
+ int min_uV, uA_load;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
-
- /* Assert PHY soft reset */
- ufs_qcom_assert_reset(hba);
+ if (!vreg) {
+ WARN_ON(1);
+ ret = -EINVAL;
goto out;
}
+ reg = vreg->reg;
+ if (regulator_count_voltages(reg) > 0) {
+ uA_load = on ? vreg->max_uA : 0;
+ ret = regulator_set_load(vreg->reg, uA_load);
+ if (ret)
+ goto out;
+
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, vreg->name, ret);
+ goto out;
+ }
+ }
+out:
+ return ret;
+}
+
+static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (vreg->enabled)
+ return ret;
+
+ ret = ufs_qcom_config_vreg(dev, vreg, true);
+ if (ret)
+ goto out;
+
+ ret = regulator_enable(vreg->reg);
+ if (ret)
+ goto out;
+
+ vreg->enabled = true;
+out:
+ return ret;
+}
+
+static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg->enabled)
+ return ret;
+
+ ret = regulator_disable(vreg->reg);
+ if (ret)
+ goto out;
+
+ ret = ufs_qcom_config_vreg(dev, vreg, false);
+ if (ret)
+ goto out;
+
+ vreg->enabled = false;
+out:
+ return ret;
+}
+
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+ int ret = 0;
+
/*
- * If UniPro link is not active, PHY ref_clk, main PHY analog power
- * rail and low noise analog power rail for PLL can be switched off.
+ * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
+ * power rail and low noise analog power rail for PLL can be
+ * switched off.
*/
if (!ufs_qcom_is_link_active(hba)) {
ufs_qcom_disable_lane_clks(host);
phy_power_off(phy);
+
+ if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
+ ret = ufs_qcom_disable_vreg(hba->dev,
+ host->vddp_ref_clk);
+ ufs_qcom_ice_suspend(host);
+
+ if (ufs_qcom_is_link_off(hba)) {
+ /* Assert PHY soft reset */
+ ufs_qcom_assert_reset(hba);
+ goto out;
+ }
}
+ /* Unvote PM QoS */
+ ufs_qcom_pm_qos_suspend(host);
out:
return ret;
@@ -577,16 +831,146 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
}
+ if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
+ hba->spm_lvl > UFS_PM_LVL_3))
+ ufs_qcom_enable_vreg(hba->dev,
+ host->vddp_ref_clk);
+
err = ufs_qcom_enable_lane_clks(host);
if (err)
goto out;
+ err = ufs_qcom_ice_resume(host);
+ if (err) {
+ dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
+ __func__, err);
+ goto out;
+ }
+
hba->is_sys_suspended = false;
out:
return err;
}
+static int ufs_qcom_full_reset(struct ufs_hba *hba)
+{
+ int ret = -ENOTSUPP;
+
+ if (!hba->core_reset) {
+ dev_err(hba->dev, "%s: failed, err = %d\n", __func__,
+ ret);
+ goto out;
+ }
+
+ ret = reset_control_assert(hba->core_reset);
+ if (ret) {
+ dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ /*
+ * The hardware requirement for delay between assert/deassert
+ * is at least 3-4 sleep clock (32.7KHz) cycles, which comes to
+ * ~125us (4/32768). To be on the safe side add 200us delay.
+ */
+ usleep_range(200, 210);
+
+ ret = reset_control_deassert(hba->core_reset);
+ if (ret)
+ dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n",
+ __func__, ret);
+
+out:
+ return ret;
+}
+
+#ifdef CONFIG_SCSI_UFS_QCOM_ICE
+static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct request *req;
+ int ret;
+
+ if (lrbp->cmd && lrbp->cmd->request)
+ req = lrbp->cmd->request;
+ else
+ return 0;
+
+	/* Use the request LBA, expressed in 4 KB data units, as the DUN value */
+ if (req->bio)
+ *dun = (req->bio->bi_iter.bi_sector) >>
+ UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+
+ ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
+
+ return ret;
+}
+
+static
+int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+ int err = 0;
+
+ if (!host->ice.pdev ||
+ !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
+ goto out;
+
+ err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
+out:
+ return err;
+}
+
+static
+int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, struct request *req)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int err = 0;
+
+ if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
+ goto out;
+
+ err = ufs_qcom_ice_cfg_end(host, req);
+out:
+ return err;
+}
+
+static
+int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int err = 0;
+
+ if (!host->ice.pdev)
+ goto out;
+
+ err = ufs_qcom_ice_reset(host);
+out:
+ return err;
+}
+
+static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (!status)
+ return -EINVAL;
+
+ return ufs_qcom_ice_get_status(host, status);
+}
+#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
+#define ufs_qcom_crypto_req_setup NULL
+#define ufs_qcom_crytpo_engine_cfg_start NULL
+#define ufs_qcom_crytpo_engine_cfg_end NULL
+#define ufs_qcom_crytpo_engine_reset NULL
+#define ufs_qcom_crypto_engine_get_status NULL
+#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
+
struct ufs_qcom_dev_params {
u32 pwm_rx_gear; /* pwm rx gear to work in */
u32 pwm_tx_gear; /* pwm tx gear to work in */
@@ -685,7 +1069,7 @@ static int ufs_qcom_get_pwr_dev_param(struct ufs_qcom_dev_params *qcom_param,
return 0;
}
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef CONFIG_QCOM_BUS_SCALING
static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
const char *speed_mode)
{
@@ -739,7 +1123,7 @@ static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
}
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
{
int err = 0;
@@ -770,7 +1154,7 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
vote = ufs_qcom_get_bus_vote(host, mode);
if (vote >= 0)
- err = ufs_qcom_set_bus_vote(host, vote);
+ err = __ufs_qcom_set_bus_vote(host, vote);
else
err = vote;
@@ -781,6 +1165,35 @@ static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
return err;
}
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int vote, err;
+
+ /*
+	 * If ufs_qcom_init() has not completed yet, simply ignore;
+	 * ufs_qcom_set_bus_vote() will be called again from ufs_qcom_init()
+	 * once initialization is done.
+ */
+ if (!host)
+ return 0;
+
+ if (on) {
+ vote = host->bus_vote.saved_vote;
+ if (vote == host->bus_vote.min_bw_vote)
+ ufs_qcom_update_bus_bw_vote(host);
+ } else {
+ vote = host->bus_vote.min_bw_vote;
+ }
+
+ err = __ufs_qcom_set_bus_vote(host, vote);
+ if (err)
+ dev_err(hba->dev, "%s: set bus vote failed %d\n",
+ __func__, err);
+
+ return err;
+}
+
static ssize_t
show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -851,13 +1264,13 @@ static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
out:
return err;
}
-#else /* CONFIG_MSM_BUS_SCALING */
+#else /* CONFIG_QCOM_BUS_SCALING */
static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
{
return 0;
}
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
+static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
{
return 0;
}
@@ -866,7 +1279,10 @@ static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
{
return 0;
}
-#endif /* CONFIG_MSM_BUS_SCALING */
+static inline void msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+#endif /* CONFIG_QCOM_BUS_SCALING */
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
@@ -953,6 +1369,18 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
ufs_qcom_cap.hs_rx_gear = UFS_HS_G2;
}
+ /*
+ * Platforms using QRBTCv2 phy must limit link to PWM Gear-1
+ * and SLOW mode to successfully bring up the link.
+ */
+ if (!strcmp(ufs_qcom_phy_name(phy), "ufs_phy_qrbtc_v2")) {
+ ufs_qcom_cap.tx_lanes = 1;
+ ufs_qcom_cap.rx_lanes = 1;
+ ufs_qcom_cap.pwm_rx_gear = UFS_PWM_G1;
+ ufs_qcom_cap.pwm_tx_gear = UFS_PWM_G1;
+ ufs_qcom_cap.desired_working_mode = SLOW;
+ }
+
ret = ufs_qcom_get_pwr_dev_param(&ufs_qcom_cap,
dev_max_params,
dev_req_params);
@@ -962,6 +1390,10 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
goto out;
}
+ /* enable the device ref clock before changing to HS mode */
+ if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
+ ufshcd_is_hs_mode(dev_req_params))
+ ufs_qcom_dev_ref_clk_ctrl(host, true);
break;
case POST_CHANGE:
if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
@@ -989,6 +1421,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
ufs_qcom_update_bus_bw_vote(host);
+
+ /* disable the device ref clock if entered PWM mode */
+ if (ufshcd_is_hs_mode(&hba->pwr_info) &&
+ !ufshcd_is_hs_mode(dev_req_params))
+ ufs_qcom_dev_ref_clk_ctrl(host, false);
break;
default:
ret = -EINVAL;
@@ -998,6 +1435,34 @@ out:
return ret;
}
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+ int err;
+ u32 pa_vs_config_reg1;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ &pa_vs_config_reg1);
+ if (err)
+ goto out;
+
+ /* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+ (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+ return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+ err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+ return err;
+}
+
static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1021,12 +1486,12 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- if (host->hw_ver.major == 0x01) {
- hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
- | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
- | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+ if (host->hw_ver.major == 0x1) {
+ hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+ | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
+ | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);
- if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
+ if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
@@ -1041,34 +1506,59 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
}
+
+ if (host->disable_lpm)
+ hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
}
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ if (!host->disable_lpm) {
+ hba->caps |= UFSHCD_CAP_CLK_GATING;
+ hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+ hba->caps |= UFSHCD_CAP_CLK_SCALING;
+ }
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
if (host->hw_ver.major >= 0x2) {
+ if (!host->disable_lpm)
+ hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
host->caps = UFS_QCOM_CAP_QUNIPRO |
UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
}
+ if (host->hw_ver.major >= 0x3) {
+ host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
+ /*
+		 * The UFS PHY attached to the v3.0.0 controller supports entering
+		 * the deeper SVS2 low power state, which lets the controller run
+		 * at much lower clock frequencies to save power. Assume this and
+		 * any future revision of the controller support this capability.
+		 * Revisit this assumption if a future platform with this core
+		 * doesn't support it, as running at lower frequencies would bring
+		 * no benefit then.
+ */
+ host->caps |= UFS_QCOM_CAP_SVS2;
+ }
}
/**
* ufs_qcom_setup_clocks - enables/disable clocks
* @hba: host controller instance
* @on: If true, enable clocks else disable them.
+ * @is_gating_context: If true, this function is called from the aggressive
+ * clock gating context and we may only need to gate off the important
+ * clocks. If false, make sure to gate off all clocks.
*
* Returns 0 on success, non-zero on failure.
*/
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+ bool is_gating_context)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
- int vote = 0;
/*
* In case ufs_qcom_init() is not yet done, simply ignore.
@@ -1090,30 +1580,426 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
ufs_qcom_phy_disable_iface_clk(host->generic_phy);
goto out;
}
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
+		/* enable the device ref clock for HS mode */
+ if (ufshcd_is_hs_mode(&hba->pwr_info))
+ ufs_qcom_dev_ref_clk_ctrl(host, true);
+ err = ufs_qcom_ice_resume(host);
+ if (err)
+ goto out;
} else {
+ err = ufs_qcom_ice_suspend(host);
+ if (err)
+ goto out;
/* M-PHY RMMI interface clocks can be turned off */
ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba))
+ /*
+ * If auto hibern8 is supported then the link will already
+ * be in hibern8 state and the ref clock can be gated.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufs_qcom_is_link_active(hba)) {
+ /* turn off UFS local PHY ref_clk */
+ ufs_qcom_phy_disable_ref_clk(host->generic_phy);
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
-
- vote = host->bus_vote.min_bw_vote;
+ }
}
- err = ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
-
out:
return err;
}
+#ifdef CONFIG_SMP /* CONFIG_SMP */
+static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
+{
+ int i;
+
+ if (cpu >= 0 && cpu < num_possible_cpus())
+ for (i = 0; i < host->pm_qos.num_groups; i++)
+ if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
+ return i;
+
+ return host->pm_qos.default_cpu;
+}
+
+static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
+{
+ unsigned long flags;
+ struct ufs_qcom_host *host;
+ struct ufs_qcom_pm_qos_cpu_group *group;
+
+ if (!hba || !req)
+ return;
+
+ host = ufshcd_get_variant(hba);
+ if (!host->pm_qos.groups)
+ return;
+
+ group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!host->pm_qos.is_enabled)
+ goto out;
+
+ group->active_reqs++;
+ if (group->state != PM_QOS_REQ_VOTE &&
+ group->state != PM_QOS_VOTED) {
+ group->state = PM_QOS_REQ_VOTE;
+ queue_work(host->pm_qos.workq, &group->vote_work);
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+/* hba->host->host_lock is assumed to be held by caller */
+static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
+{
+ struct ufs_qcom_pm_qos_cpu_group *group;
+
+ if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
+ return;
+
+ group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
+
+ if (--group->active_reqs)
+ return;
+ group->state = PM_QOS_REQ_UNVOTE;
+ queue_work(host->pm_qos.workq, &group->unvote_work);
+}
+
+static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
+ bool should_lock)
+{
+ unsigned long flags = 0;
+
+ if (!hba || !req)
+ return;
+
+ if (should_lock)
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
+ if (should_lock)
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
+{
+ struct ufs_qcom_pm_qos_cpu_group *group =
+ container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
+ struct ufs_qcom_host *host = group->host;
+ unsigned long flags;
+
+ spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+ if (!host->pm_qos.is_enabled || !group->active_reqs) {
+ spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+ return;
+ }
+
+ group->state = PM_QOS_VOTED;
+ spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+ pm_qos_update_request(&group->req, group->latency_us);
+}
+
+static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
+{
+ struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
+ struct ufs_qcom_pm_qos_cpu_group, unvote_work);
+ struct ufs_qcom_host *host = group->host;
+ unsigned long flags;
+
+ /*
+ * Check if new requests were submitted in the meantime and do not
+ * unvote if so.
+ */
+ spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+ if (!host->pm_qos.is_enabled || group->active_reqs) {
+ spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+ return;
+ }
+
+ group->state = PM_QOS_UNVOTED;
+ spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+ pm_qos_update_request_timeout(&group->req,
+ group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
+}
+
+static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
+}
+
+static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ unsigned long value;
+ unsigned long flags;
+ bool enable;
+ int i;
+
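+	/*
+	 * The input is treated as a boolean: e.g. writing "0" disables PM QoS
+	 * voting, any non-zero value enables it.
+	 */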
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ enable = !!value;
+
+ /*
+ * Must take the spinlock and save irqs before changing the enabled
+ * flag in order to keep correctness of PM QoS release.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (enable == host->pm_qos.is_enabled) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+ }
+ host->pm_qos.is_enabled = enable;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (!enable)
+ for (i = 0; i < host->pm_qos.num_groups; i++) {
+ cancel_work_sync(&host->pm_qos.groups[i].vote_work);
+ cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
+ host->pm_qos.groups[i].active_reqs = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ pm_qos_update_request(&host->pm_qos.groups[i].req,
+ PM_QOS_DEFAULT_VALUE);
+ }
+
+ return count;
+}
+
+static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ int ret;
+ int i;
+ int offset = 0;
+
+ for (i = 0; i < host->pm_qos.num_groups; i++) {
+		ret = snprintf(&buf[offset], PAGE_SIZE - offset,
+ "cpu group #%d(mask=0x%lx): %d\n", i,
+ host->pm_qos.groups[i].mask.bits[0],
+ host->pm_qos.groups[i].latency_us);
+ if (ret > 0)
+ offset += ret;
+ else
+ break;
+ }
+
+ return offset;
+}
+
+static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ unsigned long value;
+ unsigned long flags;
+ char *strbuf;
+ char *strbuf_copy;
+ char *token;
+ int i;
+ int ret;
+
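+	/*
+	 * The input is a comma separated list of latency values in
+	 * microseconds, one per CPU group, e.g. "100,200" for two groups.
+	 */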
+ /* reserve one byte for null termination */
+ strbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!strbuf)
+ return -ENOMEM;
+ strbuf_copy = strbuf;
+ strlcpy(strbuf, buf, count + 1);
+
+ for (i = 0; i < host->pm_qos.num_groups; i++) {
+ token = strsep(&strbuf, ",");
+ if (!token)
+ break;
+
+ ret = kstrtoul(token, 0, &value);
+ if (ret)
+ break;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ host->pm_qos.groups[i].latency_us = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ kfree(strbuf_copy);
+ return count;
+}
+
+static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
+{
+ struct device_node *node = host->hba->dev->of_node;
+ struct device_attribute *attr;
+ int ret = 0;
+ int num_groups;
+ int num_values;
+ char wq_name[sizeof("ufs_pm_qos_00")];
+ int i;
+
+ num_groups = of_property_count_u32_elems(node,
+ "qcom,pm-qos-cpu-groups");
+ if (num_groups <= 0)
+ goto no_pm_qos;
+
+ num_values = of_property_count_u32_elems(node,
+ "qcom,pm-qos-cpu-group-latency-us");
+ if (num_values <= 0)
+ goto no_pm_qos;
+
+ if (num_values != num_groups || num_groups > num_possible_cpus()) {
+ dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
+ __func__, num_groups, num_values, num_possible_cpus());
+ goto no_pm_qos;
+ }
+
+ host->pm_qos.num_groups = num_groups;
+ host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
+ sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
+ if (!host->pm_qos.groups)
+ return -ENOMEM;
+
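+	/*
+	 * Example (hypothetical values) of a matching DT configuration with
+	 * two CPU groups:
+	 *	qcom,pm-qos-cpu-groups = <0x0f 0xf0>;
+	 *	qcom,pm-qos-cpu-group-latency-us = <200 300>;
+	 * Each mask describes one group and the latency entry at the same
+	 * index applies to that group.
+	 */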
+ for (i = 0; i < host->pm_qos.num_groups; i++) {
+ u32 mask;
+
+ ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
+ i, &mask);
+ if (ret)
+ goto free_groups;
+ host->pm_qos.groups[i].mask.bits[0] = mask;
+ if (!cpumask_subset(&host->pm_qos.groups[i].mask,
+ cpu_possible_mask)) {
+ dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
+ __func__, mask);
+ goto free_groups;
+ }
+
+ ret = of_property_read_u32_index(node,
+ "qcom,pm-qos-cpu-group-latency-us", i,
+ &host->pm_qos.groups[i].latency_us);
+ if (ret)
+ goto free_groups;
+
+ host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
+ host->pm_qos.groups[i].req.cpus_affine =
+ host->pm_qos.groups[i].mask;
+ host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
+ host->pm_qos.groups[i].active_reqs = 0;
+ host->pm_qos.groups[i].host = host;
+
+ INIT_WORK(&host->pm_qos.groups[i].vote_work,
+ ufs_qcom_pm_qos_vote_work);
+ INIT_WORK(&host->pm_qos.groups[i].unvote_work,
+ ufs_qcom_pm_qos_unvote_work);
+ }
+
+ ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
+ &host->pm_qos.default_cpu);
+ if (ret || host->pm_qos.default_cpu > num_possible_cpus())
+ host->pm_qos.default_cpu = 0;
+
+ /*
+	 * Use a single-threaded workqueue to ensure work submitted to the queue
+ * is performed in order. Consider the following 2 possible cases:
+ *
+ * 1. A new request arrives and voting work is scheduled for it. Before
+ * the voting work is performed the request is finished and unvote
+ * work is also scheduled.
+ * 2. A request is finished and unvote work is scheduled. Before the
+ * work is performed a new request arrives and voting work is also
+ * scheduled.
+ *
+ * In both cases a vote work and unvote work wait to be performed.
+ * If ordering is not guaranteed, then the end state might be the
+ * opposite of the desired state.
+ */
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
+ host->hba->host->host_no);
+ host->pm_qos.workq = create_singlethread_workqueue(wq_name);
+ if (!host->pm_qos.workq) {
+ dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
+ __func__);
+ ret = -ENOMEM;
+ goto free_groups;
+ }
+
+ /* Initialization was ok, add all PM QoS requests */
+ for (i = 0; i < host->pm_qos.num_groups; i++)
+ pm_qos_add_request(&host->pm_qos.groups[i].req,
+ PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+ /* PM QoS latency sys-fs attribute */
+ attr = &host->pm_qos.latency_attr;
+ attr->show = ufs_qcom_pm_qos_latency_show;
+ attr->store = ufs_qcom_pm_qos_latency_store;
+ sysfs_attr_init(&attr->attr);
+ attr->attr.name = "pm_qos_latency_us";
+ attr->attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(host->hba->var->dev, attr))
+ dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
+
+ /* PM QoS enable sys-fs attribute */
+ attr = &host->pm_qos.enable_attr;
+ attr->show = ufs_qcom_pm_qos_enable_show;
+ attr->store = ufs_qcom_pm_qos_enable_store;
+ sysfs_attr_init(&attr->attr);
+ attr->attr.name = "pm_qos_enable";
+ attr->attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(host->hba->var->dev, attr))
+ dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
+
+ host->pm_qos.is_enabled = true;
+
+ return 0;
+
+free_groups:
+ kfree(host->pm_qos.groups);
+no_pm_qos:
+ host->pm_qos.groups = NULL;
+ return ret ? ret : -ENOTSUPP;
+}
+
+static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
+{
+ int i;
+
+ if (!host->pm_qos.groups)
+ return;
+
+ for (i = 0; i < host->pm_qos.num_groups; i++)
+ flush_work(&host->pm_qos.groups[i].unvote_work);
+}
+
+static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
+{
+ int i;
+
+ if (!host->pm_qos.groups)
+ return;
+
+ for (i = 0; i < host->pm_qos.num_groups; i++)
+ pm_qos_remove_request(&host->pm_qos.groups[i].req);
+ destroy_workqueue(host->pm_qos.workq);
+
+ kfree(host->pm_qos.groups);
+ host->pm_qos.groups = NULL;
+}
+#endif /* CONFIG_SMP */
+
#define ANDROID_BOOT_DEV_MAX 30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
@@ -1126,6 +2012,69 @@ static int __init get_android_boot_dev(char *str)
__setup("androidboot.bootdevice=", get_android_boot_dev);
#endif
+/*
+ * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
+ */
+static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
+{
+ struct device_node *node = host->hba->dev->of_node;
+
+ host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
+ if (host->disable_lpm)
+ pr_info("%s: will disable all LPM modes\n", __func__);
+}
+
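+/*
+ * ufs_qcom_parse_reg_info - parse an optional regulator described in DT.
+ *
+ * For a given @name the device node is expected to carry "<name>-supply"
+ * and "<name>-max-microamp" properties. For the "qcom,vddp-ref-clk" user
+ * below this could look like (hypothetical values):
+ *
+ *	qcom,vddp-ref-clk-supply = <&ufs_phy_ldo>;
+ *	qcom,vddp-ref-clk-max-microamp = <100>;
+ *
+ * Returns -ENODEV when no supply is described, in which case the rail is
+ * assumed to be always enabled.
+ */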
+static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
+ struct ufs_vreg **out_vreg)
+{
+ int ret = 0;
+ char prop_name[MAX_PROP_SIZE];
+ struct ufs_vreg *vreg = NULL;
+ struct device *dev = host->hba->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "%s: non DT initialization\n", __func__);
+ goto out;
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+ if (!of_parse_phandle(np, prop_name, 0)) {
+ dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+ __func__, prop_name);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->name = name;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+ ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+ if (ret) {
+ dev_err(dev, "%s: unable to find %s err %d\n",
+ __func__, prop_name, ret);
+ goto out;
+ }
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+ vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+ vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+
+out:
+ if (!ret)
+ *out_vreg = vreg;
+ return ret;
+}
+
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -1144,9 +2093,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
struct ufs_qcom_host *host;
struct resource *res;
- if (strlen(android_boot_dev) && strcmp(android_boot_dev, dev_name(dev)))
- return -ENODEV;
-
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
if (!host) {
err = -ENOMEM;
@@ -1156,21 +2102,58 @@ static int ufs_qcom_init(struct ufs_hba *hba)
/* Make a two way bind between the qcom host and the hba */
host->hba = hba;
+ spin_lock_init(&host->ice_work_lock);
+
ufshcd_set_variant(hba, host);
- /*
- * voting/devoting device ref_clk source is time consuming hence
- * skip devoting it during aggressive clock gating. This clock
- * will still be gated off during runtime suspend.
- */
+ err = ufs_qcom_ice_get_dev(host);
+ if (err == -EPROBE_DEFER) {
+ /*
+ * UFS driver might be probed before ICE driver does.
+ * In that case we would like to return EPROBE_DEFER code
+ * in order to delay its probing.
+ */
+ dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
+ __func__, err);
+ goto out_host_free;
+
+ } else if (err == -ENODEV) {
+ /*
+ * ICE device is not enabled in DTS file. No need for further
+ * initialization of ICE driver.
+ */
+ dev_warn(dev, "%s: ICE device is not enabled",
+ __func__);
+ } else if (err) {
+ dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
+ __func__, err);
+ goto out_host_free;
+ }
+
host->generic_phy = devm_phy_get(dev, "ufsphy");
- if (IS_ERR(host->generic_phy)) {
+ if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+ /*
+ * UFS driver might be probed before the phy driver does.
+ * In that case we would like to return EPROBE_DEFER code.
+ */
+ err = -EPROBE_DEFER;
+ dev_warn(dev, "%s: required phy device. hasn't probed yet. err = %d\n",
+ __func__, err);
+ goto out_host_free;
+ } else if (IS_ERR(host->generic_phy)) {
err = PTR_ERR(host->generic_phy);
dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
goto out;
}
+ err = ufs_qcom_pm_qos_init(host);
+ if (err)
+ dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
+
+ /* restore the secure configuration */
+ ufs_qcom_update_sec_cfg(hba, true);
+
err = ufs_qcom_bus_register(host);
if (err)
goto out_host_free;
@@ -1206,19 +2189,33 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_phy_save_controller_version(host->generic_phy,
host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
+ err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
+ &host->vddp_ref_clk);
phy_init(host->generic_phy);
err = phy_power_on(host->generic_phy);
if (err)
goto out_unregister_bus;
+ if (host->vddp_ref_clk) {
+ err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
+ if (err) {
+ dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
+ __func__, err);
+ goto out_disable_phy;
+ }
+ }
err = ufs_qcom_init_lane_clks(host);
if (err)
- goto out_disable_phy;
+ goto out_disable_vddp;
+ ufs_qcom_parse_lpm(host);
+ if (host->disable_lpm)
+ pm_runtime_forbid(host->hba->dev);
ufs_qcom_set_caps(hba);
ufs_qcom_advertise_quirks(hba);
- ufs_qcom_setup_clocks(hba, true);
+ ufs_qcom_set_bus_vote(hba, true);
+ ufs_qcom_setup_clocks(hba, true, false);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
ufs_qcom_hosts[hba->dev->id] = host;
@@ -1234,10 +2231,14 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out;
+out_disable_vddp:
+ if (host->vddp_ref_clk)
+ ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
phy_exit(host->generic_phy);
+ msm_bus_scale_unregister_client(host->bus_vote.client_handle);
out_host_free:
devm_kfree(dev, host);
ufshcd_set_variant(hba, NULL);
@@ -1249,8 +2250,10 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ msm_bus_scale_unregister_client(host->bus_vote.client_handle);
ufs_qcom_disable_lane_clks(host);
phy_power_off(host->generic_phy);
+ ufs_qcom_pm_qos_remove(host);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
@@ -1281,105 +2284,292 @@ out:
return err;
}
-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
+static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
{
- /* nothing to do as of now */
- return 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+ int err = 0;
+
+ /* The default low power mode configuration is SVS2 */
+ if (!ufs_qcom_cap_svs2(host))
+ goto out;
+
+ if (!((host->hw_ver.major == 0x3) &&
+ (host->hw_ver.minor == 0x0) &&
+ (host->hw_ver.step == 0x0)))
+ goto out;
+
+ /*
+ * The link should be put in hibern8 state before
+ * configuring the PHY to enter/exit SVS2 mode.
+ */
+ err = ufshcd_uic_hibern8_enter(hba);
+ if (err)
+ goto out;
+
+ err = ufs_qcom_phy_configure_lpm(phy, enable);
+ if (err)
+ goto out;
+
+ err = ufshcd_uic_hibern8_exit(hba);
+out:
+ return err;
}
-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr *attr = &host->dev_req_params;
+ int err = 0;
if (!ufs_qcom_cap_qunipro(host))
- return 0;
+ goto out;
+
+ err = ufs_qcom_configure_lpm(hba, false);
+ if (err)
+ goto out;
+
+ if (attr)
+ __ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+ attr->hs_rate, false, true);
/* set unipro core clock cycles to 150 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+ err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+out:
+ return err;
}
static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int err;
- u32 core_clk_ctrl_reg;
if (!ufs_qcom_cap_qunipro(host))
return 0;
- err = ufshcd_dme_get(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- &core_clk_ctrl_reg);
-
- /* make sure CORE_CLK_DIV_EN is cleared */
- if (!err &&
- (core_clk_ctrl_reg & DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT)) {
- core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;
- err = ufshcd_dme_set(hba,
- UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
- core_clk_ctrl_reg);
- }
-
- return err;
+ return ufs_qcom_configure_lpm(hba, true);
}
static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr *attr = &host->dev_req_params;
+ int err = 0;
if (!ufs_qcom_cap_qunipro(host))
return 0;
- /* set unipro core clock cycles to 75 and clear clock divider */
- return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+ if (attr)
+ ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
+ attr->hs_rate, false);
+
+ if (ufs_qcom_cap_svs2(host))
+ /*
+ * For SVS2 set unipro core clock cycles to 37 and
+ * clear clock divider
+ */
+ err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
+ else
+ /*
+ * For SVS set unipro core clock cycles to 75 and
+ * clear clock divider
+ */
+ err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+
+ return err;
}
static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
bool scale_up, enum ufs_notify_change_status status)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
int err = 0;
- if (status == PRE_CHANGE) {
+ switch (status) {
+ case PRE_CHANGE:
if (scale_up)
err = ufs_qcom_clk_scale_up_pre_change(hba);
else
err = ufs_qcom_clk_scale_down_pre_change(hba);
- } else {
- if (scale_up)
- err = ufs_qcom_clk_scale_up_post_change(hba);
- else
+ break;
+ case POST_CHANGE:
+ if (!scale_up)
err = ufs_qcom_clk_scale_down_post_change(hba);
- if (err || !dev_req_params)
- goto out;
-
- ufs_qcom_cfg_timers(hba,
- dev_req_params->gear_rx,
- dev_req_params->pwr_rx,
- dev_req_params->hs_rate,
- false);
ufs_qcom_update_bus_bw_vote(host);
+ break;
+ default:
+ dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
+ err = -EINVAL;
+ break;
}
-out:
return err;
}
+/*
+ * This function should be called to restore the security configuration of UFS
+ * register space after coming out of UFS host core power collapse.
+ *
+ * @hba: host controller instance
+ * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
+ * and set "false" when secure configuration is lost.
+ */
+static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
+{
+ int ret = 0;
+ u64 scm_ret = 0;
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	/* scm command buffer structure */
+ struct msm_scm_cmd_buf {
+ unsigned int device_id;
+ unsigned int spare;
+ } cbuf = {0};
+ #define RESTORE_SEC_CFG_CMD 0x2
+ #define UFS_TZ_DEV_ID 19
+
+ if (!host || !hba->vreg_info.vdd_hba ||
+ !(host->sec_cfg_updated ^ restore_sec_cfg)) {
+ return 0;
+ } else if (host->caps &
+ UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE) {
+ return 0;
+ } else if (!restore_sec_cfg) {
+ /*
+ * Clear the flag so next time when this function is called
+ * with restore_sec_cfg set to true, we can restore the secure
+ * configuration.
+ */
+ host->sec_cfg_updated = false;
+ goto out;
+ } else if (hba->clk_gating.state != CLKS_ON) {
+ /*
+ * Clocks should be ON to restore the host controller secure
+ * configuration.
+ */
+ goto out;
+ }
+
+ /*
+	 * If we are here, the host controller clocks are running, host
+	 * controller power collapse is supported, and the controller has
+	 * just come out of power collapse.
+ */
+ cbuf.device_id = UFS_TZ_DEV_ID;
+ ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
+ if (ret || scm_ret) {
+ dev_dbg(hba->dev, "%s: failed, ret %d scm_ret %llu\n",
+ __func__, ret, scm_ret);
+ if (!ret)
+ ret = scm_ret;
+ } else {
+ host->sec_cfg_updated = true;
+ }
+
+out:
+ dev_dbg(hba->dev, "%s: ip: restore_sec_cfg %d, op: restore_sec_cfg %d, ret %d scm_ret %llu\n",
+ __func__, restore_sec_cfg, host->sec_cfg_updated, ret, scm_ret);
+ return ret;
+}
+
+
+static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ if (ufs_qcom_cap_svs2(host))
+ return UFS_HS_G1;
+	/* By default, SVS is supported at HS G2 frequencies */
+ return UFS_HS_G2;
+}
+
+void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
+ void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
+ char *str, void *priv))
+{
+ u32 reg;
+ struct ufs_qcom_host *host;
+
+ if (unlikely(!hba)) {
+ pr_err("%s: hba is NULL\n", __func__);
+ return;
+ }
+ if (unlikely(!print_fn)) {
+ dev_err(hba->dev, "%s: print_fn is NULL\n", __func__);
+ return;
+ }
+
+ host = ufshcd_get_variant(hba);
+ if (!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_REGS_EN))
+ return;
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_REG_OCSC);
+ print_fn(hba, reg, 44, "UFS_UFS_DBG_RD_REG_OCSC ", priv);
+
+ reg = ufshcd_readl(hba, REG_UFS_CFG1);
+ reg |= UFS_BIT(17);
+ ufshcd_writel(hba, reg, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_EDTL_RAM);
+ print_fn(hba, reg, 32, "UFS_UFS_DBG_RD_EDTL_RAM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_DESC_RAM);
+ print_fn(hba, reg, 128, "UFS_UFS_DBG_RD_DESC_RAM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
+ print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
+
+ /* clear bit 17 - UTP_DBG_RAMS_EN */
+ ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
+ print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UARM);
+ print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UARM ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TXUC);
+ print_fn(hba, reg, 48, "UFS_DBG_RD_REG_TXUC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_RXUC);
+ print_fn(hba, reg, 27, "UFS_DBG_RD_REG_RXUC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_DFC);
+ print_fn(hba, reg, 19, "UFS_DBG_RD_REG_DFC ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TRLUT);
+ print_fn(hba, reg, 34, "UFS_DBG_RD_REG_TRLUT ", priv);
+
+ reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_TMRLUT);
+ print_fn(hba, reg, 9, "UFS_DBG_RD_REG_TMRLUT ", priv);
+}
+
+static void ufs_qcom_enable_test_bus(struct ufs_qcom_host *host)
+{
+ if (host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN) {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN,
+ UFS_REG_TEST_BUS_EN, REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, TEST_BUS_EN, REG_UFS_CFG1);
+ } else {
+ ufshcd_rmwl(host->hba, UFS_REG_TEST_BUS_EN, 0, REG_UFS_CFG1);
+ ufshcd_rmwl(host->hba, TEST_BUS_EN, 0, REG_UFS_CFG1);
+ }
+}
+
static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host)
{
/* provide a legal default configuration */
- host->testbus.select_major = TSTBUS_UAWM;
- host->testbus.select_minor = 1;
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ host->testbus.select_minor = 37;
}
-static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host,
+ u8 select_major, u8 select_minor)
{
- if (host->testbus.select_major >= TSTBUS_MAX) {
+ if (select_major >= TSTBUS_MAX) {
dev_err(host->hba->dev,
"%s: UFS_CFG1[TEST_BUS_SEL} may not equal 0x%05X\n",
- __func__, host->testbus.select_major);
+ __func__, select_major);
return false;
}
@@ -1388,28 +2578,33 @@ static bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host)
* mappings of select_minor, since there is no harm in
* configuring a non-existent select_minor
*/
- if (host->testbus.select_minor > 0x1F) {
+ if (select_minor > 0xFF) {
dev_err(host->hba->dev,
"%s: 0x%05X is not a legal testbus option\n",
- __func__, host->testbus.select_minor);
+ __func__, select_minor);
return false;
}
return true;
}
+/*
+ * The caller of this function must make sure that the controller
+ * is out of runtime suspend and that the appropriate clocks are enabled
+ * (e.g. via pm_runtime_get_sync() and ufshcd_hold()) before calling,
+ * since this function accesses controller registers.
+ */
int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
{
- int reg;
- int offset;
+ int reg = 0;
+ int offset = 0, ret = 0, testbus_sel_offset = 19;
u32 mask = TEST_BUS_SUB_SEL_MASK;
+ unsigned long flags;
+ struct ufs_hba *hba;
if (!host)
return -EINVAL;
-
- if (!ufs_qcom_testbus_cfg_is_ok(host))
- return -EPERM;
-
+ hba = host->hba;
+ spin_lock_irqsave(hba->host->host_lock, flags);
switch (host->testbus.select_major) {
case TSTBUS_UAWM:
reg = UFS_TEST_BUS_CTRL_0;
@@ -1457,7 +2652,8 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
break;
case TSTBUS_UNIPRO:
reg = UFS_UNIPRO_CFG;
- offset = 1;
+ offset = 20;
+ mask = 0xFFF;
break;
/*
* No need for a default case, since
@@ -1466,19 +2662,27 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
*/
}
mask <<= offset;
-
- pm_runtime_get_sync(host->hba->dev);
- ufshcd_hold(host->hba, false);
- ufshcd_rmwl(host->hba, TEST_BUS_SEL,
- (u32)host->testbus.select_major << 19,
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (reg) {
+ ufshcd_rmwl(host->hba, TEST_BUS_SEL,
+ (u32)host->testbus.select_major << testbus_sel_offset,
REG_UFS_CFG1);
- ufshcd_rmwl(host->hba, mask,
+ ufshcd_rmwl(host->hba, mask,
(u32)host->testbus.select_minor << offset,
reg);
- ufshcd_release(host->hba);
- pm_runtime_put_sync(host->hba->dev);
-
- return 0;
+ } else {
+ dev_err(hba->dev, "%s: Problem setting minor\n", __func__);
+ ret = -EINVAL;
+ goto out;
+ }
+ ufs_qcom_enable_test_bus(host);
+ /*
+ * Make sure the test bus configuration is
+ * committed before returning.
+ */
+ mb();
+out:
+ return ret;
}
static void ufs_qcom_testbus_read(struct ufs_hba *hba)
@@ -1486,13 +2690,73 @@ static void ufs_qcom_testbus_read(struct ufs_hba *hba)
ufs_qcom_dump_regs(hba, UFS_TEST_BUS, 1, "UFS_TEST_BUS ");
}
-static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
+static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 *testbus = NULL;
+ int i, nminor = 256, testbus_len = nminor * sizeof(u32);
+
+ testbus = kmalloc(testbus_len, GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ host->testbus.select_major = TSTBUS_UNIPRO;
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ kfree(testbus);
+}
+
+static void ufs_qcom_print_utp_hci_testbus(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 *testbus = NULL;
+ int i, nminor = 32, testbus_len = nminor * sizeof(u32);
+
+ testbus = kmalloc(testbus_len, GFP_KERNEL);
+ if (!testbus)
+ return;
+
+ host->testbus.select_major = TSTBUS_UTP_HCI;
+ for (i = 0; i < nminor; i++) {
+ host->testbus.select_minor = i;
+ ufs_qcom_testbus_config(host);
+ testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
+ }
+ print_hex_dump(KERN_ERR, "UTP_HCI_TEST_BUS ", DUMP_PREFIX_OFFSET,
+ 16, 4, testbus, testbus_len, false);
+ kfree(testbus);
+}
+
+static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba, bool no_sleep)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+
ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
"HCI Vendor Specific Registers ");
+ ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
+
+ if (no_sleep)
+ return;
+	/* sleep a bit intermittently as we are dumping a lot of data */
+ usleep_range(1000, 1100);
ufs_qcom_testbus_read(hba);
+ usleep_range(1000, 1100);
+ ufs_qcom_print_unipro_testbus(hba);
+ usleep_range(1000, 1100);
+ ufs_qcom_print_utp_hci_testbus(hba);
+ usleep_range(1000, 1100);
+ ufs_qcom_phy_dbg_register_dump(phy);
+ usleep_range(1000, 1100);
+ ufs_qcom_ice_print_regs(host);
}
+
/**
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -1500,7 +2764,6 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
* handshake during initialization.
*/
static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
- .name = "qcom",
.init = ufs_qcom_init,
.exit = ufs_qcom_exit,
.get_ufs_hci_version = ufs_qcom_get_ufs_hci_version,
@@ -1509,9 +2772,37 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.hce_enable_notify = ufs_qcom_hce_enable_notify,
.link_startup_notify = ufs_qcom_link_startup_notify,
.pwr_change_notify = ufs_qcom_pwr_change_notify,
+ .apply_dev_quirks = ufs_qcom_apply_dev_quirks,
.suspend = ufs_qcom_suspend,
.resume = ufs_qcom_resume,
+ .full_reset = ufs_qcom_full_reset,
+ .update_sec_cfg = ufs_qcom_update_sec_cfg,
+ .get_scale_down_gear = ufs_qcom_get_scale_down_gear,
+ .set_bus_vote = ufs_qcom_set_bus_vote,
.dbg_register_dump = ufs_qcom_dump_dbg_regs,
+#ifdef CONFIG_DEBUG_FS
+ .add_debugfs = ufs_qcom_dbg_add_debugfs,
+#endif
+};
+
+static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
+ .crypto_req_setup = ufs_qcom_crypto_req_setup,
+ .crypto_engine_cfg_start = ufs_qcom_crytpo_engine_cfg_start,
+ .crypto_engine_cfg_end = ufs_qcom_crytpo_engine_cfg_end,
+ .crypto_engine_reset = ufs_qcom_crytpo_engine_reset,
+ .crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
+};
+
+static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
+ .req_start = ufs_qcom_pm_qos_req_start,
+ .req_end = ufs_qcom_pm_qos_req_end,
+};
+
+static struct ufs_hba_variant ufs_hba_qcom_variant = {
+ .name = "qcom",
+ .vops = &ufs_hba_qcom_vops,
+ .crypto_vops = &ufs_hba_crypto_variant_ops,
+ .pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
};
/**
@@ -1524,9 +2815,27 @@ static int ufs_qcom_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ /*
+	 * On qcom platforms the boot device is the primary storage device and
+	 * can be either eMMC or UFS; the type of device connected is detected
+	 * at runtime. If an eMMC device is connected and this function is
+	 * invoked, it would turn off the regulators once it detects that the
+	 * storage device is not UFS. These regulators are turned on by the
+	 * bootloaders, and turning them off without sending PON may damage
+	 * the connected device. Hence, check for the connected device early
+	 * on and don't turn off the regulators.
+ */
+ if (of_property_read_bool(np, "non-removable") &&
+ strlen(android_boot_dev) &&
+ strcmp(android_boot_dev, dev_name(dev)))
+ return -ENODEV;
/* Perform generic probe */
- err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
+ err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
if (err)
dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 36249b35f858..fd98a3381d61 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,9 +14,14 @@
#ifndef UFS_QCOM_H_
#define UFS_QCOM_H_
+#include <linux/phy/phy.h>
+#include <linux/pm_qos.h>
+#include "ufshcd.h"
+
#define MAX_UFS_QCOM_HOSTS 1
#define MAX_U32 (~(u32)0)
#define MPHY_TX_FSM_STATE 0x41
+#define MPHY_RX_FSM_STATE 0xC1
#define TX_FSM_HIBERN8 0x1
#define HBRN8_POLL_TOUT_MS 100
#define DEFAULT_CLK_RATE_HZ 1000000
@@ -71,6 +76,7 @@ enum {
UFS_AH8_CFG = 0xFC,
};
+
/* QCOM UFS host controller vendor specific debug registers */
enum {
UFS_DBG_RD_REG_UAWM = 0x100,
@@ -94,7 +100,8 @@ enum {
/* bit definitions for REG_UFS_CFG1 register */
#define QUNIPRO_SEL UFS_BIT(0)
#define TEST_BUS_EN BIT(18)
-#define TEST_BUS_SEL GENMASK(22, 19)
+#define TEST_BUS_SEL 0x780000
+#define UFS_REG_TEST_BUS_EN BIT(30)
/* bit definitions for REG_UFS_CFG2 register */
#define UAWM_HW_CGC_EN (1 << 0)
@@ -114,6 +121,17 @@ enum {
DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
+/* bit definitions for UFS_AH8_CFG register */
+#define CC_UFS_HCLK_REQ_EN BIT(1)
+#define CC_UFS_SYS_CLK_REQ_EN BIT(2)
+#define CC_UFS_ICE_CORE_CLK_REQ_EN BIT(3)
+#define CC_UFS_UNIPRO_CORE_CLK_REQ_EN BIT(4)
+#define CC_UFS_AUXCLK_REQ_EN BIT(5)
+
+#define UFS_HW_CLK_CTRL_EN (CC_UFS_SYS_CLK_REQ_EN |\
+ CC_UFS_ICE_CORE_CLK_REQ_EN |\
+ CC_UFS_UNIPRO_CORE_CLK_REQ_EN |\
+ CC_UFS_AUXCLK_REQ_EN)
/* bit offset */
enum {
OFFSET_UFS_PHY_SOFT_RESET = 1,
@@ -142,10 +160,20 @@ enum ufs_qcom_phy_init_type {
UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
/* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1 0x9000
+#define SAVECONFIGTIME_MODE_MASK 0x6000
+
+#define PA_VS_CLK_CFG_REG 0x9004
+#define PA_VS_CLK_CFG_REG_MASK 0x1FF
+
+#define DL_VS_CLK_CFG 0xA00B
+#define DL_VS_CLK_CFG_MASK 0x3FF
+
#define DME_VS_CORE_CLK_CTRL 0xD002
/* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
-#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
#define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK 0xFF
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT BIT(8)
+#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
static inline void
ufs_qcom_get_controller_revision(struct ufs_hba *hba,
@@ -192,6 +220,26 @@ struct ufs_qcom_bus_vote {
struct device_attribute max_bus_bw;
};
+/**
+ * struct ufs_qcom_ice_data - ICE related information
+ * @vops: pointer to variant operations of ICE
+ * @pdev: pointer to the proper ICE platform device
+ * @state: UFS-ICE interface's internal state (see
+ * ufs-qcom-ice.h for possible internal states)
+ * @quirks: UFS-ICE interface related quirks
+ * @crypto_engine_err: crypto engine errors
+ */
+struct ufs_qcom_ice_data {
+ struct qcom_ice_variant_ops *vops;
+ struct platform_device *pdev;
+ int state;
+
+ u16 quirks;
+
+ bool crypto_engine_err;
+};
+
/* Host controller hardware version: major.minor.step */
struct ufs_hw_version {
u16 step;
@@ -199,11 +247,76 @@ struct ufs_hw_version {
u8 major;
};
+#ifdef CONFIG_DEBUG_FS
+struct qcom_debugfs_files {
+ struct dentry *debugfs_root;
+ struct dentry *dbg_print_en;
+ struct dentry *testbus;
+ struct dentry *testbus_en;
+ struct dentry *testbus_cfg;
+ struct dentry *testbus_bus;
+ struct dentry *dbg_regs;
+ struct dentry *pm_qos;
+};
+#endif
+
struct ufs_qcom_testbus {
u8 select_major;
u8 select_minor;
};
+/* PM QoS voting state */
+enum ufs_qcom_pm_qos_state {
+ PM_QOS_UNVOTED,
+ PM_QOS_VOTED,
+ PM_QOS_REQ_VOTE,
+ PM_QOS_REQ_UNVOTE,
+};
+
+/**
+ * struct ufs_qcom_pm_qos_cpu_group - data related to cluster PM QoS voting
+ * logic
+ * @req: request object for PM QoS
+ * @vote_work: work object for voting procedure
+ * @unvote_work: work object for un-voting procedure
+ * @host: back pointer to the main structure
+ * @state: voting state machine current state
+ * @latency_us: requested latency value used for cluster voting, in
+ * microseconds
+ * @mask: cpu mask defined for this cluster
+ * @active_reqs: number of active requests on this cluster
+ */
+struct ufs_qcom_pm_qos_cpu_group {
+ struct pm_qos_request req;
+ struct work_struct vote_work;
+ struct work_struct unvote_work;
+ struct ufs_qcom_host *host;
+ enum ufs_qcom_pm_qos_state state;
+ s32 latency_us;
+ cpumask_t mask;
+ int active_reqs;
+};
+
+/**
+ * struct ufs_qcom_pm_qos - data related to PM QoS voting logic
+ * @groups: PM QoS cpu group state array
+ * @enable_attr: sysfs attribute to enable/disable PM QoS voting logic
+ * @latency_attr: sysfs attribute to set latency value
+ * @workq: single threaded workqueue to run PM QoS voting/unvoting
+ * @num_groups: number of CPU groups defined
+ * @default_cpu: cpu to use for voting for requests not specifying a cpu
+ * @is_enabled: flag specifying whether voting logic is enabled
+ */
+struct ufs_qcom_pm_qos {
+ struct ufs_qcom_pm_qos_cpu_group *groups;
+ struct device_attribute enable_attr;
+ struct device_attribute latency_attr;
+ struct workqueue_struct *workq;
+ int num_groups;
+ int default_cpu;
+ bool is_enabled;
+};
+
struct ufs_qcom_host {
/*
* Set this capability if host controller supports the QUniPro mode
@@ -218,6 +331,17 @@ struct ufs_qcom_host {
* configuration even after UFS controller core power collapse.
*/
#define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE UFS_BIT(1)
+
+ /*
+ * Set this capability if host controller supports Qunipro internal
+ * clock gating.
+ */
+ #define UFS_QCOM_CAP_QUNIPRO_CLK_GATING UFS_BIT(2)
+
+ /*
+ * Set this capability if host controller supports SVS2 frequencies.
+ */
+ #define UFS_QCOM_CAP_SVS2 UFS_BIT(3)
u32 caps;
struct phy *generic_phy;
@@ -228,24 +352,51 @@ struct ufs_qcom_host {
struct clk *tx_l0_sync_clk;
struct clk *rx_l1_sync_clk;
struct clk *tx_l1_sync_clk;
- bool is_lane_clks_enabled;
+ /* PM Quality-of-Service (QoS) data */
+ struct ufs_qcom_pm_qos pm_qos;
+
+ bool disable_lpm;
+ bool is_lane_clks_enabled;
+ bool sec_cfg_updated;
+ struct ufs_qcom_ice_data ice;
void __iomem *dev_ref_clk_ctrl_mmio;
bool is_dev_ref_clk_enabled;
struct ufs_hw_version hw_ver;
-
u32 dev_ref_clk_en_mask;
-
+#ifdef CONFIG_DEBUG_FS
+ struct qcom_debugfs_files debugfs_files;
+#endif
/* Bitmask for enabling debug prints */
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
+
+ spinlock_t ice_work_lock;
+ struct work_struct ice_cfg_work;
+ struct request *req_pending;
+ struct ufs_vreg *vddp_ref_clk;
+ bool work_pending;
+};
+
+static inline u32
+ufs_qcom_get_debug_reg_offset(struct ufs_qcom_host *host, u32 reg)
+{
+ if (host->hw_ver.major <= 0x02)
+ return UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(reg);
+
+ return UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(reg);
};
#define ufs_qcom_is_link_off(hba) ufshcd_is_link_off(hba)
#define ufs_qcom_is_link_active(hba) ufshcd_is_link_active(hba)
#define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
+bool ufs_qcom_testbus_cfg_is_ok(struct ufs_qcom_host *host, u8 select_major,
+ u8 select_minor);
int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
+void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
+ void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
+ char *str, void *priv));
static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
{
@@ -255,4 +406,14 @@ static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
return false;
}
+static inline bool ufs_qcom_cap_qunipro_clk_gating(struct ufs_qcom_host *host)
+{
+ return !!(host->caps & UFS_QCOM_CAP_QUNIPRO_CLK_GATING);
+}
+
+static inline bool ufs_qcom_cap_svs2(struct ufs_qcom_host *host)
+{
+ return !!(host->caps & UFS_QCOM_CAP_SVS2);
+}
+
#endif /* UFS_QCOM_H_ */
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index ce5234555cc9..7fe25d526dbe 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -38,11 +38,13 @@
#include <linux/mutex.h>
#include <linux/types.h>
+#include <scsi/ufs/ufs.h>
#define MAX_CDB_SIZE 16
#define GENERAL_UPIU_REQUEST_SIZE 32
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
+#define QUERY_DESC_HDR_SIZE 2
#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
(sizeof(struct utp_upiu_header)))
#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18
@@ -72,6 +74,16 @@ enum {
UFS_UPIU_RPMB_WLUN = 0xC4,
};
+/**
+ * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @lun: LU number to check
+ * @return: true if the lun has a matching unit descriptor, false otherwise
+ */
+static inline bool ufs_is_valid_unit_desc_lun(u8 lun)
+{
+ return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN);
+}
+
/*
* UFS Protocol Information Unit related definitions
*/
@@ -127,42 +139,13 @@ enum {
UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
};
-/* Flag idn for Query Requests*/
-enum flag_idn {
- QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
- QUERY_FLAG_IDN_PWR_ON_WPE = 0x03,
- QUERY_FLAG_IDN_BKOPS_EN = 0x04,
-};
-
-/* Attribute idn for Query requests */
-enum attr_idn {
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL = 0x03,
- QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
- QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
- QUERY_ATTR_IDN_EE_STATUS = 0x0E,
-};
-
-/* Descriptor idn for Query requests */
-enum desc_idn {
- QUERY_DESC_IDN_DEVICE = 0x0,
- QUERY_DESC_IDN_CONFIGURAION = 0x1,
- QUERY_DESC_IDN_UNIT = 0x2,
- QUERY_DESC_IDN_RFU_0 = 0x3,
- QUERY_DESC_IDN_INTERCONNECT = 0x4,
- QUERY_DESC_IDN_STRING = 0x5,
- QUERY_DESC_IDN_RFU_1 = 0x6,
- QUERY_DESC_IDN_GEOMETRY = 0x7,
- QUERY_DESC_IDN_POWER = 0x8,
- QUERY_DESC_IDN_MAX,
-};
-
enum desc_header_offset {
QUERY_DESC_LENGTH_OFFSET = 0x00,
QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
};
enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x1F,
+ QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
QUERY_DESC_UNIT_MAX_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
@@ -173,6 +156,7 @@ enum ufs_desc_max_size {
QUERY_DESC_STRING_MAX_SIZE = 0xFE,
QUERY_DESC_GEOMETRY_MAZ_SIZE = 0x44,
QUERY_DESC_POWER_MAX_SIZE = 0x62,
+ QUERY_DESC_HEALTH_MAX_SIZE = 0x25,
QUERY_DESC_RFU_MAX_SIZE = 0x00,
};
@@ -196,6 +180,46 @@ enum unit_desc_param {
UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1 = 0x22,
};
+/* Device descriptor parameters offsets in bytes*/
+enum device_desc_param {
+ DEVICE_DESC_PARAM_LEN = 0x0,
+ DEVICE_DESC_PARAM_TYPE = 0x1,
+ DEVICE_DESC_PARAM_DEVICE_TYPE = 0x2,
+ DEVICE_DESC_PARAM_DEVICE_CLASS = 0x3,
+ DEVICE_DESC_PARAM_DEVICE_SUB_CLASS = 0x4,
+ DEVICE_DESC_PARAM_PRTCL = 0x5,
+ DEVICE_DESC_PARAM_NUM_LU = 0x6,
+ DEVICE_DESC_PARAM_NUM_WLU = 0x7,
+ DEVICE_DESC_PARAM_BOOT_ENBL = 0x8,
+ DEVICE_DESC_PARAM_DESC_ACCSS_ENBL = 0x9,
+ DEVICE_DESC_PARAM_INIT_PWR_MODE = 0xA,
+ DEVICE_DESC_PARAM_HIGH_PR_LUN = 0xB,
+ DEVICE_DESC_PARAM_SEC_RMV_TYPE = 0xC,
+ DEVICE_DESC_PARAM_SEC_LU = 0xD,
+ DEVICE_DESC_PARAM_BKOP_TERM_LT = 0xE,
+ DEVICE_DESC_PARAM_ACTVE_ICC_LVL = 0xF,
+ DEVICE_DESC_PARAM_SPEC_VER = 0x10,
+ DEVICE_DESC_PARAM_MANF_DATE = 0x12,
+ DEVICE_DESC_PARAM_MANF_NAME = 0x14,
+ DEVICE_DESC_PARAM_PRDCT_NAME = 0x15,
+ DEVICE_DESC_PARAM_SN = 0x16,
+ DEVICE_DESC_PARAM_OEM_ID = 0x17,
+ DEVICE_DESC_PARAM_MANF_ID = 0x18,
+ DEVICE_DESC_PARAM_UD_OFFSET = 0x1A,
+ DEVICE_DESC_PARAM_UD_LEN = 0x1B,
+ DEVICE_DESC_PARAM_RTT_CAP = 0x1C,
+ DEVICE_DESC_PARAM_FRQ_RTC = 0x1D,
+};
+
+/* Health descriptor parameters offsets in bytes*/
+enum health_desc_param {
+ HEALTH_DESC_PARAM_LEN = 0x0,
+ HEALTH_DESC_PARAM_TYPE = 0x1,
+ HEALTH_DESC_PARAM_EOL_INFO = 0x2,
+ HEALTH_DESC_PARAM_LIFE_TIME_EST_A = 0x3,
+ HEALTH_DESC_PARAM_LIFE_TIME_EST_B = 0x4,
+};
+
/*
* Logical Unit Write Protect
* 00h: LU not write protected
@@ -248,19 +272,6 @@ enum bkops_status {
BKOPS_STATUS_MAX = BKOPS_STATUS_CRITICAL,
};
-/* UTP QUERY Transaction Specific Fields OpCode */
-enum query_opcode {
- UPIU_QUERY_OPCODE_NOP = 0x0,
- UPIU_QUERY_OPCODE_READ_DESC = 0x1,
- UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
- UPIU_QUERY_OPCODE_READ_ATTR = 0x3,
- UPIU_QUERY_OPCODE_WRITE_ATTR = 0x4,
- UPIU_QUERY_OPCODE_READ_FLAG = 0x5,
- UPIU_QUERY_OPCODE_SET_FLAG = 0x6,
- UPIU_QUERY_OPCODE_CLEAR_FLAG = 0x7,
- UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
-};
-
/* Query response result code */
enum {
QUERY_RESULT_SUCCESS = 0x00,
@@ -470,6 +481,7 @@ struct ufs_vreg {
struct regulator *reg;
const char *name;
bool enabled;
+ bool unused;
int min_uV;
int max_uV;
int min_uA;
diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c
new file mode 100644
index 000000000000..7a501d6d7c84
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_quirks.c
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "ufshcd.h"
+#include "ufs_quirks.h"
+
+
+static struct ufs_card_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_NO_FASTAUTO),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+ UFS_DEVICE_QUIRK_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+ UFS_FIX(UFS_VENDOR_HYNIX, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hB8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hC8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hD8aL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hC8aM1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "h08aM1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hC8GL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+ UFS_FIX(UFS_VENDOR_HYNIX, "hC8HL1",
+ UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH),
+
+ END_FIX
+};
+
+static int ufs_get_device_info(struct ufs_hba *hba,
+ struct ufs_card_info *card_data)
+{
+ int err;
+ u8 model_index;
+ u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1];
+ u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+
+ err = ufshcd_read_device_desc(hba, desc_buf,
+ QUERY_DESC_DEVICE_MAX_SIZE);
+ if (err)
+ goto out;
+
+ /*
+ * getting vendor (manufacturerID) and Bank Index in big endian
+ * format
+ */
+ card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+ desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+ model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+ memset(str_desc_buf, 0, QUERY_DESC_STRING_MAX_SIZE);
+ err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+ QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+ if (err)
+ goto out;
+
+ str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+ strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+ min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+ MAX_MODEL_LEN));
+ /* Null terminate the model string */
+ card_data->model[MAX_MODEL_LEN] = '\0';
+
+out:
+ return err;
+}
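A worked example of the big-endian extraction above (byte values are
illustrative only): if desc_buf[DEVICE_DESC_PARAM_MANF_ID] holds 0x01 and the
following byte holds 0xCE, then wmanufacturerid = (0x01 << 8) | 0xCE = 0x01CE,
which matches the UFS_VENDOR_SAMSUNG value defined in ufs_quirks.h.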
+
+void ufs_advertise_fixup_device(struct ufs_hba *hba)
+{
+ int err;
+ struct ufs_card_fix *f;
+ struct ufs_card_info card_data;
+
+ card_data.wmanufacturerid = 0;
+ card_data.model = kmalloc(MAX_MODEL_LEN + 1, GFP_KERNEL);
+ if (!card_data.model)
+ goto out;
+
+ /* get device data*/
+ err = ufs_get_device_info(hba, &card_data);
+ if (err) {
+ dev_err(hba->dev, "%s: Failed getting device info\n", __func__);
+ goto out;
+ }
+
+ for (f = ufs_fixups; f->quirk; f++) {
+ /* if same wmanufacturerid */
+ if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
+ (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
+ /* and same model */
+ (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
+ !strcmp(f->card.model, UFS_ANY_MODEL)))
+ /* update quirks */
+ hba->dev_quirks |= f->quirk;
+ }
+out:
+ kfree(card_data.model);
+}
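The loop above applies a simple rule: an entry matches when its vendor ID
equals the device's (or is UFS_ANY_VENDOR) and its model string is a prefix of
the device's model (or is UFS_ANY_MODEL). A minimal standalone sketch of that
rule, runnable in userspace and not part of this patch (the vendor IDs and
model strings below are illustrative):

#include <stdio.h>
#include <string.h>

#define ANY_VENDOR -1
#define ANY_MODEL  "ANY_MODEL"

/* entry matches when the vendor matches (or is the wildcard) and the entry's
 * model string is a prefix of the device's model (or is the wildcard) */
static int entry_matches(int entry_vendor, const char *entry_model,
			 int dev_vendor, const char *dev_model)
{
	int vendor_ok = (entry_vendor == dev_vendor) ||
			(entry_vendor == ANY_VENDOR);
	int model_ok = !strncmp(entry_model, dev_model, strlen(entry_model)) ||
		       !strcmp(entry_model, ANY_MODEL);

	return vendor_ok && model_ok;
}

int main(void)
{
	/* exact vendor and full-model match */
	printf("%d\n", entry_matches(0x198, "THGLF2G9D8KBADG",
				     0x198, "THGLF2G9D8KBADG"));	/* 1 */
	/* wildcard entry matches any device */
	printf("%d\n", entry_matches(ANY_VENDOR, ANY_MODEL,
				     0x1CE, "EXAMPLE-MODEL"));		/* 1 */
	/* vendor mismatch: no match even though the model prefix fits */
	printf("%d\n", entry_matches(0x1AD, "hB8aL1",
				     0x1CE, "hB8aL1xyz"));		/* 0 */
	return 0;
}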
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
new file mode 100644
index 000000000000..3102517e841c
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UFS_QUIRKS_H_
+#define _UFS_QUIRKS_H_
+
+/* return true if s1 is a prefix of s2 */
+#define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
+
+#define UFS_ANY_VENDOR -1
+#define UFS_ANY_MODEL "ANY_MODEL"
+
+#define MAX_MODEL_LEN 16
+
+#define UFS_VENDOR_TOSHIBA 0x198
+#define UFS_VENDOR_SAMSUNG 0x1CE
+#define UFS_VENDOR_HYNIX 0x1AD
+
+/* UFS TOSHIBA MODELS */
+#define UFS_MODEL_TOSHIBA_32GB "THGLF2G8D4KBADR"
+#define UFS_MODEL_TOSHIBA_64GB "THGLF2G9D8KBADG"
+
+/**
+ * ufs_card_info - ufs device details
+ * @wmanufacturerid: 16-bit manufacturer ID of the card
+ * @model: card model
+ */
+struct ufs_card_info {
+ u16 wmanufacturerid;
+ char *model;
+};
+
+/**
+ * ufs_card_fix - ufs device quirk info
+ * @card: ufs card details
+ * @quirk: device quirk
+ */
+struct ufs_card_fix {
+ struct ufs_card_info card;
+ unsigned int quirk;
+};
+
+#define END_FIX { { 0 } , 0 }
+
+/* add specific device quirk */
+#define UFS_FIX(_vendor, _model, _quirk) \
+ { \
+ .card.wmanufacturerid = (_vendor),\
+ .card.model = (_model), \
+ .quirk = (_quirk), \
+ }
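For illustration only, an entry such as
UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ) expands to the
designated initializer below; the END_FIX sentinel's zero .quirk field is what
terminates the lookup loop in ufs_advertise_fixup_device():

	{
		.card.wmanufacturerid = (UFS_VENDOR_SAMSUNG),
		.card.model = (UFS_ANY_MODEL),
		.quirk = (UFS_DEVICE_NO_VCCQ),
	}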
+
+/*
+ * If the UFS device has issues processing LCC (Line Control
+ * Command) frames coming from the UFS host controller, then enable this quirk.
+ * When this quirk is enabled, the host controller driver should disable
+ * LCC transmission on the UFS host controller (by clearing the
+ * TX_LCC_ENABLE attribute of the host to 0).
+ */
+#define UFS_DEVICE_QUIRK_BROKEN_LCC (1 << 0)
+
+/*
+ * Some UFS devices don't need VCCQ rail for device operations. Enabling this
+ * quirk for such devices will make sure that VCCQ rail is not voted.
+ */
+#define UFS_DEVICE_NO_VCCQ (1 << 1)
+
+/*
+ * Some vendors' UFS devices send back-to-back NACs for the DL data frames,
+ * causing the host controller to raise the DFES error status. Sometimes
+ * such UFS devices send back-to-back NACs without waiting for a new
+ * retransmitted DL frame from the host, and in such cases it is possible that
+ * the host UniPro goes into a bad state without raising the DFES error
+ * interrupt. If this happens, all the pending commands would time out
+ * only after their respective SW command timeouts (which are generally too
+ * large).
+ *
+ * We can workaround such device behaviour like this:
+ * - As soon as SW sees the DL NAC error, it should schedule the error handler
+ * - Error handler would sleep for 50ms to see if there are any fatal errors
+ * raised by UFS controller.
+ * - If there are fatal errors then SW does normal error recovery.
+ * - If there are no fatal errors then SW sends the NOP command to device
+ * to check if link is alive.
+ * - If NOP command times out, SW does normal error recovery
+ * - If NOP command succeed, skip the error handling.
+ *
+ * If DL NAC error is seen multiple times with some vendor's UFS devices then
+ * enable this quirk to initiate quick error recovery and also silence related
+ * error logs to reduce spamming of kernel logs.
+ */
+#define UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS (1 << 2)
+
+/*
+ * Some UFS devices may not work properly after resume if the link was kept
+ * in off state during suspend. Enabling this quirk will not allow the
+ * link to be kept in off state during suspend.
+ */
+#define UFS_DEVICE_QUIRK_NO_LINK_OFF (1 << 3)
+
+/*
+ * A few Toshiba UFS device models advertise RX_MIN_ACTIVATETIME_CAPABILITY as
+ * 600us, which may not be enough for a reliable hibern8 exit hardware sequence
+ * from the UFS device.
+ * To workaround this issue, host should set its PA_TACTIVATE time to 1ms even
+ * if device advertises RX_MIN_ACTIVATETIME_CAPABILITY less than 1ms.
+ */
+#define UFS_DEVICE_QUIRK_PA_TACTIVATE (1 << 4)
+
+/*
+ * Some UFS memory devices may have really low read/write throughput in
+ * FAST AUTO mode, enable this quirk to make sure that FAST AUTO mode is
+ * never enabled for such devices.
+ */
+#define UFS_DEVICE_NO_FASTAUTO (1 << 5)
+
+/*
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; enabling this quirk ensures this.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE (1 << 6)
+
+/*
+ * The max. value of PA_SaveConfigTime is 250 (10us), but this is not enough
+ * for some vendors.
+ * A gear switch from PWM to HS may fail even with this max. PA_SaveConfigTime.
+ * A gear switch can be issued by the host controller as an error recovery, and
+ * any software delay will not help in this case, so we need to increase
+ * PA_SaveConfigTime to >32us as per the vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 7)
+
+/*
+ * Some UFS devices may stop responding after switching from HS-G1 to HS-G3.
+ * Also, it is found that these devices work fine if we do a two-step switch:
+ * HS-G1 to HS-G2 followed by HS-G2 to HS-G3. Enabling this quirk for such
+ * devices applies this two-step gear switch workaround.
+ */
+#define UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH (1 << 8)
+
+struct ufs_hba;
+void ufs_advertise_fixup_device(struct ufs_hba *hba);
+#endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
new file mode 100644
index 000000000000..e23dc3e8d9da
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -0,0 +1,1534 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt"\n"
+
+#include <linux/async.h>
+#include <linux/atomic.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/test-iosched.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/delay.h>
+#include "ufshcd.h"
+#include "ufs.h"
+
+#define MODULE_NAME "ufs_test"
+#define UFS_TEST_BLK_DEV_TYPE_PREFIX "sd"
+
+#define TEST_MAX_BIOS_PER_REQ 128
+#define TEST_DEFAULT_SECTOR_RANGE (1024*1024) /* 512MB */
+#define LARGE_PRIME_1 1103515367
+#define LARGE_PRIME_2 35757
+#define MAGIC_SEED 7
+#define DEFAULT_NUM_OF_BIOS 2
+#define LONG_SEQUENTIAL_MIXED_TIMOUT_MS 100000
+#define THREADS_COMPLETION_TIMOUT msecs_to_jiffies(10000) /* 10 sec */
+#define MAX_PARALLEL_QUERIES 33
+#define RANDOM_REQUEST_THREADS 4
+#define LUN_DEPTH_TEST_SIZE 9
+#define SECTOR_SIZE 512
+#define NUM_UNLUCKY_RETRIES 10
+
+/*
+ * this defines the density of random requests in the address space, and
+ * it represents the ratio between accessed sectors and non-accessed sectors
+ */
+#define LONG_RAND_TEST_REQ_RATIO 64
+/* request queue limitation is 128 requests, and we leave 10 spare requests */
+#define QUEUE_MAX_REQUESTS 118
+#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
+/* actual number of MiB in test multiplied by 10, for single digit precision*/
+#define BYTE_TO_MB_x_10(x) ((x * 10) / (1024 * 1024))
+/* extract integer value */
+#define LONG_TEST_SIZE_INTEGER(x) (BYTE_TO_MB_x_10(x) / 10)
+/* and calculate the MiB value fraction */
+#define LONG_TEST_SIZE_FRACTION(x) (BYTE_TO_MB_x_10(x) - \
+ (LONG_TEST_SIZE_INTEGER(x) * 10))
+/* translation mask from sectors to block */
+#define SECTOR_TO_BLOCK_MASK 0x7
+
+#define TEST_OPS(test_name, upper_case_name) \
+static int ufs_test_ ## test_name ## _show(struct seq_file *file, \
+ void *data) \
+{ return ufs_test_show(file, UFS_TEST_ ## upper_case_name); } \
+static int ufs_test_ ## test_name ## _open(struct inode *inode, \
+ struct file *file) \
+{ return single_open(file, ufs_test_ ## test_name ## _show, \
+ inode->i_private); } \
+static ssize_t ufs_test_ ## test_name ## _write(struct file *file, \
+ const char __user *buf, size_t count, loff_t *ppos) \
+{ return ufs_test_write(file, buf, count, ppos, \
+ UFS_TEST_ ## upper_case_name); } \
+static const struct file_operations ufs_test_ ## test_name ## _ops = { \
+ .open = ufs_test_ ## test_name ## _open, \
+ .read = seq_read, \
+ .write = ufs_test_ ## test_name ## _write, \
+};
+
+#define add_test(utd, test_name, upper_case_name) \
+ufs_test_add_test(utd, UFS_TEST_ ## upper_case_name, "ufs_test_"#test_name,\
+ &(ufs_test_ ## test_name ## _ops)); \
+
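For readability, this is what the preprocessor produces for
TEST_OPS(write_read_test, WRITE_READ_TEST) together with the matching
add_test() call used later in ufs_test_debugfs_init() (expansion reformatted;
shown for illustration only):

static int ufs_test_write_read_test_show(struct seq_file *file, void *data)
{ return ufs_test_show(file, UFS_TEST_WRITE_READ_TEST); }
static int ufs_test_write_read_test_open(struct inode *inode, struct file *file)
{ return single_open(file, ufs_test_write_read_test_show, inode->i_private); }
static ssize_t ufs_test_write_read_test_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{ return ufs_test_write(file, buf, count, ppos, UFS_TEST_WRITE_READ_TEST); }
static const struct file_operations ufs_test_write_read_test_ops = {
	.open = ufs_test_write_read_test_open,
	.read = seq_read,
	.write = ufs_test_write_read_test_write,
};

/* and add_test(utd, write_read_test, WRITE_READ_TEST) becomes: */
ufs_test_add_test(utd, UFS_TEST_WRITE_READ_TEST, "ufs_test_write_read_test",
		  &(ufs_test_write_read_test_ops));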
+enum ufs_test_testcases {
+ UFS_TEST_WRITE_READ_TEST,
+ UFS_TEST_MULTI_QUERY,
+ UFS_TEST_DATA_INTEGRITY,
+
+ UFS_TEST_LONG_SEQUENTIAL_READ,
+ UFS_TEST_LONG_SEQUENTIAL_WRITE,
+ UFS_TEST_LONG_SEQUENTIAL_MIXED,
+
+ UFS_TEST_LONG_RANDOM_READ,
+ UFS_TEST_LONG_RANDOM_WRITE,
+
+ UFS_TEST_PARALLEL_READ_AND_WRITE,
+ UFS_TEST_LUN_DEPTH,
+
+ NUM_TESTS,
+};
+
+enum ufs_test_stage {
+ DEFAULT,
+ UFS_TEST_ERROR,
+
+ UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE1,
+ UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2,
+
+ UFS_TEST_LUN_DEPTH_TEST_RUNNING,
+ UFS_TEST_LUN_DEPTH_DONE_ISSUING_REQ,
+};
+
+/* device test */
+static struct blk_dev_test_type *ufs_bdt;
+
+struct ufs_test_data {
+ /* Data structure for debugfs dentrys */
+	/* Data structure for debugfs dentries */
+ /*
+ * Data structure containing individual test information, including
+ * self-defined specific data
+ */
+ struct test_info test_info;
+
+ /* A wait queue for OPs to complete */
+ wait_queue_head_t wait_q;
+	/* a flag for write completion */
+ bool queue_complete;
+ /*
+ * To determine the number of r/w bios. When seed = 0, random is
+ * disabled and 2 BIOs are written.
+ */
+ unsigned int random_test_seed;
+ struct dentry *random_test_seed_dentry;
+
+ /* A counter for the number of test requests completed */
+ unsigned int completed_req_count;
+ /* Test stage */
+ enum ufs_test_stage test_stage;
+
+ /* Parameters for maintaining multiple threads */
+ int fail_threads;
+ atomic_t outstanding_threads;
+ struct completion outstanding_complete;
+
+ /* user-defined size of address space in which to perform I/O */
+ u32 sector_range;
+ /* total number of requests to be submitted in long test */
+ u32 long_test_num_reqs;
+
+ struct test_iosched *test_iosched;
+};
+
+static int ufs_test_add_test(struct ufs_test_data *utd,
+ enum ufs_test_testcases test_id, char *test_str,
+ const struct file_operations *test_fops)
+{
+ int ret = 0;
+ struct dentry *tests_root;
+
+ if (test_id >= NUM_TESTS)
+ return -EINVAL;
+
+ tests_root = utd->test_iosched->debug.debug_tests_root;
+ if (!tests_root) {
+ pr_err("%s: Failed to create debugfs root.", __func__);
+ return -EINVAL;
+ }
+
+ utd->test_list[test_id] = debugfs_create_file(test_str,
+ S_IRUGO | S_IWUGO, tests_root,
+ utd, test_fops);
+ if (!utd->test_list[test_id]) {
+		pr_err("%s: Could not create the test %s", __func__,
+			test_str);
+ ret = -ENOMEM;
+ }
+ return ret;
+}
+
+/**
+ * struct test_scenario - keeps scenario data that creates a unique pattern
+ * @test_iosched: per-test reference
+ * @direction: initial direction of the pattern
+ * @toggle_direction: every toggle_direction requests, switch the direction
+ *	for one request
+ * @total_req: number of requests to issue
+ * @rnd_req: whether requests should be issued to random LBAs with random sizes
+ * @run_q: the maximum number of requests to hold in queue (before run_queue())
+ */
+struct test_scenario {
+ struct test_iosched *test_iosched;
+ int direction;
+ int toggle_direction;
+ int total_req;
+ bool rnd_req;
+ int run_q;
+};
+
+enum scenario_id {
+ /* scenarios for parallel read and write test */
+ SCEN_RANDOM_READ_50,
+ SCEN_RANDOM_WRITE_50,
+
+ SCEN_RANDOM_READ_32_NO_FLUSH,
+ SCEN_RANDOM_WRITE_32_NO_FLUSH,
+
+ SCEN_RANDOM_MAX,
+};
+
+static struct test_scenario test_scenario[SCEN_RANDOM_MAX] = {
+ {NULL, READ, 0, 50, true, 5}, /* SCEN_RANDOM_READ_50 */
+ {NULL, WRITE, 0, 50, true, 5}, /* SCEN_RANDOM_WRITE_50 */
+
+ /* SCEN_RANDOM_READ_32_NO_FLUSH */
+ {NULL, READ, 0, 32, true, 64},
+ /* SCEN_RANDOM_WRITE_32_NO_FLUSH */
+ {NULL, WRITE, 0, 32, true, 64},
+};
+
+static
+struct test_scenario *get_scenario(struct test_iosched *test_iosched,
+ enum scenario_id id)
+{
+ struct test_scenario *ret = &test_scenario[id];
+
+ ret->test_iosched = test_iosched;
+ return ret;
+}
+
+static char *ufs_test_get_test_case_str(int testcase)
+{
+ switch (testcase) {
+ case UFS_TEST_WRITE_READ_TEST:
+ return "UFS write read test";
+ case UFS_TEST_MULTI_QUERY:
+ return "Test multiple queries at the same time";
+ case UFS_TEST_LONG_RANDOM_READ:
+ return "UFS long random read test";
+ case UFS_TEST_LONG_RANDOM_WRITE:
+ return "UFS long random write test";
+ case UFS_TEST_DATA_INTEGRITY:
+ return "UFS random data integrity test";
+ case UFS_TEST_LONG_SEQUENTIAL_READ:
+ return "UFS long sequential read test";
+ case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+ return "UFS long sequential write test";
+ case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+ return "UFS long sequential mixed test";
+ case UFS_TEST_PARALLEL_READ_AND_WRITE:
+ return "UFS parallel read and write test";
+ case UFS_TEST_LUN_DEPTH:
+ return "UFS LUN depth test";
+ }
+ return "Unknown test";
+}
+
+static unsigned int ufs_test_pseudo_random_seed(unsigned int *seed_number,
+ unsigned int min_val, unsigned int max_val)
+{
+ int ret = 0;
+
+ if (!seed_number)
+ return 0;
+
+ *seed_number = ((unsigned int) (((unsigned long) *seed_number
+ * (unsigned long) LARGE_PRIME_1) + LARGE_PRIME_2));
+ ret = (unsigned int) ((*seed_number) % max_val);
+
+ return (ret > min_val ? ret : min_val);
+}
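A worked step of the generator above (assuming a 32-bit unsigned int): starting
from the MAGIC_SEED of 7 with min_val = 1 and max_val = 100, the update gives
7 * 1103515367 + 35757 = 7724643326, which truncates to 3429676030 modulo 2^32;
3429676030 % 100 = 30, and since 30 > 1 the function returns 30 and keeps
3429676030 as the next seed. The result is therefore always in
[min_val, max_val - 1], with values below min_val clamped up to min_val.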
+
+/**
+ * pseudo_rnd_sector_and_size - provides a random sector and size for a test
+ * request
+ * @utd: pointer to the UFS test data
+ * @start_sector: pointer for the output start sector
+ * @num_of_bios: pointer for the output number of bios
+ *
+ * Note that for UFS the sector number has to be aligned with the block size,
+ * since SCSI will send the block number as the LBA.
+ */
+static void pseudo_rnd_sector_and_size(struct ufs_test_data *utd,
+ unsigned int *start_sector,
+ unsigned int *num_of_bios)
+{
+ struct test_iosched *tios = utd->test_iosched;
+ u32 min_start_sector = tios->start_sector;
+ unsigned int max_sec = min_start_sector + utd->sector_range;
+
+ do {
+ *start_sector = ufs_test_pseudo_random_seed(
+ &utd->random_test_seed, 1, max_sec);
+ *num_of_bios = ufs_test_pseudo_random_seed(
+ &utd->random_test_seed, 1, TEST_MAX_BIOS_PER_REQ);
+ if (!(*num_of_bios))
+ *num_of_bios = 1;
+ } while ((*start_sector < min_start_sector) ||
+ (*start_sector + (*num_of_bios * TEST_BIO_SIZE)) > max_sec);
+ /*
+ * The test-iosched API is working with sectors 512b, while UFS LBA
+ * is in blocks (4096). Thus the last 3 bits has to be cleared.
+ */
+ *start_sector &= ~SECTOR_TO_BLOCK_MASK;
+}
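For example, a draw of start_sector 123461 becomes 123461 & ~0x7 = 123456,
which is a multiple of 8 sectors of 512 B each, i.e. it falls on a 4096-byte
block boundary as required by the comment above.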
+
+static void ufs_test_pseudo_rnd_size(unsigned int *seed,
+ unsigned int *num_of_bios)
+{
+ *num_of_bios = ufs_test_pseudo_random_seed(seed, 1,
+ TEST_MAX_BIOS_PER_REQ);
+ if (!(*num_of_bios))
+ *num_of_bios = DEFAULT_NUM_OF_BIOS;
+}
+
+static inline int ufs_test_pm_runtime_cfg_sync(struct test_iosched *tios,
+ bool enable)
+{
+ struct scsi_device *sdev;
+ struct ufs_hba *hba;
+ int ret;
+
+ BUG_ON(!tios || !tios->req_q || !tios->req_q->queuedata);
+ sdev = (struct scsi_device *)tios->req_q->queuedata;
+ BUG_ON(!sdev->host);
+ hba = shost_priv(sdev->host);
+ BUG_ON(!hba);
+
+ if (enable) {
+ ret = pm_runtime_get_sync(hba->dev);
+ /* Positive non-zero return values are not errors */
+ if (ret < 0) {
+ pr_err("%s: pm_runtime_get_sync failed, ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+ return 0;
+ }
+ pm_runtime_put_sync(hba->dev);
+ return 0;
+}
+
+static int ufs_test_show(struct seq_file *file, int test_case)
+{
+ char *test_description;
+
+ switch (test_case) {
+ case UFS_TEST_WRITE_READ_TEST:
+ test_description = "\nufs_write_read_test\n"
+ "=========\n"
+ "Description:\n"
+		"This test writes a random block once and then reads it back "
+		"to verify its content. Used to debug first-time transactions.\n";
+ break;
+ case UFS_TEST_MULTI_QUERY:
+ test_description = "Test multiple queries at the same time.\n";
+ break;
+ case UFS_TEST_DATA_INTEGRITY:
+ test_description = "\nufs_data_integrity_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test writes 118 requests of size 4KB to randomly chosen LBAs.\n"
+ "The test then reads from these LBAs and checks that the\n"
+ "correct buffer has been read.\n";
+ break;
+ case UFS_TEST_LONG_SEQUENTIAL_READ:
+ test_description = "\nufs_long_sequential_read_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test runs the following scenarios\n"
+ "- Long Sequential Read Test: this test measures read "
+ "throughput at the driver level by sequentially reading many "
+ "large requests.\n";
+ break;
+ case UFS_TEST_LONG_RANDOM_READ:
+ test_description = "\nufs_long_random_read_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test runs the following scenarios\n"
+ "- Long Random Read Test: this test measures read "
+		"IOPS at the driver level by reading many 4KB requests "
+		"with random LBAs.\n";
+ break;
+ case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+ test_description = "\nufs_long_sequential_write_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test runs the following scenarios\n"
+ "- Long Sequential Write Test: this test measures write "
+ "throughput at the driver level by sequentially writing many "
+ "large requests\n";
+ break;
+ case UFS_TEST_LONG_RANDOM_WRITE:
+ test_description = "\nufs_long_random_write_test\n"
+ "=========\n"
+ "Description:\n"
+ "This test runs the following scenarios\n"
+ "- Long Random Write Test: this test measures write "
+		"IOPS at the driver level by writing many 4KB requests "
+		"with random LBAs.\n";
+ break;
+ case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+ test_description = "\nufs_long_sequential_mixed_test_read\n"
+ "=========\n"
+ "Description:\n"
+ "The test will verify correctness of sequential data pattern "
+ "written to the device while new data (with same pattern) is "
+ "written simultaneously.\n"
+		"First, this test will run a long sequential write scenario. "
+		"This first stage writes the pattern that will be read "
+		"later. Second, sequential read requests will read back and "
+		"compare the same data. The second-stage reads are issued in "
+		"parallel with write requests to the same LBAs and sizes.\n"
+ "NOTE: The test requires a long timeout.\n";
+ break;
+ case UFS_TEST_PARALLEL_READ_AND_WRITE:
+ test_description = "\nufs_test_parallel_read_and_write\n"
+ "=========\n"
+ "Description:\n"
+		"This test initiates two threads. Each thread issues "
+		"multiple random requests. One thread will issue only read "
+		"requests, while the other will issue only write requests.\n";
+ break;
+ case UFS_TEST_LUN_DEPTH:
+ test_description = "\nufs_test_lun_depth\n"
+ "=========\n"
+ "Description:\n"
+		"This test stresses the edge cases of the UFS device queue. "
+		"This queue has two such edges: the total queue depth and the "
+		"commands per LU. To test those edges properly, two deviations "
+		"from each edge are tested in addition to the edge itself. One "
+		"deviation is fixed (1), and the second is picked randomly.\n"
+		"The test will fill the request queue with random read "
+		"requests. The number of requests will vary each iteration and "
+		"will be either one of the edges or the sum of that edge and "
+		"one of the deviations.\n"
+		"Each iteration is run twice: once with only reads and once "
+		"with only writes.\n";
+ break;
+ default:
+ test_description = "Unknown test";
+ }
+
+ seq_puts(file, test_description);
+ return 0;
+}
+
+static struct gendisk *ufs_test_get_rq_disk(struct test_iosched *test_iosched)
+{
+ struct request_queue *req_q = test_iosched->req_q;
+ struct scsi_device *sd;
+
+ if (!req_q) {
+ pr_info("%s: Could not fetch request_queue", __func__);
+ goto exit;
+ }
+
+ sd = (struct scsi_device *)req_q->queuedata;
+ if (!sd) {
+ pr_info("%s: req_q is missing required queuedata", __func__);
+ goto exit;
+ }
+
+ return scsi_gendisk_get_from_dev(&sd->sdev_gendev);
+
+exit:
+ return NULL;
+}
+
+static int ufs_test_put_gendisk(struct test_iosched *test_iosched)
+{
+ struct request_queue *req_q = test_iosched->req_q;
+ struct scsi_device *sd;
+ int ret = 0;
+
+ if (!req_q) {
+ pr_info("%s: Could not fetch request_queue", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ sd = (struct scsi_device *)req_q->queuedata;
+ if (!sd) {
+ pr_info("%s: req_q is missing required queuedata", __func__);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ scsi_gendisk_put(&sd->sdev_gendev);
+
+exit:
+ return ret;
+}
+
+static int ufs_test_prepare(struct test_iosched *tios)
+{
+ return ufs_test_pm_runtime_cfg_sync(tios, true);
+}
+
+static int ufs_test_post(struct test_iosched *tios)
+{
+ int ret;
+
+ ret = ufs_test_pm_runtime_cfg_sync(tios, false);
+ if (!ret)
+ ret = ufs_test_put_gendisk(tios);
+
+ return ret;
+}
+
+static int ufs_test_check_result(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ if (utd->test_stage == UFS_TEST_ERROR) {
+ pr_err("%s: An error occurred during the test.", __func__);
+ return TEST_FAILED;
+ }
+
+ if (utd->fail_threads != 0) {
+ pr_err("%s: About %d threads failed during execution.",
+ __func__, utd->fail_threads);
+ return utd->fail_threads;
+ }
+
+ return 0;
+}
+
+static bool ufs_write_read_completion(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ if (!utd->queue_complete) {
+ utd->queue_complete = true;
+ wake_up(&utd->wait_q);
+ return false;
+ }
+ return true;
+}
+
+static int ufs_test_run_write_read_test(struct test_iosched *test_iosched)
+{
+ int ret = 0;
+ unsigned int start_sec;
+ unsigned int num_bios;
+ struct request_queue *q = test_iosched->req_q;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ start_sec = test_iosched->start_sector + sizeof(int) * BIO_U32_SIZE
+ * test_iosched->num_of_write_bios;
+ if (utd->random_test_seed != 0)
+ ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
+ else
+ num_bios = DEFAULT_NUM_OF_BIOS;
+
+ /* Adding a write request */
+ pr_info("%s: Adding a write request with %d bios to Q, req_id=%d",
+ __func__, num_bios, test_iosched->wr_rd_next_req_id);
+
+ utd->queue_complete = false;
+ ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, WRITE, start_sec,
+ num_bios, TEST_PATTERN_5A, NULL);
+ if (ret) {
+ pr_err("%s: failed to add a write request", __func__);
+ return ret;
+ }
+
+ /* waiting for the write request to finish */
+ blk_post_runtime_resume(q, 0);
+ wait_event(utd->wait_q, utd->queue_complete);
+
+ /* Adding a read request*/
+ pr_info("%s: Adding a read request to Q", __func__);
+
+ ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, READ, start_sec,
+ num_bios, TEST_PATTERN_5A, NULL);
+ if (ret) {
+ pr_err("%s: failed to add a read request", __func__);
+ return ret;
+ }
+
+ blk_post_runtime_resume(q, 0);
+ return ret;
+}
+
+static void ufs_test_thread_complete(struct ufs_test_data *utd, int result)
+{
+ if (result)
+ utd->fail_threads++;
+ atomic_dec(&utd->outstanding_threads);
+ if (!atomic_read(&utd->outstanding_threads))
+ complete(&utd->outstanding_complete);
+}
+
+static void ufs_test_random_async_query(void *data, async_cookie_t cookie)
+{
+ int op;
+ struct test_iosched *test_iosched = data;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+ struct scsi_device *sdev;
+ struct ufs_hba *hba;
+ int buff_len = QUERY_DESC_UNIT_MAX_SIZE;
+ u8 desc_buf[QUERY_DESC_UNIT_MAX_SIZE];
+ bool flag;
+ u32 att;
+ int ret = 0;
+
+ sdev = (struct scsi_device *)test_iosched->req_q->queuedata;
+ BUG_ON(!sdev->host);
+ hba = shost_priv(sdev->host);
+ BUG_ON(!hba);
+
+ op = ufs_test_pseudo_random_seed(&utd->random_test_seed, 1, 8);
+ /*
+ * When write data (descriptor/attribute/flag) queries are issued,
+ * regular work and functionality must be kept. The data is read
+ * first to make sure the original state is restored.
+ */
+ switch (op) {
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ case UPIU_QUERY_OPCODE_WRITE_DESC:
+ ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ QUERY_DESC_IDN_UNIT, 0, 0, desc_buf, &buff_len);
+ break;
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &att);
+ if (ret || op == UPIU_QUERY_OPCODE_READ_ATTR)
+ break;
+
+ ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &att);
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ case UPIU_QUERY_OPCODE_SET_FLAG:
+ case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ /* We read the QUERY_FLAG_IDN_BKOPS_EN and restore it later */
+ ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, &flag);
+ if (ret || op == UPIU_QUERY_OPCODE_READ_FLAG)
+ break;
+
+ /* After changing the flag we have to change it back */
+ ret = ufshcd_query_flag(hba, op, QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if ((op == UPIU_QUERY_OPCODE_SET_FLAG && flag) ||
+ (op == UPIU_QUERY_OPCODE_CLEAR_FLAG && !flag))
+ /* No need to change it back */
+ break;
+
+ if (flag)
+ ret |= ufshcd_query_flag(hba,
+ UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ else
+ ret |= ufshcd_query_flag(hba,
+ UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ break;
+ default:
+ pr_err("%s: Random error unknown op %d", __func__, op);
+ }
+
+ if (ret)
+ pr_err("%s: Query thread with op %d, failed with err %d.",
+ __func__, op, ret);
+
+ ufs_test_thread_complete(utd, ret);
+}
+
+static void scenario_free_end_io_fn(struct request *rq, int err)
+{
+	struct test_request *test_rq;
+	struct test_iosched *test_iosched;
+	unsigned long flags;
+
+	BUG_ON(!rq);
+	test_iosched = rq->q->elevator->elevator_data;
+	test_rq = (struct test_request *)rq->elv.priv[0];
+	BUG_ON(!test_rq);
+
+ spin_lock_irqsave(&test_iosched->lock, flags);
+ test_iosched->dispatched_count--;
+ list_del_init(&test_rq->queuelist);
+ __blk_put_request(test_iosched->req_q, test_rq->rq);
+ spin_unlock_irqrestore(&test_iosched->lock, flags);
+
+ if (err)
+ pr_err("%s: request %d completed, err=%d", __func__,
+ test_rq->req_id, err);
+
+ test_iosched_free_test_req_data_buffer(test_rq);
+ kfree(test_rq);
+
+ check_test_completion(test_iosched);
+}
+
+static bool ufs_test_multi_thread_completion(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+ return atomic_read(&utd->outstanding_threads) <= 0 &&
+ utd->test_stage != UFS_TEST_LUN_DEPTH_TEST_RUNNING;
+}
+
+static bool long_rand_test_check_completion(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ if (utd->completed_req_count > utd->long_test_num_reqs) {
+ pr_err("%s: Error: Completed more requests than total test requests.\nTerminating test."
+ , __func__);
+ return true;
+ }
+ return utd->completed_req_count == utd->long_test_num_reqs;
+}
+
+static bool long_seq_test_check_completion(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ if (utd->completed_req_count > utd->long_test_num_reqs) {
+ pr_err("%s: Error: Completed more requests than total test requests"
+ , __func__);
+ pr_err("%s: Terminating test.", __func__);
+ return true;
+ }
+ return utd->completed_req_count == utd->long_test_num_reqs;
+}
+
+/**
+ * ufs_test_toggle_direction() - decides whether toggling is
+ * needed. Toggle factor zero means no toggling.
+ *
+ * toggle_factor - toggling frequency: toggle the direction once every
+ *	toggle_factor iterations
+ * iteration - the current request iteration
+ *
+ * Returns nonzero if toggling is needed, and 0 when toggling is
+ * not needed.
+ */
+static inline int ufs_test_toggle_direction(int toggle_factor, int iteration)
+{
+ if (!toggle_factor)
+ return 0;
+
+ return !(iteration % toggle_factor);
+}
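For instance, with toggle_factor = 4 the expression is nonzero for iterations
0, 4, 8, ..., so every fourth request (including the very first) is issued in
the direction opposite to the scenario's base direction. The scenarios defined
earlier in this file all use toggle_direction = 0, i.e. no toggling.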
+
+static void ufs_test_run_scenario(void *data, async_cookie_t cookie)
+{
+ struct test_scenario *ts = (struct test_scenario *)data;
+ struct test_iosched *test_iosched = ts->test_iosched;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+ int start_sec;
+ int i;
+ int ret = 0;
+
+ BUG_ON(!ts);
+ start_sec = ts->test_iosched->start_sector;
+
+ for (i = 0; i < ts->total_req; i++) {
+ int num_bios = DEFAULT_NUM_OF_BIOS;
+ int direction;
+
+ if (ufs_test_toggle_direction(ts->toggle_direction, i))
+ direction = (ts->direction == WRITE) ? READ : WRITE;
+ else
+ direction = ts->direction;
+
+ /* use randomly generated requests */
+ if (ts->rnd_req && utd->random_test_seed != 0)
+ pseudo_rnd_sector_and_size(utd, &start_sec, &num_bios);
+
+ ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
+ direction, start_sec, num_bios, TEST_PATTERN_5A,
+ scenario_free_end_io_fn);
+ if (ret) {
+ pr_err("%s: failed to create request" , __func__);
+ break;
+ }
+
+ /*
+ * We want to run the queue every run_q requests, or,
+ * when the requests pool is exhausted
+ */
+
+ if (test_iosched->dispatched_count >= QUEUE_MAX_REQUESTS ||
+ (ts->run_q && !(i % ts->run_q)))
+ blk_post_runtime_resume(test_iosched->req_q, 0);
+ }
+
+ blk_post_runtime_resume(test_iosched->req_q, 0);
+ ufs_test_thread_complete(utd, ret);
+}
+
+static int ufs_test_run_multi_query_test(struct test_iosched *test_iosched)
+{
+ int i;
+ struct ufs_test_data *utd;
+ struct scsi_device *sdev;
+ struct ufs_hba *hba;
+
+ BUG_ON(!test_iosched || !test_iosched->req_q ||
+ !test_iosched->req_q->queuedata);
+ sdev = (struct scsi_device *)test_iosched->req_q->queuedata;
+ BUG_ON(!sdev->host);
+ hba = shost_priv(sdev->host);
+ BUG_ON(!hba);
+
+ utd = test_iosched->blk_dev_test_data;
+ atomic_set(&utd->outstanding_threads, 0);
+ utd->fail_threads = 0;
+ init_completion(&utd->outstanding_complete);
+ for (i = 0; i < MAX_PARALLEL_QUERIES; ++i) {
+ atomic_inc(&utd->outstanding_threads);
+ async_schedule(ufs_test_random_async_query, test_iosched);
+ }
+
+ if (!wait_for_completion_timeout(&utd->outstanding_complete,
+ THREADS_COMPLETION_TIMOUT)) {
+ pr_err("%s: Multi-query test timed-out %d threads left",
+ __func__, atomic_read(&utd->outstanding_threads));
+ }
+ test_iosched_mark_test_completion(test_iosched);
+ return 0;
+}
+
+static int ufs_test_run_parallel_read_and_write_test(
+ struct test_iosched *test_iosched)
+{
+ struct test_scenario *read_data, *write_data;
+ int i;
+ bool changed_seed = false;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ read_data = get_scenario(test_iosched, SCEN_RANDOM_READ_50);
+ write_data = get_scenario(test_iosched, SCEN_RANDOM_WRITE_50);
+
+ /* allow randomness even if user forgot */
+ if (utd->random_test_seed <= 0) {
+ changed_seed = true;
+ utd->random_test_seed = 1;
+ }
+
+ atomic_set(&utd->outstanding_threads, 0);
+ utd->fail_threads = 0;
+ init_completion(&utd->outstanding_complete);
+
+ for (i = 0; i < (RANDOM_REQUEST_THREADS / 2); i++) {
+ async_schedule(ufs_test_run_scenario, read_data);
+ async_schedule(ufs_test_run_scenario, write_data);
+ atomic_add(2, &utd->outstanding_threads);
+ }
+
+ if (!wait_for_completion_timeout(&utd->outstanding_complete,
+ THREADS_COMPLETION_TIMOUT)) {
+ pr_err("%s: Multi-thread test timed-out %d threads left",
+ __func__, atomic_read(&utd->outstanding_threads));
+ }
+ check_test_completion(test_iosched);
+
+ /* clear random seed if changed */
+ if (changed_seed)
+ utd->random_test_seed = 0;
+
+ return 0;
+}
+
+static void ufs_test_run_synchronous_scenario(struct test_scenario *read_data)
+{
+ struct ufs_test_data *utd = read_data->test_iosched->blk_dev_test_data;
+ init_completion(&utd->outstanding_complete);
+ atomic_set(&utd->outstanding_threads, 1);
+ async_schedule(ufs_test_run_scenario, read_data);
+ if (!wait_for_completion_timeout(&utd->outstanding_complete,
+ THREADS_COMPLETION_TIMOUT)) {
+ pr_err("%s: Multi-thread test timed-out %d threads left",
+ __func__, atomic_read(&utd->outstanding_threads));
+ }
+}
+
+static int ufs_test_run_lun_depth_test(struct test_iosched *test_iosched)
+{
+ struct test_scenario *read_data, *write_data;
+ struct scsi_device *sdev;
+ bool changed_seed = false;
+ int i = 0, num_req[LUN_DEPTH_TEST_SIZE];
+ int lun_qdepth, nutrs, num_scenarios;
+ struct ufs_test_data *utd;
+
+ BUG_ON(!test_iosched || !test_iosched->req_q ||
+ !test_iosched->req_q->queuedata);
+ sdev = (struct scsi_device *)test_iosched->req_q->queuedata;
+ lun_qdepth = sdev->max_queue_depth;
+ nutrs = sdev->host->can_queue;
+ utd = test_iosched->blk_dev_test_data;
+
+ /* allow randomness even if user forgot */
+ if (utd->random_test_seed <= 0) {
+ changed_seed = true;
+ utd->random_test_seed = 1;
+ }
+
+ /* initialize the number of request for each iteration */
+ num_req[i++] = ufs_test_pseudo_random_seed(
+ &utd->random_test_seed, 1, lun_qdepth - 2);
+ num_req[i++] = lun_qdepth - 1;
+ num_req[i++] = lun_qdepth;
+ num_req[i++] = lun_qdepth + 1;
+ /* if (nutrs-lun_qdepth-2 <= 0), do not run this scenario */
+ if (nutrs - lun_qdepth - 2 > 0)
+ num_req[i++] = lun_qdepth + 1 + ufs_test_pseudo_random_seed(
+ &utd->random_test_seed, 1, nutrs - lun_qdepth - 2);
+
+ /* if nutrs == lun_qdepth, do not run these three scenarios */
+ if (nutrs != lun_qdepth) {
+ num_req[i++] = nutrs - 1;
+ num_req[i++] = nutrs;
+ num_req[i++] = nutrs + 1;
+ }
+
+ /* a random number up to 10, not to cause overflow or timeout */
+ num_req[i++] = nutrs + 1 + ufs_test_pseudo_random_seed(
+ &utd->random_test_seed, 1, 10);
+
+ num_scenarios = i;
+ utd->test_stage = UFS_TEST_LUN_DEPTH_TEST_RUNNING;
+ utd->fail_threads = 0;
+ read_data = get_scenario(test_iosched, SCEN_RANDOM_READ_32_NO_FLUSH);
+ write_data = get_scenario(test_iosched, SCEN_RANDOM_WRITE_32_NO_FLUSH);
+
+ for (i = 0; i < num_scenarios; i++) {
+ int reqs = num_req[i];
+
+ read_data->total_req = reqs;
+ write_data->total_req = reqs;
+
+ ufs_test_run_synchronous_scenario(read_data);
+ ufs_test_run_synchronous_scenario(write_data);
+ }
+
+ utd->test_stage = UFS_TEST_LUN_DEPTH_DONE_ISSUING_REQ;
+ check_test_completion(test_iosched);
+
+ /* clear random seed if changed */
+ if (changed_seed)
+ utd->random_test_seed = 0;
+
+ return 0;
+}
+
+static void long_test_free_end_io_fn(struct request *rq, int err)
+{
+ struct test_request *test_rq;
+ struct test_iosched *test_iosched = rq->q->elevator->elevator_data;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+ unsigned long flags;
+
+ if (!rq) {
+ pr_err("%s: error: NULL request", __func__);
+ return;
+ }
+
+ test_rq = (struct test_request *)rq->elv.priv[0];
+
+ BUG_ON(!test_rq);
+
+ spin_lock_irqsave(&test_iosched->lock, flags);
+ test_iosched->dispatched_count--;
+ list_del_init(&test_rq->queuelist);
+ __blk_put_request(test_iosched->req_q, test_rq->rq);
+ spin_unlock_irqrestore(&test_iosched->lock, flags);
+
+ if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2 &&
+ rq_data_dir(rq) == READ &&
+ compare_buffer_to_pattern(test_rq)) {
+ /* if the pattern does not match */
+ pr_err("%s: read pattern not as expected", __func__);
+ utd->test_stage = UFS_TEST_ERROR;
+ check_test_completion(test_iosched);
+ return;
+ }
+
+ if (err)
+ pr_err("%s: request %d completed, err=%d", __func__,
+ test_rq->req_id, err);
+
+ test_iosched_free_test_req_data_buffer(test_rq);
+ kfree(test_rq);
+ utd->completed_req_count++;
+
+ check_test_completion(test_iosched);
+}
+
+/**
+ * run_long_test - main function for the long (sequential and random) tests
+ * @test_iosched - test-specific data
+ *
+ * This function is used to fill up (and keep full) the test queue with
+ * requests. There are two scenarios this function works with:
+ * 1. Only read/write (STAGE_1 or no stage)
+ * 2. Simultaneous read and write to the same LBAs (STAGE_2)
+ */
+static int run_long_test(struct test_iosched *test_iosched)
+{
+ int ret = 0;
+ int direction, num_bios_per_request = 1;
+ static unsigned int inserted_requests;
+ u32 sector, seed, num_bios, seq_sector_delta;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ BUG_ON(!test_iosched);
+ sector = test_iosched->start_sector;
+ if (test_iosched->sector_range)
+ utd->sector_range = test_iosched->sector_range;
+ else
+ utd->sector_range = TEST_DEFAULT_SECTOR_RANGE;
+
+ if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
+ test_iosched->test_count = 0;
+ utd->completed_req_count = 0;
+ inserted_requests = 0;
+ }
+
+ /* Set test parameters */
+ switch (test_iosched->test_info.testcase) {
+ case UFS_TEST_LONG_RANDOM_READ:
+ utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
+ (LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
+ num_bios_per_request);
+ direction = READ;
+ break;
+ case UFS_TEST_LONG_RANDOM_WRITE:
+ utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
+ (LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
+ num_bios_per_request);
+ direction = WRITE;
+ break;
+ case UFS_TEST_LONG_SEQUENTIAL_READ:
+ num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
+ utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
+ (num_bios_per_request * TEST_BIO_SIZE);
+ direction = READ;
+ break;
+ case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+ case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+ num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
+ utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
+ (num_bios_per_request * TEST_BIO_SIZE);
+ default:
+ direction = WRITE;
+ }
+
+ seq_sector_delta = num_bios_per_request * (TEST_BIO_SIZE / SECTOR_SIZE);
+
+ seed = utd->random_test_seed ? utd->random_test_seed : MAGIC_SEED;
+
+ pr_info("%s: Adding %d requests, first req_id=%d", __func__,
+ utd->long_test_num_reqs, test_iosched->wr_rd_next_req_id);
+
+ do {
+ /*
+ * since our requests come from a pool containing 128
+ * requests, we don't want to exhaust this quantity,
+ * therefore we add up to QUEUE_MAX_REQUESTS (which
+ * includes a safety margin) and then call the block layer
+ * to fetch them
+ */
+ if (test_iosched->test_count >= QUEUE_MAX_REQUESTS) {
+ blk_post_runtime_resume(test_iosched->req_q, 0);
+ continue;
+ }
+
+ switch (test_iosched->test_info.testcase) {
+ case UFS_TEST_LONG_SEQUENTIAL_READ:
+ case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+ case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+ /* don't need to increment on the first iteration */
+ if (inserted_requests)
+ sector += seq_sector_delta;
+ break;
+ case UFS_TEST_LONG_RANDOM_READ:
+ case UFS_TEST_LONG_RANDOM_WRITE:
+ pseudo_rnd_sector_and_size(utd, &sector, &num_bios);
+ default:
+ break;
+ }
+
+ ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
+ direction, sector, num_bios_per_request,
+ TEST_PATTERN_5A, long_test_free_end_io_fn);
+ if (ret) {
+ pr_err("%s: failed to create request" , __func__);
+ break;
+ }
+ inserted_requests++;
+ if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
+ ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
+ READ, sector, num_bios_per_request,
+ TEST_PATTERN_5A, long_test_free_end_io_fn);
+ if (ret) {
+ pr_err("%s: failed to create request" ,
+ __func__);
+ break;
+ }
+ inserted_requests++;
+ }
+
+ } while (inserted_requests < utd->long_test_num_reqs);
+
+ /* in this case the queue will not run in the above loop */
+ if (utd->long_test_num_reqs < QUEUE_MAX_REQUESTS)
+ blk_post_runtime_resume(test_iosched->req_q, 0);
+
+ return ret;
+}
+
+static int run_mixed_long_seq_test(struct test_iosched *test_iosched)
+{
+ int ret;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ utd->test_stage = UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE1;
+ ret = run_long_test(test_iosched);
+ if (ret)
+ goto out;
+
+ pr_info("%s: First write iteration completed.", __func__);
+ pr_info("%s: Starting mixed write and reads sequence.", __func__);
+ utd->test_stage = UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2;
+ ret = run_long_test(test_iosched);
+out:
+ return ret;
+}
+
+static int long_rand_test_calc_iops(struct test_iosched *test_iosched)
+{
+ unsigned long mtime, num_ios, iops;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ mtime = ktime_to_ms(utd->test_info.test_duration);
+ num_ios = utd->completed_req_count;
+
+ pr_info("%s: time is %lu msec, IOS count is %lu", __func__, mtime,
+ num_ios);
+
+ /* preserve some precision */
+ num_ios *= 1000;
+ /* calculate those iops */
+ iops = num_ios / mtime;
+
+ pr_info("%s: IOPS: %lu IOP/sec\n", __func__, iops);
+
+ return ufs_test_post(test_iosched);
+}
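As a hedged numeric illustration of the calculation above: for
completed_req_count = 20000 requests over a measured duration of 4000 ms, the
code computes (20000 * 1000) / 4000 = 5000 IOPS.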
+
+static int long_seq_test_calc_throughput(struct test_iosched *test_iosched)
+{
+ unsigned long fraction, integer;
+ unsigned long mtime, byte_count;
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ mtime = ktime_to_ms(utd->test_info.test_duration);
+ byte_count = utd->test_info.test_byte_count;
+
+ pr_info("%s: time is %lu msec, size is %lu.%lu MiB", __func__, mtime,
+ LONG_TEST_SIZE_INTEGER(byte_count),
+ LONG_TEST_SIZE_FRACTION(byte_count));
+
+ /* we first multiply in order not to lose precision */
+ mtime *= MB_MSEC_RATIO_APPROXIMATION;
+	/*
+	 * divide values to get a MiB/sec integer value with one
+	 * digit of precision
+	 */
+ fraction = integer = (byte_count * 10) / mtime;
+ integer /= 10;
+ /* and calculate the MiB value fraction */
+ fraction -= integer * 10;
+
+ pr_info("%s: Throughput: %lu.%lu MiB/sec\n", __func__, integer,
+ fraction);
+
+ return ufs_test_post(test_iosched);
+}
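As a hedged numeric illustration of the fixed-point arithmetic above: with
byte_count = 104857600 (100 MiB) and mtime = 2000 ms,
MB_MSEC_RATIO_APPROXIMATION evaluates to 1048 (integer division), so mtime
becomes 2000 * 1048 = 2096000, (104857600 * 10) / 2096000 = 500, giving
integer = 50 and fraction = 0; the log reports 50.0 MiB/sec, matching the
exact 100 MiB / 2 s.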
+
+static bool ufs_data_integrity_completion(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+ bool ret = false;
+
+ if (!test_iosched->dispatched_count) {
+ /* q is empty in this case */
+ if (!utd->queue_complete) {
+ utd->queue_complete = true;
+ wake_up(&utd->wait_q);
+ } else {
+ /* declare completion only on second time q is empty */
+ ret = true;
+ }
+ }
+
+ return ret;
+}
+
+static int ufs_test_run_data_integrity_test(struct test_iosched *test_iosched)
+{
+ int ret = 0;
+ int i, j;
+ unsigned int start_sec, num_bios, retries = NUM_UNLUCKY_RETRIES;
+ struct request_queue *q = test_iosched->req_q;
+ int sectors[QUEUE_MAX_REQUESTS] = {0};
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ start_sec = test_iosched->start_sector;
+ utd->queue_complete = false;
+
+ if (utd->random_test_seed != 0) {
+ ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
+ } else {
+ num_bios = DEFAULT_NUM_OF_BIOS;
+ utd->random_test_seed = MAGIC_SEED;
+ }
+
+ /* Adding write requests */
+ pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
+ QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);
+
+ for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
+ /* make sure that we didn't draw the same start_sector twice */
+ while (retries--) {
+ pseudo_rnd_sector_and_size(utd, &start_sec, &num_bios);
+ sectors[i] = start_sec;
+ for (j = 0; (j < i) && (sectors[i] != sectors[j]); j++)
+ /* just increment j */;
+ if (j == i)
+ break;
+ }
+ if (!retries) {
+ pr_err("%s: too many unlucky start_sector draw retries",
+ __func__);
+ ret = -EINVAL;
+ return ret;
+ }
+ retries = NUM_UNLUCKY_RETRIES;
+
+ ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, WRITE,
+ start_sec, 1, i, long_test_free_end_io_fn);
+
+ if (ret) {
+ pr_err("%s: failed to add a write request", __func__);
+ return ret;
+ }
+ }
+
+ /* waiting for the write request to finish */
+ blk_post_runtime_resume(q, 0);
+ wait_event(utd->wait_q, utd->queue_complete);
+
+ /* Adding read requests */
+ pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
+ QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);
+
+ for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
+ ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, READ,
+ sectors[i], 1, i, long_test_free_end_io_fn);
+
+ if (ret) {
+ pr_err("%s: failed to add a read request", __func__);
+ return ret;
+ }
+ }
+
+ blk_post_runtime_resume(q, 0);
+ return ret;
+}
+
+static ssize_t ufs_test_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, int test_case)
+{
+ int ret = 0;
+ int i;
+ int number;
+ struct seq_file *seq_f = file->private_data;
+ struct ufs_test_data *utd = seq_f->private;
+
+ ret = kstrtoint_from_user(buf, count, 0, &number);
+ if (ret < 0) {
+ pr_err("%s: Error while reading test parameter value %d",
+ __func__, ret);
+ return ret;
+ }
+
+ if (number <= 0)
+ number = 1;
+
+	pr_info("%s: the test will run for %d iterations.", __func__, number);
+ memset(&utd->test_info, 0, sizeof(struct test_info));
+
+ /* Initializing test */
+ utd->test_info.data = utd;
+ utd->test_info.get_test_case_str_fn = ufs_test_get_test_case_str;
+ utd->test_info.testcase = test_case;
+ utd->test_info.get_rq_disk_fn = ufs_test_get_rq_disk;
+ utd->test_info.check_test_result_fn = ufs_test_check_result;
+ utd->test_info.post_test_fn = ufs_test_post;
+ utd->test_info.prepare_test_fn = ufs_test_prepare;
+ utd->test_stage = DEFAULT;
+
+ switch (test_case) {
+ case UFS_TEST_WRITE_READ_TEST:
+ utd->test_info.run_test_fn = ufs_test_run_write_read_test;
+ utd->test_info.check_test_completion_fn =
+ ufs_write_read_completion;
+ break;
+ case UFS_TEST_MULTI_QUERY:
+ utd->test_info.run_test_fn = ufs_test_run_multi_query_test;
+ utd->test_info.check_test_result_fn = ufs_test_check_result;
+ break;
+ case UFS_TEST_DATA_INTEGRITY:
+ utd->test_info.run_test_fn = ufs_test_run_data_integrity_test;
+ utd->test_info.check_test_completion_fn =
+ ufs_data_integrity_completion;
+ break;
+ case UFS_TEST_LONG_RANDOM_READ:
+ case UFS_TEST_LONG_RANDOM_WRITE:
+ utd->test_info.run_test_fn = run_long_test;
+ utd->test_info.post_test_fn = long_rand_test_calc_iops;
+ utd->test_info.check_test_result_fn = ufs_test_check_result;
+ utd->test_info.check_test_completion_fn =
+ long_rand_test_check_completion;
+ break;
+ case UFS_TEST_LONG_SEQUENTIAL_READ:
+ case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+ utd->test_info.run_test_fn = run_long_test;
+ utd->test_info.post_test_fn = long_seq_test_calc_throughput;
+ utd->test_info.check_test_result_fn = ufs_test_check_result;
+ utd->test_info.check_test_completion_fn =
+ long_seq_test_check_completion;
+ break;
+ case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+ utd->test_info.timeout_msec = LONG_SEQUENTIAL_MIXED_TIMOUT_MS;
+ utd->test_info.run_test_fn = run_mixed_long_seq_test;
+ utd->test_info.post_test_fn = long_seq_test_calc_throughput;
+ utd->test_info.check_test_result_fn = ufs_test_check_result;
+ break;
+ case UFS_TEST_PARALLEL_READ_AND_WRITE:
+ utd->test_info.run_test_fn =
+ ufs_test_run_parallel_read_and_write_test;
+ utd->test_info.check_test_completion_fn =
+ ufs_test_multi_thread_completion;
+ break;
+ case UFS_TEST_LUN_DEPTH:
+ utd->test_info.run_test_fn = ufs_test_run_lun_depth_test;
+ break;
+ default:
+ pr_err("%s: Unknown test-case: %d", __func__, test_case);
+ WARN_ON(true);
+ }
+
+ /* Running the test multiple times */
+ for (i = 0; i < number; ++i) {
+ pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+ pr_info("%s: ====================", __func__);
+
+ utd->test_info.test_byte_count = 0;
+ ret = test_iosched_start_test(utd->test_iosched,
+ &utd->test_info);
+ if (ret) {
+ pr_err("%s: Test failed, err=%d.", __func__, ret);
+ return ret;
+ }
+
+ /* Allow FS requests to be dispatched */
+ msleep(1000);
+ }
+
+ pr_info("%s: Completed all the ufs test iterations.", __func__);
+
+ return count;
+}
+
+TEST_OPS(write_read_test, WRITE_READ_TEST);
+TEST_OPS(multi_query, MULTI_QUERY);
+TEST_OPS(data_integrity, DATA_INTEGRITY);
+TEST_OPS(long_random_read, LONG_RANDOM_READ);
+TEST_OPS(long_random_write, LONG_RANDOM_WRITE);
+TEST_OPS(long_sequential_read, LONG_SEQUENTIAL_READ);
+TEST_OPS(long_sequential_write, LONG_SEQUENTIAL_WRITE);
+TEST_OPS(long_sequential_mixed, LONG_SEQUENTIAL_MIXED);
+TEST_OPS(parallel_read_and_write, PARALLEL_READ_AND_WRITE);
+TEST_OPS(lun_depth, LUN_DEPTH);
+
+static void ufs_test_debugfs_cleanup(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+ debugfs_remove_recursive(test_iosched->debug.debug_root);
+ kfree(utd->test_list);
+}
+
+static int ufs_test_debugfs_init(struct ufs_test_data *utd)
+{
+ struct dentry *utils_root, *tests_root;
+ int ret = 0;
+ struct test_iosched *ts = utd->test_iosched;
+
+ utils_root = ts->debug.debug_utils_root;
+ tests_root = ts->debug.debug_tests_root;
+
+ utd->test_list = kmalloc(sizeof(struct dentry *) * NUM_TESTS,
+ GFP_KERNEL);
+ if (!utd->test_list) {
+		pr_err("%s: failed to allocate tests dentries", __func__);
+ return -ENODEV;
+ }
+
+ if (!utils_root || !tests_root) {
+ pr_err("%s: Failed to create debugfs root.", __func__);
+ ret = -EINVAL;
+ goto exit_err;
+ }
+
+ utd->random_test_seed_dentry = debugfs_create_u32("random_test_seed",
+ S_IRUGO | S_IWUGO, utils_root, &utd->random_test_seed);
+
+ if (!utd->random_test_seed_dentry) {
+ pr_err("%s: Could not create debugfs random_test_seed.",
+ __func__);
+ ret = -ENOMEM;
+ goto exit_err;
+ }
+
+ ret = add_test(utd, write_read_test, WRITE_READ_TEST);
+ if (ret)
+ goto exit_err;
+ ret = add_test(utd, data_integrity, DATA_INTEGRITY);
+ if (ret)
+ goto exit_err;
+ ret = add_test(utd, long_random_read, LONG_RANDOM_READ);
+ if (ret)
+ goto exit_err;
+ ret = add_test(utd, long_random_write, LONG_RANDOM_WRITE);
+ if (ret)
+ goto exit_err;
+ ret = add_test(utd, long_sequential_read, LONG_SEQUENTIAL_READ);
+ if (ret)
+ goto exit_err;
+ ret = add_test(utd, long_sequential_write, LONG_SEQUENTIAL_WRITE);
+ if (ret)
+ goto exit_err;
+ ret = add_test(utd, long_sequential_mixed, LONG_SEQUENTIAL_MIXED);
+ if (ret)
+ goto exit_err;
+	ret = add_test(utd, multi_query, MULTI_QUERY);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, parallel_read_and_write, PARALLEL_READ_AND_WRITE);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, lun_depth, LUN_DEPTH);
+	if (ret)
+		goto exit_err;
+
+ goto exit;
+
+exit_err:
+ ufs_test_debugfs_cleanup(ts);
+exit:
+ return ret;
+}
+
+static int ufs_test_probe(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd;
+ int ret;
+
+ utd = kzalloc(sizeof(*utd), GFP_KERNEL);
+ if (!utd) {
+ pr_err("%s: failed to allocate ufs test data\n", __func__);
+ return -ENOMEM;
+ }
+
+ init_waitqueue_head(&utd->wait_q);
+ utd->test_iosched = test_iosched;
+ test_iosched->blk_dev_test_data = utd;
+
+ ret = ufs_test_debugfs_init(utd);
+ if (ret) {
+ pr_err("%s: failed to init debug-fs entries, ret=%d\n",
+ __func__, ret);
+ kfree(utd);
+ }
+
+ return ret;
+}
+
+static void ufs_test_remove(struct test_iosched *test_iosched)
+{
+ struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+ ufs_test_debugfs_cleanup(test_iosched);
+ test_iosched->blk_dev_test_data = NULL;
+ kfree(utd);
+}
+
+static int __init ufs_test_init(void)
+{
+ ufs_bdt = kzalloc(sizeof(*ufs_bdt), GFP_KERNEL);
+ if (!ufs_bdt)
+ return -ENOMEM;
+
+ ufs_bdt->type_prefix = UFS_TEST_BLK_DEV_TYPE_PREFIX;
+ ufs_bdt->init_fn = ufs_test_probe;
+ ufs_bdt->exit_fn = ufs_test_remove;
+ INIT_LIST_HEAD(&ufs_bdt->list);
+
+ test_iosched_register(ufs_bdt);
+
+ return 0;
+}
+
+static void __exit ufs_test_exit(void)
+{
+ test_iosched_unregister(ufs_bdt);
+ kfree(ufs_bdt);
+}
+module_init(ufs_test_init);
+module_exit(ufs_test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("UFS test");
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index f58abfcdfe81..41684dca6baa 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -40,6 +40,22 @@
#include "ufshcd.h"
#include "ufshcd-pltfrm.h"
+static int ufshcd_parse_reset_info(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ hba->core_reset = devm_reset_control_get(hba->dev,
+ "core_reset");
+ if (IS_ERR(hba->core_reset)) {
+ ret = PTR_ERR(hba->core_reset);
+ dev_err(hba->dev, "core_reset unavailable,err = %d\n",
+ ret);
+ hba->core_reset = NULL;
+ }
+
+ return ret;
+}
+
static int ufshcd_parse_clock_info(struct ufs_hba *hba)
{
int ret = 0;
@@ -221,7 +237,34 @@ out:
return err;
}
-#ifdef CONFIG_PM
+static void ufshcd_parse_pm_levels(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ struct device_node *np = dev->of_node;
+
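+	/* a level of -1 means the property was not specified in DT */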
+ if (np) {
+ if (of_property_read_u32(np, "rpm-level", &hba->rpm_lvl))
+ hba->rpm_lvl = -1;
+ if (of_property_read_u32(np, "spm-level", &hba->spm_lvl))
+ hba->spm_lvl = -1;
+ }
+}
+
+static int ufshcd_parse_pinctrl_info(struct ufs_hba *hba)
+{
+ int ret = 0;
+
+ /* Try to obtain pinctrl handle */
+ hba->pctrl = devm_pinctrl_get(hba->dev);
+ if (IS_ERR(hba->pctrl)) {
+ ret = PTR_ERR(hba->pctrl);
+ hba->pctrl = NULL;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_SMP
/**
* ufshcd_pltfrm_suspend - suspend power management function
* @dev: pointer to device handle
@@ -277,12 +320,12 @@ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
/**
* ufshcd_pltfrm_init - probe routine of the driver
* @pdev: pointer to Platform device handle
- * @vops: pointer to variant ops
+ * @var: pointer to variant specific data
*
* Returns 0 on success, non-zero value on failure
*/
int ufshcd_pltfrm_init(struct platform_device *pdev,
- struct ufs_hba_variant_ops *vops)
+ struct ufs_hba_variant *var)
{
struct ufs_hba *hba;
void __iomem *mmio_base;
@@ -310,7 +353,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto out;
}
- hba->vops = vops;
+ hba->var = var;
err = ufshcd_parse_clock_info(hba);
if (err) {
@@ -325,22 +368,37 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto dealloc_host;
}
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
+ err = ufshcd_parse_reset_info(hba);
+ if (err) {
+ dev_err(&pdev->dev, "%s: reset parse failed %d\n",
+ __func__, err);
+ goto dealloc_host;
+ }
+
+ err = ufshcd_parse_pinctrl_info(hba);
+ if (err) {
+ dev_dbg(&pdev->dev, "%s: unable to parse pinctrl data %d\n",
+ __func__, err);
+ /* let's not fail the probe */
+ }
+
+ ufshcd_parse_pm_levels(hba);
+
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err(dev, "Intialization failed\n");
- goto out_disable_rpm;
+ goto dealloc_host;
}
platform_set_drvdata(pdev, hba);
- return 0;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
-out_disable_rpm:
- pm_runtime_disable(&pdev->dev);
- pm_runtime_set_suspended(&pdev->dev);
+ return 0;
dealloc_host:
ufshcd_dealloc_host(hba);
out:
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index df64c4180340..6d8330bd0e7d 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -17,7 +17,7 @@
#include "ufshcd.h"
int ufshcd_pltfrm_init(struct platform_device *pdev,
- struct ufs_hba_variant_ops *vops);
+ struct ufs_hba_variant *var);
void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
#ifdef CONFIG_PM
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b5e2594edb78..b821d48481ad 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,7 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.c
* Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2020, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -38,11 +38,146 @@
*/
#include <linux/async.h>
+#include <scsi/ufs/ioctl.h>
#include <linux/devfreq.h>
+#include <linux/nls.h>
+#include <linux/of.h>
#include <linux/blkdev.h>
+#include <asm/unaligned.h>
#include "ufshcd.h"
-#include "unipro.h"
+#include "ufshci.h"
+#include "ufs_quirks.h"
+#include "ufs-debugfs.h"
+#include "ufs-qcom.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
+static int ufshcd_tag_req_type(struct request *rq)
+{
+ int rq_type = TS_WRITE;
+
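+	/* default to a plain write and refine based on the request flags below */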
+ if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+ rq_type = TS_NOT_SUPPORTED;
+ else if (rq->cmd_flags & REQ_FLUSH)
+ rq_type = TS_FLUSH;
+ else if (rq_data_dir(rq) == READ)
+ rq_type = (rq->cmd_flags & REQ_URGENT) ?
+ TS_URGENT_READ : TS_READ;
+ else if (rq->cmd_flags & REQ_URGENT)
+ rq_type = TS_URGENT_WRITE;
+
+ return rq_type;
+}
+
+static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+ ufsdbg_set_err_state(hba);
+ if (type < UFS_ERR_MAX)
+ hba->ufs_stats.err_stats[type]++;
+}
+
+static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+ struct request *rq =
+ hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
+ u64 **tag_stats = hba->ufs_stats.tag_stats;
+ int rq_type;
+
+ if (!hba->ufs_stats.enabled)
+ return;
+
+ tag_stats[tag][TS_TAG]++;
+ if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+ return;
+
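+	/* account the request against the current queue depth for its type */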
+ WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
+ rq_type = ufshcd_tag_req_type(rq);
+ if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
+ tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
+}
+
+static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd)
+{
+ struct request *rq = cmd ? cmd->request : NULL;
+
+ if (rq && rq->cmd_type & REQ_TYPE_FS)
+ hba->ufs_stats.q_depth--;
+}
+
+static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int rq_type;
+ struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
+ s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
+ lrbp->issue_time_stamp);
+
+ /* update general request statistics */
+ if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
+ hba->ufs_stats.req_stats[TS_TAG].min = delta;
+ hba->ufs_stats.req_stats[TS_TAG].count++;
+ hba->ufs_stats.req_stats[TS_TAG].sum += delta;
+ if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
+ hba->ufs_stats.req_stats[TS_TAG].max = delta;
+ if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
+ hba->ufs_stats.req_stats[TS_TAG].min = delta;
+
+ rq_type = ufshcd_tag_req_type(rq);
+ if (rq_type == TS_NOT_SUPPORTED)
+ return;
+
+ /* update request type specific statistics */
+ if (hba->ufs_stats.req_stats[rq_type].count == 0)
+ hba->ufs_stats.req_stats[rq_type].min = delta;
+ hba->ufs_stats.req_stats[rq_type].count++;
+ hba->ufs_stats.req_stats[rq_type].sum += delta;
+ if (delta > hba->ufs_stats.req_stats[rq_type].max)
+ hba->ufs_stats.req_stats[rq_type].max = delta;
+ if (delta < hba->ufs_stats.req_stats[rq_type].min)
+ hba->ufs_stats.req_stats[rq_type].min = delta;
+}
+
+static void
+ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
+{
+ if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
+ hba->ufs_stats.query_stats_arr[opcode][idn]++;
+}
+
+#else
+static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+}
+
+static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+ struct scsi_cmnd *cmd)
+{
+}
+
+static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+}
+
+static inline
+void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+}
+
+static inline
+void ufshcd_update_query_stats(struct ufs_hba *hba,
+ enum query_opcode opcode, u8 idn)
+{
+}
+#endif
+
+#define PWR_INFO_MASK 0xF
+#define PWR_RX_OFFSET 4
+
+#define UFSHCD_REQ_SENSE_SIZE 18
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
@@ -56,16 +191,22 @@
#define NOP_OUT_TIMEOUT 30 /* msecs */
/* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
/* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
/* Task management command timeout */
#define TM_CMD_TIMEOUT 100 /* msecs */
+/* maximum number of retries for a general UIC command */
+#define UFS_UIC_COMMAND_RETRIES 3
+
/* maximum number of link-startup retries */
#define DME_LINKSTARTUP_RETRIES 3
+/* Maximum retries for Hibern8 enter */
+#define UIC_HIBERN8_ENTER_RETRIES 3
+
/* maximum number of reset retries before giving up */
#define MAX_HOST_RESET_RETRIES 5
@@ -75,6 +216,17 @@
/* Interrupt aggregation default timeout, unit: 40us */
#define INT_AGGR_DEF_TO 0x02
+/* default value of auto suspend is 3 seconds */
+#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
+
+#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE 10
+#define UFSHCD_CLK_GATING_DELAY_MS_PERF 50
+
+/* IOCTL opcode for command - ufs set device read only */
+#define UFS_IOCTL_BLKROSET BLKROSET
+
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION 2
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -85,6 +237,9 @@
_ret; \
})
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+
static u32 ufs_query_desc_max_size[] = {
QUERY_DESC_DEVICE_MAX_SIZE,
QUERY_DESC_CONFIGURAION_MAX_SIZE,
@@ -95,6 +250,7 @@ static u32 ufs_query_desc_max_size[] = {
QUERY_DESC_RFU_MAX_SIZE,
QUERY_DESC_GEOMETRY_MAZ_SIZE,
QUERY_DESC_POWER_MAX_SIZE,
+ QUERY_DESC_HEALTH_MAX_SIZE,
QUERY_DESC_RFU_MAX_SIZE,
};
@@ -120,9 +276,11 @@ enum {
/* UFSHCD UIC layer error flags */
enum {
UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
- UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
- UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
- UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR = (1 << 1), /* Data link layer error */
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR = (1 << 2), /* Data link layer error */
+ UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
+ UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
+ UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
};
/* Interrupt configuration options */
@@ -132,6 +290,8 @@ enum {
UFSHCD_INT_CLEAR,
};
+#define DEFAULT_UFSHCD_DBG_PRINT_EN UFSHCD_DBG_PRINT_ALL
+
#define ufshcd_set_eh_in_progress(h) \
(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
#define ufshcd_eh_in_progress(h) \
@@ -173,49 +333,591 @@ ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
return ufs_pm_lvl_states[lvl].link_state;
}
-static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ enum uic_link_state link_state)
+{
+ enum ufs_pm_level lvl;
+
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+ if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+ (ufs_pm_lvl_states[lvl].link_state == link_state))
+ return lvl;
+ }
+
+ /* if no match found, return the level 0 */
+ return UFS_PM_LVL_0;
+}
+
+static inline bool ufshcd_is_valid_pm_lvl(int lvl)
+{
+ if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
+ return true;
+ else
+ return false;
+}
+
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba);
static void ufshcd_async_scan(void *data, async_cookie_t cookie);
static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba);
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
- bool skip_ref_clk);
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_enable_clocks(struct ufs_hba *hba);
+static int ufshcd_disable_clocks(struct ufs_hba *hba,
+ bool is_gating_context);
+static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
+ bool is_gating_context);
+static void ufshcd_hold_all(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
+static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
-static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode);
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
+static int ufshcd_devfreq_target(struct device *dev,
+ unsigned long *freq, u32 flags);
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat);
+
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
+ .upthreshold = 70,
+ .downdifferential = 65,
+ .simple_scaling = 1,
+};
+
+static void *gov_data = &ufshcd_ondemand_data;
+#else
+static void *gov_data;
+#endif
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+ .polling_ms = 60,
+ .target = ufshcd_devfreq_target,
+ .get_dev_status = ufshcd_devfreq_get_dev_status,
+};
+
+static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
{
- int ret = 0;
+ return tag >= 0 && tag < hba->nutrs;
+}
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
+{
if (!hba->is_irq_enabled) {
- ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
- hba);
- if (ret)
- dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
- __func__, ret);
+ enable_irq(hba->irq);
hba->is_irq_enabled = true;
}
-
- return ret;
}
static inline void ufshcd_disable_irq(struct ufs_hba *hba)
{
if (hba->is_irq_enabled) {
- free_irq(hba->irq, hba);
+ disable_irq(hba->irq);
hba->is_irq_enabled = false;
}
}
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool unblock = false;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->scsi_block_reqs_cnt--;
+ unblock = !hba->scsi_block_reqs_cnt;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (unblock)
+ scsi_unblock_requests(hba->host);
+}
+EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
+
+static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+ if (!hba->scsi_block_reqs_cnt++)
+ scsi_block_requests(hba->host);
+}
+
+void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufshcd_scsi_block_requests(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL(ufshcd_scsi_block_requests);
+
+static int ufshcd_device_reset_ctrl(struct ufs_hba *hba, bool ctrl)
+{
+ int ret = 0;
+
+ if (!hba->pctrl)
+ return 0;
+
+ /* Assert reset if ctrl == true */
+ if (ctrl)
+ ret = pinctrl_select_state(hba->pctrl,
+ pinctrl_lookup_state(hba->pctrl, "dev-reset-assert"));
+ else
+ ret = pinctrl_select_state(hba->pctrl,
+ pinctrl_lookup_state(hba->pctrl, "dev-reset-deassert"));
+
+ if (ret < 0)
+ dev_err(hba->dev, "%s: %s failed with err %d\n",
+ __func__, ctrl ? "Assert" : "Deassert", ret);
+
+ return ret;
+}
+
+static inline int ufshcd_assert_device_reset(struct ufs_hba *hba)
+{
+ return ufshcd_device_reset_ctrl(hba, true);
+}
+
+static inline int ufshcd_deassert_device_reset(struct ufs_hba *hba)
+{
+ return ufshcd_device_reset_ctrl(hba, false);
+}
+
+static int ufshcd_reset_device(struct ufs_hba *hba)
+{
+ int ret;
+
+ /* reset the connected UFS device */
+ ret = ufshcd_assert_device_reset(hba);
+ if (ret)
+ goto out;
+ /*
+ * The reset signal is active low.
+ * The UFS device shall detect more than or equal to 1us of positive
+ * or negative RST_n pulse width.
+	 * To be on the safe side, keep the reset low for at least 10us.
+ */
+ usleep_range(10, 15);
+
+ ret = ufshcd_deassert_device_reset(hba);
+ if (ret)
+ goto out;
+	/* same as assert, wait for at least 10us after deassert */
+ usleep_range(10, 15);
+out:
+ return ret;
+}
+
+/* replace non-printable or non-ASCII characters with spaces */
+static inline void ufshcd_remove_non_printable(char *val)
+{
+ if (!val || !*val)
+ return;
+
+ if (*val < 0x20 || *val > 0x7e)
+ *val = ' ';
+}
+
+#define UFSHCD_MAX_CMD_LOGGING 200
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+ struct ufshcd_cmd_log_entry *entry, u8 opcode)
+{
+ if (trace_ufshcd_command_enabled()) {
+ u32 intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+
+ trace_ufshcd_command(dev_name(hba->dev), entry->str, entry->tag,
+ entry->doorbell, entry->transfer_len, intr,
+ entry->lba, opcode);
+ }
+}
+#else
+static inline void ufshcd_add_command_trace(struct ufs_hba *hba,
+ struct ufshcd_cmd_log_entry *entry, u8 opcode)
+{
+}
+#endif
+
+#ifdef CONFIG_SCSI_UFSHCD_CMD_LOGGING
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+ /* Allocate log entries */
+ if (!hba->cmd_log.entries) {
+ hba->cmd_log.entries = kzalloc(UFSHCD_MAX_CMD_LOGGING *
+ sizeof(struct ufshcd_cmd_log_entry), GFP_KERNEL);
+ if (!hba->cmd_log.entries)
+ return;
+ dev_dbg(hba->dev, "%s: cmd_log.entries initialized\n",
+ __func__);
+ }
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+ sector_t lba, int transfer_len, u8 opcode)
+{
+ struct ufshcd_cmd_log_entry *entry;
+
+ if (!hba->cmd_log.entries)
+ return;
+
+ entry = &hba->cmd_log.entries[hba->cmd_log.pos];
+ entry->lun = lun;
+ entry->str = str;
+ entry->cmd_type = cmd_type;
+ entry->cmd_id = cmd_id;
+ entry->lba = lba;
+ entry->transfer_len = transfer_len;
+ entry->idn = idn;
+ entry->doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ entry->tag = tag;
+ entry->tstamp = ktime_get();
+ entry->outstanding_reqs = hba->outstanding_reqs;
+ entry->seq_num = hba->cmd_log.seq_num;
+ hba->cmd_log.seq_num++;
+ hba->cmd_log.pos =
+ (hba->cmd_log.pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+ ufshcd_add_command_trace(hba, entry, opcode);
+}
+
+static void ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn)
+{
+ __ufshcd_cmd_log(hba, str, cmd_type, tag, cmd_id, idn,
+ 0xff, (sector_t)-1, -1, -1);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+ ufshcd_cmd_log(hba, str, "dme", 0xff, cmd_id, 0xff);
+}
+
+static void ufshcd_print_cmd_log(struct ufs_hba *hba)
+{
+ int i;
+ int pos;
+ struct ufshcd_cmd_log_entry *p;
+
+ if (!hba->cmd_log.entries)
+ return;
+
+ pos = hba->cmd_log.pos;
+ for (i = 0; i < UFSHCD_MAX_CMD_LOGGING; i++) {
+ p = &hba->cmd_log.entries[pos];
+ pos = (pos + 1) % UFSHCD_MAX_CMD_LOGGING;
+
+ if (ktime_to_us(p->tstamp)) {
+ pr_err("%s: %s: seq_no=%u lun=0x%x cmd_id=0x%02x lba=0x%llx txfer_len=%d tag=%u, doorbell=0x%x outstanding=0x%x idn=%d time=%lld us\n",
+ p->cmd_type, p->str, p->seq_num,
+ p->lun, p->cmd_id, (unsigned long long)p->lba,
+ p->transfer_len, p->tag, p->doorbell,
+ p->outstanding_reqs, p->idn,
+ ktime_to_us(p->tstamp));
+ usleep_range(1000, 1100);
+ }
+ }
+}
+#else
+static void ufshcd_cmd_log_init(struct ufs_hba *hba)
+{
+}
+
+static void __ufshcd_cmd_log(struct ufs_hba *hba, char *str, char *cmd_type,
+ unsigned int tag, u8 cmd_id, u8 idn, u8 lun,
+ sector_t lba, int transfer_len, u8 opcode)
+{
+ struct ufshcd_cmd_log_entry entry;
+
+ entry.str = str;
+ entry.lba = lba;
+ entry.cmd_id = cmd_id;
+ entry.transfer_len = transfer_len;
+ entry.doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ entry.tag = tag;
+
+ ufshcd_add_command_trace(hba, &entry, opcode);
+}
+
+static void ufshcd_dme_cmd_log(struct ufs_hba *hba, char *str, u8 cmd_id)
+{
+}
+
+static void ufshcd_print_cmd_log(struct ufs_hba *hba)
+{
+}
+#endif
+
+#ifdef CONFIG_TRACEPOINTS
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+ unsigned int tag, const char *str)
+{
+ struct ufshcd_lrb *lrbp;
+ char *cmd_type = NULL;
+ u8 opcode = 0;
+ u8 cmd_id = 0, idn = 0;
+ sector_t lba = -1;
+ int transfer_len = -1;
+
+ lrbp = &hba->lrb[tag];
+
+ if (lrbp->cmd) { /* data phase exists */
+ opcode = (u8)(*lrbp->cmd->cmnd);
+ if ((opcode == READ_10) || (opcode == WRITE_10)) {
+ /*
+ * Currently we only fully trace read(10) and write(10)
+ * commands
+ */
+ if (lrbp->cmd->request && lrbp->cmd->request->bio)
+ lba =
+ lrbp->cmd->request->bio->bi_iter.bi_sector;
+ transfer_len = be32_to_cpu(
+ lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ }
+ }
+
+ if (lrbp->cmd && (lrbp->command_type == UTP_CMD_TYPE_SCSI)) {
+ cmd_type = "scsi";
+ cmd_id = (u8)(*lrbp->cmd->cmnd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP) {
+ cmd_type = "nop";
+ cmd_id = 0;
+ } else if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY) {
+ cmd_type = "query";
+ cmd_id = hba->dev_cmd.query.request.upiu_req.opcode;
+ idn = hba->dev_cmd.query.request.upiu_req.idn;
+ }
+ }
+
+ __ufshcd_cmd_log(hba, (char *) str, cmd_type, tag, cmd_id, idn,
+ lrbp->lun, lba, transfer_len, opcode);
+}
+#else
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+ unsigned int tag, const char *str)
+{
+}
+#endif
+
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
+ return;
+
+ if (!head || list_empty(head))
+ return;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+ clki->max_freq)
+ dev_err(hba->dev, "clk: %s, rate: %u\n",
+ clki->name, clki->curr_freq);
+ }
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+ struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+ int i;
+
+ if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
+ return;
+
+ for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
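+		/* scan the circular error history buffer */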
+ int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+ if (err_hist->reg[p] == 0)
+ continue;
+ dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
+ err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+ }
+}
+
+static inline void __ufshcd_print_host_regs(struct ufs_hba *hba, bool no_sleep)
+{
+ if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
+ return;
+
+ /*
+ * hex_dump reads its data without the readl macro. This might
+	 * cause inconsistency issues on some platforms, as the printed
+	 * values may come from a cache rather than the most recent value.
+	 * To know whether you are looking at an un-cached version, verify
+	 * that the IORESOURCE_MEM flag is set when xxx_get_resource() is
+	 * invoked during the platform/PCI probe function.
+ */
+ ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+ dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
+ hba->ufs_version, hba->capabilities);
+ dev_err(hba->dev,
+ "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
+ (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+ dev_err(hba->dev,
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ hba->ufs_stats.hibern8_exit_cnt);
+
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+ ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+ ufshcd_print_clk_freqs(hba);
+
+ ufshcd_vops_dbg_register_dump(hba, no_sleep);
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+ __ufshcd_print_host_regs(hba, false);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+ struct ufshcd_lrb *lrbp;
+ int prdt_length;
+ int tag;
+
+ if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
+ return;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+
+ dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
+ tag, ktime_to_us(lrbp->issue_time_stamp));
+ dev_err(hba->dev,
+ "UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
+ tag, (u64)lrbp->utrd_dma_addr);
+ ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+ sizeof(struct utp_transfer_req_desc));
+ dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
+ (u64)lrbp->ucd_req_dma_addr);
+ ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
+ (u64)lrbp->ucd_rsp_dma_addr);
+ ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+ sizeof(struct utp_upiu_rsp));
+ prdt_length =
+ le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
+ dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries phys@0x%llx",
+ tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
+ if (pr_prdt)
+ ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+ sizeof(struct ufshcd_sg_entry) * prdt_length);
+ }
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct utp_task_req_desc *tmrdp;
+ int tag;
+
+ if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
+ return;
+
+ for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+ tmrdp = &hba->utmrdl_base_addr[tag];
+ dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
+ ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+ sizeof(struct request_desc_header));
+ dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
+ tag);
+ ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+ sizeof(struct utp_upiu_req));
+ dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
+ tag);
+ ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+ sizeof(struct utp_task_req_desc));
+ }
+}
+
+static void ufshcd_print_fsm_state(struct ufs_hba *hba)
+{
+ int err = 0, tx_fsm_val = 0, rx_fsm_val = 0;
+
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_TX_FSM_STATE,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+ &tx_fsm_val);
+ dev_err(hba->dev, "%s: TX_FSM_STATE = %u, err = %d\n", __func__,
+ tx_fsm_val, err);
+ err = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(MPHY_RX_FSM_STATE,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &rx_fsm_val);
+ dev_err(hba->dev, "%s: RX_FSM_STATE = %u, err = %d\n", __func__,
+ rx_fsm_val, err);
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+ if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
+ return;
+
+ dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+ dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+ hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
+ dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
+ hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
+ dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+ hba->pm_op_in_progress, hba->is_sys_suspended);
+ dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+ hba->auto_bkops_enabled, hba->host->host_self_blocked);
+ dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
+ hba->clk_gating.state, hba->hibern8_on_idle.state);
+ dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+ hba->eh_flags, hba->req_abort_count);
+ dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+ hba->capabilities, hba->caps);
+ dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+ hba->dev_quirks);
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+ char *names[] = {
+ "INVALID MODE",
+ "FAST MODE",
+ "SLOW_MODE",
+ "INVALID MODE",
+ "FASTAUTO_MODE",
+ "SLOWAUTO_MODE",
+ "INVALID MODE",
+ };
+
+ if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
+ return;
+
+ dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+ __func__,
+ hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+ hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+ names[hba->pwr_info.pwr_rx],
+ names[hba->pwr_info.pwr_tx],
+ hba->pwr_info.hs_rate);
+}
+
/*
* ufshcd_wait_for_register - wait for register value to change
* @hba - per-adapter interface
@@ -224,11 +926,12 @@ static inline void ufshcd_disable_irq(struct ufs_hba *hba)
* @val - wait condition
* @interval_us - polling interval in microsecs
* @timeout_ms - timeout in millisecs
- *
+ * @can_sleep - perform sleep or just spin
* Returns -ETIMEDOUT on error, zero on success
*/
-static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
- u32 val, unsigned long interval_us, unsigned long timeout_ms)
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms, bool can_sleep)
{
int err = 0;
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
@@ -237,9 +940,10 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
val = val & mask;
while ((ufshcd_readl(hba, reg) & mask) != val) {
- /* wakeup within 50us of expiry */
- usleep_range(interval_us, interval_us + 50);
-
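+		/* callers in atomic context pass can_sleep=false and busy-wait instead */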
+ if (can_sleep)
+ usleep_range(interval_us, interval_us + 50);
+ else
+ udelay(interval_us);
if (time_after(jiffies, timeout)) {
if ((ufshcd_readl(hba, reg) & mask) != val)
err = -ETIMEDOUT;
@@ -258,10 +962,27 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
*/
static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
{
- if (hba->ufs_version == UFSHCI_VERSION_10)
- return INTERRUPT_MASK_ALL_VER_10;
- else
- return INTERRUPT_MASK_ALL_VER_11;
+ u32 intr_mask = 0;
+
+ switch (hba->ufs_version) {
+ case UFSHCI_VERSION_10:
+ intr_mask = INTERRUPT_MASK_ALL_VER_10;
+ break;
+ /* allow fall through */
+ case UFSHCI_VERSION_11:
+ case UFSHCI_VERSION_20:
+ intr_mask = INTERRUPT_MASK_ALL_VER_11;
+ break;
+ /* allow fall through */
+ case UFSHCI_VERSION_21:
+ default:
+ intr_mask = INTERRUPT_MASK_ALL_VER_21;
+ }
+
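+	/* mask out the crypto engine fatal error interrupt when inline crypto is not supported */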
+ if (!ufshcd_is_crypto_supported(hba))
+ intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
+
+ return intr_mask;
}
/**
@@ -361,6 +1082,16 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
}
/**
+ * ufshcd_outstanding_req_clear - Clear a bit in outstanding request field
+ * @hba: per adapter instance
+ * @tag: position of the bit to be cleared
+ */
+static inline void ufshcd_outstanding_req_clear(struct ufs_hba *hba, int tag)
+{
+ __clear_bit(tag, &hba->outstanding_reqs);
+}
+
+/**
* ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
* @reg: Register value of host controller status
*
@@ -375,11 +1106,9 @@ static inline int ufshcd_get_lists_status(u32 reg)
* 1 UTRLRDY
* 2 UTMRLRDY
* 3 UCRDY
- * 4 HEI
- * 5 DEI
- * 6-7 reserved
+ * 4-7 reserved
*/
- return (((reg) & (0xFF)) >> 1) ^ (0x07);
+ return ((reg & 0xFF) >> 1) ^ 0x07;
}
/**
@@ -515,7 +1244,11 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
*/
static inline void ufshcd_hba_start(struct ufs_hba *hba)
{
- ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+ u32 val = CONTROLLER_ENABLE;
+
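+	/* bring up the inline crypto engine together with the host controller when supported */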
+ if (ufshcd_is_crypto_supported(hba))
+ val |= CRYPTO_GENERAL_ENABLE;
+ ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
}
/**
@@ -529,6 +1262,153 @@ static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
}
+static const char *ufschd_uic_link_state_to_string(
+ enum uic_link_state state)
+{
+ switch (state) {
+ case UIC_LINK_OFF_STATE: return "OFF";
+ case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
+ case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
+ default: return "UNKNOWN";
+ }
+}
+
+static const char *ufschd_ufs_dev_pwr_mode_to_string(
+ enum ufs_dev_pwr_mode state)
+{
+ switch (state) {
+ case UFS_ACTIVE_PWR_MODE: return "ACTIVE";
+ case UFS_SLEEP_PWR_MODE: return "SLEEP";
+ case UFS_POWERDOWN_PWR_MODE: return "POWERDOWN";
+ default: return "UNKNOWN";
+ }
+}
+
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
+{
+	/* HCI versions 1.0 and 1.1 support UniPro 1.41 */
+ if ((hba->ufs_version == UFSHCI_VERSION_10) ||
+ (hba->ufs_version == UFSHCI_VERSION_11))
+ return UFS_UNIPRO_VER_1_41;
+ else
+ return UFS_UNIPRO_VER_1_6;
+}
+EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
+
+static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
+{
+ /*
+ * If both host and device support UniPro ver1.6 or later, PA layer
+ * parameters tuning happens during link startup itself.
+ *
+ * We can manually tune PA layer parameters if either host or device
+ * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
+ * logic simple, we will only do manual tuning if local unipro version
+ * doesn't support ver1.6 or later.
+ */
+ if (ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6)
+ return true;
+ else
+ return false;
+}
+
+/**
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
+ * @hba: per adapter instance
+ * @scale_up: If true, set the max possible frequency, otherwise set the low frequency
+ *
+ * Returns 0 if successful
+ * Returns < 0 for any other errors
+ */
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ struct ufs_clk_info *clki;
+ struct list_head *head = &hba->clk_list_head;
+
+ if (!head || list_empty(head))
+ goto out;
+
+ list_for_each_entry(clki, head, list) {
+ if (!IS_ERR_OR_NULL(clki->clk)) {
+ if (scale_up && clki->max_freq) {
+ if (clki->curr_freq == clki->max_freq)
+ continue;
+
+ ret = clk_set_rate(clki->clk, clki->max_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->max_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled up", clki->name,
+ clki->curr_freq,
+ clki->max_freq);
+ clki->curr_freq = clki->max_freq;
+
+ } else if (!scale_up && clki->min_freq) {
+ if (clki->curr_freq == clki->min_freq)
+ continue;
+
+ ret = clk_set_rate(clki->clk, clki->min_freq);
+ if (ret) {
+ dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+ __func__, clki->name,
+ clki->min_freq, ret);
+ break;
+ }
+ trace_ufshcd_clk_scaling(dev_name(hba->dev),
+ "scaled down", clki->name,
+ clki->curr_freq,
+ clki->min_freq);
+ clki->curr_freq = clki->min_freq;
+ }
+ }
+ dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+ clki->name, clk_get_rate(clki->clk));
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns 0 if successful
+ * Returns < 0 for any other errors
+ */
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_set_clk_freq(hba, scale_up);
+ if (ret)
+ return ret;
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ if (ret) {
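+		/* the post-change vendor hook failed, restore the previous clock frequencies */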
+ ufshcd_set_clk_freq(hba, !scale_up);
+ return ret;
+ }
+
+ return ret;
+}
+
+static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
+{
+ hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
+ cancel_work_sync(&hba->clk_gating.gate_work);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -536,7 +1416,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ ufshcd_cancel_gate_work(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
@@ -545,7 +1425,8 @@ static void ufshcd_ungate_work(struct work_struct *work)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_setup_clocks(hba, true);
+ ufshcd_hba_vreg_set_hpm(hba);
+ ufshcd_enable_clocks(hba);
/* Exit from hibern8 */
if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -562,9 +1443,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
hba->clk_gating.is_suspended = false;
}
unblock_reqs:
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
- scsi_unblock_requests(hba->host);
+ ufshcd_scsi_unblock_requests(hba);
}
/**
@@ -584,6 +1463,11 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
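+	/* skip the clock gating state machine while error handling is in progress */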
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+ }
+
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
@@ -611,19 +1495,28 @@ start:
}
break;
case REQ_CLKS_OFF:
- if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /*
+ * If the timer was active but the callback was not running
+ * we have nothing to do, just change state and return.
+ */
+ if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
break;
}
/*
- * If we here, it means gating work is either done or
+ * If we are here, it means gating work is either done or
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
case CLKS_OFF:
- scsi_block_requests(hba->host);
+ __ufshcd_scsi_block_requests(hba);
hba->clk_gating.state = REQ_CLKS_ON;
- schedule_work(&hba->clk_gating.ungate_work);
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.ungate_work);
/*
* fall through to check if we should wait for this
* work to be done or not.
@@ -647,6 +1540,7 @@ start:
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
+ hba->ufs_stats.clk_hold.ts = ktime_get();
return rc;
}
EXPORT_SYMBOL_GPL(ufshcd_hold);
@@ -654,12 +1548,21 @@ EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_gating.gate_work.work);
+ clk_gating.gate_work);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.is_suspended) {
+ /*
+	 * In case you are here to cancel this work, the gating state
+	 * would be marked as REQ_CLKS_ON. In that case save time by
+	 * skipping the gating work and exiting after changing the clock
+ * state to CLKS_ON.
+ */
+ if (hba->clk_gating.is_suspended ||
+ (hba->clk_gating.state != REQ_CLKS_OFF)) {
hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
goto rel_lock;
}
@@ -671,25 +1574,38 @@ static void ufshcd_gate_work(struct work_struct *work)
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
+ hba->hibern8_on_idle.is_enabled)
+ /*
+ * Hibern8 enter work (on Idle) needs clocks to be ON hence
+ * make sure that it is flushed before turning off the clocks.
+ */
+ flush_delayed_work(&hba->hibern8_on_idle.enter_work);
+
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
if (ufshcd_uic_hibern8_enter(hba)) {
hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
goto out;
}
ufshcd_set_link_hibern8(hba);
}
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
-
- if (!ufshcd_is_link_active(hba))
- ufshcd_setup_clocks(hba, false);
+ /*
+ * If auto hibern8 is supported then the link will already
+ * be in hibern8 state and the ref clock can be gated.
+ */
+ if ((ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
+ ufshcd_disable_clocks(hba, true);
else
/* If link is active, device ref_clk can't be switched off */
- __ufshcd_setup_clocks(hba, false, true);
+ ufshcd_disable_clocks_skip_ref_clk(hba, true);
+
+ /* Put the host controller in low power mode if possible */
+ ufshcd_hba_vreg_set_lpm(hba);
/*
* In case you are here to cancel this work the gating state
@@ -701,9 +1617,11 @@ static void ufshcd_gate_work(struct work_struct *work)
* new requests arriving before the current cancel work is done.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->clk_gating.state == REQ_CLKS_OFF)
+ if (hba->clk_gating.state == REQ_CLKS_OFF) {
hba->clk_gating.state = CLKS_OFF;
-
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
rel_lock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
@@ -711,7 +1629,7 @@ out:
}
/* host lock must be held before calling this variant */
-static void __ufshcd_release(struct ufs_hba *hba)
+static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
{
if (!ufshcd_is_clkgating_allowed(hba))
return;
@@ -721,20 +1639,25 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
|| hba->lrb_in_use || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done)
+ || hba->active_uic_cmd || hba->uic_async_done
+ || ufshcd_eh_in_progress(hba) || no_sched)
return;
hba->clk_gating.state = REQ_CLKS_OFF;
- schedule_delayed_work(&hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
+ trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ hba->ufs_stats.clk_rel.ts = ktime_get();
+
+ hrtimer_start(&hba->clk_gating.gate_hrtimer,
+ ms_to_ktime(hba->clk_gating.delay_ms),
+ HRTIMER_MODE_REL);
}
-void ufshcd_release(struct ufs_hba *hba)
+void ufshcd_release(struct ufs_hba *hba, bool no_sched)
{
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
- __ufshcd_release(hba);
+ __ufshcd_release(hba, no_sched);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -762,15 +1685,177 @@ static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
return count;
}
+static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n",
+ hba->clk_gating.delay_ms_pwr_save);
+}
+
+static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ hba->clk_gating.delay_ms_pwr_save = value;
+ if (ufshcd_is_clkscaling_supported(hba) &&
+ !hba->clk_scaling.is_scaled_up)
+ hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
+}
+
+static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ hba->clk_gating.delay_ms_perf = value;
+ if (ufshcd_is_clkscaling_supported(hba) &&
+ hba->clk_scaling.is_scaled_up)
+ hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
+}
+
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 value;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_gating.is_enabled)
+ goto out;
+
+ if (value) {
+ ufshcd_release(hba, false);
+ } else {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_gating.active_reqs++;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+ hba->clk_gating.is_enabled = value;
+out:
+ return count;
+}
+
+static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
+ struct hrtimer *timer)
+{
+ struct ufs_hba *hba = container_of(timer, struct ufs_hba,
+ clk_gating.gate_hrtimer);
+
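+	/* clock gating needs process context, defer the actual work to the gating workqueue */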
+ queue_work(hba->clk_gating.clk_gating_workq,
+ &hba->clk_gating.gate_work);
+
+ return HRTIMER_NORESTART;
+}
+
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
+ struct ufs_clk_gating *gating = &hba->clk_gating;
+ char wq_name[sizeof("ufs_clk_gating_00")];
+
+ hba->clk_gating.state = CLKS_ON;
+
if (!ufshcd_is_clkgating_allowed(hba))
return;
- hba->clk_gating.delay_ms = 150;
- INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
- INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+ /*
+ * Disable hibern8 during clk gating if
+ * auto hibern8 is supported
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+
+ INIT_WORK(&gating->gate_work, ufshcd_gate_work);
+ INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
+ /*
+ * Clock gating work must be executed only after auto hibern8
+ * timeout has expired in the hardware or after aggressive
+	 * hibern8 on idle software timeout. Jiffy based low resolution
+	 * delayed work is not reliable enough to guarantee this, hence
+	 * use a high resolution timer to make sure the gate work is
+	 * scheduled strictly after the hibern8 timeout.
+ *
+ * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
+ */
+ hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
+
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
+ hba->host->host_no);
+ hba->clk_gating.clk_gating_workq =
+ create_singlethread_workqueue(wq_name);
+
+ gating->is_enabled = true;
+
+ gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
+ gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
+
+ /* start with performance mode */
+ gating->delay_ms = gating->delay_ms_perf;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ goto scaling_not_supported;
+
+ gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
+ gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
+ sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
+ gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
+ gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
+
+ gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
+ gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
+ sysfs_attr_init(&gating->delay_perf_attr.attr);
+ gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
+ gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &gating->delay_perf_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
+
+ goto add_clkgate_enable;
+scaling_not_supported:
hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
@@ -778,23 +1863,456 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+add_clkgate_enable:
+ gating->enable_attr.show = ufshcd_clkgate_enable_show;
+ gating->enable_attr.store = ufshcd_clkgate_enable_store;
+ sysfs_attr_init(&gating->enable_attr.attr);
+ gating->enable_attr.attr.name = "clkgate_enable";
+ gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &gating->enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
if (!ufshcd_is_clkgating_allowed(hba))
return;
- device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ device_remove_file(hba->dev,
+ &hba->clk_gating.delay_pwr_save_attr);
+ device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
+ } else {
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ }
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ ufshcd_cancel_gate_work(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
+}
+
+static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
+{
+ ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
+ AUTO_HIBERN8_IDLE_TIMER_MASK,
+ AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
+ REG_AUTO_HIBERN8_IDLE_TIMER);
+ /* Make sure the timer gets applied before further operations */
+ mb();
+}
+
+/**
+ * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
+ *
+ * @hba: per adapter instance
+ * @async: This indicates whether caller wants to exit hibern8 asynchronously.
+ *
+ * Exit from hibern8 mode and set the link as active.
+ *
+ * Return 0 on success, non-zero on failure.
+ */
+static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (!ufshcd_is_hibern8_on_idle_allowed(hba))
+ goto out;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->hibern8_on_idle.active_reqs++;
+
+ if (ufshcd_eh_in_progress(hba)) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return 0;
+ }
+
+start:
+ switch (hba->hibern8_on_idle.state) {
+ case HIBERN8_EXITED:
+ break;
+ case REQ_HIBERN8_ENTER:
+ if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
+ hba->hibern8_on_idle.state = HIBERN8_EXITED;
+ trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+ hba->hibern8_on_idle.state);
+ break;
+ }
+ /*
+		 * If we are here, it means Hibern8 enter work is either done or
+ * currently running. Hence, fall through to cancel hibern8
+ * work and exit hibern8.
+ */
+ case HIBERN8_ENTERED:
+ __ufshcd_scsi_block_requests(hba);
+ hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
+ trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+ hba->hibern8_on_idle.state);
+ schedule_work(&hba->hibern8_on_idle.exit_work);
+ /*
+ * fall through to check if we should wait for this
+ * work to be done or not.
+ */
+ case REQ_HIBERN8_EXIT:
+ if (async) {
+ rc = -EAGAIN;
+ hba->hibern8_on_idle.active_reqs--;
+ break;
+ } else {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->hibern8_on_idle.exit_work);
+ /* Make sure state is HIBERN8_EXITED before returning */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ }
+ default:
+ dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
+ __func__, hba->hibern8_on_idle.state);
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return rc;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
+{
+ unsigned long delay_in_jiffies;
+
+ if (!ufshcd_is_hibern8_on_idle_allowed(hba))
+ return;
+
+ hba->hibern8_on_idle.active_reqs--;
+ BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
+
+ if (hba->hibern8_on_idle.active_reqs
+ || hba->hibern8_on_idle.is_suspended
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done
+ || ufshcd_eh_in_progress(hba) || no_sched)
+ return;
+
+ hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
+ trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+ hba->hibern8_on_idle.state);
+ /*
+	 * Scheduling the delayed work after 1 jiffy can make the work run
+	 * any time from 0 ms to 1000/HZ ms, which is not desirable for the
+	 * hibern8 enter work as it may impact performance if it gets
+	 * scheduled almost immediately. Hence make sure that the hibern8
+	 * enter work gets scheduled at least 2 jiffies out (any time
+	 * between 1000/HZ ms and 2000/HZ ms).
+ */
+ delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
+ if (delay_in_jiffies == 1)
+ delay_in_jiffies++;
+
+ schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
+ delay_in_jiffies);
+}
+
+static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufshcd_hibern8_release(hba, no_sched);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_hibern8_enter_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ hibern8_on_idle.enter_work.work);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->hibern8_on_idle.is_suspended) {
+ hba->hibern8_on_idle.state = HIBERN8_EXITED;
+ trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+ hba->hibern8_on_idle.state);
+ goto rel_lock;
+ }
+
+ if (hba->hibern8_on_idle.active_reqs
+ || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+ || hba->lrb_in_use || hba->outstanding_tasks
+ || hba->active_uic_cmd || hba->uic_async_done)
+ goto rel_lock;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
+ /* Enter failed */
+ hba->hibern8_on_idle.state = HIBERN8_EXITED;
+ trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+ hba->hibern8_on_idle.state);
+ goto out;
+ }
+ ufshcd_set_link_hibern8(hba);
+
+ /*
+ * If this work is about to be cancelled, hibern8_on_idle.state will
+ * already be marked as REQ_HIBERN8_EXIT. In that case keep the state
+ * as REQ_HIBERN8_EXIT, which correctly implies that we are in hibern8
+ * and a request to exit from it is pending. This keeps the state
+ * machine intact and ultimately prevents the cancel work from being
+ * run multiple times when new requests arrive before the current
+ * cancel work is done.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
+ hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+ trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+ hba->hibern8_on_idle.state);
+ }
+rel_lock:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+ return;
+}
+
+static void __ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba,
+ unsigned long delay_ms)
+{
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold_all(hba);
+ ufshcd_scsi_block_requests(hba);
+ down_write(&hba->lock);
+ /* wait for all the outstanding requests to finish */
+ ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ ufshcd_set_auto_hibern8_timer(hba, delay_ms);
+ up_write(&hba->lock);
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release_all(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
+static void ufshcd_hibern8_exit_work(struct work_struct *work)
+{
+ int ret;
+ unsigned long flags;
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ hibern8_on_idle.exit_work);
+
+ cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
+ || ufshcd_is_link_active(hba)) {
+ hba->hibern8_on_idle.state = HIBERN8_EXITED;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto unblock_reqs;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* Exit from hibern8 */
+ if (ufshcd_is_link_hibern8(hba)) {
+ hba->ufs_stats.clk_hold.ctx = H8_EXIT_WORK;
+ ufshcd_hold(hba, false);
+ ret = ufshcd_uic_hibern8_exit(hba);
+ hba->ufs_stats.clk_rel.ctx = H8_EXIT_WORK;
+ ufshcd_release(hba, false);
+ if (!ret) {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_set_link_active(hba);
+ hba->hibern8_on_idle.state = HIBERN8_EXITED;
+ trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+ hba->hibern8_on_idle.state);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+ }
+unblock_reqs:
+ ufshcd_scsi_unblock_requests(hba);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+ bool change = true;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->hibern8_on_idle.delay_ms == value)
+ change = false;
+
+ if (value >= hba->clk_gating.delay_ms_pwr_save ||
+ value >= hba->clk_gating.delay_ms_perf) {
+ dev_err(hba->dev, "hibern8_on_idle_delay (%lu) can not be >= to clkgate_delay_ms_pwr_save (%lu) and clkgate_delay_ms_perf (%lu)\n",
+ value, hba->clk_gating.delay_ms_pwr_save,
+ hba->clk_gating.delay_ms_perf);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return -EINVAL;
+ }
+
+ hba->hibern8_on_idle.delay_ms = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /* Update auto hibern8 timer value if supported */
+ if (change && ufshcd_is_auto_hibern8_supported(hba) &&
+ hba->hibern8_on_idle.is_enabled)
+ __ufshcd_set_auto_hibern8_timer(hba,
+ hba->hibern8_on_idle.delay_ms);
+
+ return count;
+}
+
+static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ hba->hibern8_on_idle.is_enabled);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags;
+ u32 value;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->hibern8_on_idle.is_enabled)
+ goto out;
+
+ /* Update auto hibern8 timer value if supported */
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ __ufshcd_set_auto_hibern8_timer(hba,
+ value ? hba->hibern8_on_idle.delay_ms : value);
+ goto update;
+ }
+
+ if (value) {
+ /*
+ * As clock gating work would wait for the hibern8 enter work
+ * to finish, clocks would remain on during hibern8 enter work.
+ */
+ ufshcd_hold(hba, false);
+ ufshcd_release_all(hba);
+ } else {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->hibern8_on_idle.active_reqs++;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ }
+
+update:
+ hba->hibern8_on_idle.is_enabled = value;
+out:
+ return count;
+}
+
+static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
+{
+ /* initialize the state variable here */
+ hba->hibern8_on_idle.state = HIBERN8_EXITED;
+
+ if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
+ !ufshcd_is_auto_hibern8_supported(hba))
+ return;
+
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ hba->hibern8_on_idle.delay_ms = 1;
+ hba->hibern8_on_idle.state = AUTO_HIBERN8;
+ /*
+ * Disable SW hibern8 enter on idle in case
+ * auto hibern8 is supported
+ */
+ hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
+ } else {
+ hba->hibern8_on_idle.delay_ms = 10;
+ INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
+ ufshcd_hibern8_enter_work);
+ INIT_WORK(&hba->hibern8_on_idle.exit_work,
+ ufshcd_hibern8_exit_work);
+ }
+
+ hba->hibern8_on_idle.is_enabled = true;
+
+ hba->hibern8_on_idle.delay_attr.show =
+ ufshcd_hibern8_on_idle_delay_show;
+ hba->hibern8_on_idle.delay_attr.store =
+ ufshcd_hibern8_on_idle_delay_store;
+ sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
+ hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
+ hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
+
+ hba->hibern8_on_idle.enable_attr.show =
+ ufshcd_hibern8_on_idle_enable_show;
+ hba->hibern8_on_idle.enable_attr.store =
+ ufshcd_hibern8_on_idle_enable_store;
+ sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
+ hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
+ hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
+}
+
+static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
+{
+ if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
+ !ufshcd_is_auto_hibern8_supported(hba))
+ return;
+ device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
+ device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
+}
+
+static void ufshcd_hold_all(struct ufs_hba *hba)
+{
+ ufshcd_hold(hba, false);
+ ufshcd_hibern8_hold(hba, false);
+}
+
+static void ufshcd_release_all(struct ufs_hba *hba)
+{
+ ufshcd_hibern8_release(hba, false);
+ ufshcd_release(hba, false);
}
/* Must be called with host lock acquired */
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
- if (!ufshcd_is_clkscaling_enabled(hba))
+ bool queue_resume_work = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
return;
+ if (!hba->clk_scaling.active_reqs++)
+ queue_resume_work = true;
+
+ if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+ return;
+
+ if (queue_resume_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.resume_work);
+
+ if (!hba->clk_scaling.window_start_t) {
+ hba->clk_scaling.window_start_t = jiffies;
+ hba->clk_scaling.tot_busy_t = 0;
+ hba->clk_scaling.is_busy_started = false;
+ }
+
if (!hba->clk_scaling.is_busy_started) {
hba->clk_scaling.busy_start_t = ktime_get();
hba->clk_scaling.is_busy_started = true;
@@ -805,7 +2323,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
{
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
- if (!ufshcd_is_clkscaling_enabled(hba))
+ if (!ufshcd_is_clkscaling_supported(hba))
return;
if (!hba->outstanding_reqs && scaling->is_busy_started) {
@@ -815,17 +2333,27 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
scaling->is_busy_started = false;
}
}
+
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
* @task_tag: Task tag of the command
*/
static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
{
+ int ret = 0;
+
+ hba->lrb[task_tag].issue_time_stamp = ktime_get();
+ hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+ ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
+ ufshcd_update_tag_stats(hba, task_tag);
+ return ret;
}
/**
@@ -844,7 +2372,7 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
memcpy(lrbp->sense_buffer,
lrbp->ucd_rsp_ptr->sr.sense_data,
- min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE));
+ min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
}
}
@@ -899,6 +2427,9 @@ static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+
+ /* disable auto hibern8 */
+ hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
}
/**
@@ -941,6 +2472,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
hba->active_uic_cmd = uic_cmd;
+ ufshcd_dme_cmd_log(hba, "send", hba->active_uic_cmd->command);
/* Write Args */
ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
@@ -971,6 +2503,11 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
else
ret = -ETIMEDOUT;
+ if (ret)
+ ufsdbg_set_err_state(hba);
+
+ ufshcd_dme_cmd_log(hba, "cmp1", hba->active_uic_cmd->command);
+
spin_lock_irqsave(hba->host->host_lock, flags);
hba->active_uic_cmd = NULL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -982,13 +2519,15 @@ ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
* __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
+ * @completion: initialize the completion only if this is set to true
*
 * Identical to ufshcd_send_uic_cmd() except for the mutex handling. Must be called
* with mutex held and host_lock locked.
* Returns 0 only if success.
*/
static int
-__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
+__ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd,
+ bool completion)
{
if (!ufshcd_ready_for_uic_cmd(hba)) {
dev_err(hba->dev,
@@ -996,7 +2535,8 @@ __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
return -EIO;
}
- init_completion(&uic_cmd->done);
+ if (completion)
+ init_completion(&uic_cmd->done);
ufshcd_dispatch_uic_cmd(hba, uic_cmd);
@@ -1016,19 +2556,25 @@ ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
int ret;
unsigned long flags;
- ufshcd_hold(hba, false);
+ hba->ufs_stats.clk_hold.ctx = UIC_CMD_SEND;
+ ufshcd_hold_all(hba);
mutex_lock(&hba->uic_cmd_mutex);
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+ ret = __ufshcd_send_uic_cmd(hba, uic_cmd, true);
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!ret)
ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+ ufshcd_save_tstamp_of_last_dme_cmd(hba);
mutex_unlock(&hba->uic_cmd_mutex);
+ ufshcd_release_all(hba);
+ hba->ufs_stats.clk_rel.ctx = UIC_CMD_SEND;
+
+ ufsdbg_error_inject_dispatcher(hba,
+ ERR_INJECT_UIC, 0, &ret);
- ufshcd_release(hba);
return ret;
}
@@ -1064,6 +2610,7 @@ static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
cpu_to_le32(lower_32_bits(sg->dma_address));
prd_table[i].upper_addr =
cpu_to_le32(upper_32_bits(sg->dma_address));
+ prd_table[i].reserved = 0;
}
} else {
lrbp->utr_descriptor_ptr->prd_table_length = 0;
@@ -1114,15 +2661,52 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
}
+static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+ u8 cc_index = 0;
+ bool enable = false;
+ u64 dun = 0;
+ int ret;
+
+ /*
+ * Call vendor specific code to get the crypto info for this request:
+ * whether crypto is enabled, the crypto configuration index and the DUN.
+ * If crypto is not enabled for this request, don't bother setting the
+ * other fields.
+ */
+ ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
+ if (ret) {
+ if (ret != -EAGAIN) {
+ dev_err(hba->dev,
+ "%s: failed to setup crypto request (%d)\n",
+ __func__, ret);
+ }
+
+ return ret;
+ }
+
+ if (!enable)
+ goto out;
+
+ req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
+ req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
+ req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
+out:
+ return 0;
+}
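
For reference, the DUN packing above is a plain 64-bit split across the two reserved header words. A trivial user-space illustration follows (the example value is invented and nothing here is part of the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dun = 0x1122334455667788ULL;	/* example value only */
	uint32_t dword_1 = (uint32_t)(dun & 0xFFFFFFFFu);	   /* low 32 bits  */
	uint32_t dword_3 = (uint32_t)((dun >> 32) & 0xFFFFFFFFu); /* high 32 bits */

	printf("dun=0x%016llx -> dword_1=0x%08x dword_3=0x%08x\n",
	       (unsigned long long)dun, dword_1, dword_3);
	return 0;
}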
+
/**
* ufshcd_prepare_req_desc_hdr() - Fills the requests header
* descriptor according to request
+ * @hba: per adapter instance
* @lrbp: pointer to local reference block
* @upiu_flags: flags required in the header
* @cmd_dir: requests data direction
*/
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
- u32 *upiu_flags, enum dma_data_direction cmd_dir)
+static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u32 *upiu_flags,
+ enum dma_data_direction cmd_dir)
{
struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
u32 data_direction;
@@ -1146,7 +2730,8 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
/* Transfer request descriptor header fields */
req_desc->header.dword_0 = cpu_to_le32(dword_0);
-
+ /* dword_1 is reserved, hence it is set to 0 */
+ req_desc->header.dword_1 = 0;
/*
* assigning invalid value for command status. Controller
* updates OCS on command completion, with the command
@@ -1154,6 +2739,15 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
*/
req_desc->header.dword_2 =
cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+ /* dword_3 is reserved, hence it is set to 0 */
+ req_desc->header.dword_3 = 0;
+
+ req_desc->prd_table_length = 0;
+
+ if (ufshcd_is_crypto_supported(hba))
+ return ufshcd_prepare_crypto_utrd(hba, lrbp);
+
+ return 0;
}
/**
@@ -1166,6 +2760,7 @@ static
void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
{
struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ unsigned short cdb_len;
/* command descriptor fields */
ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
@@ -1180,8 +2775,12 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
ucd_req_ptr->sc.exp_data_transfer_len =
cpu_to_be32(lrbp->cmd->sdb.length);
- memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
- (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
+ cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
+ if (cdb_len < MAX_CDB_SIZE)
+ memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
+ (MAX_CDB_SIZE - cdb_len));
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -1218,6 +2817,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
memcpy(descp, query->descriptor, len);
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
@@ -1230,6 +2830,11 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
ucd_req_ptr->header.dword_0 =
UPIU_HEADER_DWORD(
UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+ /* clear rest of the fields of basic header */
+ ucd_req_ptr->header.dword_1 = 0;
+ ucd_req_ptr->header.dword_2 = 0;
+
+ memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
}
/**
@@ -1245,15 +2850,16 @@ static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
switch (lrbp->command_type) {
case UTP_CMD_TYPE_SCSI:
if (likely(lrbp->cmd)) {
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
- lrbp->cmd->sc_data_direction);
+ ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
+ &upiu_flags, lrbp->cmd->sc_data_direction);
ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
ret = -EINVAL;
}
break;
case UTP_CMD_TYPE_DEV_MANAGE:
- ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
+ DMA_NONE);
if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
ufshcd_prepare_utp_query_req_upiu(
hba, lrbp, upiu_flags);
@@ -1305,6 +2911,61 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
}
/**
+ * ufshcd_get_write_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * The lock is predominantly held by the shutdown context, thus ensuring
+ * that no requests from any other context may sneak through.
+ */
+static inline void ufshcd_get_write_lock(struct ufs_hba *hba)
+{
+ down_write(&hba->lock);
+}
+
+/**
+ * ufshcd_get_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns 1 if the lock was acquired, 0 if the request is allowed through
+ * without taking the lock, and < 0 on contention.
+ *
+ * Once shutdown has been initiated, only requests directed to the well
+ * known device LUN are allowed through. The existing synchronization
+ * between scaling and request issue is maintained as is, and this
+ * restructuring extends it to synchronize shutdown with them as well.
+ */
+static int ufshcd_get_read_lock(struct ufs_hba *hba, u64 lun)
+{
+ int err = 0;
+
+ err = down_read_trylock(&hba->lock);
+ if (err > 0)
+ goto out;
+ /* let requests for well known device lun to go through */
+ if (ufshcd_scsi_to_upiu_lun(lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+ return 0;
+ else if (!ufshcd_is_shutdown_ongoing(hba))
+ return -EAGAIN;
+ else
+ return -EPERM;
+
+out:
+ return err;
+}
+
+/**
+ * ufshcd_put_read_lock - synchronize between shutdown, scaling &
+ * arrival of requests
+ * @hba: ufs host
+ *
+ * Returns none
+ */
+static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
+{
+ up_read(&hba->lock);
+}
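
The three lock helpers above coordinate request issue, clock/gear scaling and shutdown through hba->lock. Below is a small user-space analogue of that pattern, with pthread_rwlock standing in for the kernel rw_semaphore; the example_* names are invented for illustration and nothing here is part of the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t hba_lock = PTHREAD_RWLOCK_INITIALIZER;

/* request path: mirrors ufshcd_get_read_lock()'s trylock-or-busy logic */
static int example_issue_request(bool shutdown_ongoing, bool is_wlun)
{
	if (pthread_rwlock_tryrdlock(&hba_lock) == 0) {
		/* ... issue the command ... */
		pthread_rwlock_unlock(&hba_lock);
		return 0;
	}
	if (is_wlun)			/* well known device LUN is let through */
		return 0;
	return shutdown_ongoing ? -1 : 1; /* fail (-EPERM) vs. requeue (-EAGAIN) */
}

/* shutdown/scaling path: mirrors ufshcd_get_write_lock() */
static void example_quiesce(void)
{
	pthread_rwlock_wrlock(&hba_lock);
	/* from here on, only WLUN requests can sneak through */
}

int main(void)
{
	printf("before quiesce: %d\n", example_issue_request(false, false));
	example_quiesce();
	printf("after quiesce:  %d\n", example_issue_request(true, false));
	printf("WLUN request:   %d\n", example_issue_request(true, true));
	return 0;
}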
+
+/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @cmd: command from SCSI Midlayer
* @done: call back function
@@ -1318,12 +2979,42 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
unsigned long flags;
int tag;
int err = 0;
+ bool has_read_lock = false;
hba = shost_priv(host);
+ if (!cmd || !cmd->request || !hba)
+ return -EINVAL;
+
tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
+
+ err = ufshcd_get_read_lock(hba, cmd->device->lun);
+ if (unlikely(err < 0)) {
+ if (err == -EPERM) {
+ set_host_byte(cmd, DID_ERROR);
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+ if (err == -EAGAIN)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ } else if (err == 1) {
+ has_read_lock = true;
+ }
spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /* if error handling is in progress, return host busy */
+ if (ufshcd_eh_in_progress(hba)) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_unlock;
+ }
+
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
break;
@@ -1343,6 +3034,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ hba->req_abort_count = 0;
+
/* acquire the tag to make sure device cmds don't use it */
if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
/*
@@ -1355,21 +3048,37 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
+ hba->ufs_stats.clk_hold.ctx = QUEUE_CMD;
err = ufshcd_hold(hba, true);
if (err) {
err = SCSI_MLQUEUE_HOST_BUSY;
clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
}
+ if (ufshcd_is_clkgating_allowed(hba))
+ WARN_ON(hba->clk_gating.state != CLKS_ON);
+
+ err = ufshcd_hibern8_hold(hba, true);
+ if (err) {
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ hba->ufs_stats.clk_rel.ctx = QUEUE_CMD;
+ ufshcd_release(hba, true);
+ goto out;
+ }
+ if (ufshcd_is_hibern8_on_idle_allowed(hba))
+ WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
+
+ /* Vote PM QoS for the request */
+ ufshcd_vops_pm_qos_req_start(hba, cmd->request);
/* IO svc time latency histogram */
- if (hba != NULL && cmd->request != NULL) {
- if (hba->latency_hist_enabled &&
- (cmd->request->cmd_type == REQ_TYPE_FS)) {
- cmd->request->lat_hist_io_start = ktime_get();
- cmd->request->lat_hist_enabled = 1;
- } else
- cmd->request->lat_hist_enabled = 0;
+ if (hba->latency_hist_enabled &&
+ (cmd->request->cmd_type == REQ_TYPE_FS)) {
+ cmd->request->lat_hist_io_start = ktime_get();
+ cmd->request->lat_hist_enabled = 1;
+ } else {
+ cmd->request->lat_hist_enabled = 0;
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
@@ -1378,29 +3087,80 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
- lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+ lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
lrbp->command_type = UTP_CMD_TYPE_SCSI;
+ lrbp->req_abort_skip = false;
/* form UPIU before issuing the command */
- ufshcd_compose_upiu(hba, lrbp);
+ err = ufshcd_compose_upiu(hba, lrbp);
+ if (err) {
+ if (err != -EAGAIN)
+ dev_err(hba->dev,
+ "%s: failed to compose upiu %d\n",
+ __func__, err);
+
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release_all(hba);
+ ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+ goto out;
+ }
+
err = ufshcd_map_sg(lrbp);
if (err) {
- ufshcd_release(hba);
+ ufshcd_release(hba, true);
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release_all(hba);
+ ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
goto out;
}
+ err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
+ if (err) {
+ if (err != -EAGAIN)
+ dev_err(hba->dev,
+ "%s: failed to configure crypto engine %d\n",
+ __func__, err);
+
+ scsi_dma_unmap(lrbp->cmd);
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release_all(hba);
+ ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+
+ goto out;
+ }
+
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
+
+ err = ufshcd_send_command(hba, tag);
+ if (err) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ scsi_dma_unmap(lrbp->cmd);
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release_all(hba);
+ ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+ ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
+ dev_err(hba->dev, "%s: failed sending command, %d\n",
+ __func__, err);
+ err = DID_ERROR;
+ goto out;
+ }
+
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
+ if (has_read_lock)
+ ufshcd_put_read_lock(hba);
return err;
}
@@ -1437,7 +3197,7 @@ ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
*/
err = ufshcd_wait_for_register(hba,
REG_UTP_TRANSFER_REQ_DOOR_BELL,
- mask, ~mask, 1000, 1000);
+ mask, ~mask, 1000, 1000, true);
return err;
}
@@ -1464,6 +3224,7 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
int resp;
int err = 0;
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
switch (resp) {
@@ -1516,11 +3277,22 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
if (!time_left) {
err = -ETIMEDOUT;
+ dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n",
+ __func__, lrbp->task_tag);
if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
- /* sucessfully cleared the command, retry if needed */
+ /* successfully cleared the command, retry if needed */
err = -EAGAIN;
+ /*
+ * In case of an error, after clearing the doorbell we also
+ * need to clear the corresponding bit in hba->outstanding_reqs.
+ */
+ ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
}
+ if (err)
+ ufsdbg_set_err_state(hba);
+
return err;
}
@@ -1581,6 +3353,15 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
unsigned long flags;
/*
+ * May be invoked from the shutdown and IOCTL contexts.
+ * In the shutdown context it is called with the lock already acquired;
+ * in the error recovery context it may also be called with the lock held.
+ */
+
+ if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+ down_read(&hba->lock);
+
+ /*
* Get free slot, sleep if slots are unavailable.
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout.
@@ -1596,15 +3377,23 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
+ /* Make sure descriptors are ready before ringing the doorbell */
+ wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_send_command(hba, tag);
+ err = ufshcd_send_command(hba, tag);
spin_unlock_irqrestore(hba->host->host_lock, flags);
-
+ if (err) {
+ dev_err(hba->dev, "%s: failed sending command, %d\n",
+ __func__, err);
+ goto out_put_tag;
+ }
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
out_put_tag:
ufshcd_put_dev_cmd_tag(hba, tag);
wake_up(&hba->dev_cmd.tag_wq);
+ if (!ufshcd_is_shutdown_ongoing(hba) && !ufshcd_eh_in_progress(hba))
+ up_read(&hba->lock);
return err;
}
@@ -1622,6 +3411,12 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
struct ufs_query_req **request, struct ufs_query_res **response,
enum query_opcode opcode, u8 idn, u8 index, u8 selector)
{
+ int idn_t = (int)idn;
+
+ ufsdbg_error_inject_dispatcher(hba,
+ ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
+ idn = idn_t;
+
*request = &hba->dev_cmd.query.request;
*response = &hba->dev_cmd.query.response;
memset(*request, 0, sizeof(struct ufs_query_req));
@@ -1630,6 +3425,31 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
(*request)->upiu_req.idn = idn;
(*request)->upiu_req.index = index;
(*request)->upiu_req.selector = selector;
+
+ ufshcd_update_query_stats(hba, opcode, idn);
+}
+
+static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum flag_idn idn, bool *flag_res)
+{
+ int ret;
+ int retries;
+
+ for (retries = 0; retries < QUERY_REQ_RETRIES; retries++) {
+ ret = ufshcd_query_flag(hba, opcode, idn, flag_res);
+ if (ret)
+ dev_dbg(hba->dev,
+ "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retires\n",
+ __func__, opcode, idn, ret, retries);
+ return ret;
}
/**
@@ -1641,16 +3461,17 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
*
* Returns 0 for success, non-zero in case of failure
*/
-static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res)
{
struct ufs_query_req *request = NULL;
struct ufs_query_res *response = NULL;
int err, index = 0, selector = 0;
+ int timeout = QUERY_REQ_TIMEOUT;
BUG_ON(!hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold_all(hba);
mutex_lock(&hba->dev_cmd.lock);
ufshcd_init_query(hba, &request, &response, opcode, idn, index,
selector);
@@ -1679,12 +3500,12 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
goto out_unlock;
}
- err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
if (err) {
dev_err(hba->dev,
"%s: Sending flag query for idn %d failed, err = %d\n",
- __func__, idn, err);
+ __func__, request->upiu_req.idn, err);
goto out_unlock;
}
@@ -1694,9 +3515,10 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_release_all(hba);
return err;
}
+EXPORT_SYMBOL(ufshcd_query_flag);
/**
* ufshcd_query_attr - API function for sending attribute requests
@@ -1709,7 +3531,7 @@ out_unlock:
*
* Returns 0 for success, non-zero in case of failure
*/
-static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
{
struct ufs_query_req *request = NULL;
@@ -1718,7 +3540,7 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold_all(hba);
if (!attr_val) {
dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
__func__, opcode);
@@ -1748,8 +3570,9 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode,
+ request->upiu_req.idn, index, err);
goto out_unlock;
}
@@ -1758,25 +3581,49 @@ static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
out_unlock:
mutex_unlock(&hba->dev_cmd.lock);
out:
- ufshcd_release(hba);
+ ufshcd_release_all(hba);
return err;
}
+EXPORT_SYMBOL(ufshcd_query_attr);
/**
- * ufshcd_query_descriptor - API function for sending descriptor requests
- * hba: per-adapter instance
- * opcode: attribute opcode
- * idn: attribute idn to access
- * index: index field
- * selector: selector field
- * desc_buf: the buffer that contains the descriptor
- * buf_len: length parameter passed to the device
+ * ufshcd_query_attr_retry() - API function for sending query
+ * attribute with retries
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request
+ * completes
*
- * Returns 0 for success, non-zero in case of failure.
- * The buf_len parameter will contain, on return, the length parameter
- * received on the response.
- */
-static int ufshcd_query_descriptor(struct ufs_hba *hba,
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+ u32 *attr_val)
+{
+ int ret = 0;
+ u32 retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ ret = ufshcd_query_attr(hba, opcode, idn, index,
+ selector, attr_val);
+ if (ret)
+ dev_dbg(hba->dev, "%s: failed with error %d, retries %d\n",
+ __func__, ret, retries);
+ else
+ break;
+ }
+
+ if (ret)
+ dev_err(hba->dev,
+ "%s: query attribute, idn %d, failed with error %d after %d retires\n",
+ __func__, idn, ret, retries);
+ return ret;
+}
+
+static int __ufshcd_query_descriptor(struct ufs_hba *hba,
enum query_opcode opcode, enum desc_idn idn, u8 index,
u8 selector, u8 *desc_buf, int *buf_len)
{
@@ -1786,7 +3633,7 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
BUG_ON(!hba);
- ufshcd_hold(hba, false);
+ ufshcd_hold_all(hba);
if (!desc_buf) {
dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
__func__, opcode);
@@ -1825,8 +3672,9 @@ static int ufshcd_query_descriptor(struct ufs_hba *hba,
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
if (err) {
- dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
- __func__, opcode, idn, err);
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+ __func__, opcode,
+ request->upiu_req.idn, index, err);
goto out_unlock;
}
@@ -1836,11 +3684,43 @@ out_unlock:
hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
- ufshcd_release(hba);
+ ufshcd_release_all(hba);
return err;
}
/**
+ * ufshcd_query_descriptor - API function for sending descriptor requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @desc_buf: the buffer that contains the descriptor
+ * @buf_len: length parameter passed to the device
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ * The buf_len parameter will contain, on return, the length parameter
+ * received on the response.
+ */
+int ufshcd_query_descriptor(struct ufs_hba *hba,
+ enum query_opcode opcode, enum desc_idn idn, u8 index,
+ u8 selector, u8 *desc_buf, int *buf_len)
+{
+ int err;
+ int retries;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = __ufshcd_query_descriptor(hba, opcode, idn, index,
+ selector, desc_buf, buf_len);
+ if (!err || err == -EINVAL)
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(ufshcd_query_descriptor);
+
+/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -1886,18 +3766,41 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
desc_id, desc_index, 0, desc_buf,
&buff_len);
- if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
- (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
- ufs_query_desc_max_size[desc_id])
- || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
- dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
- __func__, desc_id, param_offset, buff_len, ret);
- if (!ret)
- ret = -EINVAL;
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+ __func__, desc_id, desc_index, param_offset, ret);
goto out;
}
+ /* Sanity check */
+ if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+ dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+ __func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * While reading variable size descriptors (like the string descriptor),
+ * some UFS devices may report the "LENGTH" field (in the "Transaction
+ * Specific fields" of the Query Response UPIU) as the value that was
+ * requested in the Query Request UPIU instead of the actual size of
+ * the variable size descriptor.
+ * It is safe to ignore the "LENGTH" field for variable size descriptors,
+ * as the descriptor length can always be derived from the descriptor
+ * header fields. Hence this change imposes the length match check only
+ * on fixed size descriptors (for which we always request the correct
+ * size as part of the Query Request UPIU).
+ */
+ if ((desc_id != QUERY_DESC_IDN_STRING) &&
+ (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+ dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+ __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+ ret = -EINVAL;
+ goto out;
+ }
+
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
out:
@@ -1922,6 +3825,82 @@ static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
}
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size)
+{
+ return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_string_desc - read string descriptor
+ * @hba: pointer to adapter instance
+ * @desc_index: descriptor index
+ * @buf: pointer to buffer where descriptor would be read
+ * @size: size of buf
+ * @ascii: if true convert from unicode to ascii characters
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+ u32 size, bool ascii)
+{
+ int err = 0;
+
+ err = ufshcd_read_desc(hba,
+ QUERY_DESC_IDN_STRING, desc_index, buf, size);
+
+ if (err) {
+ dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n",
+ __func__, QUERY_REQ_RETRIES, err);
+ goto out;
+ }
+
+ if (ascii) {
+ int desc_len;
+ int ascii_len;
+ int i;
+ char *buff_ascii;
+
+ desc_len = buf[0];
+ /* remove header and divide by 2 to move from UTF16 to UTF8 */
+ ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1;
+ if (size < ascii_len + QUERY_DESC_HDR_SIZE) {
+ dev_err(hba->dev, "%s: buffer allocated size is too small\n",
+ __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ buff_ascii = kzalloc(ascii_len, GFP_KERNEL);
+ if (!buff_ascii) {
+ dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+ __func__, ascii_len);
+ err = -ENOMEM;
+ goto out_free_buff;
+ }
+
+ /*
+ * The descriptor contains the string in UTF-16 format;
+ * convert it to UTF-8 so it can be displayed.
+ */
+ utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE],
+ desc_len - QUERY_DESC_HDR_SIZE,
+ UTF16_BIG_ENDIAN, buff_ascii, ascii_len);
+
+ /* replace non-printable or non-ASCII characters with spaces */
+ for (i = 0; i < ascii_len; i++)
+ ufshcd_remove_non_printable(&buff_ascii[i]);
+
+ memset(buf + QUERY_DESC_HDR_SIZE, 0,
+ size - QUERY_DESC_HDR_SIZE);
+ memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
+ buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+out_free_buff:
+ kfree(buff_ascii);
+ }
+out:
+ return err;
+}
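
The ASCII conversion above can be illustrated in isolation: given a fake string descriptor (a 2-byte header followed by big-endian UTF-16 code units), the sketch below derives the ASCII length the same way as the driver and keeps only printable characters. The buffer contents are invented for illustration and this is not part of the patch.

#include <stdio.h>
#include <string.h>

#define HDR_SIZE 2	/* descriptor header: bLength, bDescriptorType */

int main(void)
{
	/* fake string descriptor: length 0x0A, type 0x05, "UFS!" in UTF-16BE */
	unsigned char desc[] = { 0x0A, 0x05,
				 0x00, 'U', 0x00, 'F', 0x00, 'S', 0x00, '!' };
	int desc_len = desc[0];
	/*
	 * Drop the header and halve: one ASCII character per UTF-16 code
	 * unit, plus one byte for the terminating NUL (as in the driver).
	 */
	int ascii_len = (desc_len - HDR_SIZE) / 2 + 1;
	char ascii[16] = { 0 };
	int i;

	for (i = 0; i < ascii_len - 1; i++) {
		unsigned char lo = desc[HDR_SIZE + 2 * i + 1]; /* BE: low byte second */

		ascii[i] = (lo >= 0x20 && lo < 0x7f) ? lo : ' ';
	}

	printf("desc_len=%d ascii_len=%d string=\"%s\"\n",
	       desc_len, ascii_len, ascii);
	return 0;
}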
+
/**
* ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
* @hba: Pointer to adapter instance
@@ -1942,7 +3921,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
- if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+ if (!ufs_is_valid_unit_desc_lun(lun))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -2084,12 +4063,19 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
+ hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+ (i * sizeof(struct utp_transfer_req_desc));
hba->lrb[i].ucd_req_ptr =
(struct utp_upiu_req *)(cmd_descp + i);
+ hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
hba->lrb[i].ucd_rsp_ptr =
(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+ hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+ response_offset;
hba->lrb[i].ucd_prdt_ptr =
(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+ hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+ prdt_offset;
}
}
@@ -2113,7 +4099,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
- dev_err(hba->dev,
+ dev_dbg(hba->dev,
"dme-link-startup: error code %d\n", ret);
return ret;
}
@@ -2149,6 +4135,13 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba)
usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
}
+static inline void ufshcd_save_tstamp_of_last_dme_cmd(
+ struct ufs_hba *hba)
+{
+ if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
+ hba->last_dme_cmd_tstamp = ktime_get();
+}
+
/**
* ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
* @hba: per adapter instance
@@ -2169,6 +4162,10 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
};
const char *set = action[!!peer];
int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
+
+ ufsdbg_error_inject_dispatcher(hba,
+ ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
@@ -2176,10 +4173,18 @@ int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
uic_cmd.argument3 = mib_val;
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ } while (ret && peer && --retries);
+
if (ret)
- dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
- set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
+ dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
+ set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+ UFS_UIC_COMMAND_RETRIES - retries);
return ret;
}
@@ -2204,6 +4209,7 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
};
const char *get = action[!!peer];
int ret;
+ int retries = UFS_UIC_COMMAND_RETRIES;
struct ufs_pa_layer_attr orig_pwr_info;
struct ufs_pa_layer_attr temp_pwr_info;
bool pwr_mode_change = false;
@@ -2232,16 +4238,26 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
uic_cmd.command = peer ?
UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+
+ ufsdbg_error_inject_dispatcher(hba,
+ ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
+
uic_cmd.argument1 = attr_sel;
- ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
- if (ret) {
- dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
- get, UIC_GET_ATTR_ID(attr_sel), ret);
- goto out;
- }
+ do {
+ /* for peer attributes we retry upon failure */
+ ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
+ if (ret)
+ dev_dbg(hba->dev, "%s: attr-id 0x%x error code %d\n",
+ get, UIC_GET_ATTR_ID(attr_sel), ret);
+ } while (ret && peer && --retries);
- if (mib_val)
+ if (ret)
+ dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
+ get, UIC_GET_ATTR_ID(attr_sel),
+ UFS_UIC_COMMAND_RETRIES - retries);
+
+ if (mib_val && !ret)
*mib_val = uic_cmd.argument3;
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
@@ -2274,6 +4290,7 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
unsigned long flags;
u8 status;
int ret;
+ bool reenable_intr = false;
mutex_lock(&hba->uic_cmd_mutex);
init_completion(&uic_async_done);
@@ -2281,15 +4298,17 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->uic_async_done = &uic_async_done;
- ret = __ufshcd_send_uic_cmd(hba, cmd);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- if (ret) {
- dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
- cmd->command, cmd->argument3, ret);
- goto out;
+ if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
+ ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
+ /*
+ * Make sure UIC command completion interrupt is disabled before
+ * issuing UIC command.
+ */
+ wmb();
+ reenable_intr = true;
}
- ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+ ret = __ufshcd_send_uic_cmd(hba, cmd, false);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
if (ret) {
dev_err(hba->dev,
"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
@@ -2313,12 +4332,83 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
cmd->command, status);
ret = (status != PWR_OK) ? status : -1;
}
+ ufshcd_dme_cmd_log(hba, "cmp2", hba->active_uic_cmd->command);
+
out:
+ if (ret) {
+ ufsdbg_set_err_state(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_cmd_log(hba);
+ }
+
+ ufshcd_save_tstamp_of_last_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->active_uic_cmd = NULL;
hba->uic_async_done = NULL;
+ if (reenable_intr)
+ ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
+ return ret;
+}
+
+int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
+{
+ unsigned long flags;
+ int ret = 0;
+ u32 tm_doorbell;
+ u32 tr_doorbell;
+ bool timeout = false, do_last_check = false;
+ ktime_t start;
+
+ ufshcd_hold_all(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * Wait for all the outstanding tasks/transfer requests.
+ * Verify by checking the doorbell registers are clear.
+ */
+ start = ktime_get();
+ do {
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (!tm_doorbell && !tr_doorbell) {
+ timeout = false;
+ break;
+ } else if (do_last_check) {
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ schedule();
+ if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+ wait_timeout_us) {
+ timeout = true;
+ /*
+ * We might have been scheduled out for a long time, so
+ * make one last check whether the doorbells have been
+ * cleared by now.
+ */
+ do_last_check = true;
+ }
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (tm_doorbell || tr_doorbell);
+
+ if (timeout) {
+ dev_err(hba->dev,
+ "%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+ __func__, tm_doorbell, tr_doorbell);
+ ret = -EBUSY;
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release_all(hba);
return ret;
}
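
The loop above is a poll-with-deadline pattern: keep reading both doorbells, and if the thread was scheduled out past the deadline, take one last look before declaring a timeout. A user-space sketch of just that pattern, with the register reads stubbed out, is shown below for illustration only; it is not part of the patch.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* stub "register reads": pretend the doorbells clear after ~50 polls */
static unsigned int fake_doorbell(void)
{
	static int polls;

	return (++polls < 50) ? 0x3 : 0x0;
}

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

int main(void)
{
	long long start = now_us(), timeout_us = 1000000;
	bool timed_out = false, do_last_check = false;
	unsigned int db;

	do {
		db = fake_doorbell();
		if (!db) {
			timed_out = false;
			break;
		} else if (do_last_check) {
			break;
		}
		/* in the driver this is where it drops the lock and schedule()s */
		if (now_us() - start > timeout_us) {
			timed_out = true;
			do_last_check = true;	/* one more look before giving up */
		}
	} while (db);

	printf("%s (doorbell=0x%x)\n", timed_out ? "timed out" : "cleared", db);
	return 0;
}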
@@ -2348,33 +4438,149 @@ static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
uic_cmd.command = UIC_CMD_DME_SET;
uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
uic_cmd.argument3 = mode;
- ufshcd_hold(hba, false);
+ hba->ufs_stats.clk_hold.ctx = PWRCTL_CMD_SEND;
+ ufshcd_hold_all(hba);
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
- ufshcd_release(hba);
-
+ hba->ufs_stats.clk_rel.ctx = PWRCTL_CMD_SEND;
+ ufshcd_release_all(hba);
out:
return ret;
}
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+static int ufshcd_link_recovery(struct ufs_hba *hba)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ /*
+ * Check if there is any race with fatal error handling.
+ * If so, wait for it to complete. Even though fatal error
+ * handling performs reset and restore in some cases, don't
+ * assume anything about its outcome; we are only avoiding the race here.
+ */
+ do {
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ } while (1);
+
+ /*
+ * We don't know whether the previous reset really reset the host
+ * controller, so force a reset here to be sure.
+ */
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->force_host_reset = true;
+ schedule_work(&hba->eh_work);
+
+ /* wait for the reset work to finish */
+ do {
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ flush_work(&hba->eh_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (1);
+
+ if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+ ufshcd_is_link_active(hba)))
+ ret = -ENOLINK;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ret;
+}
+
+static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
+ int ret;
struct uic_command uic_cmd = {0};
+ ktime_t start = ktime_get();
uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ /*
+ * Do a full reinit if the enter failed or if a LINERESET was detected
+ * during the Hibern8 operation. After a LINERESET the link moves to the
+ * default PWM-G1 mode, hence a full reinit is required to move the link
+ * back to HS speeds.
+ */
+ if (ret || hba->full_init_linereset) {
+ int err;
+
+ hba->full_init_linereset = false;
+ ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
+ dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
+ __func__, ret);
+ /*
+ * If link recovery fails, return the error code (-ENOLINK)
+ * returned by ufshcd_link_recovery().
+ * If link recovery succeeds, return -EAGAIN so that hibern8
+ * enter is retried.
+ */
+ err = ufshcd_link_recovery(hba);
+ if (err) {
+ dev_err(hba->dev, "%s: link recovery failed", __func__);
+ ret = err;
+ } else {
+ ret = -EAGAIN;
+ }
+ } else {
+ dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
+ ktime_to_us(ktime_get()));
+ }
+
+ return ret;
+}
+
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+ int ret = 0, retries;
- return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
+ ret = __ufshcd_uic_hibern8_enter(hba);
+ if (!ret)
+ goto out;
+ else if (ret != -EAGAIN)
+ /* Unable to recover the link, so no point proceeding */
+ BUG();
+ }
+out:
+ return ret;
}
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
int ret;
+ ktime_t start = ktime_get();
uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+ /* Do full reinit if exit failed */
if (ret) {
- ufshcd_set_link_off(hba);
- ret = ufshcd_host_reset_and_restore(hba);
+ ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
+ dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
+ __func__, ret);
+ ret = ufshcd_link_recovery(hba);
+ /* Unable to recover the link, so no point proceeding */
+ if (ret)
+ BUG();
+ } else {
+ dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
+ ktime_to_us(ktime_get()));
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+ hba->ufs_stats.hibern8_exit_cnt++;
}
return ret;
@@ -2407,8 +4613,8 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
if (hba->max_pwr_info.is_valid)
return 0;
- pwr_info->pwr_tx = FASTAUTO_MODE;
- pwr_info->pwr_rx = FASTAUTO_MODE;
+ pwr_info->pwr_tx = FAST_MODE;
+ pwr_info->pwr_rx = FAST_MODE;
pwr_info->hs_rate = PA_HS_MODE_B;
/* Get the connected lane count */
@@ -2439,7 +4645,7 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_rx);
return -EINVAL;
}
- pwr_info->pwr_rx = SLOWAUTO_MODE;
+ pwr_info->pwr_rx = SLOW_MODE;
}
ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2452,21 +4658,22 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
__func__, pwr_info->gear_tx);
return -EINVAL;
}
- pwr_info->pwr_tx = SLOWAUTO_MODE;
+ pwr_info->pwr_tx = SLOW_MODE;
}
hba->max_pwr_info.is_valid = true;
return 0;
}
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
+int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode)
{
- int ret;
+ int ret = 0;
/* if already configured to the requested pwr_mode */
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
- pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ if (!hba->restore_needed &&
+ pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
@@ -2476,6 +4683,10 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
return 0;
}
+ ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
+ if (ret)
+ return ret;
+
/*
* Configure attributes for power mode change with below.
* - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
@@ -2507,10 +4718,25 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
pwr_mode->hs_rate);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+
ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
| pwr_mode->pwr_tx);
if (ret) {
+ ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
dev_err(hba->dev,
"%s: power mode change failed %d\n", __func__, ret);
} else {
@@ -2519,6 +4745,7 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
memcpy(&hba->pwr_info, pwr_mode,
sizeof(struct ufs_pa_layer_attr));
+ hba->ufs_stats.power_mode_change_cnt++;
}
return ret;
@@ -2542,6 +4769,8 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
ret = ufshcd_change_power_mode(hba, &final_params);
+ if (!ret)
+ ufshcd_print_pwr_info(hba);
return ret;
}
@@ -2554,17 +4783,12 @@ static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
- int i, retries, err = 0;
+ int i;
+ int err;
bool flag_res = 1;
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- /* Set the fDeviceInit flag */
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, NULL);
- if (!err || err == -ETIMEDOUT)
- break;
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
- }
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
if (err) {
dev_err(hba->dev,
"%s setting fDeviceInit flag failed with error %d\n",
@@ -2572,18 +4796,11 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
goto out;
}
- /* poll for max. 100 iterations for fDeviceInit flag to clear */
- for (i = 0; i < 100 && !err && flag_res; i++) {
- for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
- err = ufshcd_query_flag(hba,
- UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
- if (!err || err == -ETIMEDOUT)
- break;
- dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
- err);
- }
- }
+ /* poll for max. 1000 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 1000 && !err && flag_res; i++)
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+
if (err)
dev_err(hba->dev,
"%s reading fDeviceInit flag failed with error %d\n",
@@ -2604,7 +4821,7 @@ out:
* To bring UFS host controller to operational state,
* 1. Enable required interrupts
* 2. Configure interrupt aggregation
- * 3. Program UTRL and UTMRL base addres
+ * 3. Program UTRL and UTMRL base address
* 4. Configure run-stop-registers
*
* Returns 0 on success, non-zero value on failure
@@ -2634,8 +4851,13 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
REG_UTP_TASK_REQ_LIST_BASE_H);
/*
+ * Make sure base address and interrupt setup are updated before
+ * enabling the run/stop registers below.
+ */
+ wmb();
+
+ /*
* UCRDY, UTMRLDY and UTRLRDY bits must be 1
- * DEI, HEI bits must be 0
*/
reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
if (!(ufshcd_get_lists_status(reg))) {
@@ -2652,6 +4874,23 @@ out:
}
/**
+ * ufshcd_hba_stop - Send controller to reset state
+ * @hba: per adapter instance
+ * @can_sleep: perform sleep or just spin
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba, bool can_sleep)
+{
+ int err;
+
+ ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
+ err = ufshcd_wait_for_register(hba, REG_CONTROLLER_ENABLE,
+ CONTROLLER_ENABLE, CONTROLLER_DISABLE,
+ 10, 1, can_sleep);
+ if (err)
+ dev_err(hba->dev, "%s: Controller disable failed\n", __func__);
+}
+
+/**
* ufshcd_hba_enable - initialize the controller
* @hba: per adapter instance
*
@@ -2671,18 +4910,9 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
* development and testing of this driver. msleep can be changed to
* mdelay and retry count can be reduced based on the controller.
*/
- if (!ufshcd_is_hba_active(hba)) {
-
+ if (!ufshcd_is_hba_active(hba))
/* change controller state to "reset state" */
- ufshcd_hba_stop(hba);
-
- /*
- * This delay is based on the testing done with UFS host
- * controller FPGA. The delay can be changed based on the
- * host controller used.
- */
- msleep(5);
- }
+ ufshcd_hba_stop(hba, true);
/* UniPro link is disabled at this point */
ufshcd_set_link_off(hba);
@@ -2756,6 +4986,11 @@ static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
return err;
}
+static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
+{
+ return ufshcd_disable_tx_lcc(hba, false);
+}
+
static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
{
return ufshcd_disable_tx_lcc(hba, true);
@@ -2771,14 +5006,26 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
{
int ret;
int retries = DME_LINKSTARTUP_RETRIES;
+ bool link_startup_again = false;
+
+ /*
+ * If the UFS device isn't active, we will have to issue link startup
+ * twice to make sure the device state moves to active.
+ */
+ if (!ufshcd_is_ufs_dev_active(hba))
+ link_startup_again = true;
+link_startup:
do {
ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
ret = ufshcd_dme_link_startup(hba);
+ if (ret)
+ ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
/* check if device is detected by inter-connect layer */
if (!ret && !ufshcd_is_device_present(hba)) {
+ ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
dev_err(hba->dev, "%s: Device not present\n", __func__);
ret = -ENXIO;
goto out;
@@ -2797,12 +5044,28 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
/* failed to get the link up... retire */
goto out;
+ if (link_startup_again) {
+ link_startup_again = false;
+ retries = DME_LINKSTARTUP_RETRIES;
+ goto link_startup;
+ }
+
+ /* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+ ufshcd_init_pwr_info(hba);
+ ufshcd_print_pwr_info(hba);
+
if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
ret = ufshcd_disable_device_tx_lcc(hba);
if (ret)
goto out;
}
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
+ ret = ufshcd_disable_host_tx_lcc(hba);
+ if (ret)
+ goto out;
+ }
+
/* Include any host controller configuration via UIC commands */
ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
if (ret)
@@ -2810,8 +5073,12 @@ static int ufshcd_link_startup(struct ufs_hba *hba)
ret = ufshcd_make_hba_operational(hba);
out:
- if (ret)
+ if (ret) {
dev_err(hba->dev, "link startup failed %d\n", ret);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ }
return ret;
}
@@ -2830,7 +5097,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
int err = 0;
int retries;
- ufshcd_hold(hba, false);
+ ufshcd_hold_all(hba);
mutex_lock(&hba->dev_cmd.lock);
for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -2842,7 +5109,7 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
}
mutex_unlock(&hba->dev_cmd.lock);
- ufshcd_release(hba);
+ ufshcd_release_all(hba);
if (err)
dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -2868,10 +5135,10 @@ static void ufshcd_set_queue_depth(struct scsi_device *sdev)
lun_qdepth = hba->nutrs;
ret = ufshcd_read_unit_desc_param(hba,
- ufshcd_scsi_to_upiu_lun(sdev->lun),
- UNIT_DESC_PARAM_LU_Q_DEPTH,
- &lun_qdepth,
- sizeof(lun_qdepth));
+ ufshcd_scsi_to_upiu_lun(sdev->lun),
+ UNIT_DESC_PARAM_LU_Q_DEPTH,
+ &lun_qdepth,
+ sizeof(lun_qdepth));
/* Some WLUN doesn't support unit descriptor */
if (ret == -EOPNOTSUPP)
@@ -3001,6 +5268,9 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
+ sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
+ sdev->use_rpm_auto = 1;
+
return 0;
}
@@ -3110,6 +5380,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
int result = 0;
int scsi_status;
int ocs;
+ bool print_prdt;
/* overall command status of utrd */
ocs = ufshcd_get_tr_ocs(lrbp);
@@ -3117,7 +5388,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
switch (ocs) {
case OCS_SUCCESS:
result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
-
+ hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
switch (result) {
case UPIU_TRANSACTION_RESPONSE:
/*
@@ -3133,8 +5404,28 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
scsi_status = result & MASK_SCSI_STATUS;
result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
- if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
- schedule_work(&hba->eeh_work);
+ /*
+ * Currently we only support BKOPs exception events, hence
+ * we can ignore BKOPs exception events during power
+ * management callbacks. A BKOPs exception event is not
+ * expected to be raised in the runtime suspend callback as
+ * it allows urgent bkops. During system suspend we anyway
+ * forcefully disable bkops, and if urgent bkops is needed
+ * it will be enabled on system resume. A long term
+ * solution could be to abort the system suspend if the
+ * UFS device needs urgent BKOPs.
+ */
+ if (!hba->pm_op_in_progress &&
+ ufshcd_is_exception_event(lrbp->ucd_rsp_ptr)) {
+ /*
+ * Prevent suspend once eeh_work is scheduled
+ * to avoid deadlock between ufshcd_suspend
+ * and exception event handler.
+ */
+ if (schedule_work(&hba->eeh_work))
+ pm_runtime_get_noresume(hba->dev);
+ }
break;
case UPIU_TRANSACTION_REJECT_UPIU:
/* TODO: handle Reject UPIU Response */
@@ -3162,13 +5453,34 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case OCS_MISMATCH_RESP_UPIU_SIZE:
case OCS_PEER_COMM_FAILURE:
case OCS_FATAL_ERROR:
+ case OCS_DEVICE_FATAL_ERROR:
+ case OCS_INVALID_CRYPTO_CONFIG:
+ case OCS_GENERAL_CRYPTO_ERROR:
default:
result |= DID_ERROR << 16;
dev_err(hba->dev,
- "OCS error from controller = %x\n", ocs);
+ "OCS error from controller = %x for tag %d\n",
+ ocs, lrbp->task_tag);
+ /*
+ * This is called in interrupt context, hence avoid sleep
+ * while printing debug registers. Also print only the minimum
+ * debug registers needed to debug OCS failure.
+ */
+ __ufshcd_print_host_regs(hba, true);
+ ufshcd_print_host_state(hba);
break;
} /* end of switch */
+ if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
+ print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
+ ocs == OCS_MISMATCH_DATA_BUF_SIZE);
+ ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
+ }
+
+ if ((host_byte(result) == DID_ERROR) ||
+ (host_byte(result) == DID_ABORT))
+ ufsdbg_set_err_state(hba);
+
return result;
}
@@ -3176,58 +5488,151 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
* ufshcd_uic_cmd_compl - handle completion of uic command
* @hba: per adapter instance
* @intr_status: interrupt status generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
hba->active_uic_cmd->argument2 |=
ufshcd_get_uic_cmd_result(hba);
hba->active_uic_cmd->argument3 =
ufshcd_get_dme_attr_val(hba);
complete(&hba->active_uic_cmd->done);
+ retval = IRQ_HANDLED;
}
- if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
- complete(hba->uic_async_done);
+ if (intr_status & UFSHCD_UIC_PWR_MASK) {
+ if (hba->uic_async_done) {
+ complete(hba->uic_async_done);
+ retval = IRQ_HANDLED;
+ } else if (ufshcd_is_auto_hibern8_supported(hba)) {
+ /*
+ * If uic_async_done flag is not set then this
+ * is an Auto hibern8 err interrupt.
+ * Perform a host reset followed by a full
+ * link recovery.
+ */
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->force_host_reset = true;
+ dev_err(hba->dev, "%s: Auto Hibern8 %s failed - status: 0x%08x, upmcrs: 0x%08x\n",
+ __func__, (intr_status & UIC_HIBERNATE_ENTER) ?
+ "Enter" : "Exit",
+ intr_status, ufshcd_get_upmcrs(hba));
+ __ufshcd_print_host_regs(hba, true);
+ ufshcd_print_host_state(hba);
+ schedule_work(&hba->eh_work);
+ retval = IRQ_HANDLED;
+ }
+ }
+ return retval;
}
/**
- * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
* @hba: per adapter instance
+ * @result: error result to inform scsi layer about
*/
-static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
+void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
+{
+ u8 index;
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
+
+ if (!hba->outstanding_reqs)
+ return;
+
+ for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+ lrbp = &hba->lrb[index];
+ cmd = lrbp->cmd;
+ if (cmd) {
+ ufshcd_cond_add_cmd_trace(hba, index, "failed");
+ ufshcd_update_error_stats(hba,
+ UFS_ERR_INT_FATAL_ERRORS);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
+ /* Clear pending transfer requests */
+ ufshcd_clear_cmd(hba, index);
+ ufshcd_outstanding_req_clear(hba, index);
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ lrbp->complete_time_stamp = ktime_get();
+ update_req_stats(hba, lrbp);
+ /* Mark completed command as NULL in LRB */
+ lrbp->cmd = NULL;
+ ufshcd_release_all(hba);
+ if (cmd->request) {
+ /*
+ * As we are accessing the "request" structure,
+ * this must be called before calling
+ * ->scsi_done() callback.
+ */
+ ufshcd_vops_pm_qos_req_end(hba, cmd->request,
+ true);
+ ufshcd_vops_crypto_engine_cfg_end(hba,
+ lrbp, cmd->request);
+ }
+ /* Do not touch lrbp after scsi done */
+ cmd->scsi_done(cmd);
+ } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+ if (hba->dev_cmd.complete) {
+ ufshcd_cond_add_cmd_trace(hba, index,
+ "dev_failed");
+ ufshcd_outstanding_req_clear(hba, index);
+ complete(hba->dev_cmd.complete);
+ }
+ }
+ if (ufshcd_is_clkscaling_supported(hba))
+ hba->clk_scaling.active_reqs--;
+ }
+}
+
+/**
+ * __ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ * @completed_reqs: requests to complete
+ */
+static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
+ unsigned long completed_reqs)
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- unsigned long completed_reqs;
- u32 tr_doorbell;
int result;
int index;
struct request *req;
- /* Resetting interrupt aggregation counters first and reading the
- * DOOR_BELL afterward allows us to handle all the completed requests.
- * In order to prevent other interrupts starvation the DB is read once
- * after reset. The down side of this solution is the possibility of
- * false interrupt if device completes another request after resetting
- * aggregation and before reading the DB.
- */
- if (ufshcd_is_intr_aggr_allowed(hba))
- ufshcd_reset_intr_aggr(hba);
-
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
-
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
cmd = lrbp->cmd;
if (cmd) {
+ ufshcd_cond_add_cmd_trace(hba, index, "complete");
+ ufshcd_update_tag_stats_completion(hba, cmd);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ lrbp->complete_time_stamp = ktime_get();
+ update_req_stats(hba, lrbp);
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
- clear_bit_unlock(index, &hba->lrb_in_use);
+ hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
+ __ufshcd_release(hba, false);
+ __ufshcd_hibern8_release(hba, false);
+ if (cmd->request) {
+ /*
+ * As we are accessing the "request" structure,
+ * this must be called before calling
+ * ->scsi_done() callback.
+ */
+ ufshcd_vops_pm_qos_req_end(hba, cmd->request,
+ false);
+ ufshcd_vops_crypto_engine_cfg_end(hba,
+ lrbp, cmd->request);
+ }
+
req = cmd->request;
if (req) {
/* Update IO svc time latency histogram */
@@ -3246,11 +5651,15 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
}
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
- __ufshcd_release(hba);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
- if (hba->dev_cmd.complete)
+ if (hba->dev_cmd.complete) {
+ ufshcd_cond_add_cmd_trace(hba, index,
+ "dcmp");
complete(hba->dev_cmd.complete);
+ }
}
+ if (ufshcd_is_clkscaling_supported(hba))
+ hba->clk_scaling.active_reqs--;
}
/* clear corresponding bits of completed commands */
@@ -3263,6 +5672,40 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
}
/**
+ * ufshcd_transfer_req_compl - handle SCSI and query command completion
+ * @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
+ */
+static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
+{
+ unsigned long completed_reqs;
+ u32 tr_doorbell;
+
+ /* Resetting interrupt aggregation counters first and reading the
+ * DOOR_BELL afterward allows us to handle all the completed requests.
+ * In order to prevent other interrupts starvation the DB is read once
+ * after reset. The down side of this solution is the possibility of
+ * false interrupt if device completes another request after resetting
+ * aggregation and before reading the DB.
+ */
+ if (ufshcd_is_intr_aggr_allowed(hba))
+ ufshcd_reset_intr_aggr(hba);
+
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
+
+ if (completed_reqs) {
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+/**
* ufshcd_disable_ee - disable exception event
* @hba: per-adapter instance
* @mask: exception event to disable
@@ -3282,7 +5725,7 @@ static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
val = hba->ee_ctrl_mask & ~mask;
val &= 0xFFFF; /* 2 bytes */
- err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
if (!err)
hba->ee_ctrl_mask &= ~mask;
@@ -3310,7 +5753,7 @@ static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
val = hba->ee_ctrl_mask | mask;
val &= 0xFFFF; /* 2 bytes */
- err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
if (!err)
hba->ee_ctrl_mask |= mask;
@@ -3336,7 +5779,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
if (hba->auto_bkops_enabled)
goto out;
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
QUERY_FLAG_IDN_BKOPS_EN, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to enable bkops %d\n",
@@ -3345,6 +5788,7 @@ static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = true;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
/* No need of URGENT_BKOPS exception from the device */
err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -3385,7 +5829,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
goto out;
}
- err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
QUERY_FLAG_IDN_BKOPS_EN, NULL);
if (err) {
dev_err(hba->dev, "%s: failed to disable bkops %d\n",
@@ -3395,6 +5839,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
}
hba->auto_bkops_enabled = false;
+ trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
out:
return err;
}
@@ -3423,7 +5868,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
{
- return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}
@@ -3481,15 +5926,52 @@ out:
*/
static int ufshcd_urgent_bkops(struct ufs_hba *hba)
{
- return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
+ return ufshcd_bkops_ctrl(hba, hba->urgent_bkops_lvl);
}
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
- return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ return ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
}
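+/*
+ * ufshcd_bkops_exception_event_handler - handle the URGENT_BKOPS exception
+ * event. Some devices raise it even when the BKOPS status is below the
+ * performance-impact level, so lower the urgent bkops level to the reported
+ * status (checked only once) before enabling auto bkops.
+ */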
+static void ufshcd_bkops_exception_event_handler(struct ufs_hba *hba)
+{
+ int err;
+ u32 curr_status = 0;
+
+ if (hba->is_urgent_bkops_lvl_checked)
+ goto enable_auto_bkops;
+
+ err = ufshcd_get_bkops_status(hba, &curr_status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /*
+ * We are seeing that some devices are raising the urgent bkops
+ * exception events even when the BKOPS status doesn't indicate a
+ * performance impact or a critical state. Handle these devices by
+ * determining their urgent bkops status at runtime.
+ */
+ if (curr_status < BKOPS_STATUS_PERF_IMPACT) {
+ dev_err(hba->dev, "%s: device raised urgent BKOPS exception for bkops status %d\n",
+ __func__, curr_status);
+ /* update the current status as the urgent bkops level */
+ hba->urgent_bkops_lvl = curr_status;
+ hba->is_urgent_bkops_lvl_checked = true;
+ }
+
+enable_auto_bkops:
+ err = ufshcd_enable_auto_bkops(hba);
+out:
+ if (err < 0)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+}
+
/**
* ufshcd_exception_event_handler - handle exceptions raised by device
* @work: pointer to work data
@@ -3505,7 +5987,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eeh_work);
pm_runtime_get_sync(hba->dev);
- scsi_block_requests(hba->host);
+ ufshcd_scsi_block_requests(hba);
err = ufshcd_get_ee_status(hba, &status);
if (err) {
dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -3514,18 +5996,124 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
}
status &= hba->ee_ctrl_mask;
- if (status & MASK_EE_URGENT_BKOPS) {
- err = ufshcd_urgent_bkops(hba);
- if (err < 0)
- dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
- __func__, err);
- }
+
+ if (status & MASK_EE_URGENT_BKOPS)
+ ufshcd_bkops_exception_event_handler(hba);
+
out:
- scsi_unblock_requests(hba->host);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_scsi_unblock_requests(hba);
+ /*
+ * pm_runtime_get_noresume is called while scheduling
+ * eeh_work to avoid suspend racing with exception work.
+ * Hence decrement usage counter using pm_runtime_put_noidle
+ * to allow suspend on completion of exception event handler.
+ */
+ pm_runtime_put_noidle(hba->dev);
+ pm_runtime_put(hba->dev);
return;
}
+/* Complete requests that have door-bell cleared */
+static void ufshcd_complete_requests(struct ufs_hba *hba)
+{
+ ufshcd_transfer_req_compl(hba);
+ ufshcd_tmc_handler(hba);
+}
+
+/**
+ * ufshcd_quirk_dl_nac_errors - This function checks whether error handling
+ * is required to recover from the DL NAC errors or not.
+ * @hba: per-adapter instance
+ *
+ * Returns true if error handling is required, false otherwise
+ */
+static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool err_handling = true;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+ * UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS only works around the
+ * device fatal error and/or DL NAC & REPLAY timeout errors.
+ */
+ if (hba->saved_err & (CONTROLLER_FATAL_ERROR | SYSTEM_BUS_FATAL_ERROR))
+ goto out;
+
+ if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
+ /*
+ * we have to do error recovery but at least silence the error
+ * logs.
+ */
+ hba->silence_err_logs = true;
+ goto out;
+ }
+
+ if ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
+ int err;
+ /*
+ * wait for 50ms to see if we can get any other errors or not.
+ */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ msleep(50);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+
+ /*
+ * now check if we have got any other severe errors other than
+ * the DL NAC error.
+ */
+ if ((hba->saved_err & INT_FATAL_ERRORS) ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
+ if (((hba->saved_err & INT_FATAL_ERRORS) ==
+ DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
+ ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
+ hba->silence_err_logs = true;
+ goto out;
+ }
+
+ /*
+ * As DL NAC is the only error received so far, send out NOP
+ * command to confirm if link is still active or not.
+ * - If we don't get any response then do error recovery.
+ * - If we get response then clear the DL NAC error bit.
+ */
+
+ /* silence the error logs from NOP command */
+ hba->silence_err_logs = true;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ err = ufshcd_verify_dev_init(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->silence_err_logs = false;
+
+ if (err) {
+ hba->silence_err_logs = true;
+ goto out;
+ }
+
+ /* Link seems to be alive hence ignore the DL NAC errors */
+ if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
+ hba->saved_err &= ~UIC_ERROR;
+ /* clear NAC error */
+ hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ if (!hba->saved_uic_err) {
+ err_handling = false;
+ goto out;
+ }
+ /*
+ * there seem to be some errors other than NAC, so do error
+ * recovery
+ */
+ hba->silence_err_logs = true;
+ }
+out:
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return err_handling;
+}
+
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
* @work: pointer to work structure
@@ -3534,51 +6122,149 @@ static void ufshcd_err_handler(struct work_struct *work)
{
struct ufs_hba *hba;
unsigned long flags;
- u32 err_xfer = 0;
- u32 err_tm = 0;
+ bool err_xfer = false, err_tm = false;
int err = 0;
int tag;
+ bool needs_reset = false;
+ bool clks_enabled = false;
hba = container_of(work, struct ufs_hba, eh_work);
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
-
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufsdbg_set_err_state(hba);
+
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
goto out;
+
+ /*
+ * Make sure the clocks are ON before we proceed with err
+ * handling. For the majority of cases err handler would be
+ * run with clocks ON. There is a possibility that the err
+ * handler was scheduled due to auto hibern8 error interrupt,
+ * in which case the clocks could be gated or be in the
+ * process of gating when the err handler runs.
+ */
+ if (unlikely((hba->clk_gating.state != CLKS_ON) &&
+ ufshcd_is_auto_hibern8_supported(hba))) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ hba->ufs_stats.clk_hold.ctx = ERR_HNDLR_WORK;
+ ufshcd_hold(hba, false);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ clks_enabled = true;
}
hba->ufshcd_state = UFSHCD_STATE_RESET;
ufshcd_set_eh_in_progress(hba);
/* Complete requests that have door-bell cleared by h/w */
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_complete_requests(hba);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ bool ret;
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
+ ret = ufshcd_quirk_dl_nac_errors(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!ret)
+ goto skip_err_handling;
+ }
+ /*
+ * Dump controller state before resetting. Transfer request state
+ * will be dumped as part of the request completion.
+ */
+ if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
+ __func__, hba->saved_err, hba->saved_uic_err);
+ if (!hba->silence_err_logs) {
+ /* release lock as print host regs sleeps */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_print_cmd_log(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ }
+ }
+
+ if ((hba->saved_err & INT_FATAL_ERRORS)
+ || hba->saved_ce_err || hba->force_host_reset ||
+ ((hba->saved_err & UIC_ERROR) &&
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+ needs_reset = true;
+
+ /*
+ * if host reset is required then skip clearing the pending
+ * transfers forcefully because they will automatically get
+ * cleared after link startup.
+ */
+ if (needs_reset)
+ goto skip_pending_xfer_clear;
+
+ /* release lock as clear command might sleep */
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Clear pending transfer requests */
- for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
- if (ufshcd_clear_cmd(hba, tag))
- err_xfer |= 1 << tag;
+ for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
+ if (ufshcd_clear_cmd(hba, tag)) {
+ err_xfer = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
/* Clear pending task management requests */
- for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
- if (ufshcd_clear_tm_cmd(hba, tag))
- err_tm |= 1 << tag;
+ for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs) {
+ if (ufshcd_clear_tm_cmd(hba, tag)) {
+ err_tm = true;
+ goto lock_skip_pending_xfer_clear;
+ }
+ }
- /* Complete the requests that are cleared by s/w */
+lock_skip_pending_xfer_clear:
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_transfer_req_compl(hba);
- ufshcd_tmc_handler(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* Complete the requests that are cleared by s/w */
+ ufshcd_complete_requests(hba);
+
+ if (err_xfer || err_tm)
+ needs_reset = true;
+
+skip_pending_xfer_clear:
/* Fatal errors need reset */
- if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
- ((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+ if (needs_reset) {
+ unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
+
+ if (hba->saved_err & INT_FATAL_ERRORS)
+ ufshcd_update_error_stats(hba,
+ UFS_ERR_INT_FATAL_ERRORS);
+ if (hba->saved_ce_err)
+ ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
+
+ if (hba->saved_err & UIC_ERROR)
+ ufshcd_update_error_stats(hba,
+ UFS_ERR_INT_UIC_ERROR);
+
+ if (err_xfer || err_tm)
+ ufshcd_update_error_stats(hba,
+ UFS_ERR_CLEAR_PEND_XFER_TM);
+
+ /*
+ * ufshcd_reset_and_restore() does the link reinitialization
+ * which will need at least one empty doorbell slot to send the
+ * device management commands (NOP and query commands).
+ * If no slot is empty at this moment then free up the last
+ * slot forcefully.
+ */
+ if (hba->outstanding_reqs == max_doorbells)
+ __ufshcd_transfer_req_compl(hba,
+ (1UL << (hba->nutrs - 1)));
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
if (err) {
dev_err(hba->dev, "%s: reset and restore failed\n",
__func__);
@@ -3591,76 +6277,225 @@ static void ufshcd_err_handler(struct work_struct *work)
scsi_report_bus_reset(hba->host, 0);
hba->saved_err = 0;
hba->saved_uic_err = 0;
+ hba->saved_ce_err = 0;
+ hba->force_host_reset = false;
}
+
+skip_err_handling:
+ if (!needs_reset) {
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ if (hba->saved_err || hba->saved_uic_err)
+ dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
+ __func__, hba->saved_err, hba->saved_uic_err);
+ }
+
+ hba->silence_err_logs = false;
+
+ if (clks_enabled) {
+ __ufshcd_release(hba, false);
+ hba->ufs_stats.clk_rel.ctx = ERR_HNDLR_WORK;
+ }
+out:
ufshcd_clear_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
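+/*
+ * ufshcd_update_uic_reg_hist - record a UIC error register value and a
+ * timestamp in a circular history buffer (UIC_ERR_REG_HIST_LENGTH entries)
+ * for later debugging.
+ */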
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+ u32 reg)
+{
+ reg_hist->reg[reg_hist->pos] = reg;
+ reg_hist->tstamp[reg_hist->pos] = ktime_get();
+ reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
+
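+/*
+ * ufshcd_rls_handler - work handler scheduled on a PHY LINERESET indication.
+ * Once the doorbell is clear, compare the current power mode and RX/TX gears
+ * against the cached pwr_info and re-apply the saved power mode if the link
+ * has fallen back to different settings.
+ */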
+static void ufshcd_rls_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int ret = 0;
+ u32 mode;
+
+ hba = container_of(work, struct ufs_hba, rls_work);
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_scsi_block_requests(hba);
+ down_write(&hba->lock);
+ ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ if (ret) {
+ dev_err(hba->dev,
+ "Timed out (%d) waiting for DB to clear\n",
+ ret);
+ goto out;
+ }
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
+ if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
+ hba->restore_needed = true;
+
+ if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
+ hba->restore_needed = true;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
+ if (hba->pwr_info.gear_rx != mode)
+ hba->restore_needed = true;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
+ if (hba->pwr_info.gear_tx != mode)
+ hba->restore_needed = true;
+
+ if (hba->restore_needed)
+ ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+
+ if (ret)
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ else
+ hba->restore_needed = false;
out:
- scsi_unblock_requests(hba->host);
- ufshcd_release(hba);
+ up_write(&hba->lock);
+ ufshcd_scsi_unblock_requests(hba);
pm_runtime_put_sync(hba->dev);
}
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_update_uic_error(struct ufs_hba *hba)
+static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
{
u32 reg;
+ irqreturn_t retval = IRQ_NONE;
+
+ /* PHY layer lane error */
+ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+ (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
+ /*
+ * To know whether this error is fatal or not, DB timeout
+ * must be checked but this error is handled separately.
+ */
+ dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
+ __func__, reg);
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+
+ /*
+ * Don't ignore LINERESET indication during hibern8
+ * enter operation.
+ */
+ if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
+ struct uic_command *cmd = hba->active_uic_cmd;
+
+ if (cmd) {
+ if (cmd->command == UIC_CMD_DME_HIBER_ENTER) {
+ dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
+ __func__, reg);
+ hba->full_init_linereset = true;
+ }
+ }
+ if (!hba->full_init_linereset)
+ schedule_work(&hba->rls_work);
+ }
+ retval |= IRQ_HANDLED;
+ }
/* PA_INIT_ERROR is fatal and needs UIC reset */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
- if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
- hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ if ((reg & UIC_DATA_LINK_LAYER_ERROR) &&
+ (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
+ hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+ } else if (hba->dev_quirks &
+ UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
+ if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
+ else if (reg &
+ UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT)
+ hba->uic_error |=
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR;
+ }
+ retval |= IRQ_HANDLED;
+ }
/* UIC NL/TL/DME errors needs software retry */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
- if (reg)
+ if ((reg & UIC_NETWORK_LAYER_ERROR) &&
+ (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+ retval |= IRQ_HANDLED;
+ }
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
- if (reg)
+ if ((reg & UIC_TRANSPORT_LAYER_ERROR) &&
+ (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+ retval |= IRQ_HANDLED;
+ }
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
- if (reg)
+ if ((reg & UIC_DME_ERROR) &&
+ (reg & UIC_DME_ERROR_CODE_MASK)) {
+ ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+ retval |= IRQ_HANDLED;
+ }
dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
__func__, hba->uic_error);
+ return retval;
}
/**
* ufshcd_check_errors - Check for errors that need s/w attention
* @hba: per-adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_check_errors(struct ufs_hba *hba)
+static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
{
bool queue_eh_work = false;
+ irqreturn_t retval = IRQ_NONE;
- if (hba->errors & INT_FATAL_ERRORS)
+ if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
queue_eh_work = true;
if (hba->errors & UIC_ERROR) {
hba->uic_error = 0;
- ufshcd_update_uic_error(hba);
+ retval = ufshcd_update_uic_error(hba);
if (hba->uic_error)
queue_eh_work = true;
}
if (queue_eh_work) {
+ /*
+ * update the transfer error masks to sticky bits, let's do this
+ * irrespective of current ufshcd_state.
+ */
+ hba->saved_err |= hba->errors;
+ hba->saved_uic_err |= hba->uic_error;
+ hba->saved_ce_err |= hba->ce_error;
+
/* handle fatal errors only when link is functional */
if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
- /* block commands from scsi mid-layer */
- scsi_block_requests(hba->host);
-
- /* transfer error masks to sticky bits */
- hba->saved_err |= hba->errors;
- hba->saved_uic_err |= hba->uic_error;
+ /*
+ * Set error handling in progress flag early so that we
+ * don't issue new requests any more.
+ */
+ ufshcd_set_eh_in_progress(hba);
hba->ufshcd_state = UFSHCD_STATE_ERROR;
schedule_work(&hba->eh_work);
}
+ retval |= IRQ_HANDLED;
}
/*
* if (!queue_eh_work) -
@@ -3668,40 +6503,63 @@ static void ufshcd_check_errors(struct ufs_hba *hba)
* itself without s/w intervention or errors that will be
* handled by the SCSI core layer.
*/
+ return retval;
}
/**
* ufshcd_tmc_handler - handle task management function completion
* @hba: per adapter instance
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_tmc_handler(struct ufs_hba *hba)
+static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba)
{
u32 tm_doorbell;
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
- wake_up(&hba->tm_wq);
+ if (hba->tm_condition) {
+ wake_up(&hba->tm_wq);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
}
/**
* ufshcd_sl_intr - Interrupt service routine
* @hba: per adapter instance
* @intr_status: contains interrupts generated by the controller
+ *
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
-static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
+static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
{
+ irqreturn_t retval = IRQ_NONE;
+
+ ufsdbg_error_inject_dispatcher(hba,
+ ERR_INJECT_INTR, intr_status, &intr_status);
+
+ ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
+
hba->errors = UFSHCD_ERROR_MASK & intr_status;
- if (hba->errors)
- ufshcd_check_errors(hba);
+ if (hba->errors || hba->ce_error)
+ retval |= ufshcd_check_errors(hba);
if (intr_status & UFSHCD_UIC_MASK)
- ufshcd_uic_cmd_compl(hba, intr_status);
+ retval |= ufshcd_uic_cmd_compl(hba, intr_status);
if (intr_status & UTP_TASK_REQ_COMPL)
- ufshcd_tmc_handler(hba);
+ retval |= ufshcd_tmc_handler(hba);
if (intr_status & UTP_TRANSFER_REQ_COMPL)
- ufshcd_transfer_req_compl(hba);
+ retval |= ufshcd_transfer_req_compl(hba);
+
+ return retval;
}
/**
@@ -3709,23 +6567,45 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
* @irq: irq number
* @__hba: pointer to adapter instance
*
- * Returns IRQ_HANDLED - If interrupt is valid
- * IRQ_NONE - If invalid interrupt
+ * Returns
+ * IRQ_HANDLED - If interrupt is valid
+ * IRQ_NONE - If invalid interrupt
*/
static irqreturn_t ufshcd_intr(int irq, void *__hba)
{
- u32 intr_status;
+ u32 intr_status, enabled_intr_status;
irqreturn_t retval = IRQ_NONE;
struct ufs_hba *hba = __hba;
+ int retries = hba->nutrs;
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ hba->ufs_stats.last_intr_status = intr_status;
+ hba->ufs_stats.last_intr_ts = ktime_get();
+ /*
+ * There could be max of hba->nutrs reqs in flight and in worst case
+ * if the reqs get finished 1 by 1 after the interrupt status is
+ * read, make sure we handle them by checking the interrupt status
+ * again in a loop until we process all of the reqs before returning.
+ */
+ do {
+ enabled_intr_status =
+ intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
+ if (intr_status)
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ if (enabled_intr_status)
+ retval |= ufshcd_sl_intr(hba, enabled_intr_status);
- if (intr_status) {
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
- ufshcd_sl_intr(hba, intr_status);
- retval = IRQ_HANDLED;
+ intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ } while (intr_status && --retries);
+
+ if (retval == IRQ_NONE) {
+ dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n",
+ __func__, intr_status);
+ ufshcd_hex_dump("host regs: ", hba->mmio_base,
+ UFSHCI_REG_SPACE_SIZE);
}
+
spin_unlock(hba->host->host_lock);
return retval;
}
@@ -3746,7 +6626,7 @@ static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
/* poll for max. 1 sec to clear door bell register by h/w */
err = ufshcd_wait_for_register(hba,
REG_UTP_TASK_REQ_DOOR_BELL,
- mask, 0, 1000, 1000);
+ mask, 0, 1000, 1000, true);
out:
return err;
}
@@ -3780,7 +6660,8 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
* the maximum wait time is bounded by %TM_CMD_TIMEOUT.
*/
wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
- ufshcd_hold(hba, false);
+ hba->ufs_stats.clk_hold.ctx = TM_CMD_SEND;
+ ufshcd_hold_all(hba);
spin_lock_irqsave(host->host_lock, flags);
task_req_descp = hba->utmrdl_base_addr;
@@ -3809,7 +6690,13 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
/* send command to the controller */
__set_bit(free_slot, &hba->outstanding_tasks);
+
+ /* Make sure descriptors are ready before ringing the task doorbell */
+ wmb();
+
ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
spin_unlock_irqrestore(host->host_lock, flags);
@@ -3831,8 +6718,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
clear_bit(free_slot, &hba->tm_condition);
ufshcd_put_tm_slot(hba, free_slot);
wake_up(&hba->tm_tag_wq);
+ hba->ufs_stats.clk_rel.ctx = TM_CMD_SEND;
- ufshcd_release(hba);
+ ufshcd_release_all(hba);
return err;
}
@@ -3855,6 +6743,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
host = cmd->device->host;
hba = shost_priv(host);
+ ufshcd_print_cmd_log(hba);
lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -3874,7 +6763,9 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
spin_lock_irqsave(host->host_lock, flags);
ufshcd_transfer_req_compl(hba);
spin_unlock_irqrestore(host->host_lock, flags);
+
out:
+ hba->req_abort_count = 0;
if (!err) {
err = SUCCESS;
} else {
@@ -3884,6 +6775,17 @@ out:
return err;
}
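+/*
+ * ufshcd_set_req_abort_skip - mark the given outstanding requests so that
+ * subsequent abort attempts skip the task abort sequence and fail fast
+ * (see the req_abort_skip check in ufshcd_abort()).
+ */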
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
+ struct ufshcd_lrb *lrbp;
+ int tag;
+
+ for_each_set_bit(tag, &bitmap, hba->nutrs) {
+ lrbp = &hba->lrb[tag];
+ lrbp->req_abort_skip = true;
+ }
+}
+
/**
* ufshcd_abort - abort a specific command
* @cmd: SCSI command pointer
@@ -3911,31 +6813,87 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
host = cmd->device->host;
hba = shost_priv(host);
tag = cmd->request->tag;
+ if (!ufshcd_valid_tag(hba, tag)) {
+ dev_err(hba->dev,
+ "%s: invalid command tag %d: cmd=0x%p, cmd->request=0x%p",
+ __func__, tag, cmd, cmd->request);
+ BUG();
+ }
- ufshcd_hold(hba, false);
+ lrbp = &hba->lrb[tag];
+
+ ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
+
+ /*
+ * Task abort to the device W-LUN is illegal. When this command
+ * fails, due to the spec violation, the next step of SCSI error
+ * handling would be to send a LU reset which, again, is a spec
+ * violation. To avoid these unnecessary/illegal steps we skip to
+ * the last error handling stage: reset and restore.
+ */
+ if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+ return ufshcd_eh_host_reset_handler(cmd);
+
+ ufshcd_hold_all(hba);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
/* If command is already aborted/completed, return SUCCESS */
- if (!(test_bit(tag, &hba->outstanding_reqs)))
+ if (!(test_bit(tag, &hba->outstanding_reqs))) {
+ dev_err(hba->dev,
+ "%s: cmd at tag %d already completed, outstanding=0x%lx, doorbell=0x%x\n",
+ __func__, tag, hba->outstanding_reqs, reg);
goto out;
+ }
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (!(reg & (1 << tag))) {
dev_err(hba->dev,
"%s: cmd was completed, but without a notifying intr, tag = %d",
__func__, tag);
}
- lrbp = &hba->lrb[tag];
+ /* Print Transfer Request of aborted task */
+ dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
+
+ /*
+ * Print detailed info about aborted request.
+ * As more than one request might get aborted at the same time,
+ * print full information only for the first aborted request in order
+ * to reduce repeated printouts. For other aborted requests only print
+ * basic details.
+ */
+ scsi_print_command(cmd);
+ if (!hba->req_abort_count) {
+ ufshcd_print_fsm_state(hba);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_trs(hba, 1 << tag, true);
+ } else {
+ ufshcd_print_trs(hba, 1 << tag, false);
+ }
+ hba->req_abort_count++;
+
+
+ /* Skip task abort in case previous aborts failed and report failure */
+ if (lrbp->req_abort_skip) {
+ err = -EIO;
+ goto out;
+ }
+
for (poll_cnt = 100; poll_cnt; poll_cnt--) {
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
UFS_QUERY_TASK, &resp);
if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
/* cmd pending in the device */
+ dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
+ __func__, tag);
break;
} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
/*
* cmd not pending in the device, check if it is
* in transition.
*/
+ dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
+ __func__, tag);
reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
if (reg & (1 << tag)) {
/* sleep for max. 200us to stabilize */
@@ -3943,8 +6901,13 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
continue;
}
/* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
+ __func__, tag);
goto out;
} else {
+ dev_err(hba->dev,
+ "%s: no response from device. tag = %d, err %d",
+ __func__, tag, err);
if (!err)
err = resp; /* service response error */
goto out;
@@ -3959,19 +6922,25 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
UFS_ABORT_TASK, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err)
+ if (!err) {
err = resp; /* service response error */
+ dev_err(hba->dev, "%s: issued. tag = %d, err %d",
+ __func__, tag, err);
+ }
goto out;
}
err = ufshcd_clear_cmd(hba, tag);
- if (err)
+ if (err) {
+ dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
+ __func__, tag, err);
goto out;
+ }
scsi_dma_unmap(cmd);
spin_lock_irqsave(host->host_lock, flags);
- __clear_bit(tag, &hba->outstanding_reqs);
+ ufshcd_outstanding_req_clear(hba, tag);
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
@@ -3983,14 +6952,15 @@ out:
err = SUCCESS;
} else {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+ ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
err = FAILED;
}
/*
- * This ufshcd_release() corresponds to the original scsi cmd that got
- * aborted here (as we won't get any IRQ for it).
+ * This ufshcd_release_all() corresponds to the original scsi cmd that
+ * got aborted here (as we won't get any IRQ for it).
*/
- ufshcd_release(hba);
+ ufshcd_release_all(hba);
return err;
}
@@ -4011,9 +6981,12 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
/* Reset the host controller */
spin_lock_irqsave(hba->host->host_lock, flags);
- ufshcd_hba_stop(hba);
+ ufshcd_hba_stop(hba, false);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* scale up clocks to max frequency before full reinitialization */
+ ufshcd_set_clk_freq(hba, true);
+
err = ufshcd_hba_enable(hba);
if (err)
goto out;
@@ -4021,8 +6994,21 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
/* Establish the link again and restore the device */
err = ufshcd_probe_hba(hba);
- if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
+ if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
err = -EIO;
+ goto out;
+ }
+
+ if (!err) {
+ err = ufshcd_vops_crypto_engine_reset(hba);
+ if (err) {
+ dev_err(hba->dev,
+ "%s: failed to reset crypto engine %d\n",
+ __func__, err);
+ goto out;
+ }
+ }
+
out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -4046,10 +7032,26 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
int retries = MAX_HOST_RESET_RETRIES;
do {
+ err = ufshcd_vops_full_reset(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: full reset returned %d\n",
+ __func__, err);
+
+ err = ufshcd_reset_device(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+ __func__, err);
+
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
/*
+ * There is no point in proceeding if we failed to recover
+ * even after multiple retries.
+ */
+ if (err)
+ BUG();
+ /*
* After reset the door-bell might be cleared, complete
* outstanding requests in s/w here.
*/
@@ -4069,13 +7071,12 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
*/
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- int err;
+ int err = SUCCESS;
unsigned long flags;
struct ufs_hba *hba;
hba = shost_priv(cmd->device->host);
- ufshcd_hold(hba, false);
/*
* Check if there is any race with fatal error handling.
* If so, wait for it to complete. Even though fatal error
@@ -4088,28 +7089,37 @@ static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
hba->ufshcd_state == UFSHCD_STATE_RESET))
break;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+ dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
flush_work(&hba->eh_work);
} while (1);
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
- spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /*
+ * we don't know if previous reset had really reset the host controller
+ * or not. So let's force reset here to be sure.
+ */
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ hba->force_host_reset = true;
+ schedule_work(&hba->eh_work);
- err = ufshcd_reset_and_restore(hba);
+ /* wait for the reset work to finish */
+ do {
+ if (!(work_pending(&hba->eh_work) ||
+ hba->ufshcd_state == UFSHCD_STATE_RESET))
+ break;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ dev_err(hba->dev, "%s: reset in progress - 2\n", __func__);
+ flush_work(&hba->eh_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ } while (1);
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!err) {
- err = SUCCESS;
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- } else {
+ if (!((hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) &&
+ ufshcd_is_link_active(hba))) {
err = FAILED;
hba->ufshcd_state = UFSHCD_STATE_ERROR;
}
- ufshcd_clear_eh_in_progress(hba);
+
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release(hba);
return err;
}
@@ -4221,9 +7231,9 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
__func__, hba->init_prefetch_data.icc_level);
- ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
- QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
- &hba->init_prefetch_data.icc_level);
+ ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+ &hba->init_prefetch_data.icc_level);
if (ret)
dev_err(hba->dev,
@@ -4299,6 +7309,210 @@ out:
}
/**
+ * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_TActivate parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_TActivate needs to be greater than or equal to the peer M-PHY's
+ * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
+ * the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
+
+ if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
+ return 0;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(
+ RX_MIN_ACTIVATETIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_min_activatetime);
+ if (ret)
+ goto out;
+
+ /* make sure proper unit conversion is applied */
+ tuned_pa_tactivate =
+ ((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
+ / PA_TACTIVATE_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ tuned_pa_tactivate);
+
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
+ * @hba: per-adapter instance
+ *
+ * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
+ * 1.61. PA_Hibern8Time needs to be the maximum of the local M-PHY's
+ * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
+ * This optimal value can help reduce the hibern8 exit latency.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
+ u32 max_hibern8_time, tuned_pa_hibern8time;
+
+ ret = ufshcd_dme_get(hba,
+ UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
+ &local_tx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba,
+ UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &peer_rx_hibern8_time_cap);
+ if (ret)
+ goto out;
+
+ max_hibern8_time = max(local_tx_hibern8_time_cap,
+ peer_rx_hibern8_time_cap);
+ /* make sure proper unit conversion is applied */
+ tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
+ / PA_HIBERN8_TIME_UNIT_US);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
+ tuned_pa_hibern8time);
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; for such devices we need to enable the
+ * UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+ int ret = 0;
+ u32 granularity, peer_granularity;
+ u32 pa_tactivate, peer_pa_tactivate;
+ u32 pa_tactivate_us, peer_pa_tactivate_us;
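+	/* PA_TACTIVATE unit size in us for each valid PA_GRANULARITY value */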
+ u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &granularity);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+ &peer_granularity);
+ if (ret)
+ goto out;
+
+ if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+ (granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+ __func__, granularity);
+ return -EINVAL;
+ }
+
+ if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+ (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+ dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+ __func__, peer_granularity);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ &peer_pa_tactivate);
+ if (ret)
+ goto out;
+
+ pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+ peer_pa_tactivate_us = peer_pa_tactivate *
+ gran_to_us_table[peer_granularity - 1];
+
+ if (pa_tactivate_us > peer_pa_tactivate_us) {
+ u32 new_peer_pa_tactivate;
+
+ new_peer_pa_tactivate = pa_tactivate_us /
+ gran_to_us_table[peer_granularity - 1];
+ new_peer_pa_tactivate++;
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+ new_peer_pa_tactivate);
+ }
+
+out:
+ return ret;
+}
+
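+/*
+ * ufshcd_tune_unipro_params - apply UniPro timing tunings (PA_TACTIVATE,
+ * PA_Hibern8Time) and host/device PA_TACTIVATE quirks once the link is up,
+ * then let the variant ops apply any vendor specific device quirks.
+ */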
+static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
+{
+ if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
+ ufshcd_tune_pa_tactivate(hba);
+ ufshcd_tune_pa_hibern8time(hba);
+ }
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
+ /* set 1ms timeout for PA_TACTIVATE */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+ ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+ ufshcd_vops_apply_dev_quirks(hba);
+}
+
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+ int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+ memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+ memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+ hba->req_abort_count = 0;
+}
+
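+/*
+ * ufshcd_apply_pm_quirks - for devices with UFS_DEVICE_QUIRK_NO_LINK_OFF,
+ * replace any runtime/system PM level that would turn the link off with one
+ * that keeps the link in hibern8 (device in sleep).
+ */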
+static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
+{
+ if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
+ if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
+ UIC_LINK_OFF_STATE) {
+ hba->rpm_lvl =
+ ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
+ hba->rpm_lvl);
+ }
+ if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ UIC_LINK_OFF_STATE) {
+ hba->spm_lvl =
+ ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
+ hba->spm_lvl);
+ }
+ }
+}
+
+/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
*
@@ -4307,12 +7521,17 @@ out:
static int ufshcd_probe_hba(struct ufs_hba *hba)
{
int ret;
+ ktime_t start = ktime_get();
ret = ufshcd_link_startup(hba);
if (ret)
goto out;
- ufshcd_init_pwr_info(hba);
+ /* Debug counters initialization */
+ ufshcd_clear_dbg_ufs_stats(hba);
+ /* set the default level for urgent bkops */
+ hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
+ hba->is_urgent_bkops_lvl_checked = false;
/* UniPro link is active now */
ufshcd_set_link_active(hba);
@@ -4325,10 +7544,18 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
+ ufs_advertise_fixup_device(hba);
+ ufshcd_tune_unipro_params(hba);
+
+ ufshcd_apply_pm_quirks(hba);
+ ret = ufshcd_set_vccq_rail_unused(hba,
+ (hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
+ if (ret)
+ goto out;
+
/* UFS device is also active now */
ufshcd_set_ufs_dev_active(hba);
ufshcd_force_reset_auto_bkops(hba);
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
hba->wlun_dev_clr_ua = true;
if (ufshcd_get_max_pwr_mode(hba)) {
@@ -4337,11 +7564,15 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
__func__);
} else {
ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
- if (ret)
+ if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
+ goto out;
+ }
}
+ /* set the state as operational after switching to desired gear */
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -4351,8 +7582,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* clear any previous UFS device information */
memset(&hba->dev_info, 0, sizeof(hba->dev_info));
- if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+ if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
hba->dev_info.f_power_on_wp_en = flag;
if (!hba->is_init_prefetch)
@@ -4363,6 +7594,27 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
+ /* Initialize devfreq after UFS device is detected */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+ hba->clk_scaling.saved_pwr_info.is_valid = true;
+ hba->clk_scaling.is_scaled_up = true;
+ if (!hba->devfreq) {
+ hba->devfreq = devfreq_add_device(hba->dev,
+ &ufs_devfreq_profile,
+ "simple_ondemand",
+ gov_data);
+ if (IS_ERR(hba->devfreq)) {
+ ret = PTR_ERR(hba->devfreq);
+ dev_err(hba->dev, "Unable to register with devfreq %d\n",
+ ret);
+ goto out;
+ }
+ }
+ hba->clk_scaling.is_allowed = true;
+ }
+
scsi_scan_host(hba->host);
pm_runtime_put_sync(hba->dev);
}
@@ -4370,10 +7622,13 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (!hba->is_init_prefetch)
hba->is_init_prefetch = true;
- /* Resume devfreq after UFS device is detected */
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
-
+ /*
+ * Enable auto hibern8 if supported, after full host and
+ * device initialization.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba))
+ ufshcd_set_auto_hibern8_timer(hba,
+ hba->hibern8_on_idle.delay_ms);
out:
/*
* If we failed to initialize the device or the device is not
@@ -4384,6 +7639,9 @@ out:
ufshcd_hba_exit(hba);
}
+ trace_ufshcd_init(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
return ret;
}
@@ -4396,7 +7654,296 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
{
struct ufs_hba *hba = (struct ufs_hba *)data;
+ /*
+	 * Don't allow clock gating and hibern8 entry here, so that device
+	 * detection is faster.
+ */
+ ufshcd_hold_all(hba);
ufshcd_probe_hba(hba);
+ ufshcd_release_all(hba);
+}
+
+/**
+ * ufshcd_query_ioctl - perform user read queries
+ * @hba: per-adapter instance
+ * @lun: used for lun specific queries
+ * @buffer: user space buffer for reading and submitting query data and params
+ * @return: 0 for success, negative error code otherwise
+ *
+ * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
+ * It reads the opcode, idn and buf_size parameters and puts the
+ * response in the buffer field while updating the used size in buf_size.
+ */
+static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
+{
+ struct ufs_ioctl_query_data *ioctl_data;
+ int err = 0;
+ int length = 0;
+ void *data_ptr;
+ bool flag;
+ u32 att;
+ u8 index;
+ u8 *desc = NULL;
+
+ ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
+ if (!ioctl_data) {
+ dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
+ sizeof(struct ufs_ioctl_query_data));
+ err = -ENOMEM;
+ goto out;
+ }
+
+ /* extract params from user buffer */
+ err = copy_from_user(ioctl_data, buffer,
+ sizeof(struct ufs_ioctl_query_data));
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Failed copying buffer from user, err %d\n",
+ __func__, err);
+ goto out_release_mem;
+ }
+
+ /* verify legal parameters & send query */
+ switch (ioctl_data->opcode) {
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ switch (ioctl_data->idn) {
+ case QUERY_DESC_IDN_DEVICE:
+ case QUERY_DESC_IDN_CONFIGURAION:
+ case QUERY_DESC_IDN_INTERCONNECT:
+ case QUERY_DESC_IDN_GEOMETRY:
+ case QUERY_DESC_IDN_POWER:
+ index = 0;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ if (!ufs_is_valid_unit_desc_lun(lun)) {
+ dev_err(hba->dev,
+ "%s: No unit descriptor for lun 0x%x\n",
+ __func__, lun);
+ err = -EINVAL;
+ goto out_release_mem;
+ }
+ index = lun;
+ break;
+ default:
+ goto out_einval;
+ }
+ length = min_t(int, QUERY_DESC_MAX_SIZE,
+ ioctl_data->buf_size);
+ desc = kzalloc(length, GFP_KERNEL);
+ if (!desc) {
+ dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+ __func__, length);
+ err = -ENOMEM;
+ goto out_release_mem;
+ }
+ err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
+ ioctl_data->idn, index, 0, desc, &length);
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ switch (ioctl_data->idn) {
+ case QUERY_ATTR_IDN_BOOT_LU_EN:
+ case QUERY_ATTR_IDN_POWER_MODE:
+ case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+ case QUERY_ATTR_IDN_OOO_DATA_EN:
+ case QUERY_ATTR_IDN_BKOPS_STATUS:
+ case QUERY_ATTR_IDN_PURGE_STATUS:
+ case QUERY_ATTR_IDN_MAX_DATA_IN:
+ case QUERY_ATTR_IDN_MAX_DATA_OUT:
+ case QUERY_ATTR_IDN_REF_CLK_FREQ:
+ case QUERY_ATTR_IDN_CONF_DESC_LOCK:
+ case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+ case QUERY_ATTR_IDN_EE_CONTROL:
+ case QUERY_ATTR_IDN_EE_STATUS:
+ case QUERY_ATTR_IDN_SECONDS_PASSED:
+ index = 0;
+ break;
+ case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
+ case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
+ index = lun;
+ break;
+ default:
+ goto out_einval;
+ }
+ err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
+ index, 0, &att);
+ break;
+
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ err = copy_from_user(&att,
+ buffer + sizeof(struct ufs_ioctl_query_data),
+ sizeof(u32));
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Failed copying buffer from user, err %d\n",
+ __func__, err);
+ goto out_release_mem;
+ }
+
+ switch (ioctl_data->idn) {
+ case QUERY_ATTR_IDN_BOOT_LU_EN:
+ index = 0;
+ if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
+ dev_err(hba->dev,
+ "%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
+ __func__, ioctl_data->opcode,
+ (unsigned int)ioctl_data->idn, att);
+ err = -EINVAL;
+ goto out_release_mem;
+ }
+ break;
+ default:
+ goto out_einval;
+ }
+ err = ufshcd_query_attr(hba, ioctl_data->opcode,
+ ioctl_data->idn, index, 0, &att);
+ break;
+
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ switch (ioctl_data->idn) {
+ case QUERY_FLAG_IDN_FDEVICEINIT:
+ case QUERY_FLAG_IDN_PERMANENT_WPE:
+ case QUERY_FLAG_IDN_PWR_ON_WPE:
+ case QUERY_FLAG_IDN_BKOPS_EN:
+ case QUERY_FLAG_IDN_PURGE_ENABLE:
+ case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
+ case QUERY_FLAG_IDN_BUSY_RTC:
+ break;
+ default:
+ goto out_einval;
+ }
+ err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
+ ioctl_data->idn, &flag);
+ break;
+ default:
+ goto out_einval;
+ }
+
+ if (err) {
+ dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
+ ioctl_data->idn);
+ goto out_release_mem;
+ }
+
+	/*
+	 * Copy the response data. We might end up reading less data than
+	 * what is specified in "ioctl_data->buf_size", so update
+	 * "ioctl_data->buf_size" to the amount that was actually read.
+	 */
+ switch (ioctl_data->opcode) {
+ case UPIU_QUERY_OPCODE_READ_DESC:
+ ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
+ data_ptr = desc;
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ ioctl_data->buf_size = sizeof(u32);
+ data_ptr = &att;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ ioctl_data->buf_size = 1;
+ data_ptr = &flag;
+ break;
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ goto out_release_mem;
+ default:
+ goto out_einval;
+ }
+
+ /* copy to user */
+ err = copy_to_user(buffer, ioctl_data,
+ sizeof(struct ufs_ioctl_query_data));
+ if (err)
+ dev_err(hba->dev, "%s: Failed copying back to user.\n",
+ __func__);
+ err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
+ data_ptr, ioctl_data->buf_size);
+ if (err)
+ dev_err(hba->dev, "%s: err %d copying back to user.\n",
+ __func__, err);
+ goto out_release_mem;
+
+out_einval:
+ dev_err(hba->dev,
+ "%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
+ __func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
+ err = -EINVAL;
+out_release_mem:
+ kfree(ioctl_data);
+ kfree(desc);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
+ * @dev: scsi device required for per LUN queries
+ * @cmd: command opcode
+ * @buffer: user space buffer for transferring data
+ *
+ * Supported commands:
+ * UFS_IOCTL_QUERY
+ */
+static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
+{
+ struct ufs_hba *hba = shost_priv(dev->host);
+ int err = 0;
+
+ BUG_ON(!hba);
+ if (!buffer) {
+ dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (cmd) {
+ case UFS_IOCTL_QUERY:
+ pm_runtime_get_sync(hba->dev);
+ err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
+ buffer);
+ pm_runtime_put_sync(hba->dev);
+ break;
+ default:
+ err = -ENOIOCTLCMD;
+ dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
+ cmd);
+ break;
+ }
+
+ return err;
+}
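For context, a minimal userspace sketch of exercising this ioctl path (illustrative only, not part of the patch): the UFS_IOCTL_QUERY number, the struct ufs_ioctl_query_data layout and the opcode/idn constants below are assumptions based on the companion uapi ioctl header of this series and should be verified against that header before use.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define UFS_IOCTL_QUERY			0x5388	/* assumed value; take from the uapi header */
#define UPIU_QUERY_OPCODE_READ_DESC	0x1
#define QUERY_DESC_IDN_DEVICE		0x0

/* assumed layout, matching the fields the driver dereferences above */
struct ufs_ioctl_query_data {
	uint32_t opcode;	/* query opcode, e.g. READ_DESC/READ_ATTR/READ_FLAG */
	uint8_t  idn;		/* descriptor/attribute/flag IDN */
	uint16_t buf_size;	/* in: buffer size, out: bytes actually returned */
	uint8_t  buffer[];	/* response payload follows the header */
};

int main(void)
{
	struct ufs_ioctl_query_data *q = calloc(1, sizeof(*q) + 255);
	int fd, ret = 1;

	if (!q)
		return 1;
	fd = open("/dev/sda", O_RDONLY);	/* any LU exposed by the UFS host; path varies */
	if (fd < 0)
		goto out;

	q->opcode = UPIU_QUERY_OPCODE_READ_DESC;
	q->idn = QUERY_DESC_IDN_DEVICE;
	q->buf_size = 255;

	/* on success the driver updates buf_size to the bytes it copied back */
	if (!ioctl(fd, UFS_IOCTL_QUERY, q)) {
		printf("device descriptor: %u bytes\n", q->buf_size);
		ret = 0;
	}
	close(fd);
out:
	free(q);
	return ret;
}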
+
+static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct Scsi_Host *host;
+ struct ufs_hba *hba;
+ int index;
+ bool found = false;
+
+ if (!scmd || !scmd->device || !scmd->device->host)
+ return BLK_EH_NOT_HANDLED;
+
+ host = scmd->device->host;
+ hba = shost_priv(host);
+ if (!hba)
+ return BLK_EH_NOT_HANDLED;
+
+ spin_lock_irqsave(host->host_lock, flags);
+
+ for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+ if (hba->lrb[index].cmd == scmd) {
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ /*
+ * Bypass SCSI error handling and reset the block layer timer if this
+	 * SCSI command was not actually dispatched to the UFS driver;
+	 * otherwise let the SCSI layer handle the error as usual.
+ */
+ return found ? BLK_EH_NOT_HANDLED : BLK_EH_RESET_TIMER;
}
static struct scsi_host_template ufshcd_driver_template = {
@@ -4411,6 +7958,11 @@ static struct scsi_host_template ufshcd_driver_template = {
.eh_abort_handler = ufshcd_abort,
.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
.eh_host_reset_handler = ufshcd_eh_host_reset_handler,
+ .eh_timed_out = ufshcd_eh_timed_out,
+ .ioctl = ufshcd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ufshcd_ioctl,
+#endif
.this_id = -1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = UFSHCD_CMD_PER_LUN,
@@ -4448,7 +8000,13 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
struct ufs_vreg *vreg)
{
- return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+ if (!vreg)
+ return 0;
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg,
+ UFS_VREG_LPM_LOAD_UA);
}
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -4456,8 +8014,10 @@ static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
{
if (!vreg)
return 0;
-
- return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+ else if (vreg->unused)
+ return 0;
+ else
+ return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
}
static int ufshcd_config_vreg(struct device *dev,
@@ -4474,6 +8034,11 @@ static int ufshcd_config_vreg(struct device *dev,
name = vreg->name;
if (regulator_count_voltages(reg) > 0) {
+ uA_load = on ? vreg->max_uA : 0;
+ ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+ if (ret)
+ goto out;
+
if (vreg->min_uV && vreg->max_uV) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
@@ -4484,11 +8049,6 @@ static int ufshcd_config_vreg(struct device *dev,
goto out;
}
}
-
- uA_load = on ? vreg->max_uA : 0;
- ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
- if (ret)
- goto out;
}
out:
return ret;
@@ -4498,7 +8058,9 @@ static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || vreg->enabled)
+ if (!vreg)
+ goto out;
+ else if (vreg->enabled || vreg->unused)
goto out;
ret = ufshcd_config_vreg(dev, vreg, true);
@@ -4518,7 +8080,9 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || !vreg->enabled)
+ if (!vreg)
+ goto out;
+ else if (!vreg->enabled || vreg->unused)
goto out;
ret = regulator_disable(vreg->reg);
@@ -4568,11 +8132,16 @@ out:
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
{
struct ufs_vreg_info *info = &hba->vreg_info;
+ int ret = 0;
- if (info)
- return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+ if (info->vdd_hba) {
+ ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
- return 0;
+ if (!ret)
+ ufshcd_vops_update_sec_cfg(hba, on);
+ }
+
+ return ret;
}
static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@ -4624,22 +8193,73 @@ static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
return 0;
}
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
- bool skip_ref_clk)
+static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused)
+{
+ int ret = 0;
+ struct ufs_vreg_info *info = &hba->vreg_info;
+
+ if (!info)
+ goto out;
+ else if (!info->vccq)
+ goto out;
+
+ if (unused) {
+ /* shut off the rail here */
+ ret = ufshcd_toggle_vreg(hba->dev, info->vccq, false);
+ /*
+ * Mark this rail as no longer used, so it doesn't get enabled
+ * later by mistake
+ */
+ if (!ret)
+ info->vccq->unused = true;
+ } else {
+ /*
+		 * The rail should already be enabled, so just make sure
+		 * that the unused flag is cleared.
+ */
+ info->vccq->unused = false;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+ bool skip_ref_clk, bool is_gating_context)
{
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
unsigned long flags;
+ ktime_t start = ktime_get();
+ bool clk_state_changed = false;
if (!head || list_empty(head))
goto out;
+ /* call vendor specific bus vote before enabling the clocks */
+ if (on) {
+ ret = ufshcd_vops_set_bus_vote(hba, on);
+ if (ret)
+ return ret;
+ }
+
+ /*
+	 * Vendor specific setup_clocks ops may depend on clocks managed by
+	 * this standard driver, hence call the vendor specific setup_clocks
+	 * before disabling the clocks managed here.
+ */
+ if (!on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+ if (ret)
+ return ret;
+ }
+
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
continue;
+ clk_state_changed = on ^ clki->enabled;
if (on && !clki->enabled) {
ret = clk_prepare_enable(clki->clk);
if (ret) {
@@ -4656,24 +8276,65 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
}
}
- ret = ufshcd_vops_setup_clocks(hba, on);
+ /*
+	 * Vendor specific setup_clocks ops may depend on clocks managed by
+	 * this standard driver, hence call the vendor specific setup_clocks
+	 * after enabling the clocks managed here.
+ */
+ if (on) {
+ ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+ if (ret)
+ goto out;
+ }
+
+ /*
+ * call vendor specific bus vote to remove the vote after
+ * disabling the clocks.
+ */
+ if (!on)
+ ret = ufshcd_vops_set_bus_vote(hba, on);
+
out:
if (ret) {
+ if (on)
+ /* Can't do much if this fails */
+ (void) ufshcd_vops_set_bus_vote(hba, false);
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
- } else if (on) {
+ } else if (!ret && on) {
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.state = CLKS_ON;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /* restore the secure configuration as clocks are enabled */
+ ufshcd_vops_update_sec_cfg(hba, true);
}
+
+ if (clk_state_changed)
+ trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+ (on ? "on" : "off"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
}
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufshcd_enable_clocks(struct ufs_hba *hba)
{
- return __ufshcd_setup_clocks(hba, on, false);
+ return ufshcd_setup_clocks(hba, true, false, false);
+}
+
+static int ufshcd_disable_clocks(struct ufs_hba *hba,
+ bool is_gating_context)
+{
+ return ufshcd_setup_clocks(hba, false, false, is_gating_context);
+}
+
+static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
+ bool is_gating_context)
+{
+ return ufshcd_setup_clocks(hba, false, true, is_gating_context);
}
static int ufshcd_init_clocks(struct ufs_hba *hba)
@@ -4719,7 +8380,7 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
{
int err = 0;
- if (!hba->vops)
+ if (!hba->var || !hba->var->vops)
goto out;
err = ufshcd_vops_init(hba);
@@ -4743,11 +8404,9 @@ out:
static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
{
- if (!hba->vops)
+ if (!hba->var || !hba->var->vops)
return;
- ufshcd_vops_setup_clocks(hba, false);
-
ufshcd_vops_setup_regulators(hba, false);
ufshcd_vops_exit(hba);
@@ -4776,7 +8435,7 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
if (err)
goto out_disable_hba_vreg;
- err = ufshcd_setup_clocks(hba, true);
+ err = ufshcd_enable_clocks(hba);
if (err)
goto out_disable_hba_vreg;
@@ -4798,7 +8457,7 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
out_disable_vreg:
ufshcd_setup_vreg(hba, false);
out_disable_clks:
- ufshcd_setup_clocks(hba, false);
+ ufshcd_disable_clocks(hba, false);
out_disable_hba_vreg:
ufshcd_setup_hba_vreg(hba, false);
out:
@@ -4810,7 +8469,13 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
if (hba->is_powered) {
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
- ufshcd_setup_clocks(hba, false);
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ if (hba->devfreq)
+ ufshcd_suspend_clkscaling(hba);
+ if (hba->clk_scaling.workq)
+ destroy_workqueue(hba->clk_scaling.workq);
+ }
+ ufshcd_disable_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
}
@@ -4823,19 +8488,19 @@ ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
0,
0,
0,
- SCSI_SENSE_BUFFERSIZE,
+ UFSHCD_REQ_SENSE_SIZE,
0};
char *buffer;
int ret;
- buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
if (!buffer) {
ret = -ENOMEM;
goto out;
}
ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
- SCSI_SENSE_BUFFERSIZE, NULL,
+ UFSHCD_REQ_SENSE_SIZE, NULL,
msecs_to_jiffies(1000), 3, NULL, REQ_PM);
if (ret)
pr_err("%s: failed with err %d\n", __func__, ret);
@@ -4943,10 +8608,20 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
(!check_for_bkops || (check_for_bkops &&
!hba->auto_bkops_enabled))) {
/*
+		 * Let's make sure that the link is in low power mode; we are
+		 * currently doing this by putting the link in Hibern8. Another
+		 * way to put the link in low power mode is to send the DME end
+		 * point reset to the device and then send the DME reset
+		 * command to the local UniPro. But putting the link in Hibern8
+		 * is much faster.
+ */
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret)
+ goto out;
+ /*
* Change controller state to "reset state" which
* should also put the link in off/reset state
*/
- ufshcd_hba_stop(hba);
+ ufshcd_hba_stop(hba, true);
/*
* TODO: Check if we need any delay to make sure that
* controller is reset
@@ -4961,6 +8636,15 @@ out:
static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
/*
+	 * It seems some UFS devices may keep drawing more than the sleep
+	 * current (at least for 500us) from the UFS rails (especially from
+	 * the VCCQ rail). To avoid this situation, add a 2ms delay before
+	 * putting these UFS rails into LPM mode.
+ */
+ if (!ufshcd_is_link_active(hba))
+ usleep_range(2000, 2100);
+
+ /*
 	 * If the UFS device is in UFS_Sleep, turn off the VCC rail to save
 	 * some power.
*
@@ -4992,7 +8676,6 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
!hba->dev_info.is_lu_power_on_wp) {
ret = ufshcd_setup_vreg(hba, true);
} else if (!ufshcd_is_ufs_dev_active(hba)) {
- ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
if (!ret && !ufshcd_is_link_active(hba)) {
ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
if (ret)
@@ -5001,6 +8684,7 @@ static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
if (ret)
goto vccq_lpm;
}
+ ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
}
goto out;
@@ -5014,13 +8698,17 @@ out:
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
- if (ufshcd_is_link_off(hba))
+ if (ufshcd_is_link_off(hba) ||
+ (ufshcd_is_link_hibern8(hba)
+ && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
ufshcd_setup_hba_vreg(hba, false);
}
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
- if (ufshcd_is_link_off(hba))
+ if (ufshcd_is_link_off(hba) ||
+ (ufshcd_is_link_hibern8(hba)
+ && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
ufshcd_setup_hba_vreg(hba, true);
}
@@ -5062,8 +8750,17 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
* If we can't transition into any of the low power modes
* just gate the clocks.
*/
- ufshcd_hold(hba, false);
+ WARN_ON(hba->hibern8_on_idle.is_enabled &&
+ hba->hibern8_on_idle.active_reqs);
+ ufshcd_hold_all(hba);
hba->clk_gating.is_suspended = true;
+ hba->hibern8_on_idle.is_suspended = true;
+
+ if (hba->clk_scaling.is_allowed) {
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ ufshcd_suspend_clkscaling(hba);
+ }
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -5072,12 +8769,12 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
(req_link_state == hba->uic_link_state))
- goto out;
+ goto enable_gating;
/* UFS device & link must be active before we enter in this function */
if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
ret = -EINVAL;
- goto out;
+ goto enable_gating;
}
if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5106,23 +8803,19 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto enable_gating;
}
+ flush_work(&hba->eeh_work);
ret = ufshcd_link_state_transition(hba, req_link_state, 1);
if (ret)
goto set_dev_active;
+ if (ufshcd_is_link_hibern8(hba) &&
+ ufshcd_is_hibern8_on_idle_allowed(hba))
+ hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+
ufshcd_vreg_set_lpm(hba);
disable_clks:
/*
- * The clock scaling needs access to controller registers. Hence, Wait
- * for pending clock scaling work to be done before clocks are
- * turned off.
- */
- if (ufshcd_is_clkscaling_enabled(hba)) {
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
- }
- /*
* Call vendor specific suspend callback. As these callbacks may access
* vendor specific host controller register space call them before the
* host clocks are ON.
@@ -5131,17 +8824,19 @@ disable_clks:
if (ret)
goto set_link_active;
- ret = ufshcd_vops_setup_clocks(hba, false);
- if (ret)
- goto vops_resume;
-
if (!ufshcd_is_link_active(hba))
- ufshcd_setup_clocks(hba, false);
+ ret = ufshcd_disable_clocks(hba, false);
else
/* If link is active, device ref_clk can't be switched off */
- __ufshcd_setup_clocks(hba, false, true);
+ ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
+ if (ret)
+ goto set_link_active;
- hba->clk_gating.state = CLKS_OFF;
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
/*
* Disable the host irq as host controller as there won't be any
* host controller transaction expected till resume.
@@ -5151,22 +8846,31 @@ disable_clks:
ufshcd_hba_vreg_set_lpm(hba);
goto out;
-vops_resume:
- ufshcd_vops_resume(hba, pm_op);
set_link_active:
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
- if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+ if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
ufshcd_set_link_active(hba);
- else if (ufshcd_is_link_off(hba))
+ } else if (ufshcd_is_link_off(hba)) {
+ ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
ufshcd_host_reset_and_restore(hba);
+ }
set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ hba->hibern8_on_idle.is_suspended = false;
hba->clk_gating.is_suspended = false;
- ufshcd_release(hba);
+ ufshcd_release_all(hba);
out:
hba->pm_op_in_progress = 0;
+
+ if (ret)
+ ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
+
return ret;
}
@@ -5190,14 +8894,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hba_vreg_set_hpm(hba);
/* Make sure clocks are enabled before accessing controller */
- ret = ufshcd_setup_clocks(hba, true);
+ ret = ufshcd_enable_clocks(hba);
if (ret)
goto out;
/* enable the host irq as host controller would be active soon */
- ret = ufshcd_enable_irq(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
+ ufshcd_enable_irq(hba);
ret = ufshcd_vreg_set_hpm(hba);
if (ret)
@@ -5214,18 +8916,28 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
- if (!ret)
+ if (!ret) {
ufshcd_set_link_active(hba);
- else
+ if (ufshcd_is_hibern8_on_idle_allowed(hba))
+ hba->hibern8_on_idle.state = HIBERN8_EXITED;
+ } else {
goto vendor_suspend;
+ }
} else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
/*
- * ufshcd_host_reset_and_restore() should have already
+ * A full initialization of the host and the device is required
+		 * since the link was put into the off state during suspend.
+ */
+ ret = ufshcd_reset_and_restore(hba);
+ /*
+ * ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
goto vendor_suspend;
+ /* mark link state as hibern8 exited */
+ if (ufshcd_is_hibern8_on_idle_allowed(hba))
+ hba->hibern8_on_idle.state = HIBERN8_EXITED;
}
if (!ufshcd_is_ufs_dev_active(hba)) {
@@ -5244,25 +8956,37 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_urgent_bkops(hba);
hba->clk_gating.is_suspended = false;
+ hba->hibern8_on_idle.is_suspended = false;
- if (ufshcd_is_clkscaling_enabled(hba))
- devfreq_resume_device(hba->devfreq);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
/* Schedule clock gating in case of no access to UFS device yet */
- ufshcd_release(hba);
+ ufshcd_release_all(hba);
goto out;
set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
+ if (ufshcd_is_link_hibern8(hba) &&
+ ufshcd_is_hibern8_on_idle_allowed(hba))
+ hba->hibern8_on_idle.state = HIBERN8_ENTERED;
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
disable_vreg:
ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
- ufshcd_setup_clocks(hba, false);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_suspend_clkscaling(hba);
+ ufshcd_disable_clocks(hba, false);
+ if (ufshcd_is_clkgating_allowed(hba))
+ hba->clk_gating.state = CLKS_OFF;
out:
hba->pm_op_in_progress = 0;
+
+ if (ret)
+ ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
+
return ret;
}
@@ -5278,20 +9002,18 @@ out:
int ufshcd_system_suspend(struct ufs_hba *hba)
{
int ret = 0;
+ ktime_t start = ktime_get();
if (!hba || !hba->is_powered)
return 0;
- if (pm_runtime_suspended(hba->dev)) {
- if (hba->rpm_lvl == hba->spm_lvl)
- /*
- * There is possibility that device may still be in
- * active state during the runtime suspend.
- */
- if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
- hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
- goto out;
+ if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+ hba->curr_dev_pwr_mode) &&
+ (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+ hba->uic_link_state))
+ goto out;
+ if (pm_runtime_suspended(hba->dev)) {
/*
* UFS device and/or UFS link low power states during runtime
* suspend seems to be different than what is expected during
@@ -5307,6 +9029,9 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
+ trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = true;
return ret;
@@ -5322,6 +9047,9 @@ EXPORT_SYMBOL(ufshcd_system_suspend);
int ufshcd_system_resume(struct ufs_hba *hba)
{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
if (!hba)
return -EINVAL;
@@ -5330,9 +9058,14 @@ int ufshcd_system_resume(struct ufs_hba *hba)
* Let the runtime resume take care of resuming
* if runtime suspended.
*/
- return 0;
-
- return ufshcd_resume(hba, UFS_SYSTEM_PM);
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+ trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode, hba->uic_link_state);
+ return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -5346,13 +9079,23 @@ EXPORT_SYMBOL(ufshcd_system_resume);
*/
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
if (!hba)
return -EINVAL;
if (!hba->is_powered)
- return 0;
+ goto out;
+ else
+ ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode,
+ hba->uic_link_state);
+ return ret;
- return ufshcd_suspend(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);
@@ -5379,13 +9122,22 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend);
*/
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
if (!hba)
return -EINVAL;
if (!hba->is_powered)
- return 0;
-
- return ufshcd_resume(hba, UFS_RUNTIME_PM);
+ goto out;
+ else
+ ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+ trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+ ktime_to_us(ktime_sub(ktime_get(), start)),
+ hba->curr_dev_pwr_mode,
+ hba->uic_link_state);
+ return ret;
}
EXPORT_SYMBOL(ufshcd_runtime_resume);
@@ -5395,6 +9147,246 @@ int ufshcd_runtime_idle(struct ufs_hba *hba)
}
EXPORT_SYMBOL(ufshcd_runtime_idle);
+static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count,
+ bool rpm)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ if (value >= UFS_PM_LVL_MAX)
+ return -EINVAL;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (rpm)
+ hba->rpm_lvl = value;
+ else
+ hba->spm_lvl = value;
+ ufshcd_apply_pm_quirks(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int curr_len;
+ u8 lvl;
+
+ curr_len = snprintf(buf, PAGE_SIZE,
+ "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
+ hba->rpm_lvl,
+ ufschd_ufs_dev_pwr_mode_to_string(
+ ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
+ ufschd_uic_link_state_to_string(
+ ufs_pm_lvl_states[hba->rpm_lvl].link_state));
+
+ curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+ "\nAll available Runtime PM levels info:\n");
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+ curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+ "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
+ lvl,
+ ufschd_ufs_dev_pwr_mode_to_string(
+ ufs_pm_lvl_states[lvl].dev_state),
+ ufschd_uic_link_state_to_string(
+ ufs_pm_lvl_states[lvl].link_state));
+
+ return curr_len;
+}
+
+static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
+}
+
+static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+ hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
+ hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
+ sysfs_attr_init(&hba->rpm_lvl_attr.attr);
+ hba->rpm_lvl_attr.attr.name = "rpm_lvl";
+ hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
+ dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
+}
+
+static ssize_t ufshcd_spm_lvl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int curr_len;
+ u8 lvl;
+
+ curr_len = snprintf(buf, PAGE_SIZE,
+ "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
+ hba->spm_lvl,
+ ufschd_ufs_dev_pwr_mode_to_string(
+ ufs_pm_lvl_states[hba->spm_lvl].dev_state),
+ ufschd_uic_link_state_to_string(
+ ufs_pm_lvl_states[hba->spm_lvl].link_state));
+
+ curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+ "\nAll available System PM levels info:\n");
+ for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+ curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+ "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
+ lvl,
+ ufschd_ufs_dev_pwr_mode_to_string(
+ ufs_pm_lvl_states[lvl].dev_state),
+ ufschd_uic_link_state_to_string(
+ ufs_pm_lvl_states[lvl].link_state));
+
+ return curr_len;
+}
+
+static ssize_t ufshcd_spm_lvl_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
+}
+
+static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+ hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
+ hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
+ sysfs_attr_init(&hba->spm_lvl_attr.attr);
+ hba->spm_lvl_attr.attr.name = "spm_lvl";
+ hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &hba->spm_lvl_attr))
+ dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
+}
+
+static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ u8 desc_index,
+ u8 param_offset,
+ u8 *sysfs_buf,
+ u8 param_size)
+{
+ u8 desc_buf[8] = {0};
+ int ret;
+
+ if (param_size > 8)
+ return -EINVAL;
+
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
+ param_offset, desc_buf, param_size);
+ pm_runtime_put_sync(hba->dev);
+
+ if (ret)
+ return -EINVAL;
+ switch (param_size) {
+ case 1:
+ ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%02X\n", *desc_buf);
+ break;
+ case 2:
+ ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%04X\n",
+ get_unaligned_be16(desc_buf));
+ break;
+ case 4:
+ ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%08X\n",
+ get_unaligned_be32(desc_buf));
+ break;
+ case 8:
+ ret = snprintf(sysfs_buf, PAGE_SIZE, "0x%016llX\n",
+ get_unaligned_be64(desc_buf));
+ break;
+ }
+
+ return ret;
+}
+
+
+#define UFS_DESC_PARAM(_name, _puname, _duname, _size) \
+ static ssize_t _name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct ufs_hba *hba = dev_get_drvdata(dev); \
+ return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
+ 0, _duname##_DESC_PARAM##_puname, buf, _size); \
+} \
+static DEVICE_ATTR_RO(_name)
+
+#define UFS_HEALTH_DESC_PARAM(_name, _uname, _size) \
+ UFS_DESC_PARAM(_name, _uname, HEALTH, _size)
+
+UFS_HEALTH_DESC_PARAM(eol_info, _EOL_INFO, 1);
+UFS_HEALTH_DESC_PARAM(life_time_estimation_a, _LIFE_TIME_EST_A, 1);
+UFS_HEALTH_DESC_PARAM(life_time_estimation_b, _LIFE_TIME_EST_B, 1);
+
+static struct attribute *ufs_sysfs_health_descriptor[] = {
+ &dev_attr_eol_info.attr,
+ &dev_attr_life_time_estimation_a.attr,
+ &dev_attr_life_time_estimation_b.attr,
+ NULL,
+};
+
+static const struct attribute_group ufs_sysfs_health_descriptor_group = {
+ .name = "health_descriptor",
+ .attrs = ufs_sysfs_health_descriptor,
+};
+
+static const struct attribute_group *ufs_sysfs_groups[] = {
+ &ufs_sysfs_health_descriptor_group,
+ NULL,
+};
+
+
+static void ufshcd_add_desc_sysfs_nodes(struct device *dev)
+{
+ int ret;
+
+ ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
+ if (ret)
+ dev_err(dev,
+ "%s: sysfs groups creation failed (err = %d)\n",
+ __func__, ret);
+}
+
+static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
+{
+ ufshcd_add_rpm_lvl_sysfs_nodes(hba);
+ ufshcd_add_spm_lvl_sysfs_nodes(hba);
+ ufshcd_add_desc_sysfs_nodes(hba->dev);
+}
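As a usage note, these attributes land directly under the UFS host's device directory in sysfs. A minimal sketch of reading them from userspace follows; the base path is a hypothetical example (it depends on the platform device name), while the rpm_lvl and health_descriptor/eol_info leaf names come from the code above.

#include <stdio.h>

/* hypothetical host device path; substitute the actual UFS host sysfs path */
#define UFS_SYSFS "/sys/devices/platform/soc/1d84000.ufshc"

int main(void)
{
	char line[128];
	FILE *f;

	/* current runtime PM level plus the table of available levels */
	f = fopen(UFS_SYSFS "/rpm_lvl", "r");
	if (f) {
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}

	/* bPreEOLInfo from the device health descriptor, e.g. "0x01" */
	f = fopen(UFS_SYSFS "/health_descriptor/eol_info", "r");
	if (f) {
		if (fgets(line, sizeof(line), f))
			printf("eol_info: %s", line);
		fclose(f);
	}
	return 0;
}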
+
+static void ufshcd_shutdown_clkscaling(struct ufs_hba *hba)
+{
+ bool suspend = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_scaling.is_allowed) {
+ hba->clk_scaling.is_allowed = false;
+ suspend = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/*
+	 * Clock scaling work may already have been scheduled, hence make
+	 * sure it doesn't race with shutdown.
+	 */
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ if (suspend)
+ ufshcd_suspend_clkscaling(hba);
+ }
+
+ /* Unregister so that devfreq_monitor can't race with shutdown */
+ if (hba->devfreq)
+ devfreq_remove_device(hba->devfreq);
+}
+
/**
* ufshcd_shutdown - shutdown routine
* @hba: per adapter instance
@@ -5414,7 +9406,24 @@ int ufshcd_shutdown(struct ufs_hba *hba)
goto out;
pm_runtime_get_sync(hba->dev);
-
+ ufshcd_hold_all(hba);
+ ufshcd_mark_shutdown_ongoing(hba);
+ ufshcd_shutdown_clkscaling(hba);
+	/*
+ * (1) Acquire the lock to stop any more requests
+ * (2) Wait for all issued requests to complete
+ */
+ ufshcd_get_write_lock(hba);
+ ufshcd_scsi_block_requests(hba);
+ ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ if (ret)
+ dev_err(hba->dev, "%s: waiting for DB clear: failed: %d\n",
+ __func__, ret);
+	/* Requests may have errored out above; let them be handled */
+ flush_work(&hba->eh_work);
+ /* reqs issued from contexts other than shutdown will fail from now */
+ ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release_all(hba);
ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
if (ret)
@@ -5489,13 +9498,17 @@ void ufshcd_remove(struct ufs_hba *hba)
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
- ufshcd_hba_stop(hba);
+ ufshcd_hba_stop(hba, true);
ufshcd_exit_clk_gating(hba);
- ufshcd_exit_latency_hist(hba);
- if (ufshcd_is_clkscaling_enabled(hba))
+ ufshcd_exit_hibern8_on_idle(hba);
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+ ufshcd_exit_latency_hist(hba);
devfreq_remove_device(hba->devfreq);
+ }
ufshcd_hba_exit(hba);
+ ufsdbg_remove_debugfs(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -5561,103 +9574,400 @@ out_error:
}
EXPORT_SYMBOL(ufshcd_alloc_host);
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+ bool scale_up)
{
- int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
if (!head || list_empty(head))
- goto out;
-
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
- if (ret)
- return ret;
+ return false;
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
- ret = clk_set_rate(clki->clk, clki->max_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->max_freq, ret);
- break;
- }
- clki->curr_freq = clki->max_freq;
-
+ return true;
} else if (!scale_up && clki->min_freq) {
if (clki->curr_freq == clki->min_freq)
continue;
- ret = clk_set_rate(clki->clk, clki->min_freq);
- if (ret) {
- dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
- __func__, clki->name,
- clki->min_freq, ret);
- break;
- }
- clki->curr_freq = clki->min_freq;
+ return true;
}
}
- dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
- clki->name, clk_get_rate(clki->clk));
}
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ return false;
+}
+
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ struct ufs_pa_layer_attr new_pwr_info;
+ u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
+
+ BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
+
+ if (scale_up) {
+ memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+ sizeof(struct ufs_pa_layer_attr));
+ /*
+ * Some UFS devices may stop responding after switching from
+		 * HS-G1 to HS-G3. It was also found that these devices work
+		 * fine if we do a two-step switch: HS-G1 to HS-G2 followed by
+		 * HS-G2 to HS-G3. If the UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH
+		 * quirk is enabled for such devices, this two-step gear switch
+		 * workaround is applied.
+ */
+ if ((hba->dev_quirks & UFS_DEVICE_QUIRK_HS_G1_TO_HS_G3_SWITCH)
+ && (hba->pwr_info.gear_tx == UFS_HS_G1)
+ && (new_pwr_info.gear_tx == UFS_HS_G3)) {
+ /* scale up to G2 first */
+ new_pwr_info.gear_tx = UFS_HS_G2;
+ new_pwr_info.gear_rx = UFS_HS_G2;
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+ if (ret)
+ goto out;
+
+ /* scale up to G3 now */
+ new_pwr_info.gear_tx = UFS_HS_G3;
+ new_pwr_info.gear_rx = UFS_HS_G3;
+ /* now, fall through to set the HS-G3 */
+ }
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+ if (ret)
+ goto out;
+ } else {
+ memcpy(&new_pwr_info, &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ if (hba->pwr_info.gear_tx > scale_down_gear
+ || hba->pwr_info.gear_rx > scale_down_gear) {
+ /* save the current power mode */
+ memcpy(&hba->clk_scaling.saved_pwr_info.info,
+ &hba->pwr_info,
+ sizeof(struct ufs_pa_layer_attr));
+
+ /* scale down gear */
+ new_pwr_info.gear_tx = scale_down_gear;
+ new_pwr_info.gear_rx = scale_down_gear;
+ if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
+ new_pwr_info.pwr_tx = FASTAUTO_MODE;
+ new_pwr_info.pwr_rx = FASTAUTO_MODE;
+ }
+ }
+ ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+ }
out:
+ if (ret)
+ dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
+ __func__, ret,
+ hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+ new_pwr_info.gear_tx, new_pwr_info.gear_rx,
+ scale_up);
+
return ret;
}
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+ #define DOORBELL_CLR_TOUT_US (1000 * 1000) /* 1 sec */
+ int ret = 0;
+ /*
+ * make sure that there are no outstanding requests when
+ * clock scaling is in progress
+ */
+ ufshcd_scsi_block_requests(hba);
+ down_write(&hba->lock);
+ if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+ ret = -EBUSY;
+ up_write(&hba->lock);
+ ufshcd_scsi_unblock_requests(hba);
+ }
+
+ return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+ up_write(&hba->lock);
+ ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+
+ /* let's not get into low power until clock scaling is completed */
+ hba->ufs_stats.clk_hold.ctx = CLK_SCALE_WORK;
+ ufshcd_hold_all(hba);
+
+ ret = ufshcd_clock_scaling_prepare(hba);
+ if (ret)
+ goto out;
+
+ /* scale down the gear before scaling down clocks */
+ if (!scale_up) {
+ ret = ufshcd_scale_gear(hba, false);
+ if (ret)
+ goto clk_scaling_unprepare;
+ }
+
+ /*
+	 * If auto hibern8 is supported then put the link in hibern8
+	 * manually; this avoids auto hibern8 racing with the clock
+	 * frequency scaling sequence.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret)
+			/* the link will be in a bad state, so no need to scale_up_gear */
+ return ret;
+ }
+
+ ret = ufshcd_scale_clks(hba, scale_up);
+ if (ret)
+ goto scale_up_gear;
+
+ if (ufshcd_is_auto_hibern8_supported(hba)) {
+ ret = ufshcd_uic_hibern8_exit(hba);
+ if (ret)
+			/* the link will be in a bad state, so no need to scale_up_gear */
+ return ret;
+ }
+
+ /* scale up the gear after scaling up clocks */
+ if (scale_up) {
+ ret = ufshcd_scale_gear(hba, true);
+ if (ret) {
+ ufshcd_scale_clks(hba, false);
+ goto clk_scaling_unprepare;
+ }
+ }
+
+ if (!ret) {
+ hba->clk_scaling.is_scaled_up = scale_up;
+ if (scale_up)
+ hba->clk_gating.delay_ms =
+ hba->clk_gating.delay_ms_perf;
+ else
+ hba->clk_gating.delay_ms =
+ hba->clk_gating.delay_ms_pwr_save;
+ }
+
+ goto clk_scaling_unprepare;
+
+scale_up_gear:
+ if (!scale_up)
+ ufshcd_scale_gear(hba, true);
+clk_scaling_unprepare:
+ ufshcd_clock_scaling_unprepare(hba);
+out:
+ hba->ufs_stats.clk_rel.ctx = CLK_SCALE_WORK;
+ ufshcd_release_all(hba);
+ return ret;
+}
+
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ devfreq_suspend_device(hba->devfreq);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->clk_scaling.window_start_t = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool suspend = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!hba->clk_scaling.is_suspended) {
+ suspend = true;
+ hba->clk_scaling.is_suspended = true;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (suspend)
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+ unsigned long flags;
+ bool resume = false;
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->clk_scaling.is_suspended) {
+ resume = true;
+ hba->clk_scaling.is_suspended = false;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (resume)
+ devfreq_resume_device(hba->devfreq);
+}
+
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ u32 value;
+ int err;
+
+ if (kstrtou32(buf, 0, &value))
+ return -EINVAL;
+
+ value = !!value;
+ if (value == hba->clk_scaling.is_allowed)
+ goto out;
+
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+
+ hba->clk_scaling.is_allowed = value;
+
+ if (value) {
+ ufshcd_resume_clkscaling(hba);
+ } else {
+ ufshcd_suspend_clkscaling(hba);
+ err = ufshcd_devfreq_scale(hba, true);
+ if (err)
+ dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+ __func__, err);
+ }
+
+ ufshcd_release(hba, false);
+ pm_runtime_put_sync(hba->dev);
+out:
+ return count;
+}
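A small sketch of driving this knob from userspace: the attribute name clkscale_enable is registered by ufshcd_clkscaling_init_sysfs() further down, while the device path below is a hypothetical example that depends on the platform.

#include <stdio.h>

/* hypothetical host device path; the attribute name is clkscale_enable */
#define CLKSCALE_ATTR "/sys/devices/platform/soc/1d84000.ufshc/clkscale_enable"

static int set_clk_scaling(int enable)
{
	FILE *f = fopen(CLKSCALE_ATTR, "w");

	if (!f)
		return -1;
	/* "0" stops devfreq-driven scaling and forces a scale up; "1" re-enables it */
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	return set_clk_scaling(0) ? 1 : 0;
}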
+
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.suspend_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = true;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ __ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+ struct ufs_hba *hba = container_of(work, struct ufs_hba,
+ clk_scaling.resume_work);
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(hba->host->host_lock, irq_flags);
+ if (!hba->clk_scaling.is_suspended) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ return;
+ }
+ hba->clk_scaling.is_suspended = false;
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+ devfreq_resume_device(hba->devfreq);
+}
+
static int ufshcd_devfreq_target(struct device *dev,
unsigned long *freq, u32 flags)
{
- int err = 0;
+ int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
- bool release_clk_hold = false;
unsigned long irq_flags;
+ ktime_t start;
+ bool scale_up, sched_clk_scaling_suspend_work = false;
- if (!ufshcd_is_clkscaling_enabled(hba))
+ if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
+ if ((*freq > 0) && (*freq < UINT_MAX)) {
+ dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
+ return -EINVAL;
+ }
+
spin_lock_irqsave(hba->host->host_lock, irq_flags);
if (ufshcd_eh_in_progress(hba)) {
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
return 0;
}
- if (ufshcd_is_clkgating_allowed(hba) &&
- (hba->clk_gating.state != CLKS_ON)) {
- if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
- /* hold the vote until the scaling work is completed */
- hba->clk_gating.active_reqs++;
- release_clk_hold = true;
- hba->clk_gating.state = CLKS_ON;
- } else {
- /*
- * Clock gating work seems to be running in parallel
- * hence skip scaling work to avoid deadlock between
- * current scaling work and gating work.
- */
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- return 0;
- }
+ if (!hba->clk_scaling.active_reqs)
+ sched_clk_scaling_suspend_work = true;
+
+ scale_up = (*freq == UINT_MAX) ? true : false;
+ if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+ spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+ ret = 0;
+ goto out; /* no state change required */
}
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- if (*freq == UINT_MAX)
- err = ufshcd_scale_clks(hba, true);
- else if (*freq == 0)
- err = ufshcd_scale_clks(hba, false);
+ start = ktime_get();
+ ret = ufshcd_devfreq_scale(hba, scale_up);
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ (scale_up ? "up" : "down"),
+ ktime_to_us(ktime_sub(ktime_get(), start)), ret);
- spin_lock_irqsave(hba->host->host_lock, irq_flags);
- if (release_clk_hold)
- __ufshcd_release(hba);
- spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+out:
+ if (sched_clk_scaling_suspend_work)
+ queue_work(hba->clk_scaling.workq,
+ &hba->clk_scaling.suspend_work);
- return err;
+ return ret;
}
static int ufshcd_devfreq_get_dev_status(struct device *dev,
@@ -5667,7 +9977,7 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct ufs_clk_scaling *scaling = &hba->clk_scaling;
unsigned long flags;
- if (!ufshcd_is_clkscaling_enabled(hba))
+ if (!ufshcd_is_clkscaling_supported(hba))
return -EINVAL;
memset(stat, 0, sizeof(*stat));
@@ -5698,12 +10008,31 @@ start_window:
return 0;
}
-static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 100,
- .target = ufshcd_devfreq_target,
- .get_dev_status = ufshcd_devfreq_get_dev_status,
-};
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+ hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+ hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+ sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+ hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+ hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+ struct device *dev = hba->dev;
+ int ret;
+
+ ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+ &hba->lanes_per_direction);
+ if (ret) {
+ dev_dbg(hba->dev,
+ "%s: failed to read lanes-per-direction, ret=%d\n",
+ __func__, ret);
+ hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+ }
+}
/**
* ufshcd_init - Driver initialization routine
* @hba: per-adapter instance
@@ -5727,6 +10056,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
+ ufshcd_init_lanes_per_dir(hba);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -5737,9 +10068,20 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
+ /* print error message if ufs_version is not valid */
+ if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+ (hba->ufs_version != UFSHCI_VERSION_11) &&
+ (hba->ufs_version != UFSHCI_VERSION_20) &&
+ (hba->ufs_version != UFSHCI_VERSION_21))
+ dev_err(hba->dev, "invalid UFS version 0x%x\n",
+ hba->ufs_version);
+
/* Get Interrupt bit mask per version */
hba->intr_mask = ufshcd_get_intr_mask(hba);
+ /* Enable debug prints */
+ hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
+
err = ufshcd_set_dma_mask(hba);
if (err) {
dev_err(hba->dev, "set dma mask failed\n");
@@ -5763,6 +10105,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
host->max_channel = UFSHCD_MAX_CHANNEL;
host->unique_id = host->host_no;
host->max_cmd_len = MAX_CDB_SIZE;
+ host->set_dbd_for_caching = 1;
hba->max_pwr_info.is_valid = false;
@@ -5773,6 +10116,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+ INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
@@ -5780,10 +10124,28 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize mutex for device management commands */
mutex_init(&hba->dev_cmd.lock);
+ init_rwsem(&hba->lock);
+
/* Initialize device management tag acquire wait queue */
init_waitqueue_head(&hba->dev_cmd.tag_wq);
ufshcd_init_clk_gating(hba);
+ ufshcd_init_hibern8_on_idle(hba);
+
+ /*
+ * In order to avoid any spurious interrupt immediately after
+ * registering UFS controller interrupt handler, clear any pending UFS
+ * interrupt status and disable all the UFS interrupts.
+ */
+ ufshcd_writel(hba, ufshcd_readl(hba, REG_INTERRUPT_STATUS),
+ REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, 0, REG_INTERRUPT_ENABLE);
+ /*
+ * Make sure that UFS interrupts are disabled and any pending interrupt
+ * status is cleared before registering UFS interrupt handler.
+ */
+ mb();
+
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
@@ -5799,39 +10161,75 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto exit_gating;
}
+ /* Reset controller to power on reset (POR) state */
+ ufshcd_vops_full_reset(hba);
+
+ /* reset connected UFS device */
+ err = ufshcd_reset_device(hba);
+ if (err)
+ dev_warn(hba->dev, "%s: device reset failed. err %d\n",
+ __func__, err);
+
/* Host controller enable */
err = ufshcd_hba_enable(hba);
if (err) {
dev_err(hba->dev, "Host controller enable failed\n");
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_host_state(hba);
goto out_remove_scsi_host;
}
- if (ufshcd_is_clkscaling_enabled(hba)) {
- hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
- "simple_ondemand", NULL);
- if (IS_ERR(hba->devfreq)) {
- dev_err(hba->dev, "Unable to register with devfreq %ld\n",
- PTR_ERR(hba->devfreq));
- goto out_remove_scsi_host;
- }
- /* Suspend devfreq until the UFS device is detected */
- devfreq_suspend_device(hba->devfreq);
- hba->clk_scaling.window_start_t = 0;
+ if (ufshcd_is_clkscaling_supported(hba)) {
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
+ host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ ufshcd_clkscaling_init_sysfs(hba);
}
+ /*
+	 * If rpm_lvl and spm_lvl are not already set to valid levels, set
+	 * the default power management level for UFS runtime and system
+	 * suspend. The default power saving mode selected keeps the UFS
+	 * link in Hibern8 state and the UFS device in Sleep.
+ */
+ if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
/* Hold auto suspend until async scan completes */
pm_runtime_get_sync(dev);
ufshcd_init_latency_hist(hba);
/*
- * The device-initialize-sequence hasn't been invoked yet.
- * Set the device to power-off state
+	 * We are assuming that the device wasn't put into a sleep/power-down
+	 * state exclusively during the boot stage before the kernel started.
+ * This assumption helps avoid doing link startup twice during
+ * ufshcd_probe_hba().
*/
- ufshcd_set_ufs_dev_poweroff(hba);
+ ufshcd_set_ufs_dev_active(hba);
+
+ ufshcd_cmd_log_init(hba);
async_schedule(ufshcd_async_scan, hba);
+ ufsdbg_add_debugfs(hba);
+
+ ufshcd_add_sysfs_nodes(hba);
+
return 0;
out_remove_scsi_host:
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index c1310ead0c2a..931b6b31de19 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -3,6 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.h
* Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -38,12 +39,14 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -53,6 +56,8 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include "unipro.h"
#include <asm/irq.h>
#include <asm/byteorder.h>
@@ -63,11 +68,15 @@
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
+#include <linux/fault-inject.h>
#include "ufs.h"
#include "ufshci.h"
#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.2"
+#define UFSHCD_DRIVER_VERSION "0.3"
+
+#define UFS_BIT(x) BIT(x)
+#define UFS_MASK(x, y)		((x) << ((y) % BITS_PER_LONG))
struct ufs_hba;
@@ -125,6 +134,26 @@ enum uic_link_state {
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
UIC_LINK_HIBERN8_STATE)
+enum {
+ /* errors which require the host controller reset for recovery */
+ UFS_ERR_HIBERN8_EXIT,
+ UFS_ERR_VOPS_SUSPEND,
+ UFS_ERR_EH,
+ UFS_ERR_CLEAR_PEND_XFER_TM,
+ UFS_ERR_INT_FATAL_ERRORS,
+ UFS_ERR_INT_UIC_ERROR,
+ UFS_ERR_CRYPTO_ENGINE,
+
+ /* other errors */
+ UFS_ERR_HIBERN8_ENTER,
+ UFS_ERR_RESUME,
+ UFS_ERR_SUSPEND,
+ UFS_ERR_LINKSTARTUP,
+ UFS_ERR_POWER_MODE_CHANGE,
+ UFS_ERR_TASK_ABORT,
+ UFS_ERR_MAX,
+};
+
/*
* UFS Power management levels.
* Each level is in increasing order of power savings.
@@ -150,6 +179,10 @@ struct ufs_pm_lvl_states {
* @ucd_req_ptr: UCD address of the command
* @ucd_rsp_ptr: Response UPIU address for this command
* @ucd_prdt_ptr: PRDT address of the command
+ * @utrd_dma_addr: UTRD dma address for debug
+ * @ucd_prdt_dma_addr: PRDT dma address for debug
+ * @ucd_rsp_dma_addr: UPIU response dma address for debug
+ * @ucd_req_dma_addr: UPIU request dma address for debug
* @cmd: pointer to SCSI command
* @sense_buffer: pointer to sense buffer address of the SCSI command
* @sense_bufflen: Length of the sense buffer
@@ -158,6 +191,9 @@ struct ufs_pm_lvl_states {
* @task_tag: Task tag of the command
* @lun: LUN of the command
* @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ * @issue_time_stamp: time stamp for debug purposes
+ * @complete_time_stamp: time stamp for statistics
+ * @req_abort_skip: skip request abort task flag
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -165,6 +201,11 @@ struct ufshcd_lrb {
struct utp_upiu_rsp *ucd_rsp_ptr;
struct ufshcd_sg_entry *ucd_prdt_ptr;
+ dma_addr_t utrd_dma_addr;
+ dma_addr_t ucd_req_dma_addr;
+ dma_addr_t ucd_rsp_dma_addr;
+ dma_addr_t ucd_prdt_dma_addr;
+
struct scsi_cmnd *cmd;
u8 *sense_buffer;
unsigned int sense_bufflen;
@@ -174,6 +215,10 @@ struct ufshcd_lrb {
int task_tag;
u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
+ ktime_t issue_time_stamp;
+ ktime_t complete_time_stamp;
+
+ bool req_abort_skip;
};
/**
@@ -245,7 +290,6 @@ struct ufs_pwr_mode_info {
/**
* struct ufs_hba_variant_ops - variant specific callbacks
- * @name: variant name
* @init: called when the driver is initialized
* @exit: called to cleanup everything done in init
* @get_ufs_hci_version: called to get UFS HCI version
@@ -261,17 +305,24 @@ struct ufs_pwr_mode_info {
* to be set.
* @suspend: called during host controller PM callback
* @resume: called during host controller PM callback
+ * @full_reset: called during link recovery to handle variant specific
+ *	implementations of resetting the host controller
* @dbg_register_dump: used to dump controller debug information
+ * @update_sec_cfg: called to restore host controller secure configuration
+ * @get_scale_down_gear: called to get the minimum supported gear to
+ * scale down
+ * @set_bus_vote: called to vote for the required bus bandwidth
+ * @add_debugfs: used to add debugfs entries
+ * @remove_debugfs: used to remove debugfs entries
*/
struct ufs_hba_variant_ops {
- const char *name;
int (*init)(struct ufs_hba *);
- void (*exit)(struct ufs_hba *);
+ void (*exit)(struct ufs_hba *);
u32 (*get_ufs_hci_version)(struct ufs_hba *);
int (*clk_scale_notify)(struct ufs_hba *, bool,
enum ufs_notify_change_status);
- int (*setup_clocks)(struct ufs_hba *, bool);
- int (*setup_regulators)(struct ufs_hba *, bool);
+ int (*setup_clocks)(struct ufs_hba *, bool, bool);
+ int (*setup_regulators)(struct ufs_hba *, bool);
int (*hce_enable_notify)(struct ufs_hba *,
enum ufs_notify_change_status);
int (*link_startup_notify)(struct ufs_hba *,
@@ -280,9 +331,60 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
- int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
- int (*resume)(struct ufs_hba *, enum ufs_pm_op);
- void (*dbg_register_dump)(struct ufs_hba *hba);
+ int (*apply_dev_quirks)(struct ufs_hba *);
+ int (*suspend)(struct ufs_hba *, enum ufs_pm_op);
+ int (*resume)(struct ufs_hba *, enum ufs_pm_op);
+ int (*full_reset)(struct ufs_hba *);
+ void (*dbg_register_dump)(struct ufs_hba *hba, bool no_sleep);
+ int (*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
+ u32 (*get_scale_down_gear)(struct ufs_hba *);
+ int (*set_bus_vote)(struct ufs_hba *, bool);
+#ifdef CONFIG_DEBUG_FS
+ void (*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
+ void (*remove_debugfs)(struct ufs_hba *hba);
+#endif
+};
+
+/**
+ * struct ufs_hba_crypto_variant_ops - variant specific crypto callbacks
+ * @crypto_req_setup: retrieve the necessary cryptographic arguments to set up
+ *	a request's transfer descriptor
+ * @crypto_engine_cfg_start: start configuring the cryptographic engine
+ *	according to the tag parameter
+ * @crypto_engine_cfg_end: end configuring the cryptographic engine
+ *	according to the tag parameter
+ * @crypto_engine_reset: reset the cryptographic engine
+ * @crypto_engine_get_status: get the error status of the cryptographic engine
+ */
+struct ufs_hba_crypto_variant_ops {
+ int (*crypto_req_setup)(struct ufs_hba *, struct ufshcd_lrb *lrbp,
+ u8 *cc_index, bool *enable, u64 *dun);
+ int (*crypto_engine_cfg_start)(struct ufs_hba *, unsigned int);
+ int (*crypto_engine_cfg_end)(struct ufs_hba *, struct ufshcd_lrb *,
+ struct request *);
+ int (*crypto_engine_reset)(struct ufs_hba *);
+ int (*crypto_engine_get_status)(struct ufs_hba *, u32 *);
+};
+
+/**
+ * struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks
+ * @req_start: called when a request is about to be issued
+ * @req_end: called when a request has completed
+ */
+struct ufs_hba_pm_qos_variant_ops {
+ void (*req_start)(struct ufs_hba *, struct request *);
+ void (*req_end)(struct ufs_hba *, struct request *, bool);
+};
+
+/**
+ * struct ufs_hba_variant - variant specific parameters
+ * @dev: pointer to the variant specific device
+ * @name: variant name
+ * @vops: variant specific operations
+ * @crypto_vops: variant specific crypto operations
+ * @pm_qos_vops: variant specific PM QoS operations
+ */
+struct ufs_hba_variant {
+ struct device *dev;
+ const char *name;
+ struct ufs_hba_variant_ops *vops;
+ struct ufs_hba_crypto_variant_ops *crypto_vops;
+ struct ufs_hba_pm_qos_variant_ops *pm_qos_vops;
};
/* clock gating state */
@@ -295,33 +397,117 @@ enum clk_gating_state {
/**
* struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
+ * @gate_hrtimer: hrtimer to invoke @gate_work after some delay as
+ * specified in @delay_ms
+ * @gate_work: worker to turn off clocks
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
* @state: the current clocks state
- * @delay_ms: gating delay in ms
+ * @delay_ms: current gating delay in ms
+ * @delay_ms_pwr_save: gating delay (in ms) in power save mode
+ * @delay_ms_perf: gating delay (in ms) in performance mode
* @is_suspended: clk gating is suspended when set to 1 which can be used
* during suspend/resume
- * @delay_attr: sysfs attribute to control delay_attr
+ * @delay_attr: sysfs attribute to control delay_ms if clock scaling is disabled
+ * @delay_pwr_save_attr: sysfs attribute to control delay_ms_pwr_save
+ * @delay_perf_attr: sysfs attribute to control delay_ms_perf
+ * @enable_attr: sysfs attribute to enable/disable clock gating
+ * @is_enabled: Indicates the current status of clock gating
* @active_reqs: number of requests that are pending and should be waited for
* completion before gating clocks.
*/
struct ufs_clk_gating {
- struct delayed_work gate_work;
+ struct hrtimer gate_hrtimer;
+ struct work_struct gate_work;
struct work_struct ungate_work;
enum clk_gating_state state;
unsigned long delay_ms;
+ unsigned long delay_ms_pwr_save;
+ unsigned long delay_ms_perf;
bool is_suspended;
struct device_attribute delay_attr;
+ struct device_attribute delay_pwr_save_attr;
+ struct device_attribute delay_perf_attr;
+ struct device_attribute enable_attr;
+ bool is_enabled;
int active_reqs;
+ struct workqueue_struct *clk_gating_workq;
};
+/* Hibern8 state */
+enum ufshcd_hibern8_on_idle_state {
+ HIBERN8_ENTERED,
+ HIBERN8_EXITED,
+ REQ_HIBERN8_ENTER,
+ REQ_HIBERN8_EXIT,
+ AUTO_HIBERN8,
+};
+
+/**
+ * struct ufs_hibern8_on_idle - UFS Hibern8 on idle related data
+ * @enter_work: worker to put UFS link in hibern8 after some delay as
+ * specified in delay_ms
+ * @exit_work: worker to bring UFS link out of hibern8
+ * @state: the current hibern8 state
+ * @delay_ms: hibern8 enter delay in ms
+ * @is_suspended: hibern8 enter is suspended when set to 1 which can be used
+ * during suspend/resume
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before scheduling delayed "enter_work".
+ * @delay_attr: sysfs attribute to control delay_ms
+ * @enable_attr: sysfs attribute to enable/disable hibern8 on idle
+ * @is_enabled: Indicates the current status of hibern8
+ */
+struct ufs_hibern8_on_idle {
+ struct delayed_work enter_work;
+ struct work_struct exit_work;
+ enum ufshcd_hibern8_on_idle_state state;
+ unsigned long delay_ms;
+ bool is_suspended;
+ int active_reqs;
+ struct device_attribute delay_attr;
+ struct device_attribute enable_attr;
+ bool is_enabled;
+};
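As described by the fields above, the intended idle flow is: decrement active_reqs when a request completes and, once it reaches zero, schedule enter_work after delay_ms. A minimal sketch of that path, with locking omitted and a hypothetical helper name (not part of this patch):

static void ufshcd_hibern8_on_idle_put(struct ufs_hba *hba)
{
	struct ufs_hibern8_on_idle *h8 = &hba->hibern8_on_idle;

	if (!h8->is_enabled || h8->is_suspended)
		return;

	/* arm the delayed hibern8 enter work only when the host goes idle */
	if (--h8->active_reqs == 0)
		schedule_delayed_work(&h8->enter_work,
				      msecs_to_jiffies(h8->delay_ms));
}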
+
+struct ufs_saved_pwr_info {
+ struct ufs_pa_layer_attr info;
+ bool is_valid;
+};
+
+/**
+ * struct ufs_clk_scaling - UFS clock scaling related data
+ * @active_reqs: number of requests that are pending. If this is zero when the
+ *	devfreq ->target() function is called, "suspend_work" is scheduled to
+ *	suspend devfreq.
+ * @tot_busy_t: Total busy time in current polling window
+ * @window_start_t: Start time (in jiffies) of the current polling window
+ * @busy_start_t: Start time of current busy period
+ * @enable_attr: sysfs attribute to enable/disable clock scaling
+ * @saved_pwr_info: the UFS power mode may also be changed during scaling, and
+ *	this keeps track of the previous power mode
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @is_allowed: tracks if scaling is currently allowed or not
+ * @is_busy_started: tracks if busy period has started or not
+ * @is_suspended: tracks if devfreq is suspended or not
+ * @is_scaled_up: tracks if we are currently scaled up or scaled down
+ */
struct ufs_clk_scaling {
- ktime_t busy_start_t;
- bool is_busy_started;
- unsigned long tot_busy_t;
+ int active_reqs;
+ unsigned long tot_busy_t;
unsigned long window_start_t;
+ ktime_t busy_start_t;
+ struct device_attribute enable_attr;
+ struct ufs_saved_pwr_info saved_pwr_info;
+ struct workqueue_struct *workq;
+ struct work_struct suspend_work;
+ struct work_struct resume_work;
+ bool is_allowed;
+ bool is_busy_started;
+ bool is_suspended;
+ bool is_scaled_up;
};
/**
@@ -333,6 +519,170 @@ struct ufs_init_prefetch {
u32 icc_level;
};
+#define UIC_ERR_REG_HIST_LENGTH 20
+/**
+ * struct ufs_uic_err_reg_hist - keeps history of uic errors
+ * @pos: index to indicate cyclic buffer position
+ * @reg: cyclic buffer for registers value
+ * @tstamp: cyclic buffer for time stamp
+ */
+struct ufs_uic_err_reg_hist {
+ int pos;
+ u32 reg[UIC_ERR_REG_HIST_LENGTH];
+ ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
+};
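Recording into this cyclic history is straightforward; a sketch under that reading, with a hypothetical helper name:

static void ufs_uic_err_reg_hist_record(struct ufs_uic_err_reg_hist *hist,
					u32 reg)
{
	/* store the register value and a timestamp, then advance cyclically */
	hist->reg[hist->pos] = reg;
	hist->tstamp[hist->pos] = ktime_get();
	hist->pos = (hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
}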
+
+#ifdef CONFIG_DEBUG_FS
+struct debugfs_files {
+ struct dentry *debugfs_root;
+ struct dentry *stats_folder;
+ struct dentry *tag_stats;
+ struct dentry *err_stats;
+ struct dentry *show_hba;
+ struct dentry *host_regs;
+ struct dentry *dump_dev_desc;
+ struct dentry *power_mode;
+ struct dentry *dme_local_read;
+ struct dentry *dme_peer_read;
+ struct dentry *dbg_print_en;
+ struct dentry *req_stats;
+ struct dentry *query_stats;
+ u32 dme_local_attr_id;
+ u32 dme_peer_attr_id;
+ struct dentry *reset_controller;
+ struct dentry *err_state;
+ bool err_occurred;
+#ifdef CONFIG_UFS_FAULT_INJECTION
+ struct dentry *err_inj_scenario;
+ struct dentry *err_inj_stats;
+ u32 err_inj_scenario_mask;
+ struct fault_attr fail_attr;
+#endif
+ bool is_sys_suspended;
+};
+
+/* tag stats statistics types */
+enum ts_types {
+ TS_NOT_SUPPORTED = -1,
+ TS_TAG = 0,
+ TS_READ = 1,
+ TS_WRITE = 2,
+ TS_URGENT_READ = 3,
+ TS_URGENT_WRITE = 4,
+ TS_FLUSH = 5,
+ TS_NUM_STATS = 6,
+};
+
+/**
+ * struct ufshcd_req_stat - statistics for request handling times (in usec)
+ * @min: shortest time measured
+ * @max: longest time measured
+ * @sum: sum of all the handling times measured (used for average calculation)
+ * @count: number of measurements taken
+ */
+struct ufshcd_req_stat {
+ u64 min;
+ u64 max;
+ u64 sum;
+ u64 count;
+};
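Updating such a statistics slot on request completion would look roughly like this (illustrative only; the helper and its caller are assumptions):

static void ufshcd_update_req_stat(struct ufshcd_req_stat *stat, u64 delta_us)
{
	/* track min/max and accumulate sum/count for average calculation */
	if (!stat->count || delta_us < stat->min)
		stat->min = delta_us;
	if (delta_us > stat->max)
		stat->max = delta_us;
	stat->sum += delta_us;
	stat->count++;
}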
+#endif
+
+enum ufshcd_ctx {
+ QUEUE_CMD,
+ ERR_HNDLR_WORK,
+ H8_EXIT_WORK,
+ UIC_CMD_SEND,
+ PWRCTL_CMD_SEND,
+ TM_CMD_SEND,
+ XFR_REQ_COMPL,
+ CLK_SCALE_WORK,
+};
+
+struct ufshcd_clk_ctx {
+ ktime_t ts;
+ enum ufshcd_ctx ctx;
+};
+
+/**
+ * struct ufs_stats - keeps usage/err statistics
+ * @enabled: enable tag stats for debugfs
+ * @tag_stats: pointer to tag statistic counters
+ * @q_depth: current amount of busy slots
+ * @err_stats: counters to keep track of various errors
+ * @req_stats: request handling time statistics per request type
+ * @query_stats_arr: array that holds query statistics
+ * @hibern8_exit_cnt: counter that keeps track of the number of hibern8 exits;
+ *	reset after link startup
+ * @last_hibern8_exit_tstamp: timestamp of the last hibern8 exit;
+ *	cleared after the first successful command completion
+ * @pa_err: tracks pa-uic errors
+ * @dl_err: tracks dl-uic errors
+ * @nl_err: tracks nl-uic errors
+ * @tl_err: tracks tl-uic errors
+ * @dme_err: tracks dme errors
+ */
+struct ufs_stats {
+#ifdef CONFIG_DEBUG_FS
+ bool enabled;
+ u64 **tag_stats;
+ int q_depth;
+ int err_stats[UFS_ERR_MAX];
+ struct ufshcd_req_stat req_stats[TS_NUM_STATS];
+ int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
+
+#endif
+ u32 last_intr_status;
+ ktime_t last_intr_ts;
+ struct ufshcd_clk_ctx clk_hold;
+ struct ufshcd_clk_ctx clk_rel;
+ u32 hibern8_exit_cnt;
+ ktime_t last_hibern8_exit_tstamp;
+ u32 power_mode_change_cnt;
+ struct ufs_uic_err_reg_hist pa_err;
+ struct ufs_uic_err_reg_hist dl_err;
+ struct ufs_uic_err_reg_hist nl_err;
+ struct ufs_uic_err_reg_hist tl_err;
+ struct ufs_uic_err_reg_hist dme_err;
+};
+
+/* UFS Host Controller debug print bitmask */
+#define UFSHCD_DBG_PRINT_CLK_FREQ_EN UFS_BIT(0)
+#define UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN UFS_BIT(1)
+#define UFSHCD_DBG_PRINT_HOST_REGS_EN UFS_BIT(2)
+#define UFSHCD_DBG_PRINT_TRS_EN UFS_BIT(3)
+#define UFSHCD_DBG_PRINT_TMRS_EN UFS_BIT(4)
+#define UFSHCD_DBG_PRINT_PWR_EN UFS_BIT(5)
+#define UFSHCD_DBG_PRINT_HOST_STATE_EN UFS_BIT(6)
+
+#define UFSHCD_DBG_PRINT_ALL \
+ (UFSHCD_DBG_PRINT_CLK_FREQ_EN | \
+ UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN | \
+ UFSHCD_DBG_PRINT_HOST_REGS_EN | UFSHCD_DBG_PRINT_TRS_EN | \
+ UFSHCD_DBG_PRINT_TMRS_EN | UFSHCD_DBG_PRINT_PWR_EN | \
+ UFSHCD_DBG_PRINT_HOST_STATE_EN)
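Each bit gates the corresponding debug dump against hba->ufshcd_dbg_print, e.g. (illustrative usage):

	if (hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN)
		ufshcd_print_host_regs(hba);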
+
+struct ufshcd_cmd_log_entry {
+ char *str; /* context like "send", "complete" */
+ char *cmd_type; /* "scsi", "query", "nop", "dme" */
+ u8 lun;
+ u8 cmd_id;
+ sector_t lba;
+ int transfer_len;
+ u8 idn; /* used only for query idn */
+ u32 doorbell;
+ u32 outstanding_reqs;
+ u32 seq_num;
+ unsigned int tag;
+ ktime_t tstamp;
+};
+
+struct ufshcd_cmd_log {
+ struct ufshcd_cmd_log_entry *entries;
+ int pos;
+ u32 seq_num;
+};
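Appending to the command log is again a cyclic-buffer operation; a sketch under the assumption that the entries array holds nr_entries slots sized elsewhere (both the helper name and the nr_entries parameter are hypothetical):

static void ufshcd_cmd_log_add(struct ufshcd_cmd_log *log, int nr_entries,
			       const struct ufshcd_cmd_log_entry *e)
{
	/* stamp and sequence the entry, then store it in the next slot */
	log->entries[log->pos] = *e;
	log->entries[log->pos].seq_num = log->seq_num++;
	log->entries[log->pos].tstamp = ktime_get();
	log->pos = (log->pos + 1) % nr_entries;
}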
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -352,7 +702,7 @@ struct ufs_init_prefetch {
* @nutrs: Transfer Request Queue depth supported by controller
* @nutmrs: Task Management Queue depth supported by controller
* @ufs_version: UFS Version to which controller complies
- * @vops: pointer to variant specific operations
+ * @var: pointer to variant specific data
* @priv: pointer to variant specific private data
* @irq: Irq number of the controller
* @active_uic_cmd: handle of active UIC command
@@ -378,10 +728,18 @@ struct ufs_init_prefetch {
* @dev_cmd: ufs device management command information
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
* @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @ufs_stats: ufshcd statistics to be used via debugfs
+ * @debugfs_files: debugfs files associated with the ufs stats
+ * @ufshcd_dbg_print: Bitmask for enabling debug prints
* @vreg_info: UFS device voltage regulator information
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
+ * @hibern8_on_idle: UFS Hibern8 on idle related data
+ * @urgent_bkops_lvl: keeps track of urgent bkops level for device
+ * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
+ * device is known or not.
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -407,9 +765,11 @@ struct ufs_hba {
enum ufs_dev_pwr_mode curr_dev_pwr_mode;
enum uic_link_state uic_link_state;
/* Desired UFS power management level during runtime PM */
- enum ufs_pm_level rpm_lvl;
+ int rpm_lvl;
/* Desired UFS power management level during system PM */
- enum ufs_pm_level spm_lvl;
+ int spm_lvl;
+ struct device_attribute rpm_lvl_attr;
+ struct device_attribute spm_lvl_attr;
int pm_op_in_progress;
struct ufshcd_lrb *lrb;
@@ -422,7 +782,7 @@ struct ufs_hba {
int nutrs;
int nutmrs;
u32 ufs_version;
- struct ufs_hba_variant_ops *vops;
+ struct ufs_hba_variant *var;
void *priv;
unsigned int irq;
bool is_irq_enabled;
@@ -467,8 +827,14 @@ struct ufs_hba {
*/
#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION UFS_BIT(5)
+ /* Auto hibern8 support is broken */
+ #define UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 UFS_BIT(6)
+
unsigned int quirks; /* Deviations from standard UFSHCI spec. */
+ /* Device deviations from standard UFS device spec. */
+ unsigned int dev_quirks;
+
wait_queue_head_t tm_wq;
wait_queue_head_t tm_tag_wq;
unsigned long tm_condition;
@@ -489,12 +855,17 @@ struct ufs_hba {
/* Work Queues */
struct work_struct eh_work;
struct work_struct eeh_work;
+ struct work_struct rls_work;
/* HBA Errors */
u32 errors;
u32 uic_error;
+ u32 ce_error; /* crypto engine errors */
u32 saved_err;
u32 saved_uic_err;
+ u32 saved_ce_err;
+ bool silence_err_logs;
+ bool force_host_reset;
/* Device management request data */
struct ufs_dev_cmd dev_cmd;
@@ -503,15 +874,33 @@ struct ufs_hba {
/* Keeps information of the UFS device connected to this host */
struct ufs_dev_info dev_info;
bool auto_bkops_enabled;
+
+ struct ufs_stats ufs_stats;
+#ifdef CONFIG_DEBUG_FS
+ struct debugfs_files debugfs_files;
+#endif
+
struct ufs_vreg_info vreg_info;
struct list_head clk_list_head;
bool wlun_dev_clr_ua;
+ /* Number of requests aborts */
+ int req_abort_count;
+
+ /* Number of lanes available (1 or 2) for Rx/Tx */
+ u32 lanes_per_direction;
+
+ /* Bitmask for enabling debug prints */
+ u32 ufshcd_dbg_print;
+
struct ufs_pa_layer_attr pwr_info;
struct ufs_pwr_mode_info max_pwr_info;
struct ufs_clk_gating clk_gating;
+ struct ufs_hibern8_on_idle hibern8_on_idle;
+ struct ufshcd_cmd_log cmd_log;
+
/* Control to enable/disable host capabilities */
u32 caps;
/* Allow dynamic clk gating */
@@ -528,6 +917,8 @@ struct ufs_hba {
* CAUTION: Enabling this might reduce overall UFS throughput.
*/
#define UFSHCD_CAP_INTR_AGGR (1 << 4)
+ /* Allow standalone Hibern8 enter on idle */
+#define UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE (1 << 5)
/*
* This capability allows the device auto-bkops to be always enabled
* except during suspend (both runtime and suspend).
@@ -535,17 +926,49 @@ struct ufs_hba {
* to do background operation when it's active but it might degrade
* the performance of ongoing read/write operations.
*/
-#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)
+#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 6)
+ /*
+ * If host controller hardware can be power collapsed when UFS link is
+ * in hibern8 then enable this cap.
+ */
+#define UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8 (1 << 7)
struct devfreq *devfreq;
struct ufs_clk_scaling clk_scaling;
bool is_sys_suspended;
+ enum bkops_status urgent_bkops_lvl;
+ bool is_urgent_bkops_lvl_checked;
+
+	/* synchronization between different contexts */
+ struct rw_semaphore lock;
+ unsigned long shutdown_in_prog;
+
+ struct reset_control *core_reset;
+ /* If set, don't gate device ref_clk during clock gating */
+ bool no_ref_clk_gating;
+
+ int scsi_block_reqs_cnt;
+
+ bool full_init_linereset;
+ struct pinctrl *pctrl;
+
int latency_hist_enabled;
struct io_latency_state io_lat_read;
struct io_latency_state io_lat_write;
+ bool restore_needed;
};
+static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
+{
+ set_bit(0, &hba->shutdown_in_prog);
+}
+
+static inline bool ufshcd_is_shutdown_ongoing(struct ufs_hba *hba)
+{
+ return !!(test_bit(0, &hba->shutdown_in_prog));
+}
+
/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
@@ -555,7 +978,7 @@ static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
-static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
@@ -563,6 +986,22 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}
+static inline bool ufshcd_is_hibern8_on_idle_allowed(struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
+}
+
+static inline bool ufshcd_is_power_collapse_during_hibern8_allowed(
+ struct ufs_hba *hba)
+{
+ return !!(hba->caps & UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8);
+}
+
+static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
+ struct ufs_hba *hba)
+{
+ return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
+}
static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
@@ -573,10 +1012,21 @@ static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
return false;
}
+static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
+{
+ return !!((hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
+ !(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8));
+}
+
+static inline bool ufshcd_is_crypto_supported(struct ufs_hba *hba)
+{
+ return !!(hba->capabilities & MASK_CRYPTO_SUPPORT);
+}
+
#define ufshcd_writel(hba, val, reg) \
- writel((val), (hba)->mmio_base + (reg))
+ writel_relaxed((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
- readl((hba)->mmio_base + (reg))
+ readl_relaxed((hba)->mmio_base + (reg))
/**
* ufshcd_rmwl - read modify write into a register
@@ -587,7 +1037,7 @@ static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
*/
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
- u32 tmp;
+ u32 tmp;
tmp = ufshcd_readl(hba, reg);
tmp &= ~mask;
@@ -599,21 +1049,11 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
void ufshcd_remove(struct ufs_hba *);
-
-/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
- ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
-}
-
-static inline void check_upiu_size(void)
-{
- BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
- GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
-}
+int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us,
+ unsigned long timeout_ms, bool can_sleep);
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
/**
* ufshcd_set_variant - set variant specific data to the hba
@@ -635,12 +1075,6 @@ static inline void *ufshcd_get_variant(struct ufs_hba *hba)
BUG_ON(!hba);
return hba->priv;
}
-static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
- struct ufs_hba *hba)
-{
- return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
-}
-
extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
@@ -698,76 +1132,139 @@ static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}
+/**
+ * ufshcd_dme_rmw - get modify set a dme attribute
+ * @hba - per adapter instance
+ * @mask - mask to apply on read value
+ * @val - actual value to write
+ * @attr - dme attribute
+ */
+static inline int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask,
+ u32 val, u32 attr)
+{
+ u32 cfg = 0;
+ int err = 0;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
+ if (err)
+ goto out;
+
+ cfg &= ~mask;
+ cfg |= (val & mask);
+
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);
+
+out:
+ return err;
+}
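For example, a caller could clear a single-bit attribute such as PA_LOCAL_TX_LCC_ENABLE with this helper (illustrative usage only):

	/* clear bit 0 of PA_LOCAL_TX_LCC_ENABLE via read-modify-write */
	err = ufshcd_dme_rmw(hba, 0x1, 0x0, PA_LOCAL_TX_LCC_ENABLE);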
+
+int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
+
+static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
+{
+ return (pwr_info->pwr_rx == FAST_MODE ||
+ pwr_info->pwr_rx == FASTAUTO_MODE) &&
+ (pwr_info->pwr_tx == FAST_MODE ||
+ pwr_info->pwr_tx == FASTAUTO_MODE);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static inline void ufshcd_init_req_stats(struct ufs_hba *hba)
+{
+ memset(hba->ufs_stats.req_stats, 0, sizeof(hba->ufs_stats.req_stats));
+}
+#else
+static inline void ufshcd_init_req_stats(struct ufs_hba *hba) {}
+#endif
+
+#define ASCII_STD true
+#define UTF16_STD false
+int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
+ u32 size, bool ascii);
+
+/* Expose Query-Request API */
+int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res);
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
+int ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode,
+ enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len);
+
int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
+void ufshcd_release(struct ufs_hba *hba, bool no_sched);
+int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us);
+int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode);
+void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
+ int result);
+u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
+
+void ufshcd_scsi_block_requests(struct ufs_hba *hba);
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
- if (hba->vops)
- return hba->vops->name;
+ if (hba->var && hba->var->name)
+ return hba->var->name;
return "";
}
static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->init)
- return hba->vops->init(hba);
-
+ if (hba->var && hba->var->vops && hba->var->vops->init)
+ return hba->var->vops->init(hba);
return 0;
}
static inline void ufshcd_vops_exit(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->exit)
- return hba->vops->exit(hba);
+ if (hba->var && hba->var->vops && hba->var->vops->exit)
+ hba->var->vops->exit(hba);
}
static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->get_ufs_hci_version)
- return hba->vops->get_ufs_hci_version(hba);
-
+ if (hba->var && hba->var->vops && hba->var->vops->get_ufs_hci_version)
+ return hba->var->vops->get_ufs_hci_version(hba);
return ufshcd_readl(hba, REG_UFS_VERSION);
}
static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
bool up, enum ufs_notify_change_status status)
{
- if (hba->vops && hba->vops->clk_scale_notify)
- return hba->vops->clk_scale_notify(hba, up, status);
+ if (hba->var && hba->var->vops && hba->var->vops->clk_scale_notify)
+ return hba->var->vops->clk_scale_notify(hba, up, status);
return 0;
}
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+ bool is_gating_context)
{
- if (hba->vops && hba->vops->setup_clocks)
- return hba->vops->setup_clocks(hba, on);
+ if (hba->var && hba->var->vops && hba->var->vops->setup_clocks)
+ return hba->var->vops->setup_clocks(hba, on, is_gating_context);
return 0;
}
static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
- if (hba->vops && hba->vops->setup_regulators)
- return hba->vops->setup_regulators(hba, status);
-
+ if (hba->var && hba->var->vops && hba->var->vops->setup_regulators)
+ return hba->var->vops->setup_regulators(hba, status);
return 0;
}
static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
bool status)
{
- if (hba->vops && hba->vops->hce_enable_notify)
- return hba->vops->hce_enable_notify(hba, status);
-
+ if (hba->var && hba->var->vops && hba->var->vops->hce_enable_notify)
+		return hba->var->vops->hce_enable_notify(hba, status);
return 0;
}
static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
bool status)
{
- if (hba->vops && hba->vops->link_startup_notify)
- return hba->vops->link_startup_notify(hba, status);
-
+ if (hba->var && hba->var->vops && hba->var->vops->link_startup_notify)
+ return hba->var->vops->link_startup_notify(hba, status);
return 0;
}
@@ -776,33 +1273,156 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
- if (hba->vops && hba->vops->pwr_change_notify)
- return hba->vops->pwr_change_notify(hba, status,
+ if (hba->var && hba->var->vops && hba->var->vops->pwr_change_notify)
+ return hba->var->vops->pwr_change_notify(hba, status,
dev_max_params, dev_req_params);
-
return -ENOTSUPP;
}
-static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->suspend)
- return hba->vops->suspend(hba, op);
+ if (hba->var && hba->var->vops && hba->var->vops->apply_dev_quirks)
+ return hba->var->vops->apply_dev_quirks(hba);
+ return 0;
+}
+static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->suspend)
+ return hba->var->vops->suspend(hba, op);
return 0;
}
static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
- if (hba->vops && hba->vops->resume)
- return hba->vops->resume(hba, op);
+ if (hba->var && hba->var->vops && hba->var->vops->resume)
+ return hba->var->vops->resume(hba, op);
+ return 0;
+}
+static inline int ufshcd_vops_full_reset(struct ufs_hba *hba)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->full_reset)
+ return hba->var->vops->full_reset(hba);
+ return 0;
+}
+
+static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba,
+ bool no_sleep)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->dbg_register_dump)
+ hba->var->vops->dbg_register_dump(hba, no_sleep);
+}
+
+static inline int ufshcd_vops_update_sec_cfg(struct ufs_hba *hba,
+ bool restore_sec_cfg)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->update_sec_cfg)
+ return hba->var->vops->update_sec_cfg(hba, restore_sec_cfg);
+ return 0;
+}
+
+static inline u32 ufshcd_vops_get_scale_down_gear(struct ufs_hba *hba)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->get_scale_down_gear)
+ return hba->var->vops->get_scale_down_gear(hba);
+ /* Default to lowest high speed gear */
+ return UFS_HS_G1;
+}
+
+static inline int ufshcd_vops_set_bus_vote(struct ufs_hba *hba, bool on)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->set_bus_vote)
+ return hba->var->vops->set_bus_vote(hba, on);
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba,
+ struct dentry *root)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->add_debugfs)
+ hba->var->vops->add_debugfs(hba, root);
+}
+
+static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba)
+{
+ if (hba->var && hba->var->vops && hba->var->vops->remove_debugfs)
+ hba->var->vops->remove_debugfs(hba);
+}
+#else
+static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba,
+					   struct dentry *root)
+{
+}
+
+static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba)
+{
+}
+#endif
+
+static inline int ufshcd_vops_crypto_req_setup(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
+{
+ if (hba->var && hba->var->crypto_vops &&
+ hba->var->crypto_vops->crypto_req_setup)
+ return hba->var->crypto_vops->crypto_req_setup(hba, lrbp,
+ cc_index, enable, dun);
return 0;
}
-static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
+static inline int ufshcd_vops_crypto_engine_cfg_start(struct ufs_hba *hba,
+ unsigned int task_tag)
+{
+ if (hba->var && hba->var->crypto_vops &&
+ hba->var->crypto_vops->crypto_engine_cfg_start)
+ return hba->var->crypto_vops->crypto_engine_cfg_start
+ (hba, task_tag);
+ return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_cfg_end(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp,
+ struct request *req)
+{
+ if (hba->var && hba->var->crypto_vops &&
+ hba->var->crypto_vops->crypto_engine_cfg_end)
+ return hba->var->crypto_vops->crypto_engine_cfg_end
+ (hba, lrbp, req);
+ return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_reset(struct ufs_hba *hba)
+{
+ if (hba->var && hba->var->crypto_vops &&
+ hba->var->crypto_vops->crypto_engine_reset)
+ return hba->var->crypto_vops->crypto_engine_reset(hba);
+ return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_get_status(struct ufs_hba *hba,
+ u32 *status)
+{
+ if (hba->var && hba->var->crypto_vops &&
+ hba->var->crypto_vops->crypto_engine_get_status)
+ return hba->var->crypto_vops->crypto_engine_get_status(hba,
+ status);
+ return 0;
+}
+
+static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba,
+ struct request *req)
+{
+ if (hba->var && hba->var->pm_qos_vops &&
+ hba->var->pm_qos_vops->req_start)
+ hba->var->pm_qos_vops->req_start(hba, req);
+}
+
+static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba,
+ struct request *req, bool lock)
{
- if (hba->vops && hba->vops->dbg_register_dump)
- hba->vops->dbg_register_dump(hba);
+ if (hba->var && hba->var->pm_qos_vops && hba->var->pm_qos_vops->req_end)
+ hba->var->pm_qos_vops->req_end(hba, req, lock);
}
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 0ae0967aaed8..c0e4650a75ad 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -48,6 +48,7 @@ enum {
REG_UFS_VERSION = 0x08,
REG_CONTROLLER_DEV_ID = 0x10,
REG_CONTROLLER_PROD_ID = 0x14,
+ REG_AUTO_HIBERN8_IDLE_TIMER = 0x18,
REG_INTERRUPT_STATUS = 0x20,
REG_INTERRUPT_ENABLE = 0x24,
REG_CONTROLLER_STATUS = 0x30,
@@ -72,15 +73,24 @@ enum {
REG_UIC_COMMAND_ARG_1 = 0x94,
REG_UIC_COMMAND_ARG_2 = 0x98,
REG_UIC_COMMAND_ARG_3 = 0x9C,
+
+ UFSHCI_REG_SPACE_SIZE = 0xA0,
+
+ REG_UFS_CCAP = 0x100,
+ REG_UFS_CRYPTOCAP = 0x104,
+
+ UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400,
};
/* Controller capability masks */
enum {
MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F,
MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000,
+ MASK_AUTO_HIBERN8_SUPPORT = 0x00800000,
MASK_64_ADDRESSING_SUPPORT = 0x01000000,
MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000,
MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000,
+ MASK_CRYPTO_SUPPORT = 0x10000000,
};
/* UFS Version 08h */
@@ -92,6 +102,7 @@ enum {
UFSHCI_VERSION_10 = 0x00010000, /* 1.0 */
UFSHCI_VERSION_11 = 0x00010100, /* 1.1 */
UFSHCI_VERSION_20 = 0x00000200, /* 2.0 */
+ UFSHCI_VERSION_21 = 0x00000210, /* 2.1 */
};
/*
@@ -108,8 +119,19 @@ enum {
#define MANUFACTURE_ID_MASK UFS_MASK(0xFFFF, 0)
#define PRODUCT_ID_MASK UFS_MASK(0xFFFF, 16)
-#define UFS_BIT(x) (1L << (x))
-
+/*
+ * AHIT - Auto-Hibernate Idle Timer 18h
+ */
+#define AUTO_HIBERN8_IDLE_TIMER_MASK UFS_MASK(0x3FF, 0)
+#define AUTO_HIBERN8_TIMER_SCALE_MASK UFS_MASK(0x7, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_1_US UFS_MASK(0x0, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_10_US UFS_MASK(0x1, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_100_US UFS_MASK(0x2, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_1_MS UFS_MASK(0x3, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_10_MS UFS_MASK(0x4, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_100_MS UFS_MASK(0x5, 10)
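Taken together, an AHIT value is a 10-bit timer field combined with a 3-bit scale, written to REG_AUTO_HIBERN8_IDLE_TIMER defined above. A hedged sketch of composing such a value (helper name hypothetical; illustrative only):

static u32 ufshcd_ahit_encode_1ms(u32 timer_ms)
{
	/* 10-bit timer value in 1 ms units plus the 1 ms scale selector */
	return (timer_ms & AUTO_HIBERN8_IDLE_TIMER_MASK) |
	       AUTO_HIBERN8_TIMER_SCALE_1_MS;
}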
+
+/* IS - Interrupt status (20h) / IE - Interrupt enable (24h) */
#define UTP_TRANSFER_REQ_COMPL UFS_BIT(0)
#define UIC_DME_END_PT_RESET UFS_BIT(1)
#define UIC_ERROR UFS_BIT(2)
@@ -124,6 +146,7 @@ enum {
#define DEVICE_FATAL_ERROR UFS_BIT(11)
#define CONTROLLER_FATAL_ERROR UFS_BIT(16)
#define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17)
+#define CRYPTO_ENGINE_FATAL_ERROR UFS_BIT(18)
#define UFSHCD_UIC_PWR_MASK (UIC_HIBERNATE_ENTER |\
UIC_HIBERNATE_EXIT |\
@@ -134,11 +157,13 @@ enum {
#define UFSHCD_ERROR_MASK (UIC_ERROR |\
DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR)
+ SYSTEM_BUS_FATAL_ERROR |\
+ CRYPTO_ENGINE_FATAL_ERROR)
#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
- SYSTEM_BUS_FATAL_ERROR)
+ SYSTEM_BUS_FATAL_ERROR |\
+ CRYPTO_ENGINE_FATAL_ERROR)
/* HCS - Host Controller Status 30h */
#define DEVICE_PRESENT UFS_BIT(0)
@@ -160,16 +185,21 @@ enum {
/* HCE - Host Controller Enable 34h */
#define CONTROLLER_ENABLE UFS_BIT(0)
+#define CRYPTO_GENERAL_ENABLE UFS_BIT(1)
#define CONTROLLER_DISABLE 0x0
/* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
#define UIC_PHY_ADAPTER_LAYER_ERROR UFS_BIT(31)
+#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR UFS_BIT(4)
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR UFS_BIT(31)
#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF
#define UIC_DATA_LINK_LAYER_ERROR_PA_INIT 0x2000
+#define UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED 0x0001
+#define UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT 0x0002
/* UECN - Host UIC Error Code Network Layer 40h */
#define UIC_NETWORK_LAYER_ERROR UFS_BIT(31)
@@ -209,6 +239,7 @@ enum {
/* GenSelectorIndex calculation macros for M-PHY attributes */
#define UIC_ARG_MPHY_TX_GEN_SEL_INDEX(lane) (lane)
+#define UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane) (PA_MAXDATALANES + (lane))
#define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\
((sel) & 0xFFFF))
@@ -262,6 +293,9 @@ enum {
/* Interrupt disable mask for UFSHCI v1.1 */
INTERRUPT_MASK_ALL_VER_11 = 0x31FFF,
+
+ /* Interrupt disable mask for UFSHCI v2.1 */
+ INTERRUPT_MASK_ALL_VER_21 = 0x71FFF,
};
/*
@@ -299,6 +333,9 @@ enum {
OCS_PEER_COMM_FAILURE = 0x5,
OCS_ABORTED = 0x6,
OCS_FATAL_ERROR = 0x7,
+ OCS_DEVICE_FATAL_ERROR = 0x8,
+ OCS_INVALID_CRYPTO_CONFIG = 0x9,
+ OCS_GENERAL_CRYPTO_ERROR = 0xA,
OCS_INVALID_COMMAND_STATUS = 0x0F,
MASK_OCS = 0x0F,
};
@@ -334,6 +371,8 @@ struct utp_transfer_cmd_desc {
struct ufshcd_sg_entry prd_table[SG_ALL];
};
+#define UTRD_CRYPTO_ENABLE UFS_BIT(23)
+
/**
* struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
* @dword0: Descriptor Header DW0
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 816a8a46efb8..602e196e9249 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -1,6 +1,4 @@
/*
- * drivers/scsi/ufs/unipro.h
- *
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
@@ -15,6 +13,7 @@
/*
* M-TX Configuration Attributes
*/
+#define TX_HIBERN8TIME_CAPABILITY 0x000F
#define TX_MODE 0x0021
#define TX_HSRATE_SERIES 0x0022
#define TX_HSGEAR 0x0023
@@ -48,8 +47,16 @@
#define RX_ENTER_HIBERN8 0x00A7
#define RX_BYPASS_8B10B_ENABLE 0x00A8
#define RX_TERMINATION_FORCE_ENABLE 0x0089
+#define RX_MIN_ACTIVATETIME_CAPABILITY 0x008F
+#define RX_HIBERN8TIME_CAPABILITY 0x0092
+
+#define MPHY_RX_ATTR_ADDR_START 0x81
+#define MPHY_RX_ATTR_ADDR_END 0xC1
#define is_mphy_tx_attr(attr) (attr < RX_MODE)
+#define RX_MIN_ACTIVATETIME_UNIT_US 100
+#define HIBERN8TIME_UNIT_US 100
+
/*
* PHY Adpater attributes
*/
@@ -70,6 +77,7 @@
#define PA_MAXRXSPEEDFAST 0x1541
#define PA_MAXRXSPEEDSLOW 0x1542
#define PA_TXLINKSTARTUPHS 0x1544
+#define PA_LOCAL_TX_LCC_ENABLE 0x155E
#define PA_TXSPEEDFAST 0x1565
#define PA_TXSPEEDSLOW 0x1566
#define PA_REMOTEVERINFO 0x15A0
@@ -83,6 +91,7 @@
#define PA_MAXRXHSGEAR 0x1587
#define PA_RXHSUNTERMCAP 0x15A5
#define PA_RXLSTERMCAP 0x15A6
+#define PA_GRANULARITY 0x15AA
#define PA_PACPREQTIMEOUT 0x1590
#define PA_PACPREQEOBTIMEOUT 0x1591
#define PA_HIBERN8TIME 0x15A7
@@ -110,6 +119,23 @@
#define PA_STALLNOCONFIGTIME 0x15A3
#define PA_SAVECONFIGTIME 0x15A4
+#define PA_TACTIVATE_TIME_UNIT_US 10
+#define PA_HIBERN8_TIME_UNIT_US 100
+
+#define PA_GRANULARITY_MIN_VAL 1
+#define PA_GRANULARITY_MAX_VAL 6
+
+/* PHY Adapter Protocol Constants */
+#define PA_MAXDATALANES 4
+
+#define DL_FC0ProtectionTimeOutVal_Default 8191
+#define DL_TC0ReplayTimeOutVal_Default 65535
+#define DL_AFC0ReqTimeOutVal_Default 32767
+
+#define DME_LocalFC0ProtectionTimeOutVal 0xD041
+#define DME_LocalTC0ReplayTimeOutVal 0xD042
+#define DME_LocalAFC0ReqTimeOutVal 0xD043
+
/* PA power modes */
enum {
FAST_MODE = 1,
@@ -143,6 +169,16 @@ enum ufs_hs_gear_tag {
UFS_HS_G3, /* HS Gear 3 */
};
+enum ufs_unipro_ver {
+ UFS_UNIPRO_VER_RESERVED = 0,
+ UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
+ UFS_UNIPRO_VER_1_41 = 2, /* UniPro version 1.41 */
+ UFS_UNIPRO_VER_1_6 = 3, /* UniPro version 1.6 */
+ UFS_UNIPRO_VER_MAX = 4, /* UniPro unsupported version */
+ /* UniPro version field mask in PA_LOCALVERINFO */
+ UFS_UNIPRO_VER_MASK = 0xF,
+};
+
/*
* Data Link Layer Attributes
*/