Diffstat (limited to 'drivers/scsi/ufs')
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-ice.c   13
-rw-r--r--  drivers/scsi/ufs/ufshcd.c         37
2 files changed, 35 insertions, 15 deletions
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 84765b17086c..d288e83ec9d7 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -27,6 +27,8 @@
#define UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN 0
+static struct workqueue_struct *ice_workqueue;
+
static void ufs_qcom_ice_dump_regs(struct ufs_qcom_host *qcom_host, int offset,
int len, char *prefix)
{
@@ -224,6 +226,13 @@ int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
}
qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
+ ice_workqueue = alloc_workqueue("ice-set-key",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (!ice_workqueue) {
+ dev_err(ufs_dev, "%s: workqueue allocation failed.\n",
+ __func__);
+ goto out;
+ }
INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work);
out:
@@ -284,7 +293,7 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
- if (!schedule_work(
+ if (!queue_work(ice_workqueue,
&qcom_host->ice_cfg_work)) {
qcom_host->req_pending = NULL;
@@ -404,7 +413,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
if (!qcom_host->work_pending) {
qcom_host->req_pending = cmd->request;
- if (!schedule_work(
+ if (!queue_work(ice_workqueue,
&qcom_host->ice_cfg_work)) {
qcom_host->req_pending = NULL;
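
The ufs-qcom-ice.c hunks above replace schedule_work(), which runs on the shared system workqueue, with queue_work() on a dedicated workqueue allocated with WQ_MEM_RECLAIM | WQ_HIGHPRI, so ICE key programming gets a high-priority worker pool and can make forward progress under memory pressure. The following is a minimal, self-contained sketch of that pattern only; the ice_demo_* names are made up for illustration and are not part of the driver.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ice_demo_wq;
static struct work_struct ice_demo_work;

static void ice_demo_work_fn(struct work_struct *work)
{
	pr_info("ice-demo: key programming work ran\n");
}

static int __init ice_demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees a rescuer thread so queued work can
	 * progress under memory pressure; WQ_HIGHPRI selects the
	 * high-priority worker pool. */
	ice_demo_wq = alloc_workqueue("ice-demo", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!ice_demo_wq)
		return -ENOMEM;

	INIT_WORK(&ice_demo_work, ice_demo_work_fn);

	/* queue_work() returns false if the item was already pending. */
	if (!queue_work(ice_demo_wq, &ice_demo_work))
		pr_info("ice-demo: work already queued\n");

	return 0;
}

static void __exit ice_demo_exit(void)
{
	destroy_workqueue(ice_demo_wq);	/* drains remaining work first */
}

module_init(ice_demo_init);
module_exit(ice_demo_exit);
MODULE_LICENSE("GPL");
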
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 129b3a1b3d7f..1b95523cc693 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -389,8 +389,8 @@ static int ufshcd_devfreq_get_dev_status(struct device *dev,
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
- .upthreshold = 35,
- .downdifferential = 30,
+ .upthreshold = 70,
+ .downdifferential = 65,
.simple_scaling = 1,
};
@@ -400,7 +400,7 @@ static void *gov_data;
#endif
static struct devfreq_dev_profile ufs_devfreq_profile = {
- .polling_ms = 40,
+ .polling_ms = 60,
.target = ufshcd_devfreq_target,
.get_dev_status = ufshcd_devfreq_get_dev_status,
};
@@ -1455,6 +1455,7 @@ unblock_reqs:
int ufshcd_hold(struct ufs_hba *hba, bool async)
{
int rc = 0;
+ bool flush_result;
unsigned long flags;
if (!ufshcd_is_clkgating_allowed(hba))
@@ -1480,8 +1481,15 @@ start:
*/
if (ufshcd_can_hibern8_during_gating(hba) &&
ufshcd_is_link_hibern8(hba)) {
+ if (async) {
+ rc = -EAGAIN;
+ hba->clk_gating.active_reqs--;
+ break;
+ }
spin_unlock_irqrestore(hba->host->host_lock, flags);
- flush_work(&hba->clk_gating.ungate_work);
+ flush_result = flush_work(&hba->clk_gating.ungate_work);
+ if (hba->clk_gating.is_suspended && !flush_result)
+ goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
goto start;
}
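
The ufshcd_hold() hunk above does two things: an async caller now fails fast with -EAGAIN instead of sleeping while the link is in hibern8, and the synchronous path checks the return value of flush_work(), which is true only when it actually waited for a pending work item and false when the work was already idle. Below is a minimal sketch of that return-value check with hypothetical demo_* names, assuming the same "give up if nothing was pending while suspended" policy as the hunk; it is not the driver code itself.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static struct work_struct demo_ungate_work;
static bool demo_gating_suspended;

static void demo_ungate_fn(struct work_struct *work)
{
	msleep(10);	/* stand-in for bringing the link out of hibern8 */
}

static int __init demo_flush_init(void)
{
	bool waited;

	INIT_WORK(&demo_ungate_work, demo_ungate_fn);
	schedule_work(&demo_ungate_work);

	/* flush_work() returns true if it waited for the work to finish,
	 * false if the work was already idle. */
	waited = flush_work(&demo_ungate_work);

	/* If gating is suspended and nothing was actually pending, retrying
	 * cannot make progress, so bail out instead of looping. */
	if (demo_gating_suspended && !waited)
		return -EAGAIN;

	return 0;
}

static void __exit demo_flush_exit(void)
{
	flush_work(&demo_ungate_work);
}

module_init(demo_flush_init);
module_exit(demo_flush_exit);
MODULE_LICENSE("GPL");
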
@@ -2419,6 +2427,9 @@ static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+
+ /* disable auto hibern8 */
+ hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
}
/**
@@ -3666,10 +3677,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out_unlock;
}
- hba->dev_cmd.query.descriptor = NULL;
*buf_len = be16_to_cpu(response->upiu_res.length);
out_unlock:
+ hba->dev_cmd.query.descriptor = NULL;
mutex_unlock(&hba->dev_cmd.lock);
out:
ufshcd_release_all(hba);
@@ -6305,6 +6316,7 @@ static void ufshcd_rls_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, rls_work);
pm_runtime_get_sync(hba->dev);
ufshcd_scsi_block_requests(hba);
+ down_write(&hba->lock);
ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
if (ret) {
dev_err(hba->dev,
@@ -6338,6 +6350,7 @@ static void ufshcd_rls_handler(struct work_struct *work)
hba->restore_needed = false;
out:
+ up_write(&hba->lock);
ufshcd_scsi_unblock_requests(hba);
pm_runtime_put_sync(hba->dev);
}
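
The ufshcd_rls_handler() hunk takes hba->lock for writing around the doorbell-drain-and-reconfigure sequence and releases it on the common out: label, so every exit path drops the lock. As a generic illustration of that shape only (hypothetical demo_* names, not the driver code): an rw_semaphore held for write across a critical section, with errors funnelled through a single unlock point.

#include <linux/module.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(demo_lock);

static int demo_reconfigure(bool fail_early)
{
	int ret = 0;

	/* A writer excludes both readers (the normal I/O path) and
	 * other writers for the duration of the reconfiguration. */
	down_write(&demo_lock);

	if (fail_early) {
		ret = -EIO;
		goto out;	/* error paths still reach the unlock */
	}

	/* ... drain outstanding work and reprogram settings here ... */

out:
	up_write(&demo_lock);
	return ret;
}

static int __init demo_rwsem_init(void)
{
	return demo_reconfigure(false);
}

static void __exit demo_rwsem_exit(void) { }

module_init(demo_rwsem_init);
module_exit(demo_rwsem_exit);
MODULE_LICENSE("GPL");
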
@@ -6721,20 +6734,17 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
- unsigned int tag;
u32 pos;
int err;
- u8 resp = 0xF;
- struct ufshcd_lrb *lrbp;
+ u8 resp = 0xF, lun;
unsigned long flags;
host = cmd->device->host;
hba = shost_priv(host);
- tag = cmd->request->tag;
ufshcd_print_cmd_log(hba);
- lrbp = &hba->lrb[tag];
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+ lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
+ err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
if (!err)
err = resp;
@@ -6743,7 +6753,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
/* clear the commands that were pending for corresponding LUN */
for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
- if (hba->lrb[pos].lun == lrbp->lun) {
+ if (hba->lrb[pos].lun == lun) {
err = ufshcd_clear_cmd(hba, pos);
if (err)
break;
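
The device-reset hunks above stop dereferencing hba->lrb[tag] via the command's request tag, which need not identify a populated LRB for an error-handling command, and instead derive the UPIU LUN directly from the scsi_cmnd, then walk the outstanding-request bitmap and clear only the slots belonging to that LUN. A minimal sketch of that bitmap walk follows; the demo_* types and names are made up stand-ins for the UFS structures.

#include <linux/bitops.h>
#include <linux/module.h>

#define DEMO_NUTRS 32

struct demo_slot {
	u8 lun;
};

static struct demo_slot demo_slots[DEMO_NUTRS];
static unsigned long demo_outstanding;

/* Clear every in-flight slot that belongs to @lun. */
static void demo_clear_lun(u8 lun)
{
	unsigned int pos;

	/* for_each_set_bit() visits only set bits, i.e. only the slots
	 * with a request actually outstanding. */
	for_each_set_bit(pos, &demo_outstanding, DEMO_NUTRS) {
		if (demo_slots[pos].lun != lun)
			continue;
		clear_bit(pos, &demo_outstanding);	/* stand-in for ufshcd_clear_cmd() */
	}
}

static int __init demo_bitmap_init(void)
{
	demo_slots[3].lun = 2;
	demo_slots[7].lun = 5;
	set_bit(3, &demo_outstanding);
	set_bit(7, &demo_outstanding);

	demo_clear_lun(2);	/* clears slot 3, leaves slot 7 untouched */
	return 0;
}

static void __exit demo_bitmap_exit(void) { }

module_init(demo_bitmap_init);
module_exit(demo_bitmap_exit);
MODULE_LICENSE("GPL");
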
@@ -7579,7 +7589,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
ufshcd_init_icc_levels(hba);
/* Add required well known logical units to scsi mid layer */
- if (ufshcd_scsi_add_wlus(hba))
+ ret = ufshcd_scsi_add_wlus(hba);
+ if (ret)
goto out;
/* Initialize devfreq after UFS device is detected */