summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorGilad Broner <gbroner@codeaurora.org>2014-10-26 22:18:43 +0200
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-22 10:57:46 -0700
commit695e85590b0ea486abe28c2033387acd7a2254d6 (patch)
treee4a4279a45cb6ece0b3f889fb7022a271e94969d
parent851c8c3971e151330ebf74efffeb15223b9d5e0e (diff)
scsi: ufs: add cpu-dma latency PM QOS request
Add PM QOS cpu-dma latency request to the driver. Latency parameter value is taken from the device tree node using an optional parameter 'qcom,cpu-dma-latency-us'. Unless specified, a default of 200us is used. Change-Id: I3e10da9e65fc7324897c866b0c2a40cc5e6ca070 Signed-off-by: Gilad Broner <gbroner@codeaurora.org> [subhashj@codeaurora.org: resolved trivial merge conflicts] Signed-off-by: Subhash Jadavani <subhashj@codeaurora.org> [venkatg@codeaurora.org: resolved trivial merge conflicts] Signed-off-by: Venkat Gopalakrishnan <venkatg@codeaurora.org>
-rw-r--r--Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt14
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c27
-rw-r--r--drivers/scsi/ufs/ufshcd.c226
-rw-r--r--include/linux/scsi/ufs/ufshcd.h71
4 files changed, 335 insertions, 3 deletions
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 8f8ee5cbff9a..9925d3af7912 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -113,6 +113,20 @@ Note: The instantaneous bandwidth (IB) value in the vectors-KBps field should
- qcom,bus-vector-names: specifies string IDs for the corresponding
bus vectors in the same order as qcom,msm-bus,vectors-KBps property.
+- qcom,cpu-dma-latency-us: optional parameter specifying the allowed CPU-DMA
+ latency parameter for PM QOS, in units of microseconds. If this parameter is
+ not specified a default of 200us is used.
+- qcom,cpu-affinity: this is a string that specifies the pm QoS request type.
+ The supported cpu affinity modes are:
+ "all_cores" - PM_QOS_REQ_ALL_CORES is applicable to all CPU cores that are
+ online; this can have a power impact when a larger number of CPUs are online.
+ "affine_irq" - PM_QOS_REQ_AFFINE_IRQ request type shall update/apply the vote
+ only to the CPU to which this IRQ's affinity is set.
+ "affine_cores" - PM_QOS_REQ_AFFINE_CORES request type is used for targets that
+ have a LITTLE cluster and will update/apply the vote to all the cores in the
+ LITTLE cluster.
+ The default CPU affinity mode is PM_QOS_REQ_AFFINE_IRQ.
+
Example:
ufshc@0xfc598000 {
...
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 31fe2e5797c8..9ba077a9af05 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -235,7 +235,31 @@ static void ufshcd_parse_pm_levels(struct ufs_hba *hba)
}
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_SMP
+static void ufshcd_parse_pm_qos(struct ufs_hba *hba)
+{
+ const char *cpu_affinity = NULL;
+
+ hba->pm_qos.cpu_dma_latency_us = UFS_DEFAULT_CPU_DMA_LATENCY_US;
+ of_property_read_u32(hba->dev->of_node, "qcom,cpu-dma-latency-us",
+ &hba->pm_qos.cpu_dma_latency_us);
+ dev_dbg(hba->dev, "cpu_dma_latency_us = %u\n",
+ hba->pm_qos.cpu_dma_latency_us);
+
+ hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
+ if (!of_property_read_string(hba->dev->of_node, "qcom,cpu-affinity",
+ &cpu_affinity)) {
+ if (!strcmp(cpu_affinity, "all_cores"))
+ hba->pm_qos.req.type = PM_QOS_REQ_ALL_CORES;
+ else if (!strcmp(cpu_affinity, "affine_cores"))
+ hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_CORES;
+ else if (!strcmp(cpu_affinity, "affine_irq"))
+ hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
+ }
+ dev_dbg(hba->dev, "hba->pm_qos.pm_qos_req.type = %u\n",
+ hba->pm_qos.req.type);
+}
+
/**
* ufshcd_pltfrm_suspend - suspend power management function
* @dev: pointer to device handle
@@ -339,6 +363,7 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto dealloc_host;
}
+ ufshcd_parse_pm_qos(hba);
ufshcd_parse_pm_levels(hba);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index dc796b7d30c3..54f53f547b32 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -149,6 +149,8 @@
/* IOCTL opcode for command - ufs set device read only */
#define UFS_IOCTL_BLKROSET BLKROSET
+#define UFSHCD_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -1405,15 +1407,193 @@ static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
}
+#ifdef CONFIG_SMP
+
+/* Host lock is assumed to be held by caller */
+static int __ufshcd_pm_qos_hold(struct ufs_hba *hba, bool async)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (!hba->pm_qos.cpu_dma_latency_us)
+ goto out;
+
+ hba->pm_qos.active_reqs++;
+ if (hba->pm_qos.is_suspended)
+ goto out;
+start:
+ switch (hba->pm_qos.state) {
+ case PM_QOS_VOTED:
+ /* nothing to do */
+ break;
+ case PM_QOS_REQ_UNVOTE:
+ /*
+ * Fall-through - unvoting is either running or completed,
+ * so need to perform voting.
+ */
+ case PM_QOS_UNVOTED:
+ scsi_block_requests(hba->host);
+ hba->pm_qos.state = PM_QOS_REQ_VOTE;
+ schedule_work(&hba->pm_qos.vote_work);
+ /* fall-through */
+ case PM_QOS_REQ_VOTE:
+ if (async) {
+ hba->pm_qos.active_reqs--;
+ ret = -EAGAIN;
+ break;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ flush_work(&hba->pm_qos.vote_work);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ goto start;
+ default:
+ dev_err(hba->dev, "%s: PM QoS invalid state %d\n", __func__,
+ hba->pm_qos.state);
+ ret = -EINVAL;
+ break;
+ }
+out:
+ return ret;
+}
+
+static int ufshcd_pm_qos_hold(struct ufs_hba *hba, bool async)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ret = __ufshcd_pm_qos_hold(hba, async);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return ret;
+}
+
+/* Host lock is assumed to be held by caller */
+static void __ufshcd_pm_qos_release(struct ufs_hba *hba)
+{
+ if (!hba->pm_qos.cpu_dma_latency_us)
+ return;
+
+ if (--hba->pm_qos.active_reqs)
+ return;
+
+ hba->pm_qos.state = PM_QOS_REQ_UNVOTE;
+ schedule_work(&hba->pm_qos.unvote_work);
+}
+
+static void ufshcd_pm_qos_release(struct ufs_hba *hba)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ __ufshcd_pm_qos_release(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_pm_qos_vote_work(struct work_struct *work)
+{
+ struct ufshcd_pm_qos *ufs_pm_qos =
+ container_of(work, struct ufshcd_pm_qos, vote_work);
+ struct ufs_hba *hba = container_of(ufs_pm_qos, struct ufs_hba, pm_qos);
+ unsigned long flags;
+
+ /*
+ * un-voting work might be running when a new request arrives
+ * and causes voting work to schedule. To prevent race condition
+ * make sure the un-voting is finished.
+ */
+ cancel_work_sync(&hba->pm_qos.unvote_work);
+
+ pm_qos_update_request(&hba->pm_qos.req,
+ hba->pm_qos.cpu_dma_latency_us);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pm_qos.state = PM_QOS_VOTED;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ scsi_unblock_requests(hba->host);
+}
+
+static void ufshcd_pm_qos_unvote_work(struct work_struct *work)
+{
+ struct ufshcd_pm_qos *ufs_pm_qos =
+ container_of(work, struct ufshcd_pm_qos, unvote_work);
+ struct ufs_hba *hba = container_of(ufs_pm_qos, struct ufs_hba, pm_qos);
+ unsigned long flags;
+
+ /*
+ * Check if new requests were submitted in the meantime and do not
+ * unvote if so.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (hba->pm_qos.active_reqs) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * When PM QoS voting is suspended (clocks scaled down or PM suspend
+ * taking place) we can un-vote immediately. Otherwise, un-voting is
+ * best done a bit later to accommodate for a burst of new upcoming
+ * requests.
+ */
+ if (hba->pm_qos.is_suspended)
+ pm_qos_update_request(&hba->pm_qos.req, PM_QOS_DEFAULT_VALUE);
+ else
+ pm_qos_update_request_timeout(&hba->pm_qos.req,
+ PM_QOS_DEFAULT_VALUE, UFSHCD_PM_QOS_UNVOTE_TIMEOUT_US);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pm_qos.state = PM_QOS_UNVOTED;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static int ufshcd_pm_qos_init(struct ufs_hba *hba)
+{
+ /*
+ * PM_QOS_REQ_AFFINE_CORES request type is used for targets that have
+ * a LITTLE cluster and will update/apply the vote to all the cores in
+ * the LITTLE cluster.
+ */
+ if (hba->pm_qos.req.type == PM_QOS_REQ_AFFINE_CORES)
+ hba->pm_qos.req.cpus_affine.bits[0] = 0x0F;
+ else if (hba->pm_qos.req.type == PM_QOS_REQ_AFFINE_IRQ)
+ hba->pm_qos.req.irq = hba->irq;
+
+ if (hba->pm_qos.cpu_dma_latency_us)
+ pm_qos_add_request(&hba->pm_qos.req,
+ PM_QOS_CPU_DMA_LATENCY, hba->pm_qos.cpu_dma_latency_us);
+ else
+ pm_qos_add_request(&hba->pm_qos.req,
+ PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+ hba->pm_qos.state = PM_QOS_VOTED;
+ hba->pm_qos.active_reqs = 0;
+ hba->pm_qos.is_suspended = false;
+ INIT_WORK(&hba->pm_qos.vote_work, ufshcd_pm_qos_vote_work);
+ INIT_WORK(&hba->pm_qos.unvote_work, ufshcd_pm_qos_unvote_work);
+
+ return 0;
+}
+
+static void ufshcd_pm_qos_remove(struct ufs_hba *hba)
+{
+ pm_qos_remove_request(&hba->pm_qos.req);
+}
+
+#endif /* CONFIG_SMP */
+
static void ufshcd_hold_all(struct ufs_hba *hba)
{
ufshcd_hold(hba, false);
+ ufshcd_pm_qos_hold(hba, false);
ufshcd_hibern8_hold(hba, false);
}
static void ufshcd_release_all(struct ufs_hba *hba)
{
ufshcd_hibern8_release(hba);
+ ufshcd_pm_qos_release(hba);
ufshcd_release(hba);
}
@@ -1443,6 +1623,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
scaling->is_busy_started = false;
}
}
+
/**
* ufshcd_send_command - Send SCSI or device management commands
* @hba: per adapter instance
@@ -2013,10 +2194,19 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
WARN_ON(hba->clk_gating.state != CLKS_ON);
+ err = ufshcd_pm_qos_hold(hba, true);
+ if (err) {
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release(hba);
+ goto out;
+ }
+
err = ufshcd_hibern8_hold(hba, true);
if (err) {
clear_bit_unlock(tag, &hba->lrb_in_use);
err = SCSI_MLQUEUE_HOST_BUSY;
+ ufshcd_pm_qos_release(hba);
ufshcd_release(hba);
goto out;
}
@@ -4275,6 +4465,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
/* Do not touch lrbp after scsi done */
cmd->scsi_done(cmd);
__ufshcd_release(hba);
+ __ufshcd_pm_qos_release(hba);
__ufshcd_hibern8_release(hba);
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
if (hba->dev_cmd.complete)
@@ -6506,6 +6697,7 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
enum ufs_pm_level pm_lvl;
enum ufs_dev_pwr_mode req_dev_pwr_mode;
enum uic_link_state req_link_state;
+ unsigned long flags;
hba->pm_op_in_progress = 1;
if (!ufshcd_is_shutdown_pm(pm_op)) {
@@ -6528,6 +6720,13 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba->clk_gating.is_suspended = true;
hba->hibern8_on_idle.is_suspended = true;
+ /* While entering PM suspend release the PM QoS vote and suspend it */
+ if (hba->pm_qos.state == PM_QOS_VOTED) {
+ pm_qos_update_request(&hba->pm_qos.req, PM_QOS_DEFAULT_VALUE);
+ hba->pm_qos.state = PM_QOS_UNVOTED;
+ }
+ hba->pm_qos.is_suspended = true;
+
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
goto disable_clks;
@@ -6632,6 +6831,9 @@ set_dev_active:
enable_gating:
hba->hibern8_on_idle.is_suspended = false;
hba->clk_gating.is_suspended = false;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pm_qos.is_suspended = false;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release_all(hba);
out:
hba->pm_op_in_progress = 0;
@@ -6656,6 +6858,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
int ret;
enum uic_link_state old_link_state;
+ unsigned long flags;
hba->pm_op_in_progress = 1;
old_link_state = hba->uic_link_state;
@@ -6720,6 +6923,10 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba->clk_gating.is_suspended = false;
hba->hibern8_on_idle.is_suspended = false;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pm_qos.is_suspended = false;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
if (hba->clk_scaling.is_allowed)
ufshcd_resume_clkscaling(hba);
@@ -7151,6 +7358,7 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
struct list_head *head = &hba->clk_list_head;
ktime_t start = ktime_get();
bool clk_state_changed = false;
+ unsigned long flags;
if (!head || list_empty(head))
goto out;
@@ -7203,6 +7411,11 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
clki->name, clk_get_rate(clki->clk));
}
+ /* Suspend PM QoS voting when clocks are scaled down and vice versa */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->pm_qos.is_suspended = !scale_up;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
out:
@@ -7401,6 +7614,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
+ /* Configure PM_QOS latency */
+ err = ufshcd_pm_qos_init(hba);
+ if (err) {
+ dev_err(hba->dev, "ufshcd_pm_qos_init failed, err=%d\n", err);
+ goto exit_gating;
+ }
+
/* Configure LRB */
ufshcd_host_memory_configure(hba);
@@ -7452,7 +7672,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
- goto exit_gating;
+ goto pm_qos_remove;
} else {
hba->is_irq_enabled = true;
}
@@ -7460,7 +7680,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = scsi_add_host(host, hba->dev);
if (err) {
dev_err(hba->dev, "scsi_add_host failed\n");
- goto exit_gating;
+ goto pm_qos_remove;
}
/* Host controller enable */
@@ -7517,6 +7737,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
out_remove_scsi_host:
scsi_remove_host(hba->host);
+pm_qos_remove:
+ ufshcd_pm_qos_remove(hba);
exit_gating:
ufshcd_exit_clk_gating(hba);
out_disable:
diff --git a/include/linux/scsi/ufs/ufshcd.h b/include/linux/scsi/ufs/ufshcd.h
index d6c130e38e43..18a4ec1509d9 100644
--- a/include/linux/scsi/ufs/ufshcd.h
+++ b/include/linux/scsi/ufs/ufshcd.h
@@ -54,6 +54,7 @@
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_qos.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
@@ -448,6 +449,38 @@ enum ts_types {
};
#endif
+/* PM QoS voting state */
+enum ufshcd_pm_qos_state {
+ PM_QOS_UNVOTED,
+ PM_QOS_VOTED,
+ PM_QOS_REQ_VOTE,
+ PM_QOS_REQ_UNVOTE,
+};
+
+/* Default latency for PM QOS */
+#define UFS_DEFAULT_CPU_DMA_LATENCY_US 200 /* microseconds */
+
+/**
+ * struct ufshcd_pm_qos - data related to PM QoS voting logic
+ * @vote_work: work object for voting procedure
+ * @unvote_work: work object for un-voting procedure
+ * @req: request object for PM QoS
+ * @cpu_dma_latency_us: requested latency value used for voting in microseconds
+ * @state: voting state machine current state
+ * @active_reqs: number of active requests requiring PM QoS voting
+ * @is_suspended: flag specifying whether voting logic is suspended.
+ * When set, voting will not occur for pending requests.
+ */
+struct ufshcd_pm_qos {
+ struct work_struct vote_work;
+ struct work_struct unvote_work;
+ struct pm_qos_request req;
+ u32 cpu_dma_latency_us;
+ enum ufshcd_pm_qos_state state;
+ int active_reqs;
+ bool is_suspended;
+};
+
/**
* struct ufs_hba - per adapter private structure
* @mmio_base: UFSHCI base register address
@@ -637,6 +670,9 @@ struct ufs_hba {
bool wlun_dev_clr_ua;
+ /* PM Quality-of-Service (QoS) data */
+ struct ufshcd_pm_qos pm_qos;
+
struct ufs_pa_layer_attr pwr_info;
struct ufs_pwr_mode_info max_pwr_info;
@@ -834,6 +870,41 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
};
#endif
+#ifndef CONFIG_SMP
+static inline int ufshcd_pm_qos_init(struct ufs_hba *hba)
+{
+ return 0;
+}
+
+static inline int ufshcd_pm_qos_hold(struct ufs_hba *hba, bool async)
+{
+ return 0;
+}
+
+static inline int __ufshcd_pm_qos_hold(struct ufs_hba *hba, bool async)
+{
+ return 0;
+}
+
+static inline int ufshcd_pm_qos_release(struct ufs_hba *hba)
+{
+ return 0;
+}
+
+static inline int __ufshcd_pm_qos_release(struct ufs_hba *hba)
+{
+ return 0;
+}
+
+static inline void ufshcd_pm_qos_remove(struct ufs_hba *hba)
+{
+}
+
+static inline void ufshcd_parse_pm_qos(struct ufs_hba *hba)
+{
+}
+#endif /* CONFIG_SMP */
+
/* Expose Query-Request API */
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);