summaryrefslogtreecommitdiff
path: root/drivers/power/qcom
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/power/qcom')
-rw-r--r--drivers/power/qcom/Kconfig66
-rw-r--r--drivers/power/qcom/Makefile3
-rw-r--r--drivers/power/qcom/apm.c1059
-rw-r--r--drivers/power/qcom/debug_core.c330
-rw-r--r--drivers/power/qcom/lpm-stats.c871
-rw-r--r--drivers/power/qcom/msm-core.c1133
6 files changed, 3462 insertions, 0 deletions
diff --git a/drivers/power/qcom/Kconfig b/drivers/power/qcom/Kconfig
new file mode 100644
index 000000000000..efb9dd9628bb
--- /dev/null
+++ b/drivers/power/qcom/Kconfig
@@ -0,0 +1,66 @@
+config MSM_PM
+ depends on PM
+ select MSM_IDLE_STATS if DEBUG_FS
+ select CPU_IDLE_MULTIPLE_DRIVERS
+ bool "Qualcomm platform specific PM driver"
+ help
+ Platform specific power driver to manage cores and l2
+ low power modes. It interfaces with various system
+ drivers and puts the cores into low power modes.
+
+config MSM_NOPM
+ default y if !PM
+ bool
+ help
+ This enables bare minimum support of power management at platform level.
+ i.e. WFI
+
+config APSS_CORE_EA
+ depends on CPU_FREQ && PM_OPP
+ bool "Qualcomm Technology Inc specific power aware driver"
+ help
+ Platform specific power aware driver to provide power
+ and temperature information to the scheduler.
+
+config MSM_APM
+ bool "Qualcomm Technologies, Inc. platform specific APM driver"
+ help
+ Platform specific driver to manage the power source of
+ memory arrays. Interfaces with regulator drivers to ensure
+ SRAM Vmin requirements are met across different performance
+ levels.
+
+if MSM_PM
+menuconfig MSM_IDLE_STATS
+ bool "Collect idle statistics"
+ help
+ Collect various low power mode idle statistics for the
+ cores and export them in proc/msm_pm_stats. Users can read
+ this data and determine which low power modes the cores
+ have entered and how many times they have done so.
+
+if MSM_IDLE_STATS
+
+config MSM_IDLE_STATS_FIRST_BUCKET
+ int "First bucket time"
+ default 62500
+ help
+ Upper time limit in nanoseconds of first bucket.
+
+config MSM_IDLE_STATS_BUCKET_SHIFT
+ int "Bucket shift"
+ default 2
+
+config MSM_IDLE_STATS_BUCKET_COUNT
+ int "Bucket count"
+ default 10
+
+config MSM_SUSPEND_STATS_FIRST_BUCKET
+ int "First bucket time for suspend"
+ default 1000000000
+ help
+ Upper time limit in nanoseconds of first bucket of the
+ histogram. This is for collecting statistics on suspend.
+
+endif # MSM_IDLE_STATS
+endif # MSM_PM
diff --git a/drivers/power/qcom/Makefile b/drivers/power/qcom/Makefile
new file mode 100644
index 000000000000..8e1ce14e384c
--- /dev/null
+++ b/drivers/power/qcom/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_MSM_IDLE_STATS) += lpm-stats.o
+obj-$(CONFIG_APSS_CORE_EA) += msm-core.o debug_core.o
+obj-$(CONFIG_MSM_APM) += apm.o
diff --git a/drivers/power/qcom/apm.c b/drivers/power/qcom/apm.c
new file mode 100644
index 000000000000..9455468f1734
--- /dev/null
+++ b/drivers/power/qcom/apm.c
@@ -0,0 +1,1059 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/power/qcom/apm.h>
+#include <soc/qcom/scm.h>
+#include <linux/arm-smccc.h>
+
+/*
+ * VDD_APCC
+ * =============================================================
+ * | VDD_MX | |
+ * | ==========================|============= |
+ * ___|___ ___|___ ___|___ ___|___ ___|___ ___|___
+ * | | | | | | | | | | | |
+ * | APCC | | MX HS | | MX HS | | APCC | | MX HS | | APCC |
+ * | HS | | | | | | HS | | | | HS |
+ * |_______| |_______| |_______| |_______| |_______| |_______|
+ * |_________| |_________| |__________|
+ * | | |
+ * ______|_____ ______|_____ _______|_____
+ * | | | | | |
+ * | | | | | |
+ * | CPU MEM | | L2 MEM | | L3 MEM |
+ * | Arrays | | Arrays | | Arrays |
+ * | | | | | |
+ * |____________| |____________| |_____________|
+ *
+ */
+
+/* Register value definitions */
+#define APCS_GFMUXA_SEL_VAL 0x13
+#define APCS_GFMUXA_DESEL_VAL 0x03
+#define MSM_APM_MX_MODE_VAL 0x00
+#define MSM_APM_APCC_MODE_VAL 0x10
+#define MSM_APM_MX_DONE_VAL 0x00
+#define MSM_APM_APCC_DONE_VAL 0x03
+#define MSM_APM_OVERRIDE_SEL_VAL 0xb0
+#define MSM_APM_SEC_CLK_SEL_VAL 0x30
+#define SPM_EVENT_SET_VAL 0x01
+#define SPM_EVENT_CLEAR_VAL 0x00
+
+/* Register bit mask definitions */
+#define MSM_APM_CTL_STS_MASK 0x0f
+
+/* Register offset definitions */
+#define APCC_APM_MODE 0x00000098
+#define APCC_APM_CTL_STS 0x000000a8
+#define APCS_SPARE 0x00000068
+#define APCS_VERSION 0x00000fd0
+
+#define HMSS_VERSION_1P2 0x10020000
+
+#define MSM_APM_SWITCH_TIMEOUT_US 10
+#define SPM_WAKEUP_DELAY_US 2
+#define SPM_EVENT_NUM 6
+
+#define MSM_APM_DRIVER_NAME "qcom,msm-apm"
+
+
+/*
+ * Arguments for the PSCI clock-source override SMC call.
+ * NOTE(review): semantics presumably defined by the secure monitor
+ * firmware — confirm against the platform's SiP call documentation.
+ */
+enum {
+ CLOCK_ASSERT_ENABLE,
+ CLOCK_ASSERT_DISABLE,
+ CLOCK_ASSERT_TOGGLE,
+};
+
+/* SoC variants supported by this driver; stored in ctrl->msm_id from
+ * the of_device_id match data.
+ */
+enum {
+ MSM8996_ID,
+ MSM8996PRO_ID,
+ MSM8953_ID,
+};
+
+/* Per-controller state; one instance per APM device node, linked on
+ * the global apm_ctrl_list.
+ */
+struct msm_apm_ctrl_dev {
+ struct list_head list; /* entry in apm_ctrl_list */
+ struct device *dev;
+ enum msm_apm_supply supply; /* rail currently powering the arrays */
+ spinlock_t lock; /* serializes APM switch register sequences */
+ void __iomem *reg_base; /* PM APCC global registers */
+ void __iomem *apcs_csr_base; /* APCS CSR registers (8996-class) */
+ void __iomem **apcs_spm_events_addr; /* SPM event regs, HMSS < v1.2 only */
+ void __iomem *apc0_pll_ctl_addr; /* APC0 PLL control, HMSS < v1.2 only */
+ void __iomem *apc1_pll_ctl_addr; /* APC1 PLL control, HMSS < v1.2 only */
+ bool clk_src_override; /* request PSCI clock override across switches */
+ u32 version; /* HMSS version read from APCS_VERSION */
+ struct dentry *debugfs; /* per-device debugfs directory */
+ u32 msm_id; /* MSM8996_ID / MSM8996PRO_ID / MSM8953_ID */
+};
+
+#if defined(CONFIG_DEBUG_FS)
+static struct dentry *apm_debugfs_base;
+#endif
+
+/* Protects apm_ctrl_list (controller registration and lookup). */
+static DEFINE_MUTEX(apm_ctrl_list_mutex);
+static LIST_HEAD(apm_ctrl_list);
+
+/*
+ * Get the resources associated with the APM controller from device tree
+ * and remap all I/O addresses that are relevant to this HW revision.
+ */
+static int msm_apm_ctrl_devm_ioremap(struct platform_device *pdev,
+ struct msm_apm_ctrl_dev *ctrl)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ static const char *res_name[SPM_EVENT_NUM] = {
+ "apc0-l2-spm",
+ "apc1-l2-spm",
+ "apc0-cpu0-spm",
+ "apc0-cpu1-spm",
+ "apc1-cpu0-spm",
+ "apc1-cpu1-spm"
+ };
+ int i, ret = 0;
+
+ /* The APCC global block is mandatory on all HMSS revisions. */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb");
+ if (!res) {
+ dev_err(dev, "Missing PM APCC Global register physical address");
+ return -EINVAL;
+ }
+ ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctrl->reg_base) {
+ dev_err(dev, "Failed to map PM APCC Global registers\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apcs-csr");
+ if (!res) {
+ dev_err(dev, "Missing APCS CSR physical base address");
+ return -EINVAL;
+ }
+ ctrl->apcs_csr_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctrl->apcs_csr_base) {
+ dev_err(dev, "Failed to map APCS CSR registers\n");
+ return -ENOMEM;
+ }
+
+ ctrl->clk_src_override = of_property_read_bool(dev->of_node,
+ "qcom,clock-source-override");
+
+ if (ctrl->clk_src_override)
+ dev_info(dev, "overriding clock sources across APM switch\n");
+
+ ctrl->version = readl_relaxed(ctrl->apcs_csr_base + APCS_VERSION);
+
+ /*
+ * HMSS v1.2 and later need no SPM event or PLL override programming,
+ * so the remaining resources are only required on older revisions.
+ */
+ if (ctrl->version >= HMSS_VERSION_1P2)
+ return ret;
+
+ ctrl->apcs_spm_events_addr = devm_kzalloc(&pdev->dev,
+ SPM_EVENT_NUM
+ * sizeof(void __iomem *),
+ GFP_KERNEL);
+ if (!ctrl->apcs_spm_events_addr) {
+ dev_err(dev, "Failed to allocate memory for APCS SPM event registers\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < SPM_EVENT_NUM; i++) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ res_name[i]);
+ if (!res) {
+ dev_err(dev, "Missing address for %s\n", res_name[i]);
+ ret = -EINVAL;
+ goto free_events;
+ }
+
+ ctrl->apcs_spm_events_addr[i] = devm_ioremap(dev, res->start,
+ resource_size(res));
+ if (!ctrl->apcs_spm_events_addr[i]) {
+ dev_err(dev, "Failed to map %s\n", res_name[i]);
+ ret = -ENOMEM;
+ goto free_events;
+ }
+
+ dev_dbg(dev, "%s event phys: %pa virt:0x%p\n", res_name[i],
+ &res->start, ctrl->apcs_spm_events_addr[i]);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "apc0-pll-ctl");
+ if (!res) {
+ dev_err(dev, "Missing APC0 PLL CTL physical address\n");
+ ret = -EINVAL;
+ goto free_events;
+ }
+
+ ctrl->apc0_pll_ctl_addr = devm_ioremap(dev,
+ res->start,
+ resource_size(res));
+ if (!ctrl->apc0_pll_ctl_addr) {
+ dev_err(dev, "Failed to map APC0 PLL CTL register\n");
+ ret = -ENOMEM;
+ goto free_events;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "apc1-pll-ctl");
+ if (!res) {
+ dev_err(dev, "Missing APC1 PLL CTL physical address\n");
+ ret = -EINVAL;
+ goto free_events;
+ }
+
+ ctrl->apc1_pll_ctl_addr = devm_ioremap(dev,
+ res->start,
+ resource_size(res));
+ if (!ctrl->apc1_pll_ctl_addr) {
+ dev_err(dev, "Failed to map APC1 PLL CTL register\n");
+ ret = -ENOMEM;
+ goto free_events;
+ }
+
+ return ret;
+
+free_events:
+ /* devm would release this at detach anyway; freeing early just
+ * keeps a half-populated event table from lingering.
+ */
+ devm_kfree(dev, ctrl->apcs_spm_events_addr);
+ return ret;
+}
+
+/* MSM8953 register offset definition */
+#define MSM8953_APM_DLY_CNTR 0x2ac
+
+/* Register field shift definitions */
+#define APM_CTL_SEL_SWITCH_DLY_SHIFT 0
+#define APM_CTL_RESUME_CLK_DLY_SHIFT 8
+#define APM_CTL_HALT_CLK_DLY_SHIFT 16
+#define APM_CTL_POST_HALT_DLY_SHIFT 24
+
+/* Register field mask definitions */
+#define APM_CTL_SEL_SWITCH_DLY_MASK GENMASK(7, 0)
+#define APM_CTL_RESUME_CLK_DLY_MASK GENMASK(15, 8)
+#define APM_CTL_HALT_CLK_DLY_MASK GENMASK(23, 16)
+#define APM_CTL_POST_HALT_DLY_MASK GENMASK(31, 24)
+
+/*
+ * Get the resources associated with the MSM8953 APM controller from
+ * device tree, remap all I/O addresses, and program the initial
+ * register configuration required for the MSM8953 APM controller device.
+ */
+static int msm8953_apm_ctrl_init(struct platform_device *pdev,
+ struct msm_apm_ctrl_dev *ctrl)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ u32 delay_counter, val = 0, regval = 0;
+ int rc = 0;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pm-apcc-glb");
+ if (!res) {
+ dev_err(dev, "Missing PM APCC Global register physical address\n");
+ return -ENODEV;
+ }
+ ctrl->reg_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!ctrl->reg_base) {
+ dev_err(dev, "Failed to map PM APCC Global registers\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * Initial APM register configuration required before starting
+ * APM HW controller.
+ */
+ regval = readl_relaxed(ctrl->reg_base + MSM8953_APM_DLY_CNTR);
+ val = regval;
+
+ /*
+ * Each of the four delay fields below is optional in DT; a field is
+ * read-modify-written into its byte of APM_DLY_CNTR only when the
+ * corresponding property is present, otherwise the reset/bootloader
+ * value is preserved.
+ */
+ if (of_find_property(dev->of_node, "qcom,apm-post-halt-delay", NULL)) {
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,apm-post-halt-delay", &delay_counter);
+ if (rc < 0) {
+ dev_err(dev, "apm-post-halt-delay read failed, rc = %d",
+ rc);
+ return rc;
+ }
+
+ val &= ~APM_CTL_POST_HALT_DLY_MASK;
+ val |= (delay_counter << APM_CTL_POST_HALT_DLY_SHIFT)
+ & APM_CTL_POST_HALT_DLY_MASK;
+ }
+
+ if (of_find_property(dev->of_node, "qcom,apm-halt-clk-delay", NULL)) {
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,apm-halt-clk-delay", &delay_counter);
+ if (rc < 0) {
+ dev_err(dev, "apm-halt-clk-delay read failed, rc = %d",
+ rc);
+ return rc;
+ }
+
+ val &= ~APM_CTL_HALT_CLK_DLY_MASK;
+ val |= (delay_counter << APM_CTL_HALT_CLK_DLY_SHIFT)
+ & APM_CTL_HALT_CLK_DLY_MASK;
+ }
+
+ if (of_find_property(dev->of_node, "qcom,apm-resume-clk-delay", NULL)) {
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,apm-resume-clk-delay", &delay_counter);
+ if (rc < 0) {
+ dev_err(dev, "apm-resume-clk-delay read failed, rc = %d",
+ rc);
+ return rc;
+ }
+
+ val &= ~APM_CTL_RESUME_CLK_DLY_MASK;
+ val |= (delay_counter << APM_CTL_RESUME_CLK_DLY_SHIFT)
+ & APM_CTL_RESUME_CLK_DLY_MASK;
+ }
+
+ if (of_find_property(dev->of_node, "qcom,apm-sel-switch-delay", NULL)) {
+ rc = of_property_read_u32(dev->of_node,
+ "qcom,apm-sel-switch-delay", &delay_counter);
+ if (rc < 0) {
+ dev_err(dev, "apm-sel-switch-delay read failed, rc = %d",
+ rc);
+ return rc;
+ }
+
+ val &= ~APM_CTL_SEL_SWITCH_DLY_MASK;
+ val |= (delay_counter << APM_CTL_SEL_SWITCH_DLY_SHIFT)
+ & APM_CTL_SEL_SWITCH_DLY_MASK;
+ }
+
+ /* Only touch the hardware if DT actually changed a field. */
+ if (val != regval) {
+ writel_relaxed(val, ctrl->reg_base + MSM8953_APM_DLY_CNTR);
+ /* make sure write completes before return */
+ mb();
+ }
+
+ return rc;
+}
+
+/*
+ * Optionally ask the secure monitor (via SMC function ID 0xC4000020) to
+ * force the CPU clock source to a safe reference (GPLL0) for the duration
+ * of an APM switch, or to restore the original source afterwards.
+ *
+ * @ctrl_dev: APM controller; no-op unless its DT set
+ *            "qcom,clock-source-override"
+ * @enable:   true to assert the override (GPLL0), false to restore
+ *
+ * Returns 0 on success or when no override is configured, otherwise the
+ * SMC error code.  Previously a failure was only logged and 0 was always
+ * returned, so the callers' "if (ret) goto done" guards could never fire
+ * and the supply switch proceeded on the wrong clock source.
+ */
+static int msm_apm_secure_clock_source_override(
+			struct msm_apm_ctrl_dev *ctrl_dev, bool enable)
+{
+	int ret;
+
+	if (ctrl_dev->clk_src_override) {
+		ret = __invoke_psci_fn_smc(0xC4000020, 3, enable ?
+					   CLOCK_ASSERT_ENABLE :
+					   CLOCK_ASSERT_DISABLE, 0);
+		if (ret) {
+			dev_err(ctrl_dev->dev, "PSCI request to switch to %s clock source failed\n",
+				enable ? "GPLL0" : "original");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Busy-poll APCC_APM_CTL_STS until its low status bits equal @done_val,
+ * giving up after MSM_APM_SWITCH_TIMEOUT_US microseconds.
+ *
+ * Caller holds ctrl_dev->lock with interrupts disabled, so udelay()
+ * polling is the only option here.
+ *
+ * Returns 0 when the switch completed, -ETIMEDOUT otherwise.
+ */
+static int msm8996_apm_wait_for_switch(struct msm_apm_ctrl_dev *ctrl_dev,
+ u32 done_val)
+{
+ int timeout = MSM_APM_SWITCH_TIMEOUT_US;
+ u32 regval;
+
+ while (timeout > 0) {
+ regval = readl_relaxed(ctrl_dev->reg_base + APCC_APM_CTL_STS);
+ if ((regval & MSM_APM_CTL_STS_MASK) == done_val)
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ /* timeout only reaches 0 when the loop never matched done_val. */
+ if (timeout == 0) {
+ dev_err(ctrl_dev->dev, "%s switch timed out. APCC_APM_CTL_STS=0x%x\n",
+ done_val == MSM_APM_MX_DONE_VAL
+ ? "APCC to MX" : "MX to APCC",
+ regval);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/*
+ * Switch the MSM8996 memory arrays to the MX supply.
+ *
+ * The register sequence (clear SPM events -> move APC/CBF to GPLL0 ->
+ * program APM mode -> poll completion -> restore clocks -> re-arm SPM
+ * events) is order-critical; the mb()/ndelay() calls are part of the
+ * hardware handshake and must not be reordered.
+ *
+ * Locking: scm_lmh_lock (external, presumably shared with the LMH SCM
+ * path — TODO confirm) is taken before the irqsave spinlock.
+ *
+ * Returns 0 on success or a negative errno (e.g. -ETIMEDOUT).
+ */
+static int msm8996_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ unsigned long flags;
+ int i, ret;
+
+ mutex_lock(&scm_lmh_lock);
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ ret = msm_apm_secure_clock_source_override(ctrl_dev, true);
+ if (ret)
+ goto done;
+
+ /* Perform revision-specific programming steps */
+ if (ctrl_dev->version < HMSS_VERSION_1P2) {
+ /* Clear SPM events */
+ for (i = 0; i < SPM_EVENT_NUM; i++)
+ writel_relaxed(SPM_EVENT_CLEAR_VAL,
+ ctrl_dev->apcs_spm_events_addr[i]);
+
+ udelay(SPM_WAKEUP_DELAY_US);
+
+ /* Switch APC/CBF to GPLL0 clock */
+ writel_relaxed(APCS_GFMUXA_SEL_VAL,
+ ctrl_dev->apcs_csr_base + APCS_SPARE);
+ ndelay(200);
+ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+ ctrl_dev->apc0_pll_ctl_addr);
+ ndelay(200);
+ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+ ctrl_dev->apc1_pll_ctl_addr);
+
+ /* Ensure writes complete before proceeding */
+ mb();
+ }
+
+ /* Switch arrays to MX supply and wait for its completion */
+ writel_relaxed(MSM_APM_MX_MODE_VAL, ctrl_dev->reg_base +
+ APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_MX_DONE_VAL);
+
+ /* Perform revision-specific programming steps */
+ if (ctrl_dev->version < HMSS_VERSION_1P2) {
+ /* Switch APC/CBF clocks to original source */
+ writel_relaxed(APCS_GFMUXA_DESEL_VAL,
+ ctrl_dev->apcs_csr_base + APCS_SPARE);
+ ndelay(200);
+ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+ ctrl_dev->apc0_pll_ctl_addr);
+ ndelay(200);
+ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+ ctrl_dev->apc1_pll_ctl_addr);
+
+ /* Complete clock source switch before SPM event sequence */
+ mb();
+
+ /* Set SPM events */
+ for (i = 0; i < SPM_EVENT_NUM; i++)
+ writel_relaxed(SPM_EVENT_SET_VAL,
+ ctrl_dev->apcs_spm_events_addr[i]);
+ }
+
+ /*
+ * Ensure that HMSS v1.0/v1.1 register writes are completed before
+ * bailing out in the case of a switching time out.
+ */
+ if (ret)
+ goto done;
+
+ ret = msm_apm_secure_clock_source_override(ctrl_dev, false);
+ if (ret)
+ goto done;
+
+ ctrl_dev->supply = MSM_APM_SUPPLY_MX;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
+
+done:
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+ mutex_unlock(&scm_lmh_lock);
+
+ return ret;
+}
+
+/*
+ * Switch the MSM8996 memory arrays to the APCC supply.
+ *
+ * Mirror image of msm8996_apm_switch_to_mx(): note that here the SPM
+ * events are re-armed BEFORE the clocks are restored (the opposite
+ * order to the MX path) — the sequence is hardware-mandated, so keep
+ * statement order and barriers intact.
+ *
+ * Returns 0 on success or a negative errno (e.g. -ETIMEDOUT).
+ */
+static int msm8996_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ unsigned long flags;
+ int i, ret;
+
+ mutex_lock(&scm_lmh_lock);
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ ret = msm_apm_secure_clock_source_override(ctrl_dev, true);
+ if (ret)
+ goto done;
+
+ /* Perform revision-specific programming steps */
+ if (ctrl_dev->version < HMSS_VERSION_1P2) {
+ /* Clear SPM events */
+ for (i = 0; i < SPM_EVENT_NUM; i++)
+ writel_relaxed(SPM_EVENT_CLEAR_VAL,
+ ctrl_dev->apcs_spm_events_addr[i]);
+
+ udelay(SPM_WAKEUP_DELAY_US);
+
+ /* Switch APC/CBF to GPLL0 clock */
+ writel_relaxed(APCS_GFMUXA_SEL_VAL,
+ ctrl_dev->apcs_csr_base + APCS_SPARE);
+ ndelay(200);
+ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+ ctrl_dev->apc0_pll_ctl_addr);
+ ndelay(200);
+ writel_relaxed(MSM_APM_OVERRIDE_SEL_VAL,
+ ctrl_dev->apc1_pll_ctl_addr);
+
+ /* Ensure previous writes complete before proceeding */
+ mb();
+ }
+
+ /* Switch arrays to APCC supply and wait for its completion */
+ writel_relaxed(MSM_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
+ APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_APCC_DONE_VAL);
+
+ /* Perform revision-specific programming steps */
+ if (ctrl_dev->version < HMSS_VERSION_1P2) {
+ /* Set SPM events */
+ for (i = 0; i < SPM_EVENT_NUM; i++)
+ writel_relaxed(SPM_EVENT_SET_VAL,
+ ctrl_dev->apcs_spm_events_addr[i]);
+
+ /* Complete SPM event sequence before clock source switch */
+ mb();
+
+ /* Switch APC/CBF clocks to original source */
+ writel_relaxed(APCS_GFMUXA_DESEL_VAL,
+ ctrl_dev->apcs_csr_base + APCS_SPARE);
+ ndelay(200);
+ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+ ctrl_dev->apc0_pll_ctl_addr);
+ ndelay(200);
+ writel_relaxed(MSM_APM_SEC_CLK_SEL_VAL,
+ ctrl_dev->apc1_pll_ctl_addr);
+ }
+
+ /*
+ * Ensure that HMSS v1.0/v1.1 register writes are completed before
+ * bailing out in the case of a switching time out.
+ */
+ if (ret)
+ goto done;
+
+ ret = msm_apm_secure_clock_source_override(ctrl_dev, false);
+ if (ret)
+ goto done;
+
+ ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
+
+done:
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+ mutex_unlock(&scm_lmh_lock);
+
+ return ret;
+}
+
+/*
+ * MSM8996PRO variant: the controller handles clock/SPM sequencing in
+ * hardware, so only the mode write and completion poll are needed.
+ * Returns 0 on success or -ETIMEDOUT.
+ */
+static int msm8996pro_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ /* Switch arrays to MX supply and wait for its completion */
+ writel_relaxed(MSM_APM_MX_MODE_VAL, ctrl_dev->reg_base +
+ APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_MX_DONE_VAL);
+ if (ret)
+ goto done;
+
+ ctrl_dev->supply = MSM_APM_SUPPLY_MX;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
+
+done:
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+ return ret;
+}
+
+/*
+ * MSM8996PRO variant of the APCC switch: mode write plus completion
+ * poll only (hardware sequences the rest).
+ * Returns 0 on success or -ETIMEDOUT.
+ */
+static int msm8996pro_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ /* Switch arrays to APCC supply and wait for its completion */
+ writel_relaxed(MSM_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
+ APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ ret = msm8996_apm_wait_for_switch(ctrl_dev, MSM_APM_APCC_DONE_VAL);
+ if (ret)
+ goto done;
+
+ ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
+
+done:
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+ return ret;
+}
+
+/* MSM8953 register value definitions */
+#define MSM8953_APM_MX_MODE_VAL 0x00
+#define MSM8953_APM_APCC_MODE_VAL 0x02
+#define MSM8953_APM_MX_DONE_VAL 0x00
+#define MSM8953_APM_APCC_DONE_VAL 0x03
+
+/* MSM8953 register offset definitions */
+#define MSM8953_APCC_APM_MODE 0x000002a8
+#define MSM8953_APCC_APM_CTL_STS 0x000002b0
+
+/* 8953 constants */
+#define MSM8953_APM_SWITCH_TIMEOUT_US 500
+
+/* Register bit mask definitions */
+#define MSM8953_APM_CTL_STS_MASK 0x1f
+
+/*
+ * MSM8953: program the MX mode and poll the 8953-specific status
+ * register (different offsets, mask, and a longer 500 us timeout than
+ * the 8996 path). Returns 0 on success or -ETIMEDOUT.
+ */
+static int msm8953_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ int timeout = MSM8953_APM_SWITCH_TIMEOUT_US;
+ u32 regval;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ /* Switch arrays to MX supply and wait for its completion */
+ writel_relaxed(MSM8953_APM_MX_MODE_VAL, ctrl_dev->reg_base +
+ MSM8953_APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ while (timeout > 0) {
+ regval = readl_relaxed(ctrl_dev->reg_base +
+ MSM8953_APCC_APM_CTL_STS);
+ if ((regval & MSM8953_APM_CTL_STS_MASK) ==
+ MSM8953_APM_MX_DONE_VAL)
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ dev_err(ctrl_dev->dev, "APCC to MX APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
+ regval);
+ } else {
+ ctrl_dev->supply = MSM_APM_SUPPLY_MX;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to MX\n");
+ }
+
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+ return ret;
+}
+
+/*
+ * MSM8953: program the APCC mode and poll for completion (mirror of
+ * msm8953_apm_switch_to_mx). Returns 0 on success or -ETIMEDOUT.
+ */
+static int msm8953_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ int timeout = MSM8953_APM_SWITCH_TIMEOUT_US;
+ u32 regval;
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctrl_dev->lock, flags);
+
+ /* Switch arrays to APCC supply and wait for its completion */
+ writel_relaxed(MSM8953_APM_APCC_MODE_VAL, ctrl_dev->reg_base +
+ MSM8953_APCC_APM_MODE);
+
+ /* Ensure write above completes before delaying */
+ mb();
+
+ while (timeout > 0) {
+ regval = readl_relaxed(ctrl_dev->reg_base +
+ MSM8953_APCC_APM_CTL_STS);
+ if ((regval & MSM8953_APM_CTL_STS_MASK) ==
+ MSM8953_APM_APCC_DONE_VAL)
+ break;
+
+ udelay(1);
+ timeout--;
+ }
+
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ dev_err(ctrl_dev->dev, "MX to APCC APM switch timed out. APCC_APM_CTL_STS=0x%x\n",
+ regval);
+ } else {
+ ctrl_dev->supply = MSM_APM_SUPPLY_APCC;
+ dev_dbg(ctrl_dev->dev, "APM supply switched to APCC\n");
+ }
+
+ spin_unlock_irqrestore(&ctrl_dev->lock, flags);
+
+ return ret;
+}
+
+/*
+ * Dispatch a switch-to-MX request to the SoC-specific implementation
+ * selected at probe time. Returns 0 on success or a negative errno.
+ */
+static int msm_apm_switch_to_mx(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	int ret = 0;
+
+	switch (ctrl_dev->msm_id) {
+	case MSM8996_ID:
+		ret = msm8996_apm_switch_to_mx(ctrl_dev);
+		break;
+	case MSM8996PRO_ID:
+		ret = msm8996pro_apm_switch_to_mx(ctrl_dev);
+		break;
+	case MSM8953_ID:
+		ret = msm8953_apm_switch_to_mx(ctrl_dev);
+		break;
+	default:
+		/*
+		 * Unreachable for devices accepted by msm_apm_probe(), but
+		 * fail loudly rather than report a switch that never ran.
+		 */
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * Dispatch a switch-to-APCC request to the SoC-specific implementation
+ * selected at probe time. Returns 0 on success or a negative errno.
+ */
+static int msm_apm_switch_to_apcc(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+	int ret = 0;
+
+	switch (ctrl_dev->msm_id) {
+	case MSM8996_ID:
+		ret = msm8996_apm_switch_to_apcc(ctrl_dev);
+		break;
+	case MSM8996PRO_ID:
+		ret = msm8996pro_apm_switch_to_apcc(ctrl_dev);
+		break;
+	case MSM8953_ID:
+		ret = msm8953_apm_switch_to_apcc(ctrl_dev);
+		break;
+	default:
+		/*
+		 * Unreachable for devices accepted by msm_apm_probe(), but
+		 * fail loudly rather than report a switch that never ran.
+		 */
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+/**
+ * msm_apm_get_supply() - Returns the supply that is currently
+ * powering the memory arrays
+ * @ctrl_dev: Pointer to an MSM APM controller device
+ *
+ * Returns the supply currently selected by the APM.
+ *
+ * NOTE(review): reads ctrl_dev->supply without taking ctrl_dev->lock,
+ * so a concurrent switch may yield a momentarily stale value — confirm
+ * callers tolerate this.
+ */
+int msm_apm_get_supply(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ return ctrl_dev->supply;
+}
+EXPORT_SYMBOL(msm_apm_get_supply);
+
+/**
+ * msm_apm_set_supply() - Perform the necessary steps to switch the voltage
+ * source of the memory arrays to a given supply
+ * @ctrl_dev: Pointer to an MSM APM controller device
+ * @supply: Power rail to use as supply for the memory
+ * arrays
+ *
+ * Returns 0 on success, -ETIMEDOUT on APM switch timeout, or -EPERM if
+ * the supply is not supported.
+ */
+int msm_apm_set_supply(struct msm_apm_ctrl_dev *ctrl_dev,
+ enum msm_apm_supply supply)
+{
+ int ret;
+
+ switch (supply) {
+ case MSM_APM_SUPPLY_APCC:
+ ret = msm_apm_switch_to_apcc(ctrl_dev);
+ break;
+ case MSM_APM_SUPPLY_MX:
+ ret = msm_apm_switch_to_mx(ctrl_dev);
+ break;
+ default:
+ /* Unknown rail: refuse rather than guess. */
+ ret = -EPERM;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(msm_apm_set_supply);
+
+/**
+ * msm_apm_ctrl_dev_get() - get a handle to the MSM APM controller linked to
+ * the device in device tree
+ * @dev: Pointer to the device
+ *
+ * The device must specify "qcom,apm-ctrl" property in its device tree
+ * node which points to an MSM APM controller device node.
+ *
+ * Returns an MSM APM controller handle if successful or ERR_PTR on any error.
+ * If the APM controller device hasn't probed yet, ERR_PTR(-EPROBE_DEFER) is
+ * returned.
+ */
+struct msm_apm_ctrl_dev *msm_apm_ctrl_dev_get(struct device *dev)
+{
+ struct msm_apm_ctrl_dev *ctrl_dev = NULL;
+ /* Default result: controller node exists but hasn't probed yet. */
+ struct msm_apm_ctrl_dev *dev_found = ERR_PTR(-EPROBE_DEFER);
+ struct device_node *ctrl_node;
+
+ if (!dev || !dev->of_node) {
+ pr_err("Invalid device node\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ctrl_node = of_parse_phandle(dev->of_node, "qcom,apm-ctrl", 0);
+ if (!ctrl_node) {
+ pr_err("Could not find qcom,apm-ctrl property in %s\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-ENXIO);
+ }
+
+ /* Match the phandle target against registered controllers. */
+ mutex_lock(&apm_ctrl_list_mutex);
+ list_for_each_entry(ctrl_dev, &apm_ctrl_list, list) {
+ if (ctrl_dev->dev && ctrl_dev->dev->of_node == ctrl_node) {
+ dev_found = ctrl_dev;
+ break;
+ }
+ }
+ mutex_unlock(&apm_ctrl_list_mutex);
+
+ of_node_put(ctrl_node);
+ return dev_found;
+}
+EXPORT_SYMBOL(msm_apm_ctrl_dev_get);
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* debugfs open: stash the ctrl_dev (i_private) for the read handler. */
+static int apm_supply_dbg_open(struct inode *inode, struct file *filep)
+{
+ filep->private_data = inode->i_private;
+
+ return 0;
+}
+
+/*
+ * debugfs "supply" read handler: report which rail currently powers the
+ * memory arrays as "APCC", "MX", or "ERR" (one line).
+ */
+static ssize_t apm_supply_dbg_read(struct file *filep, char __user *ubuf,
+				   size_t count, loff_t *ppos)
+{
+	struct msm_apm_ctrl_dev *ctrl_dev = filep->private_data;
+	const char *supply_name;
+	char buf[10];
+	int len;
+
+	if (!ctrl_dev) {
+		pr_err("invalid apm ctrl handle\n");
+		return -ENODEV;
+	}
+
+	switch (ctrl_dev->supply) {
+	case MSM_APM_SUPPLY_APCC:
+		supply_name = "APCC";
+		break;
+	case MSM_APM_SUPPLY_MX:
+		supply_name = "MX";
+		break;
+	default:
+		supply_name = "ERR";
+		break;
+	}
+
+	len = snprintf(buf, sizeof(buf), "%s\n", supply_name);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+/* Read-only debugfs node exposing the active supply. */
+static const struct file_operations apm_supply_fops = {
+ .open = apm_supply_dbg_open,
+ .read = apm_supply_dbg_read,
+};
+
+/* Create the shared "msm-apm" debugfs directory; failure is non-fatal. */
+static void apm_debugfs_base_init(void)
+{
+ apm_debugfs_base = debugfs_create_dir("msm-apm", NULL);
+
+ if (IS_ERR_OR_NULL(apm_debugfs_base))
+ pr_err("msm-apm debugfs base directory creation failed\n");
+}
+
+/*
+ * Create the per-controller debugfs directory and its read-only
+ * "supply" file. All failures are logged and ignored — debugfs is
+ * diagnostic only and must never fail the probe.
+ */
+static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ struct dentry *temp;
+
+ if (IS_ERR_OR_NULL(apm_debugfs_base)) {
+ pr_err("Base directory missing, cannot create apm debugfs nodes\n");
+ return;
+ }
+
+ ctrl_dev->debugfs = debugfs_create_dir(dev_name(ctrl_dev->dev),
+ apm_debugfs_base);
+ if (IS_ERR_OR_NULL(ctrl_dev->debugfs)) {
+ pr_err("%s debugfs directory creation failed\n",
+ dev_name(ctrl_dev->dev));
+ return;
+ }
+
+ temp = debugfs_create_file("supply", 0444, ctrl_dev->debugfs,
+ ctrl_dev, &apm_supply_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("supply mode creation failed\n");
+ return;
+ }
+}
+
+/* Tear down the per-controller debugfs directory (if it was created). */
+static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev)
+{
+ if (!IS_ERR_OR_NULL(ctrl_dev->debugfs))
+ debugfs_remove_recursive(ctrl_dev->debugfs);
+}
+
+/* Remove the shared base directory at module exit. */
+static void apm_debugfs_base_remove(void)
+{
+ debugfs_remove_recursive(apm_debugfs_base);
+}
+#else
+
+/* !CONFIG_DEBUG_FS: no-op stubs so callers need no #ifdefs. */
+static void apm_debugfs_base_init(void)
+{}
+
+static void apm_debugfs_init(struct msm_apm_ctrl_dev *ctrl_dev)
+{}
+
+static void apm_debugfs_deinit(struct msm_apm_ctrl_dev *ctrl_dev)
+{}
+
+static void apm_debugfs_base_remove(void)
+{}
+
+#endif
+
+/* DT match table; .data carries the SoC ID consumed in msm_apm_probe(). */
+static const struct of_device_id msm_apm_match_table[] = {
+ {
+ .compatible = "qcom,msm-apm",
+ .data = (void *)(uintptr_t)MSM8996_ID,
+ },
+ {
+ .compatible = "qcom,msm8996pro-apm",
+ .data = (void *)(uintptr_t)MSM8996PRO_ID,
+ },
+ {
+ .compatible = "qcom,msm8953-apm",
+ .data = (void *)(uintptr_t)MSM8953_ID,
+ },
+ {}
+};
+
+/*
+ * Probe: allocate controller state, run the SoC-specific register
+ * mapping/initialization, then publish the controller on apm_ctrl_list
+ * so msm_apm_ctrl_dev_get() callers stop seeing -EPROBE_DEFER.
+ */
+static int msm_apm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct msm_apm_ctrl_dev *ctrl;
+ const struct of_device_id *match;
+ int ret = 0;
+
+ dev_dbg(dev, "probing MSM Array Power Mux driver\n");
+
+ if (!dev->of_node) {
+ dev_err(dev, "Device tree node is missing\n");
+ return -ENODEV;
+ }
+
+ match = of_match_device(msm_apm_match_table, dev);
+ if (!match)
+ return -ENODEV;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&ctrl->list);
+ spin_lock_init(&ctrl->lock);
+ ctrl->dev = dev;
+ ctrl->msm_id = (uintptr_t)match->data;
+ platform_set_drvdata(pdev, ctrl);
+
+ /* SoC-specific I/O mapping and one-time register configuration. */
+ switch (ctrl->msm_id) {
+ case MSM8996_ID:
+ case MSM8996PRO_ID:
+ ret = msm_apm_ctrl_devm_ioremap(pdev, ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to add APM controller device\n");
+ return ret;
+ }
+ break;
+ case MSM8953_ID:
+ ret = msm8953_apm_ctrl_init(pdev, ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to initialize APM controller device: ret=%d\n",
+ ret);
+ return ret;
+ }
+ break;
+ default:
+ dev_err(dev, "unable to add APM controller device for msm_id:%d\n",
+ ctrl->msm_id);
+ return -ENODEV;
+ }
+
+ apm_debugfs_init(ctrl);
+ mutex_lock(&apm_ctrl_list_mutex);
+ list_add_tail(&ctrl->list, &apm_ctrl_list);
+ mutex_unlock(&apm_ctrl_list_mutex);
+
+ dev_dbg(dev, "MSM Array Power Mux driver probe successful");
+
+ return ret;
+}
+
+/*
+ * Remove: unpublish the controller from the lookup list and drop its
+ * debugfs nodes. All memory/mappings are devm-managed, so no explicit
+ * frees are needed here.
+ */
+static int msm_apm_remove(struct platform_device *pdev)
+{
+ struct msm_apm_ctrl_dev *ctrl_dev;
+
+ ctrl_dev = platform_get_drvdata(pdev);
+ if (ctrl_dev) {
+ mutex_lock(&apm_ctrl_list_mutex);
+ list_del(&ctrl_dev->list);
+ mutex_unlock(&apm_ctrl_list_mutex);
+ apm_debugfs_deinit(ctrl_dev);
+ }
+
+ return 0;
+}
+
+static struct platform_driver msm_apm_driver = {
+ .driver = {
+ .name = MSM_APM_DRIVER_NAME,
+ .of_match_table = msm_apm_match_table,
+ .owner = THIS_MODULE,
+ },
+ .probe = msm_apm_probe,
+ .remove = msm_apm_remove,
+};
+
+static int __init msm_apm_init(void)
+{
+ apm_debugfs_base_init();
+ return platform_driver_register(&msm_apm_driver);
+}
+
+static void __exit msm_apm_exit(void)
+{
+ platform_driver_unregister(&msm_apm_driver);
+ apm_debugfs_base_remove();
+}
+
+/*
+ * arch_initcall: registered early, presumably so the APM is available
+ * before its regulator/CPUfreq consumers probe — TODO confirm ordering
+ * requirement.
+ */
+arch_initcall(msm_apm_init);
+module_exit(msm_apm_exit);
+
+MODULE_DESCRIPTION("MSM Array Power Mux driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/qcom/debug_core.c b/drivers/power/qcom/debug_core.c
new file mode 100644
index 000000000000..51b6d63fe994
--- /dev/null
+++ b/drivers/power/qcom/debug_core.c
@@ -0,0 +1,330 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/string.h>
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/cpu.h>
+#include "soc/qcom/msm-core.h"
+
+#define MAX_PSTATES 50 /* capacity of a per-cpu debug p-state table */
+#define NUM_OF_PENTRY 3 /* number of variables for ptable node */
+#define NUM_OF_EENTRY 2 /* number of variables for enable node */
+
+/* Positions of the space-separated fields parsed from a debugfs write. */
+enum arg_offset {
+ CPU_OFFSET,
+ FREQ_OFFSET,
+ POWER_OFFSET,
+};
+
+/* Per-cpu debug state: the user-written table (head/len) plus the
+ * driver's original table (driver_data/driver_len) so that disabling
+ * the debug table can restore the live data. */
+struct core_debug {
+ int cpu;
+ struct cpu_pstate_pwr *head; /* debug p-state table written via debugfs */
+ int enabled; /* non-zero once the debug table is activated */
+ int len; /* valid entries in head[] */
+ struct cpu_pwr_stats *ptr; /* live per-cpu stats owned by msm-core */
+ struct cpu_pstate_pwr *driver_data; /* original driver table (restore) */
+ int driver_len;
+};
+
+static DEFINE_PER_CPU(struct core_debug, c_dgfs);
+static struct cpu_pwr_stats *msm_core_data;
+/* Static help text exposed read-only as the debugfs "help" blob. */
+static struct debugfs_blob_wrapper help_msg = {
+ .data =
+"MSM CORE Debug-FS Support\n"
+"\n"
+"Hierarchy schema\n"
+"/sys/kernel/debug/msm_core\n"
+" /help - Static help text\n"
+" /ptable - write to p-state table\n"
+" /enable - enable the written p-state table\n"
+" /ptable_dump - Dump the debug ptable\n"
+"\n"
+"Usage\n"
+" Input test frequency and power information in ptable:\n"
+" echo \"0 300000 120\" > ptable\n"
+" format: <cpu> <frequency in khz> <power>\n"
+"\n"
+" Enable the ptable for the cpu:\n"
+" echo \"0 1\" > enable\n"
+" format: <cpu> <1 to enable, 0 to disable>\n"
+" Note: Writing 0 to disable will reset/clear the ptable\n"
+"\n"
+" Dump the entire ptable:\n"
+" cat ptable\n"
+" ----- CPU0 - Enabled ---------\n"
+" Freq Power\n"
+" 700000 120\n"
+"----- CPU0 - Live numbers -----\n"
+" Freq Power\n"
+" 300000 218\n"
+" ----- CPU1 - Written ---------\n"
+" Freq Power\n"
+" 700000 120\n"
+" Ptable dump will dump the status of the table as well\n"
+" It shows:\n"
+" Enabled -> for a cpu that debug ptable enabled\n"
+" Written -> for a cpu that has debug ptable values written\n"
+" but not enabled\n"
+"\n",
+
+};
+
+/*
+ * Insert or update one <cpu, freq, power> tuple in that cpu's debug
+ * p-state table, keeping entries sorted by ascending frequency.  The
+ * backing array is allocated lazily on first write and never freed.
+ */
+static void add_to_ptable(unsigned int *arg)
+{
+ struct core_debug *node;
+ int i, cpu = arg[CPU_OFFSET];
+ uint32_t freq = arg[FREQ_OFFSET];
+ uint32_t power = arg[POWER_OFFSET];
+
+ if (!cpu_possible(cpu))
+ return;
+
+ if ((freq == 0) || (power == 0)) {
+ pr_warn("Incorrect power data\n");
+ return;
+ }
+
+ node = &per_cpu(c_dgfs, cpu);
+
+ if (node->len >= MAX_PSTATES) {
+ pr_warn("Dropped ptable update - no space left.\n");
+ return;
+ }
+
+ if (!node->head) {
+ /* NOTE(review): one slot beyond MAX_PSTATES is allocated and
+ * stays zeroed - presumably a freq==0 terminator for readers;
+ * confirm against msm-core consumers. */
+ node->head = kzalloc(sizeof(struct cpu_pstate_pwr) *
+ (MAX_PSTATES + 1),
+ GFP_KERNEL);
+ if (!node->head)
+ return;
+ }
+
+ /* Existing frequency: just refresh its power value. */
+ for (i = 0; i < node->len; i++) {
+ if (node->head[i].freq == freq) {
+ node->head[i].power = power;
+ return;
+ }
+ }
+
+ /* Insert a new frequency (may need to move things around to
+ keep in ascending order). */
+ for (i = MAX_PSTATES - 1; i > 0; i--) {
+ if (node->head[i-1].freq > freq) {
+ /* Shift larger entries one slot up to open a gap. */
+ node->head[i].freq = node->head[i-1].freq;
+ node->head[i].power = node->head[i-1].power;
+ } else if (node->head[i-1].freq != 0) {
+ /* First populated entry that is <= freq: insert after it. */
+ break;
+ }
+ }
+
+ if (node->len < MAX_PSTATES) {
+ node->head[i].freq = freq;
+ node->head[i].power = power;
+ node->len++;
+ }
+
+ /* Keep the live stats' view of the table length in sync. */
+ if (node->ptr)
+ node->ptr->len = node->len;
+}
+
+/*
+ * Parse up to @n space-separated unsigned decimal integers from @line
+ * into @arg.  Returns 0 on success or a negative errno.
+ *
+ * Fix: the original broke out and returned 0 (success) when the input
+ * held fewer than @n tokens, leaving the remaining arg[] entries
+ * uninitialized for the caller to consume.  A short input is now
+ * rejected with -EINVAL.
+ */
+static int split_ptable_args(char *line, unsigned int *arg, uint32_t n)
+{
+	char *args;
+	int i;
+	int ret;
+
+	for (i = 0; i < n; i++) {
+		if (!line)
+			return -EINVAL;
+		args = strsep(&line, " ");
+		ret = kstrtouint(args, 10, &arg[i]);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * debugfs "ptable" write handler: parse "<cpu> <freq_khz> <power>"
+ * and insert/update that entry in the cpu's debug p-state table.
+ * Returns the consumed length, 0 for an empty write, or -errno.
+ */
+static ssize_t msm_core_ptable_write(struct file *file,
+ const char __user *ubuf, size_t len, loff_t *offp)
+{
+ char *kbuf;
+ int ret;
+ unsigned int arg[3];
+
+ if (len == 0)
+ return 0;
+
+ /* +1 so the buffer is always NUL-terminated for parsing. */
+ kbuf = kzalloc(len + 1, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(kbuf, ubuf, len)) {
+ ret = -EFAULT;
+ goto done;
+ }
+ kbuf[len] = '\0';
+ ret = split_ptable_args(kbuf, arg, NUM_OF_PENTRY);
+ if (!ret) {
+ add_to_ptable(arg);
+ ret = len;
+ }
+done:
+ kfree(kbuf);
+ return ret;
+}
+
+/* Emit one frequency/power table of @len entries to the seq_file. */
+static void print_table(struct seq_file *m, struct cpu_pstate_pwr *c_n,
+		int len)
+{
+	int idx;
+
+	seq_puts(m, " Freq Power\n");
+	for (idx = 0; idx < len; idx++)
+		seq_printf(m, " %d %u\n", c_n[idx].freq, c_n[idx].power);
+}
+
+/*
+ * seq_file show for "ptable": for each possible cpu, dump the
+ * user-written debug table (tagged Enabled/Written) followed by the
+ * table msm-core is currently serving.
+ */
+static int msm_core_ptable_read(struct seq_file *m, void *data)
+{
+ int cpu;
+ struct core_debug *node;
+
+ for_each_possible_cpu(cpu) {
+ node = &per_cpu(c_dgfs, cpu);
+ if (node->head) {
+ seq_printf(m, "----- CPU%d - %s - Debug -------\n",
+ cpu, node->enabled == 1 ? "Enabled" : "Written");
+ print_table(m, node->head, node->len);
+ }
+ /* NOTE(review): node->ptr is dereferenced unchecked; it is
+ * populated by msm_core_debug_init() - confirm this file op
+ * cannot run before that initcall completes. */
+ if (msm_core_data[cpu].ptable) {
+ seq_printf(m, "--- CPU%d - Live numbers at %ldC---\n",
+ cpu, node->ptr->temp);
+ print_table(m, msm_core_data[cpu].ptable,
+ node->driver_len);
+ }
+ }
+ return 0;
+}
+
+/*
+ * debugfs "enable" write handler: parse "<cpu> <0|1>".  Writing 1
+ * points the live msm-core table at the user-written debug table;
+ * writing 0 restores the driver's original table and clears the debug
+ * table length.  Power-update listeners are notified of the change.
+ *
+ * Fix: the notifier chain used to run even when the parsed cpu number
+ * was not a possible cpu; it is now invoked only after a valid cpu's
+ * table has actually been switched.
+ */
+static ssize_t msm_core_enable_write(struct file *file,
+	const char __user *ubuf, size_t len, loff_t *offp)
+{
+	char *kbuf;
+	int ret;
+	unsigned int arg[3];
+	int cpu;
+
+	if (len == 0)
+		return 0;
+
+	/* +1 so the buffer is always NUL-terminated for parsing. */
+	kbuf = kzalloc(len + 1, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	if (copy_from_user(kbuf, ubuf, len)) {
+		ret = -EFAULT;
+		goto done;
+	}
+	kbuf[len] = '\0';
+	ret = split_ptable_args(kbuf, arg, NUM_OF_EENTRY);
+	if (ret)
+		goto done;
+	cpu = arg[CPU_OFFSET];
+
+	if (cpu_possible(cpu)) {
+		struct core_debug *node = &per_cpu(c_dgfs, cpu);
+
+		/* arg[FREQ_OFFSET] is the second token: the enable flag. */
+		if (arg[FREQ_OFFSET]) {
+			msm_core_data[cpu].ptable = node->head;
+			msm_core_data[cpu].len = node->len;
+		} else {
+			msm_core_data[cpu].ptable = node->driver_data;
+			msm_core_data[cpu].len = node->driver_len;
+			node->len = 0;
+		}
+		node->enabled = arg[FREQ_OFFSET];
+		blocking_notifier_call_chain(
+			get_power_update_notifier(), cpu, NULL);
+	}
+	ret = len;
+
+done:
+	kfree(kbuf);
+	return ret;
+}
+
+/* Write-only "enable" node; reads are not supported. */
+static const struct file_operations msm_core_enable_ops = {
+ .write = msm_core_enable_write,
+};
+
+/* Open hook binding the seq_file show routine for ptable dumps. */
+static int msm_core_dump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, msm_core_ptable_read, inode->i_private);
+}
+
+/* "ptable" node: read dumps all tables, write inserts an entry. */
+static const struct file_operations msm_core_ptable_ops = {
+ .open = msm_core_dump_open,
+ .read = seq_read,
+ .write = msm_core_ptable_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Create the /sys/kernel/debug/msm_core hierarchy (help, ptable,
+ * enable) and snapshot each cpu's driver-provided power table so it
+ * can be restored when the debug table is disabled.
+ *
+ * Fixes: the original could return PTR_ERR(NULL) == 0 (reported as
+ * success) when get_cpu_pwr_stats() failed or a debugfs call returned
+ * NULL, and used plain debugfs_remove() on a directory that may
+ * already contain files.  Real error codes are returned and the
+ * directory is removed recursively on failure.
+ */
+int msm_core_debug_init(void)
+{
+	struct dentry *dir;
+	struct dentry *file;
+	int i;
+
+	msm_core_data = get_cpu_pwr_stats();
+	if (!msm_core_data)
+		return -ENOENT;
+
+	dir = debugfs_create_dir("msm_core", NULL);
+	if (IS_ERR_OR_NULL(dir))
+		return dir ? PTR_ERR(dir) : -ENOMEM;
+
+	file = debugfs_create_file("enable",
+			S_IRUSR|S_IRGRP|S_IWUSR|S_IWGRP, dir, NULL,
+			&msm_core_enable_ops);
+	if (IS_ERR_OR_NULL(file))
+		goto fail;
+
+	file = debugfs_create_file("ptable",
+			S_IRUSR|S_IRGRP|S_IWUSR|S_IWGRP, dir, NULL,
+			&msm_core_ptable_ops);
+	if (IS_ERR_OR_NULL(file))
+		goto fail;
+
+	help_msg.size = strlen(help_msg.data);
+	file = debugfs_create_blob("help", S_IRUGO, dir, &help_msg);
+	if (IS_ERR_OR_NULL(file))
+		goto fail;
+
+	/* Remember the driver-provided tables so "enable 0" can restore
+	 * them after a debug table was swapped in. */
+	for (i = 0; i < num_possible_cpus(); i++) {
+		per_cpu(c_dgfs, i).ptr = &msm_core_data[i];
+		per_cpu(c_dgfs, i).driver_data = msm_core_data[i].ptable;
+		per_cpu(c_dgfs, i).driver_len = msm_core_data[i].len;
+	}
+	return 0;
+fail:
+	debugfs_remove_recursive(dir);
+	return file ? PTR_ERR(file) : -ENOMEM;
+}
+late_initcall(msm_core_debug_init);
diff --git a/drivers/power/qcom/lpm-stats.c b/drivers/power/qcom/lpm-stats.c
new file mode 100644
index 000000000000..d3cafc411a77
--- /dev/null
+++ b/drivers/power/qcom/lpm-stats.c
@@ -0,0 +1,871 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <soc/qcom/spm.h>
+#include <soc/qcom/pm.h>
+#include <soc/qcom/lpm-stats.h>
+
+#define MAX_STR_LEN 256
+#define MAX_TIME_LEN 20
+/* Command strings accepted by the debugfs write handlers below.
+ * NOTE(review): these look like they could be static const - confirm
+ * nothing outside this file references them before tightening. */
+const char *lpm_stats_reset = "reset";
+const char *lpm_stats_suspend = "suspend";
+
+/* sysfs attribute wrapper remembering which cpu it reports on. */
+struct lpm_sleep_time {
+ struct kobj_attribute ts_attr;
+ unsigned int cpu;
+};
+
+/* Residency histogram for one low power level: logarithmic buckets
+ * (base width first_bucket_time) with per-bucket min/max residencies
+ * and aggregate success/failure counts. */
+struct level_stats {
+ const char *name;
+ struct lpm_stats *owner; /* cpu/cluster stats object owning this level */
+ int64_t first_bucket_time;
+ int bucket[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int64_t min_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int64_t max_time[CONFIG_MSM_IDLE_STATS_BUCKET_COUNT];
+ int success_count;
+ int failed_count;
+ int64_t total_time; /* cumulative successful residency (ns) */
+ uint64_t enter_time; /* entry timestamp (ns); used by suspend stats */
+};
+
+/* System-wide suspend residency statistics, owned by the root cluster. */
+static struct level_stats suspend_time_stats;
+
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct lpm_stats, cpu_stats);
+
+/* Sum the accumulated residency (ns) across all LPM levels of @cpu_id. */
+static uint64_t get_total_sleep_time(unsigned int cpu_id)
+{
+	struct lpm_stats *st = &per_cpu(cpu_stats, cpu_id);
+	uint64_t total = 0;
+	int lvl;
+
+	for (lvl = 0; lvl < st->num_levels; lvl++)
+		total += st->time_stats[lvl].total_time;
+
+	return total;
+}
+
+/*
+ * Fold one low power mode residency @t (ns) into @stats.  A failed
+ * attempt only bumps failed_count.  A success is histogrammed: t is
+ * divided by first_bucket_time and the quotient's highest set bit
+ * (fls) selects a logarithmic bucket, clamped to the last one.
+ */
+static void update_level_stats(struct level_stats *stats, uint64_t t,
+ bool success)
+{
+ uint64_t bt;
+ int i;
+
+ if (!success) {
+ stats->failed_count++;
+ return;
+ }
+
+ stats->success_count++;
+ stats->total_time += t;
+ bt = t;
+ do_div(bt, stats->first_bucket_time);
+
+ /* Quotients beyond the range fls() can classify go straight into
+ * the final catch-all bucket. */
+ if (bt < 1ULL << (CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT *
+ (CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1)))
+ i = DIV_ROUND_UP(fls((uint32_t)bt),
+ CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT);
+ else
+ i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+ if (i >= CONFIG_MSM_IDLE_STATS_BUCKET_COUNT)
+ i = CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+
+ stats->bucket[i]++;
+
+ /* max_time[i] == 0 doubles as "bucket still empty" and seeds
+ * min_time on the bucket's first sample. */
+ if (t < stats->min_time[i] || !stats->max_time[i])
+ stats->min_time[i] = t;
+ if (t > stats->max_time[i])
+ stats->max_time[i] = t;
+ return;
+}
+
+/*
+ * Print one level's statistics: totals, optional failure count, then
+ * every histogram bucket with its upper bound and min/max residency.
+ */
+static void level_stats_print(struct seq_file *m, struct level_stats *stats)
+{
+ int i = 0;
+ int64_t bucket_time = 0;
+ char seqs[MAX_STR_LEN] = {0};
+ int64_t s = stats->total_time;
+ uint32_t ns = do_div(s, NSEC_PER_SEC);
+
+ snprintf(seqs, MAX_STR_LEN,
+ "[%s] %s:\n"
+ " success count: %7d\n"
+ " total success time: %lld.%09u\n",
+ stats->owner->name,
+ stats->name,
+ stats->success_count,
+ s, ns);
+ seq_puts(m, seqs);
+
+ if (stats->failed_count) {
+ snprintf(seqs, MAX_STR_LEN, " failed count: %7d\n",
+ stats->failed_count);
+ seq_puts(m, seqs);
+ }
+
+ bucket_time = stats->first_bucket_time;
+ for (i = 0;
+ i < CONFIG_MSM_IDLE_STATS_BUCKET_COUNT - 1;
+ i++) {
+ s = bucket_time;
+ ns = do_div(s, NSEC_PER_SEC);
+ snprintf(seqs, MAX_STR_LEN,
+ "\t<%6lld.%09u: %7d (%lld-%lld)\n",
+ s, ns, stats->bucket[i],
+ stats->min_time[i],
+ stats->max_time[i]);
+ seq_puts(m, seqs);
+ bucket_time <<= CONFIG_MSM_IDLE_STATS_BUCKET_SHIFT;
+ }
+ /* s/ns still hold the last "<" threshold from the loop, which is
+ * exactly the lower bound of this final ">=" bucket. */
+ snprintf(seqs, MAX_STR_LEN,
+ "\t>=%5lld.%09u:%8d (%lld-%lld)\n",
+ s, ns, stats->bucket[i],
+ stats->min_time[i],
+ stats->max_time[i]);
+ seq_puts(m, seqs);
+}
+
+/* seq_file show: print the statistics of one level. */
+static int level_stats_file_show(struct seq_file *m, void *v)
+{
+	struct level_stats *stats = m->private;
+
+	if (!stats)
+		return -EINVAL;
+
+	level_stats_print(m, stats);
+	return 0;
+}
+
+/* Open hook binding the per-level seq_file show routine. */
+static int level_stats_file_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, level_stats_file_show, inode->i_private);
+}
+
+/* Print @stats' own levels, then recurse into every child domain. */
+static void level_stats_print_all(struct seq_file *m, struct lpm_stats *stats)
+{
+	struct lpm_stats *child;
+	int lvl;
+
+	for (lvl = 0; lvl < stats->num_levels; lvl++)
+		level_stats_print(m, &stats->time_stats[lvl]);
+
+	list_for_each_entry(child, &stats->child, sibling)
+		level_stats_print_all(m, child);
+}
+
+/* Zero every counter, bucket and min/max record of a single level. */
+static void level_stats_reset(struct level_stats *stats)
+{
+	stats->success_count = 0;
+	stats->failed_count = 0;
+	stats->total_time = 0;
+	memset(stats->bucket, 0, sizeof(stats->bucket));
+	memset(stats->min_time, 0, sizeof(stats->min_time));
+	memset(stats->max_time, 0, sizeof(stats->max_time));
+}
+
+/* Reset every level of @stats and recurse into all child domains. */
+static void level_stats_reset_all(struct lpm_stats *stats)
+{
+	struct lpm_stats *child;
+	int lvl;
+
+	for (lvl = 0; lvl < stats->num_levels; lvl++)
+		level_stats_reset(&stats->time_stats[lvl]);
+
+	list_for_each_entry(child, &stats->child, sibling)
+		level_stats_reset_all(child);
+}
+
+/* seq_file show: dump the whole stats subtree plus suspend stats. */
+static int lpm_stats_file_show(struct seq_file *m, void *v)
+{
+	if (!m->private) {
+		pr_err("%s: Invalid pdata, Cannot print stats\n", __func__);
+		return -EINVAL;
+	}
+
+	level_stats_print_all(m, m->private);
+	level_stats_print(m, &suspend_time_stats);
+	return 0;
+}
+
+/* Open hook binding the hierarchy-wide seq_file show routine. */
+static int lpm_stats_file_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lpm_stats_file_show, inode->i_private);
+}
+
+/*
+ * Write handler for one level's stats node.  Accepts exactly the
+ * string "reset" plus one extra byte (count == strlen("reset") + 1,
+ * i.e. the newline produced by `echo reset`) and clears that level.
+ */
+static ssize_t level_stats_file_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *off)
+{
+ char buf[MAX_STR_LEN] = {0};
+ struct inode *in = file->f_inode;
+ struct level_stats *stats = (struct level_stats *)in->i_private;
+ size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+
+ if (!stats)
+ return -EINVAL;
+
+ if (count != len+1)
+ return -EINVAL;
+
+ /* Only len bytes are copied; buf stays NUL-terminated. */
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ if (strcmp(buf, lpm_stats_reset))
+ return -EINVAL;
+
+ level_stats_reset(stats);
+
+ return count;
+}
+
+/* Accept exactly "reset" (plus one trailing byte, e.g. the newline
+ * from `echo reset`) and clear the entire stats subtree. */
+static ssize_t lpm_stats_file_write(struct file *file,
+	const char __user *buffer, size_t count, loff_t *off)
+{
+	struct lpm_stats *stats = file->f_inode->i_private;
+	size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+	char buf[MAX_STR_LEN] = {0};
+
+	if (!stats || count != len + 1)
+		return -EINVAL;
+
+	if (copy_from_user(buf, buffer, len))
+		return -EFAULT;
+
+	if (strcmp(buf, lpm_stats_reset) != 0)
+		return -EINVAL;
+
+	level_stats_reset_all(stats);
+	return count;
+}
+
+/*
+ * seq_file show for a cluster's "lifo" node: for each child domain
+ * report how often it was the last to enter and the first to leave
+ * the cluster low power mode.
+ *
+ * Fix: marked static for consistency with every other file_show
+ * helper here; it is only referenced via lifo_stats_file_open() below.
+ */
+static int lifo_stats_file_show(struct seq_file *m, void *v)
+{
+	struct lpm_stats *stats = NULL;
+	struct list_head *centry = NULL;
+	struct lpm_stats *pos = NULL;
+	char seqs[MAX_STR_LEN] = {0};
+
+	if (!m->private)
+		return -EINVAL;
+
+	stats = (struct lpm_stats *)m->private;
+
+	/* Only clusters carry a lifo node; a childless one is a bug. */
+	if (list_empty(&stats->child)) {
+		pr_err("%s: ERROR: Lifo level with no children.\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	centry = &stats->child;
+	list_for_each_entry(pos, centry, sibling) {
+		snprintf(seqs, MAX_STR_LEN,
+			"%s:\n"
+			"\tLast-In:%u\n"
+			"\tFirst-Out:%u\n",
+			pos->name,
+			pos->lifo.last_in,
+			pos->lifo.first_out);
+		seq_puts(m, seqs);
+	}
+	return 0;
+}
+
+/* Open hook binding the lifo seq_file show routine. */
+static int lifo_stats_file_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, lifo_stats_file_show, inode->i_private);
+}
+
+/* Recursively zero the last-in/first-out counters of every descendant. */
+static void lifo_stats_reset_all(struct lpm_stats *stats)
+{
+	struct lpm_stats *child;
+
+	list_for_each_entry(child, &stats->child, sibling) {
+		child->lifo.last_in = 0;
+		child->lifo.first_out = 0;
+		if (!list_empty(&child->child))
+			lifo_stats_reset_all(child);
+	}
+}
+
+/*
+ * Write handler for the "lifo" node.  Accepts exactly "reset" plus
+ * one extra byte (the echo newline) and clears all last-in/first-out
+ * counters in this subtree.
+ */
+static ssize_t lifo_stats_file_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *off)
+{
+ char buf[MAX_STR_LEN] = {0};
+ struct inode *in = file->f_inode;
+ struct lpm_stats *stats = (struct lpm_stats *)in->i_private;
+ size_t len = strnlen(lpm_stats_reset, MAX_STR_LEN);
+
+ if (!stats)
+ return -EINVAL;
+
+ if (count != len+1)
+ return -EINVAL;
+
+ /* Only len bytes are copied; buf stays NUL-terminated. */
+ if (copy_from_user(buf, buffer, len))
+ return -EFAULT;
+
+ if (strcmp(buf, lpm_stats_reset))
+ return -EINVAL;
+
+ lifo_stats_reset_all(stats);
+
+ return count;
+}
+
+/* Per-level stats node: read one level, write "reset" to clear it. */
+static const struct file_operations level_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = level_stats_file_open,
+ .read = seq_read,
+ .release = single_release,
+ .llseek = no_llseek,
+ .write = level_stats_file_write,
+};
+
+/* Subtree "stats" node: read everything, write "reset" to clear all. */
+static const struct file_operations lpm_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = lpm_stats_file_open,
+ .read = seq_read,
+ .release = single_release,
+ .llseek = no_llseek,
+ .write = lpm_stats_file_write,
+};
+
+/* Cluster "lifo" node: read last-in/first-out counters, write "reset". */
+static const struct file_operations lifo_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = lifo_stats_file_open,
+ .read = seq_read,
+ .release = single_release,
+ .llseek = no_llseek,
+ .write = lifo_stats_file_write,
+};
+
+/*
+ * Credit the child domain containing the current cpu as the last one
+ * to enter the cluster low power mode.  Exactly one child mask is
+ * expected to match (the WARN fires otherwise).
+ */
+static void update_last_in_stats(struct lpm_stats *stats)
+{
+ struct list_head *centry = NULL;
+ struct lpm_stats *pos = NULL;
+
+ if (list_empty(&stats->child))
+ return;
+
+ centry = &stats->child;
+ list_for_each_entry(pos, centry, sibling) {
+ if (cpumask_test_cpu(smp_processor_id(), &pos->mask)) {
+ pos->lifo.last_in++;
+ return;
+ }
+ }
+ WARN(1, "Should not reach here\n");
+}
+
+/* Credit the child domain containing the current cpu as the first one
+ * to exit the cluster low power mode. */
+static void update_first_out_stats(struct lpm_stats *stats)
+{
+	struct lpm_stats *child;
+	unsigned int cpu = smp_processor_id();
+
+	list_for_each_entry(child, &stats->child, sibling) {
+		if (cpumask_test_cpu(cpu, &child->mask)) {
+			child->lifo.first_out++;
+			return;
+		}
+	}
+
+	/* No child claimed this cpu: only a bug when children exist. */
+	if (!list_empty(&stats->child))
+		WARN(1, "Should not reach here\n");
+}
+
+/* Record the residency stashed in stats->sleep_time against level
+ * @index on exit from a low power mode. */
+static inline void update_exit_stats(struct lpm_stats *stats, uint32_t index,
+ bool success)
+{
+ uint64_t exit_time = 0;
+
+ /* Update time stats only when exit is preceded by enter */
+ exit_time = stats->sleep_time;
+ update_level_stats(&stats->time_stats[index], exit_time,
+ success);
+}
+
+/*
+ * Initialize @stats for @name with @num_levels levels, link it under
+ * @parent (if any) and create its debugfs directory plus one file per
+ * level and an aggregate "stats" file.  Returns 0 or a negative errno.
+ *
+ * Fix: every error path now resets stats->time_stats to NULL after
+ * freeing it.  By that point the object may already sit on the
+ * parent's child list, and a later cleanup_stats() pass kfree()s
+ * pos->time_stats again - the stale pointer caused a double free.
+ */
+static int config_level(const char *name, const char **levels,
+	int num_levels, struct lpm_stats *parent, struct lpm_stats *stats)
+{
+	int i = 0;
+	struct dentry *directory = NULL;
+	const char *rootname = "lpm_stats";
+	const char *dirname = rootname;
+
+	strlcpy(stats->name, name, MAX_STR_LEN);
+	stats->num_levels = num_levels;
+	stats->parent = parent;
+	INIT_LIST_HEAD(&stats->sibling);
+	INIT_LIST_HEAD(&stats->child);
+
+	stats->time_stats = kzalloc(sizeof(struct level_stats) *
+				num_levels, GFP_KERNEL);
+	if (!stats->time_stats) {
+		pr_err("%s: Insufficient memory for %s level time stats\n",
+			__func__, name);
+		return -ENOMEM;
+	}
+
+	/* Non-root levels nest their directory under the parent's. */
+	if (parent) {
+		list_add_tail(&stats->sibling, &parent->child);
+		directory = parent->directory;
+		dirname = name;
+	}
+
+	stats->directory = debugfs_create_dir(dirname, directory);
+	if (!stats->directory) {
+		pr_err("%s: Unable to create %s debugfs directory\n",
+			__func__, dirname);
+		kfree(stats->time_stats);
+		stats->time_stats = NULL;
+		return -EPERM;
+	}
+
+	for (i = 0; i < num_levels; i++) {
+		stats->time_stats[i].name = levels[i];
+		stats->time_stats[i].owner = stats;
+		stats->time_stats[i].first_bucket_time =
+			CONFIG_MSM_IDLE_STATS_FIRST_BUCKET;
+		stats->time_stats[i].enter_time = 0;
+
+		if (!debugfs_create_file(stats->time_stats[i].name, S_IRUGO,
+			stats->directory, (void *)&stats->time_stats[i],
+			&level_stats_fops)) {
+			pr_err("%s: Unable to create %s %s level-stats file\n",
+				__func__, stats->name,
+				stats->time_stats[i].name);
+			kfree(stats->time_stats);
+			stats->time_stats = NULL;
+			return -EPERM;
+		}
+	}
+
+	if (!debugfs_create_file("stats", S_IRUGO, stats->directory,
+		(void *)stats, &lpm_stats_fops)) {
+		pr_err("%s: Unable to create %s's overall 'stats' file\n",
+			__func__, stats->name);
+		kfree(stats->time_stats);
+		stats->time_stats = NULL;
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * sysfs show: total accumulated sleep time of the cpu, formatted as
+ * "<seconds>.<nanoseconds>".
+ *
+ * Fix: do_div() modifies its first argument in place, and the original
+ * passed both total_time and do_div(total_time, NSEC_PER_SEC) as
+ * arguments of a single snprintf() call.  Function argument evaluation
+ * order is unspecified in C, so the seconds field could be read either
+ * before or after the division.  Perform the division first.
+ */
+static ssize_t total_sleep_time_show(struct kobject *kobj,
+	struct kobj_attribute *attr, char *buf)
+{
+	struct lpm_sleep_time *cpu_sleep_time = container_of(attr,
+			struct lpm_sleep_time, ts_attr);
+	unsigned int cpu = cpu_sleep_time->cpu;
+	uint64_t total_time = get_total_sleep_time(cpu);
+	uint32_t remainder = do_div(total_time, NSEC_PER_SEC);
+
+	return snprintf(buf, MAX_TIME_LEN, "%llu.%09u\n", total_time,
+			remainder);
+}
+
+/*
+ * Find this module's kobject under /sys/module, creating one if none
+ * is registered (the built-in case).  Returns a referenced kobject or
+ * an ERR_PTR on allocation/registration failure.
+ */
+static struct kobject *local_module_kobject(void)
+{
+ struct kobject *kobj;
+
+ kobj = kset_find_obj(module_kset, KBUILD_MODNAME);
+
+ if (!kobj) {
+ int err;
+ struct module_kobject *mk;
+
+ mk = kzalloc(sizeof(*mk), GFP_KERNEL);
+ if (!mk)
+ return ERR_PTR(-ENOMEM);
+
+ mk->mod = THIS_MODULE;
+ mk->kobj.kset = module_kset;
+
+ err = kobject_init_and_add(&mk->kobj, &module_ktype, NULL,
+ "%s", KBUILD_MODNAME);
+
+ if (err) {
+ kobject_put(&mk->kobj);
+ kfree(mk);
+ pr_err("%s: cannot create kobject for %s\n",
+ __func__, KBUILD_MODNAME);
+ return ERR_PTR(err);
+ }
+
+ /* Extra reference keeps the freshly created kobject alive for
+ * the lifetime of the stats sysfs nodes hung beneath it. */
+ kobject_get(&mk->kobj);
+ kobj = &mk->kobj;
+ }
+
+ return kobj;
+}
+
+/*
+ * Create /sys/module/<mod>/cpuN/total_sleep_time_secs for @cpu.
+ * On success the attribute wrapper and the cpu kobject persist for
+ * the lifetime of the kernel (they are freed only on failure).
+ * NOTE(review): @stats is currently unused here.
+ */
+static int create_sysfs_node(unsigned int cpu, struct lpm_stats *stats)
+{
+ struct kobject *cpu_kobj = NULL;
+ struct lpm_sleep_time *ts = NULL;
+ struct kobject *stats_kobj;
+ char cpu_name[] = "cpuXX";
+ int ret = -ENOMEM;
+
+ stats_kobj = local_module_kobject();
+
+ if (IS_ERR_OR_NULL(stats_kobj))
+ return PTR_ERR(stats_kobj);
+
+ snprintf(cpu_name, sizeof(cpu_name), "cpu%u", cpu);
+ cpu_kobj = kobject_create_and_add(cpu_name, stats_kobj);
+ if (!cpu_kobj)
+ return -ENOMEM;
+
+ ts = kzalloc(sizeof(*ts), GFP_KERNEL);
+ if (!ts)
+ goto failed;
+
+ sysfs_attr_init(&ts->ts_attr.attr);
+ ts->ts_attr.attr.name = "total_sleep_time_secs";
+ ts->ts_attr.attr.mode = 0444;
+ ts->ts_attr.show = total_sleep_time_show;
+ ts->ts_attr.store = NULL;
+ ts->cpu = cpu;
+
+ ret = sysfs_create_file(cpu_kobj, &ts->ts_attr.attr);
+ if (ret)
+ goto failed;
+
+ return 0;
+
+failed:
+ kfree(ts);
+ kobject_put(cpu_kobj);
+ return ret;
+}
+
+/*
+ * Configure per-cpu stats for every cpu in @mask: OR @mask into each
+ * ancestor's cpumask, then create "<name><cpu>" debugfs entries and
+ * the per-cpu sysfs sleep-time node.  Returns the last configured
+ * cpu's lpm_stats, or an ERR_PTR on failure.
+ */
+static struct lpm_stats *config_cpu_level(const char *name,
+ const char **levels, int num_levels, struct lpm_stats *parent,
+ struct cpumask *mask)
+{
+ int cpu = 0;
+ struct lpm_stats *pstats = NULL;
+ struct lpm_stats *stats = NULL;
+
+ /* Propagate these cpus up the hierarchy so the lifo accounting
+ * can resolve which child a running cpu belongs to. */
+ for (pstats = parent; pstats; pstats = pstats->parent)
+ cpumask_or(&pstats->mask, &pstats->mask, mask);
+
+ for_each_cpu(cpu, mask) {
+ int ret = 0;
+ char cpu_name[MAX_STR_LEN] = {0};
+
+ stats = &per_cpu(cpu_stats, cpu);
+ snprintf(cpu_name, MAX_STR_LEN, "%s%d", name, cpu);
+ cpumask_set_cpu(cpu, &stats->mask);
+
+ stats->is_cpu = true;
+
+ ret = config_level(cpu_name, levels, num_levels, parent,
+ stats);
+ if (ret) {
+ pr_err("%s: Unable to create %s stats\n",
+ __func__, cpu_name);
+ return ERR_PTR(ret);
+ }
+
+ ret = create_sysfs_node(cpu, stats);
+
+ if (ret) {
+ pr_err("Could not create the sysfs node\n");
+ return ERR_PTR(ret);
+ }
+ }
+
+ return stats;
+}
+
+/*
+ * Attach the global suspend-time statistics file to @stats' debugfs
+ * directory.  Called only for the hierarchy root (no parent).
+ */
+static void config_suspend_level(struct lpm_stats *stats)
+{
+ suspend_time_stats.name = lpm_stats_suspend;
+ suspend_time_stats.owner = stats;
+ suspend_time_stats.first_bucket_time =
+ CONFIG_MSM_SUSPEND_STATS_FIRST_BUCKET;
+ suspend_time_stats.enter_time = 0;
+ suspend_time_stats.success_count = 0;
+ suspend_time_stats.failed_count = 0;
+
+ if (!debugfs_create_file(suspend_time_stats.name, S_IRUGO,
+ stats->directory, (void *)&suspend_time_stats,
+ &level_stats_fops))
+ pr_err("%s: Unable to create %s Suspend stats file\n",
+ __func__, stats->name);
+}
+
+/*
+ * Allocate and configure a cluster-level stats object, including its
+ * "lifo" debugfs file; the root cluster also gets the suspend stats
+ * file.  Returns the new object or an ERR_PTR.
+ *
+ * Fix: on failure the object used to be kfree()d while still linked
+ * on the parent's child list (config_level() adds it before it can
+ * fail), leaving a dangling sibling pointer that later hierarchy
+ * walks would follow into freed memory.  Unlink before freeing, and
+ * on the lifo-file failure also remove the debugfs directory and the
+ * level array that config_level() created.
+ */
+static struct lpm_stats *config_cluster_level(const char *name,
+	const char **levels, int num_levels, struct lpm_stats *parent)
+{
+	struct lpm_stats *stats = NULL;
+	int ret = 0;
+
+	stats = kzalloc(sizeof(struct lpm_stats), GFP_KERNEL);
+	if (!stats) {
+		pr_err("%s: Insufficient memory for %s stats\n",
+			__func__, name);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	stats->is_cpu = false;
+
+	ret = config_level(name, levels, num_levels, parent, stats);
+	if (ret) {
+		pr_err("%s: Unable to create %s stats\n", __func__,
+			name);
+		/* sibling is always initialized by config_level() before
+		 * any failure, so list_del() is safe even when unlinked. */
+		list_del(&stats->sibling);
+		kfree(stats);
+		return ERR_PTR(ret);
+	}
+
+	if (!debugfs_create_file("lifo", S_IRUGO, stats->directory,
+		(void *)stats, &lifo_stats_fops)) {
+		pr_err("%s: Unable to create %s lifo stats file\n",
+			__func__, stats->name);
+		list_del(&stats->sibling);
+		debugfs_remove_recursive(stats->directory);
+		kfree(stats->time_stats);
+		kfree(stats);
+		return ERR_PTR(-EPERM);
+	}
+
+	if (!parent)
+		config_suspend_level(stats);
+
+	return stats;
+}
+
+/*
+ * Free @stats and its entire subtree.  Cluster objects are heap
+ * allocated and freed; per-cpu leaves are static per-cpu data, so only
+ * their level arrays are released.
+ *
+ * Fixes: the original used list_for_each_entry_reverse while kfree()ing
+ * entries in the loop body, so the iterator dereferenced freed memory
+ * to find the next element; and after recursing into a child (which
+ * already frees that child and its level array) it freed both again.
+ * Use the safe iterator and let the recursion own its subtree.
+ */
+static void cleanup_stats(struct lpm_stats *stats)
+{
+	struct lpm_stats *pos = NULL;
+	struct lpm_stats *tmp = NULL;
+
+	list_for_each_entry_safe_reverse(pos, tmp, &stats->child, sibling) {
+		if (!list_empty(&pos->child)) {
+			/* Recursion frees pos (a cluster) and everything
+			 * beneath it. */
+			cleanup_stats(pos);
+			continue;
+		}
+
+		kfree(pos->time_stats);
+		if (!pos->is_cpu)
+			kfree(pos);
+	}
+	kfree(stats->time_stats);
+	kfree(stats);
+}
+
+/* Tear down the whole hierarchy that @stats belongs to: walk up to
+ * the root, remove its debugfs tree, then free every stats object. */
+static void lpm_stats_cleanup(struct lpm_stats *stats)
+{
+	struct lpm_stats *root = stats;
+
+	if (!root)
+		return;
+
+	while (root->parent)
+		root = root->parent;
+
+	debugfs_remove_recursive(root->directory);
+	cleanup_stats(root);
+}
+
+/**
+ * lpm_stats_config_level() - API to configure levels stats.
+ *
+ * @name: Name of the cluster/cpu.
+ * @levels: Low power mode level names.
+ * @num_levels: Number of leves supported.
+ * @parent: Pointer to the parent's lpm_stats object.
+ * @mask: cpumask, if configuring cpu stats, else NULL.
+ *
+ * Function to communicate the low power mode levels supported by
+ * cpus or a cluster.
+ *
+ * Return: Pointer to the lpm_stats object or ERR_PTR(-ERRNO)
+ */
+struct lpm_stats *lpm_stats_config_level(const char *name,
+ const char **levels, int num_levels, struct lpm_stats *parent,
+ struct cpumask *mask)
+{
+ struct lpm_stats *stats = NULL;
+
+ if (!levels || num_levels <= 0 || IS_ERR(parent)) {
+ pr_err("%s: Invalid input\n\t\tlevels = %p\n\t\t"
+ "num_levels = %d\n\t\tparent = %ld\n",
+ __func__, levels, num_levels, PTR_ERR(parent));
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* A cpumask selects per-cpu leaf stats; NULL means a cluster. */
+ if (mask)
+ stats = config_cpu_level(name, levels, num_levels, parent,
+ mask);
+ else
+ stats = config_cluster_level(name, levels, num_levels,
+ parent);
+
+ /* On failure the entire partially built hierarchy rooted above
+ * @parent is torn down before propagating the error. */
+ if (IS_ERR(stats)) {
+ lpm_stats_cleanup(parent);
+ return stats;
+ }
+
+ return stats;
+}
+EXPORT_SYMBOL(lpm_stats_config_level);
+
+/**
+ * lpm_stats_cluster_enter() - API to communicate the lpm level a cluster
+ * is prepared to enter.
+ *
+ * @stats: Pointer to the cluster's lpm_stats object.
+ * @index: Index of the lpm level that the cluster is going to enter.
+ *
+ * Function to communicate the low power mode level that the cluster is
+ * prepared to enter.
+ */
+void lpm_stats_cluster_enter(struct lpm_stats *stats, uint32_t index)
+{
+ if (IS_ERR_OR_NULL(stats))
+ return;
+
+ /* NOTE(review): @index is currently unused; only the last-in
+ * lifo accounting is updated on entry. */
+ update_last_in_stats(stats);
+}
+EXPORT_SYMBOL(lpm_stats_cluster_enter);
+
+/**
+ * lpm_stats_cluster_exit() - API to communicate the lpm level a cluster
+ * exited.
+ *
+ * @stats: Pointer to the cluster's lpm_stats object.
+ * @index: Index of the cluster lpm level.
+ * @success: Success/Failure of the low power mode execution.
+ *
+ * Function to communicate the low power mode level that the cluster
+ * exited.
+ */
+void lpm_stats_cluster_exit(struct lpm_stats *stats, uint32_t index,
+ bool success)
+{
+ if (IS_ERR_OR_NULL(stats))
+ return;
+
+ /* Record residency for this level, then credit the first cpu out. */
+ update_exit_stats(stats, index, success);
+
+ update_first_out_stats(stats);
+}
+EXPORT_SYMBOL(lpm_stats_cluster_exit);
+
+/**
+ * lpm_stats_cpu_enter() - API to communicate the lpm level a cpu
+ * is prepared to enter.
+ *
+ * @index: cpu's lpm level index.
+ *
+ * Function to communicate the low power mode level that the cpu is
+ * prepared to enter.
+ */
+void lpm_stats_cpu_enter(uint32_t index, uint64_t time)
+{
+ struct lpm_stats *stats = &(*this_cpu_ptr(&(cpu_stats)));
+
+ /* Stash the entry timestamp; lpm_stats_cpu_exit() turns it into
+ * a residency. */
+ stats->sleep_time = time;
+
+ /* NOTE(review): this guard is vestigial - no code follows it. */
+ if (!stats->time_stats)
+ return;
+
+}
+EXPORT_SYMBOL(lpm_stats_cpu_enter);
+
+/**
+ * lpm_stats_cpu_exit() - API to communicate the lpm level that the cpu exited.
+ *
+ * @index: cpu's lpm level index.
+ * @success: Success/Failure of the low power mode execution.
+ *
+ * Function to communicate the low power mode level that the cpu exited.
+ */
+void lpm_stats_cpu_exit(uint32_t index, uint64_t time, bool success)
+{
+	struct lpm_stats *stats = this_cpu_ptr(&cpu_stats);
+
+	if (!stats->time_stats)
+		return;
+
+	/* Turn the entry timestamp stashed by lpm_stats_cpu_enter()
+	 * into a residency before recording it. */
+	stats->sleep_time = time - stats->sleep_time;
+	update_exit_stats(stats, index, success);
+}
+EXPORT_SYMBOL(lpm_stats_cpu_exit);
+
+/**
+ * lpm_stats_suspend_enter() - API to communicate system entering suspend.
+ *
+ * Function to communicate that the system is ready to enter suspend.
+ */
+void lpm_stats_suspend_enter(void)
+{
+	struct timespec now;
+
+	/* Stamp the suspend entry; the exit path computes residency. */
+	getnstimeofday(&now);
+	suspend_time_stats.enter_time = timespec_to_ns(&now);
+}
+EXPORT_SYMBOL(lpm_stats_suspend_enter);
+
+/**
+ * lpm_stats_suspend_exit() - API to communicate system exiting suspend.
+ *
+ * Function to communicate that the system exited suspend.
+ */
+void lpm_stats_suspend_exit(void)
+{
+	struct timespec now;
+	uint64_t residency;
+
+	getnstimeofday(&now);
+	residency = timespec_to_ns(&now) - suspend_time_stats.enter_time;
+	/* Suspend exits are always recorded as successful. */
+	update_level_stats(&suspend_time_stats, residency, true);
+}
+EXPORT_SYMBOL(lpm_stats_suspend_exit);
diff --git a/drivers/power/qcom/msm-core.c b/drivers/power/qcom/msm-core.c
new file mode 100644
index 000000000000..825c27e7a4c1
--- /dev/null
+++ b/drivers/power/qcom/msm-core.c
@@ -0,0 +1,1133 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/msm-core-interface.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/pm_opp.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/thermal.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/uio_driver.h>
+#include <asm/smp_plat.h>
+#include <stdbool.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_core.h>
+
/* Temperature table covers [TEMP_BASE_POINT, TEMP_MAX_POINT] C in 5 C steps
 * (see repopulate_stats()); above CPU_HOTPLUG_LIMIT the high trip is not
 * armed. Temps are in degree C unless multiplied by scaling_factor.
 */
#define TEMP_BASE_POINT 35
#define TEMP_MAX_POINT 95
#define CPU_HOTPLUG_LIMIT 80
#define CPU_BIT_MASK(cpu) BIT(cpu)
#define DEFAULT_TEMP 40
#define DEFAULT_LOW_HYST_TEMP 10
#define DEFAULT_HIGH_HYST_TEMP 5
#define CLUSTER_OFFSET_FOR_MPIDR 8
#define MAX_CORES_PER_CLUSTER 4
#define MAX_NUM_OF_CLUSTERS 2
#define NUM_OF_CORNERS 10
#define DEFAULT_SCALING_FACTOR 1
+
/*
 * ALLOCATE_2D_ARRAY(type) - generate allocate_2d_array_<type>(idx), which
 * allocates a TEMP_DATA_POINTS x idx zeroed 2-D array of <type>.
 * Returns ERR_PTR(-EINVAL) for idx == 0 and ERR_PTR(-ENOMEM) on allocation
 * failure (partial rows are freed; kfree(NULL) on never-allocated rows is a
 * no-op). Never returns NULL. Caller frees each row, then the pointer array.
 */
#define ALLOCATE_2D_ARRAY(type)\
static type **allocate_2d_array_##type(int idx)\
{\
	int i;\
	type **ptr = NULL;\
	if (!idx) \
		return ERR_PTR(-EINVAL);\
	ptr = kzalloc(sizeof(*ptr) * TEMP_DATA_POINTS, \
			GFP_KERNEL);\
	if (!ptr) { \
		return ERR_PTR(-ENOMEM); \
	} \
	for (i = 0; i < TEMP_DATA_POINTS; i++) { \
		ptr[i] = kzalloc(sizeof(*ptr[i]) * \
				idx, GFP_KERNEL);\
		if (!ptr[i]) {\
			goto done;\
		} \
	} \
	return ptr;\
done:\
	for (i = 0; i < TEMP_DATA_POINTS; i++) \
		kfree(ptr[i]);\
	kfree(ptr);\
	return ERR_PTR(-ENOMEM);\
}
+
/* Per-CPU bookkeeping for the energy-aware driver. */
struct cpu_activity_info {
	int cpu;			/* logical cpu number */
	int mpidr;			/* MPIDR read from the DT "reg" property */
	long temp;			/* last known temperature (C) */
	int sensor_id;			/* tsens id, or -ENODEV if no sensor mapped */
	struct sensor_threshold hi_threshold;	/* high temp trip */
	struct sensor_threshold low_threshold;	/* low temp trip */
	struct cpu_static_info *sp;	/* shared per-cluster static power data */
};

/* Static power data shared by all CPUs set in @mask (see clear_static_power). */
struct cpu_static_info {
	uint32_t **power;	/* [TEMP_DATA_POINTS][num_of_freqs] power table */
	cpumask_t mask;		/* CPUs currently referencing this object */
	struct cpufreq_frequency_table *table;
	uint32_t *voltage;	/* per-frequency voltage (mV) */
	uint32_t num_of_freqs;
};
+
/* policy_update_mutex: serializes cpufreq table/voltage readout paths. */
static DEFINE_MUTEX(policy_update_mutex);
/* kthread_update_mutex: serializes the sampling thread vs suspend notifier. */
static DEFINE_MUTEX(kthread_update_mutex);
/* update_lock: protects cpu_stats[] and per-cpu power table updates. */
static DEFINE_SPINLOCK(update_lock);
static struct delayed_work sampling_work;
static struct completion sampling_completion;
static struct task_struct *sampling_task;
static int low_hyst_temp;
static int high_hyst_temp;
static struct platform_device *msm_core_pdev;
static struct cpu_activity_info activity[NR_CPUS];
DEFINE_PER_CPU(struct cpu_pstate_pwr *, ptable);
static struct cpu_pwr_stats cpu_stats[NR_CPUS];
/* Multiplier between sensor units and degrees C (DT qcom,scaling-factor). */
static uint32_t scaling_factor;
ALLOCATE_2D_ARRAY(uint32_t);

/* Polling period in ms; 0 disables periodic re-sampling. */
static int poll_ms;
module_param_named(polling_interval, poll_ms, int,
		S_IRUGO | S_IWUSR | S_IWGRP);

static int disabled;
module_param_named(disabled, disabled, int,
		S_IRUGO | S_IWUSR | S_IWGRP);
static bool in_suspend;
/* Set once userspace has pushed a power table (EA_LEAKAGE ioctl). */
static bool activate_power_table;
static int max_throttling_temp = 80; /* in C */
module_param_named(throttling_temp, max_throttling_temp, int,
		S_IRUGO | S_IWUSR | S_IWGRP);
+
+/*
+ * Cannot be called from an interrupt context
+ */
+static void set_and_activate_threshold(uint32_t sensor_id,
+ struct sensor_threshold *threshold)
+{
+ if (sensor_set_trip(sensor_id, threshold)) {
+ pr_err("%s: Error in setting trip %d\n",
+ KBUILD_MODNAME, threshold->trip);
+ return;
+ }
+
+ if (sensor_activate_trip(sensor_id, threshold, true)) {
+ sensor_cancel_trip(sensor_id, threshold);
+ pr_err("%s: Error in enabling trip %d\n",
+ KBUILD_MODNAME, threshold->trip);
+ return;
+ }
+}
+
/*
 * Re-center the high/low temperature trips around the cpu's current
 * temperature (+high_hyst_temp / -low_hyst_temp, in sensor units).
 */
static void set_threshold(struct cpu_activity_info *cpu_node)
{
	if (cpu_node->sensor_id < 0)
		return;

	/*
	 * Before operating on the threshold structure which is used by
	 * thermal core ensure that the sensor is disabled to prevent
	 * incorrect operations on the sensor list maintained by thermal code.
	 */
	sensor_activate_trip(cpu_node->sensor_id,
			&cpu_node->hi_threshold, false);
	sensor_activate_trip(cpu_node->sensor_id,
			&cpu_node->low_threshold, false);

	cpu_node->hi_threshold.temp = (cpu_node->temp + high_hyst_temp) *
					scaling_factor;
	cpu_node->low_threshold.temp = (cpu_node->temp - low_hyst_temp) *
					scaling_factor;

	/*
	 * Set the threshold only if we are below the hotplug limit
	 * Adding more work at this high temperature range, seems to
	 * fail hotplug notifications.
	 */
	if (cpu_node->hi_threshold.temp < (CPU_HOTPLUG_LIMIT * scaling_factor))
		set_and_activate_threshold(cpu_node->sensor_id,
				&cpu_node->hi_threshold);

	set_and_activate_threshold(cpu_node->sensor_id,
			&cpu_node->low_threshold);
}
+
/* Delayed-work handler: kick the sampling kthread (do_sampling). */
static void samplequeue_handle(struct work_struct *work)
{
	complete(&sampling_completion);
}
+
+/* May be called from an interrupt context */
+static void core_temp_notify(enum thermal_trip_type type,
+ int temp, void *data)
+{
+ struct cpu_activity_info *cpu_node =
+ (struct cpu_activity_info *) data;
+
+ temp /= scaling_factor;
+
+ trace_temp_notification(cpu_node->sensor_id,
+ type, temp, cpu_node->temp);
+
+ cpu_node->temp = temp;
+
+ complete(&sampling_completion);
+}
+
+static void repopulate_stats(int cpu)
+{
+ int i;
+ struct cpu_activity_info *cpu_node = &activity[cpu];
+ int temp_point;
+ struct cpu_pstate_pwr *pt = per_cpu(ptable, cpu);
+
+ if (!pt)
+ return;
+
+ if (cpu_node->temp < TEMP_BASE_POINT)
+ temp_point = 0;
+ else if (cpu_node->temp > TEMP_MAX_POINT)
+ temp_point = TEMP_DATA_POINTS - 1;
+ else
+ temp_point = (cpu_node->temp - TEMP_BASE_POINT) / 5;
+
+ cpu_stats[cpu].temp = cpu_node->temp;
+ for (i = 0; i < cpu_node->sp->num_of_freqs; i++)
+ pt[i].power = cpu_node->sp->power[temp_point][i];
+
+ trace_cpu_stats(cpu, cpu_stats[cpu].temp, pt[0].power,
+ pt[cpu_node->sp->num_of_freqs-1].power);
+};
+
/*
 * Refresh cached temperatures and power tables for all online CPUs.
 * If a cpu's cached temperature did not change since the previous call,
 * the sensor is re-read directly (trip notifications may have been missed).
 */
void trigger_cpu_pwr_stats_calc(void)
{
	int cpu;
	/* Temperature seen on the previous invocation, per cpu. */
	static long prev_temp[NR_CPUS];
	struct cpu_activity_info *cpu_node;
	int temp;

	if (disabled)
		return;

	spin_lock(&update_lock);

	for_each_online_cpu(cpu) {
		cpu_node = &activity[cpu];
		if (cpu_node->sensor_id < 0)
			continue;

		/* Unchanged since last time: poll the sensor directly. */
		if (cpu_node->temp == prev_temp[cpu]) {
			sensor_get_temp(cpu_node->sensor_id, &temp);
			cpu_node->temp = temp / scaling_factor;
		}

		prev_temp[cpu] = cpu_node->temp;

		/*
		 * Do not populate/update stats before policy and ptable have
		 * been updated.
		 */
		if (activate_power_table && cpu_stats[cpu].ptable
			&& cpu_node->sp->table)
			repopulate_stats(cpu);
	}
	spin_unlock(&update_lock);
}
EXPORT_SYMBOL(trigger_cpu_pwr_stats_calc);
+
+void set_cpu_throttled(cpumask_t *mask, bool throttling)
+{
+ int cpu;
+
+ if (!mask)
+ return;
+
+ spin_lock(&update_lock);
+ for_each_cpu(cpu, mask)
+ cpu_stats[cpu].throttling = throttling;
+ spin_unlock(&update_lock);
+}
+EXPORT_SYMBOL(set_cpu_throttled);
+
+static void update_related_freq_table(struct cpufreq_policy *policy)
+{
+ int cpu, num_of_freqs;
+ struct cpufreq_frequency_table *table;
+
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (!table) {
+ pr_err("Couldn't get freq table for cpu%d\n",
+ policy->cpu);
+ return;
+ }
+
+ for (num_of_freqs = 0; table[num_of_freqs].frequency !=
+ CPUFREQ_TABLE_END;)
+ num_of_freqs++;
+
+ /*
+ * Synchronous cores within cluster have the same
+ * policy. Since these cores do not have the cpufreq
+ * table initialized for all of them, copy the same
+ * table to all the related cpus.
+ */
+ for_each_cpu(cpu, policy->related_cpus) {
+ activity[cpu].sp->table = table;
+ activity[cpu].sp->num_of_freqs = num_of_freqs;
+ }
+}
+
/*
 * Sampling kthread: waits on sampling_completion (kicked by the trip
 * callback, the delayed work, or resume), refreshes power stats, and
 * re-centers trip thresholds for CPUs whose temperature moved. Re-arms
 * the delayed work every poll_ms ms when polling is enabled.
 */
static __ref int do_sampling(void *data)
{
	int cpu;
	struct cpu_activity_info *cpu_node;
	/* Last temperature for which thresholds were programmed, per cpu. */
	static int prev_temp[NR_CPUS];

	while (!kthread_should_stop()) {
		wait_for_completion(&sampling_completion);
		cancel_delayed_work(&sampling_work);

		/* Excludes the suspend notifier; skip work while suspended. */
		mutex_lock(&kthread_update_mutex);
		if (in_suspend)
			goto unlock;

		trigger_cpu_pwr_stats_calc();

		for_each_online_cpu(cpu) {
			cpu_node = &activity[cpu];
			if (prev_temp[cpu] != cpu_node->temp) {
				prev_temp[cpu] = cpu_node->temp;
				set_threshold(cpu_node);
				trace_temp_threshold(cpu, cpu_node->temp,
					cpu_node->hi_threshold.temp /
					scaling_factor,
					cpu_node->low_threshold.temp /
					scaling_factor);
			}
		}
		/* poll_ms == 0: purely event driven, do not re-arm. */
		if (!poll_ms)
			goto unlock;

		schedule_delayed_work(&sampling_work,
			msecs_to_jiffies(poll_ms));
unlock:
		mutex_unlock(&kthread_update_mutex);
	}
	return 0;
}
+
/*
 * Free a cpu_static_info once no cpu references it any more.
 * sp->mask acts as a reference count: if any possible cpu is still set
 * (cpumask_first < num_possible_cpus()), the object stays alive.
 */
static void clear_static_power(struct cpu_static_info *sp)
{
	int i;

	if (!sp)
		return;

	/* Still referenced by at least one cpu; keep it. */
	if (cpumask_first(&sp->mask) < num_possible_cpus())
		return;

	for (i = 0; i < TEMP_DATA_POINTS; i++)
		kfree(sp->power[i]);
	kfree(sp->power);
	kfree(sp);
}
+
/* Notified (with the cpu number) whenever a cpu's power table is updated. */
BLOCKING_NOTIFIER_HEAD(msm_core_stats_notifier_list);

/* Expose the notifier head so clients can register/unregister directly. */
struct blocking_notifier_head *get_power_update_notifier(void)
{
	return &msm_core_stats_notifier_list;
}

/* Convenience wrapper around blocking_notifier_chain_register(). */
int register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&msm_core_stats_notifier_list,
						nb);
}
+
+static int update_userspace_power(struct sched_params __user *argp)
+{
+ int i;
+ int ret;
+ int cpu = -1;
+ struct cpu_activity_info *node;
+ struct cpu_static_info *sp, *clear_sp;
+ int cpumask, cluster, mpidr;
+ bool pdata_valid[NR_CPUS] = {0};
+
+ get_user(cpumask, &argp->cpumask);
+ get_user(cluster, &argp->cluster);
+ mpidr = cluster << 8;
+
+ pr_debug("%s: cpumask %d, cluster: %d\n", __func__, cpumask,
+ cluster);
+ for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) {
+ if (!(cpumask & 0x01))
+ continue;
+
+ mpidr |= i;
+ for_each_possible_cpu(cpu) {
+ if (cpu_logical_map(cpu) == mpidr)
+ break;
+ }
+ }
+
+ if ((cpu < 0) || (cpu >= num_possible_cpus()))
+ return -EINVAL;
+
+ node = &activity[cpu];
+ /* Allocate new memory to copy cpumask specific power
+ * information.
+ */
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (!sp)
+ return -ENOMEM;
+
+
+ sp->power = allocate_2d_array_uint32_t(node->sp->num_of_freqs);
+ if (IS_ERR_OR_NULL(sp->power)) {
+ ret = PTR_ERR(sp->power);
+ kfree(sp);
+ return ret;
+ }
+ sp->num_of_freqs = node->sp->num_of_freqs;
+ sp->voltage = node->sp->voltage;
+ sp->table = node->sp->table;
+
+ for (i = 0; i < TEMP_DATA_POINTS; i++) {
+ ret = copy_from_user(sp->power[i], &argp->power[i][0],
+ sizeof(sp->power[i][0]) * node->sp->num_of_freqs);
+ if (ret)
+ goto failed;
+ }
+
+ /* Copy the same power values for all the cpus in the cpumask
+ * argp->cpumask within the cluster (argp->cluster)
+ */
+ get_user(cpumask, &argp->cpumask);
+ spin_lock(&update_lock);
+ for (i = 0; i < MAX_CORES_PER_CLUSTER; i++, cpumask >>= 1) {
+ if (!(cpumask & 0x01))
+ continue;
+ mpidr = (cluster << CLUSTER_OFFSET_FOR_MPIDR);
+ mpidr |= i;
+ for_each_possible_cpu(cpu) {
+ if (!(cpu_logical_map(cpu) == mpidr))
+ continue;
+
+ node = &activity[cpu];
+ clear_sp = node->sp;
+ node->sp = sp;
+ cpumask_set_cpu(cpu, &sp->mask);
+ if (clear_sp) {
+ cpumask_clear_cpu(cpu, &clear_sp->mask);
+ clear_static_power(clear_sp);
+ }
+ cpu_stats[cpu].ptable = per_cpu(ptable, cpu);
+ repopulate_stats(cpu);
+ pdata_valid[cpu] = true;
+ }
+ }
+ spin_unlock(&update_lock);
+
+ for_each_possible_cpu(cpu) {
+ if (!pdata_valid[cpu])
+ continue;
+
+ blocking_notifier_call_chain(
+ &msm_core_stats_notifier_list, cpu, NULL);
+ }
+
+ activate_power_table = true;
+ return 0;
+
+failed:
+ for (i = 0; i < TEMP_DATA_POINTS; i++)
+ kfree(sp->power[i]);
+ kfree(sp->power);
+ kfree(sp);
+ return ret;
+}
+
+static long msm_core_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ long ret = 0;
+ struct cpu_activity_info *node = NULL;
+ struct sched_params __user *argp = (struct sched_params __user *)arg;
+ int i, cpu = num_possible_cpus();
+ int mpidr, cluster, cpumask;
+
+ if (!argp)
+ return -EINVAL;
+
+ get_user(cluster, &argp->cluster);
+ mpidr = (cluster << (MAX_CORES_PER_CLUSTER *
+ MAX_NUM_OF_CLUSTERS));
+ get_user(cpumask, &argp->cpumask);
+
+ switch (cmd) {
+ case EA_LEAKAGE:
+ ret = update_userspace_power(argp);
+ if (ret)
+ pr_err("Userspace power update failed with %ld\n", ret);
+ break;
+ case EA_VOLT:
+ for (i = 0; cpumask > 0; i++, cpumask >>= 1) {
+ for_each_possible_cpu(cpu) {
+ if (cpu_logical_map(cpu) == (mpidr | i))
+ break;
+ }
+ }
+ if (cpu >= num_possible_cpus())
+ break;
+
+ mutex_lock(&policy_update_mutex);
+ node = &activity[cpu];
+ if (!node->sp->table) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ ret = copy_to_user((void __user *)&argp->voltage[0],
+ node->sp->voltage,
+ sizeof(uint32_t) * node->sp->num_of_freqs);
+ if (ret)
+ break;
+ for (i = 0; i < node->sp->num_of_freqs; i++) {
+ ret = copy_to_user((void __user *)&argp->freq[i],
+ &node->sp->table[i].frequency,
+ sizeof(uint32_t));
+ if (ret)
+ break;
+ }
+unlock:
+ mutex_unlock(&policy_update_mutex);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
#ifdef CONFIG_COMPAT
/* 32-bit compat path: translate the pointer and reuse the native ioctl. */
static long msm_core_compat_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	arg = (unsigned long)compat_ptr(arg);
	return msm_core_ioctl(file, cmd, arg);
}
#endif

/* No per-open state; open/release are intentionally empty. */
static int msm_core_open(struct inode *inode, struct file *file)
{
	return 0;
}

static int msm_core_release(struct inode *inode, struct file *file)
{
	return 0;
}
+
/*
 * Fill in a sensor_threshold: trip type, trip temperature (sensor units)
 * and the notify callback with its cpu_activity_info cookie.
 * NOTE(review): the (void *) cast of core_temp_notify assumes the
 * sensor_threshold.notify signature matches — confirm against the
 * thermal header.
 */
static inline void init_sens_threshold(struct sensor_threshold *threshold,
		enum thermal_trip_type trip, long temp,
		void *data)
{
	threshold->trip = trip;
	threshold->temp = temp;
	threshold->data = data;
	threshold->notify = (void *)core_temp_notify;
}
+
+static int msm_core_stats_init(struct device *dev, int cpu)
+{
+ int i;
+ struct cpu_activity_info *cpu_node;
+ struct cpu_pstate_pwr *pstate = NULL;
+
+ cpu_node = &activity[cpu];
+ cpu_stats[cpu].cpu = cpu;
+ cpu_stats[cpu].temp = cpu_node->temp;
+ cpu_stats[cpu].throttling = false;
+
+ cpu_stats[cpu].len = cpu_node->sp->num_of_freqs;
+ pstate = devm_kzalloc(dev,
+ sizeof(*pstate) * cpu_node->sp->num_of_freqs,
+ GFP_KERNEL);
+ if (!pstate)
+ return -ENOMEM;
+
+ for (i = 0; i < cpu_node->sp->num_of_freqs; i++)
+ pstate[i].freq = cpu_node->sp->table[i].frequency;
+
+ per_cpu(ptable, cpu) = pstate;
+
+ return 0;
+}
+
+static int msm_core_task_init(struct device *dev)
+{
+ init_completion(&sampling_completion);
+ sampling_task = kthread_run(do_sampling, NULL, "msm-core:sampling");
+ if (IS_ERR(sampling_task)) {
+ pr_err("Failed to create do_sampling err: %ld\n",
+ PTR_ERR(sampling_task));
+ return PTR_ERR(sampling_task);
+ }
+ return 0;
+}
+
/* Exported accessor for the per-cpu power stats array (NR_CPUS entries). */
struct cpu_pwr_stats *get_cpu_pwr_stats(void)
{
	return cpu_stats;
}
EXPORT_SYMBOL(get_cpu_pwr_stats);
+
+static int msm_get_power_values(int cpu, struct cpu_static_info *sp)
+{
+ int i = 0, j;
+ int ret = 0;
+ uint64_t power;
+
+ /* Calculate dynamic power spent for every frequency using formula:
+ * Power = V * V * f
+ * where V = voltage for frequency
+ * f = frequency
+ * */
+ sp->power = allocate_2d_array_uint32_t(sp->num_of_freqs);
+ if (IS_ERR_OR_NULL(sp->power))
+ return PTR_ERR(sp->power);
+
+ for (i = 0; i < TEMP_DATA_POINTS; i++) {
+ for (j = 0; j < sp->num_of_freqs; j++) {
+ power = sp->voltage[j] *
+ sp->table[j].frequency;
+ do_div(power, 1000);
+ do_div(power, 1000);
+ power *= sp->voltage[j];
+ do_div(power, 1000);
+ sp->power[i][j] = power;
+ }
+ }
+ return ret;
+}
+
/*
 * Build the per-frequency voltage array (mV) for @cpu from its OPP table.
 * OPP "voltages" above 400000 are taken as uV and converted to mV;
 * small positive values are treated as CPR corner indices and mapped
 * through average_voltage[]; anything else falls back to the last entry.
 */
static int msm_get_voltage_levels(struct device *dev, int cpu,
		struct cpu_static_info *sp)
{
	unsigned int *voltage;
	int i;
	int corner;
	struct dev_pm_opp *opp;
	struct device *cpu_dev = get_cpu_device(cpu);
	/*
	 * Convert cpr corner voltage to average voltage of both
	 * a53 and a57 votlage value
	 */
	int average_voltage[NUM_OF_CORNERS] = {0, 746, 841, 843, 940, 953, 976,
			1024, 1090, 1100};

	if (!cpu_dev)
		return -ENODEV;

	voltage = devm_kzalloc(dev,
			sizeof(*voltage) * sp->num_of_freqs, GFP_KERNEL);

	if (!voltage)
		return -ENOMEM;

	rcu_read_lock();
	for (i = 0; i < sp->num_of_freqs; i++) {
		/*
		 * NOTE(review): opp is not checked with IS_ERR() here;
		 * presumably dev_pm_opp_get_voltage() tolerates an ERR_PTR
		 * and returns 0 (then the final else picks the last corner)
		 * — confirm against this kernel's opp implementation.
		 */
		opp = dev_pm_opp_find_freq_exact(cpu_dev,
				sp->table[i].frequency * 1000, true);
		corner = dev_pm_opp_get_voltage(opp);

		if (corner > 400000)
			voltage[i] = corner / 1000;
		else if (corner > 0 && corner < ARRAY_SIZE(average_voltage))
			voltage[i] = average_voltage[corner];
		else
			voltage[i]
			    = average_voltage[ARRAY_SIZE(average_voltage) - 1];
	}
	rcu_read_unlock();

	sp->voltage = voltage;
	return 0;
}
+
+static int msm_core_dyn_pwr_init(struct platform_device *pdev,
+ int cpu)
+{
+ int ret = 0;
+
+ if (!activity[cpu].sp->table)
+ return 0;
+
+ ret = msm_get_voltage_levels(&pdev->dev, cpu, activity[cpu].sp);
+ if (ret)
+ return ret;
+
+ ret = msm_get_power_values(cpu, activity[cpu].sp);
+
+ return ret;
+}
+
/*
 * Resolve the tsens sensor for @cpu from its DT "sensor" phandle, read
 * the scaling factor and current temperature, and pre-fill the high/low
 * trip structures. A missing phandle is not an error (userspace may
 * supply temperatures); the cpu then gets DEFAULT_TEMP and sensor_id
 * set to -ENODEV.
 */
static int msm_core_tsens_init(struct device_node *node, int cpu)
{
	int ret = 0;
	char *key = NULL;
	struct device_node *phandle;
	const char *sensor_type = NULL;
	struct cpu_activity_info *cpu_node = &activity[cpu];
	int temp;

	if (!node)
		return -ENODEV;

	key = "sensor";
	phandle = of_parse_phandle(node, key, 0);
	if (!phandle) {
		pr_info("%s: No sensor mapping found for the core\n",
				__func__);
		/* Do not treat this as error as some targets might have
		 * temperature notification only in userspace.
		 * Use default temperature for the core. Userspace might
		 * update the temperature once it is up.
		 */
		cpu_node->sensor_id = -ENODEV;
		cpu_node->temp = DEFAULT_TEMP;
		return 0;
	}

	key = "qcom,sensor-name";
	ret = of_property_read_string(phandle, key,
				&sensor_type);
	if (ret) {
		pr_err("%s: Cannot read tsens id\n", __func__);
		return ret;
	}

	cpu_node->sensor_id = sensor_get_id((char *)sensor_type);
	if (cpu_node->sensor_id < 0)
		return cpu_node->sensor_id;

	/* Optional; falls back to DEFAULT_SCALING_FACTOR (1). */
	key = "qcom,scaling-factor";
	ret = of_property_read_u32(phandle, key,
				&scaling_factor);
	if (ret) {
		pr_info("%s: Cannot read tsens scaling factor\n", __func__);
		scaling_factor = DEFAULT_SCALING_FACTOR;
	}

	ret = sensor_get_temp(cpu_node->sensor_id, &temp);
	if (ret)
		return ret;

	cpu_node->temp = temp / scaling_factor;

	/* Trips are armed later by set_threshold(); only pre-fill here. */
	init_sens_threshold(&cpu_node->hi_threshold,
			THERMAL_TRIP_CONFIGURABLE_HI,
			(cpu_node->temp + high_hyst_temp) * scaling_factor,
			(void *)cpu_node);
	init_sens_threshold(&cpu_node->low_threshold,
			THERMAL_TRIP_CONFIGURABLE_LOW,
			(cpu_node->temp - low_hyst_temp) * scaling_factor,
			(void *)cpu_node);

	return ret;
}
+
/*
 * Read the cpu's MPIDR from the DT "reg" property.
 * Returns the (non-negative) mpidr, or a negative errno on read failure.
 */
static int msm_core_mpidr_init(struct device_node *phandle)
{
	int mpidr;
	int rc;

	rc = of_property_read_u32(phandle, "reg", &mpidr);
	if (rc) {
		pr_err("%s: Cannot read mpidr\n", __func__);
		return rc;
	}

	return mpidr;
}
+
/*
 * cpufreq policy notifier: when a policy is created for CPUs whose
 * frequency table was not yet known (e.g. came online after probe),
 * pick up the table and (re)build voltage/power/stats tables for all
 * related CPUs.
 */
static int msm_core_cpu_policy_handler(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cpu_activity_info *cpu_info = &activity[policy->cpu];
	int cpu;
	int ret;

	/* Already initialized at probe time; nothing to do. */
	if (cpu_info->sp->table)
		return NOTIFY_OK;

	switch (val) {
	case CPUFREQ_CREATE_POLICY:
		mutex_lock(&policy_update_mutex);
		update_related_freq_table(policy);

		for_each_cpu(cpu, policy->related_cpus) {
			ret = msm_core_dyn_pwr_init(msm_core_pdev, cpu);
			if (ret)
				pr_debug("voltage-pwr table update failed\n");

			ret = msm_core_stats_init(&msm_core_pdev->dev, cpu);
			if (ret)
				pr_debug("Stats table update failed\n");
		}
		mutex_unlock(&policy_update_mutex);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

struct notifier_block cpu_policy = {
	.notifier_call = msm_core_cpu_policy_handler
};
+
/*
 * PM notifier: around suspend/hibernate, stop sampling work and disarm
 * all tsens trips so the system is not woken just to refresh stats;
 * on resume, kick the sampling thread to re-read temperatures.
 * kthread_update_mutex excludes the do_sampling loop.
 */
static int system_suspend_handler(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int cpu;

	mutex_lock(&kthread_update_mutex);
	switch (val) {
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		/*
		 * Set completion event to read temperature and repopulate
		 * stats
		 */
		in_suspend = 0;
		complete(&sampling_completion);
		break;
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/*
		 * cancel delayed work to be able to restart immediately
		 * after system resume
		 */
		in_suspend = 1;
		cancel_delayed_work(&sampling_work);
		/*
		 * cancel TSENS interrupts as we do not want to wake up from
		 * suspend to take care of repopulate stats while the system is
		 * in suspend
		 */
		for_each_possible_cpu(cpu) {
			if (activity[cpu].sensor_id < 0)
				continue;

			sensor_activate_trip(activity[cpu].sensor_id,
				&activity[cpu].hi_threshold, false);
			sensor_activate_trip(activity[cpu].sensor_id,
				&activity[cpu].low_threshold, false);
		}
		break;
	default:
		break;
	}
	mutex_unlock(&kthread_update_mutex);

	return NOTIFY_OK;
}
+
+static int msm_core_freq_init(void)
+{
+ int cpu;
+ struct cpufreq_policy *policy;
+
+ for_each_possible_cpu(cpu) {
+ activity[cpu].sp = kzalloc(sizeof(*(activity[cpu].sp)),
+ GFP_KERNEL);
+ if (!activity[cpu].sp)
+ return -ENOMEM;
+ }
+
+ for_each_online_cpu(cpu) {
+ if (activity[cpu].sp->table)
+ continue;
+
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+
+ update_related_freq_table(policy);
+ cpufreq_cpu_put(policy);
+ }
+
+ return 0;
+}
+
+static int msm_core_params_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ unsigned long cpu = 0;
+ struct device_node *child_node = NULL;
+ struct device_node *ea_node = NULL;
+ char *key = NULL;
+ int mpidr;
+
+ for_each_possible_cpu(cpu) {
+ child_node = of_get_cpu_node(cpu, NULL);
+
+ if (!child_node)
+ continue;
+
+ mpidr = msm_core_mpidr_init(child_node);
+ if (mpidr < 0)
+ return mpidr;
+
+ if (cpu >= num_possible_cpus())
+ continue;
+
+ activity[cpu].mpidr = mpidr;
+
+ key = "qcom,ea";
+ ea_node = of_parse_phandle(child_node, key, 0);
+ if (!ea_node) {
+ pr_err("%s Couldn't find the ea_node for cpu%lu\n",
+ __func__, cpu);
+ return -ENODEV;
+ }
+
+ ret = msm_core_tsens_init(ea_node, cpu);
+ if (ret)
+ return ret;
+
+ if (!activity[cpu].sp->table)
+ continue;
+
+ ret = msm_core_dyn_pwr_init(msm_core_pdev, cpu);
+ if (ret)
+ pr_debug("voltage-pwr table update failed\n");
+
+ ret = msm_core_stats_init(&msm_core_pdev->dev, cpu);
+ if (ret)
+ pr_debug("Stats table update failed\n");
+ }
+
+ return 0;
+}
+
/* File operations for the /dev/pta misc device (EA_LEAKAGE/EA_VOLT ioctls). */
static const struct file_operations msm_core_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = msm_core_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = msm_core_compat_ioctl,
#endif
	.open = msm_core_open,
	.release = msm_core_release,
};

static struct miscdevice msm_core_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "pta",
	.fops = &msm_core_ops
};
+
+static void free_dyn_memory(void)
+{
+ int i, cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (activity[cpu].sp) {
+ for (i = 0; i < TEMP_DATA_POINTS; i++) {
+ if (!activity[cpu].sp->power)
+ break;
+
+ kfree(activity[cpu].sp->power[i]);
+ }
+ }
+ kfree(activity[cpu].sp);
+ }
+}
+
+static int uio_init(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct uio_info *info = NULL;
+ struct resource *clnt_res = NULL;
+ u32 ea_mem_size = 0;
+ phys_addr_t ea_mem_pyhsical = 0;
+
+ clnt_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!clnt_res) {
+ pr_err("resource not found\n");
+ return -ENODEV;
+ }
+
+ info = devm_kzalloc(&pdev->dev, sizeof(struct uio_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ ea_mem_size = resource_size(clnt_res);
+ ea_mem_pyhsical = clnt_res->start;
+
+ if (ea_mem_size == 0) {
+ pr_err("msm-core: memory size is zero");
+ return -EINVAL;
+ }
+
+ /* Setup device */
+ info->name = clnt_res->name;
+ info->version = "1.0";
+ info->mem[0].addr = ea_mem_pyhsical;
+ info->mem[0].size = ea_mem_size;
+ info->mem[0].memtype = UIO_MEM_PHYS;
+
+ ret = uio_register_device(&pdev->dev, info);
+ if (ret) {
+ pr_err("uio register failed ret=%d", ret);
+ return ret;
+ }
+ dev_set_drvdata(&pdev->dev, info);
+
+ return 0;
+}
+
/*
 * Probe: read DT tunables, register the UIO region, allocate per-cpu
 * state, register the /dev/pta misc device, parse per-cpu DT params,
 * start the sampling kthread, arm trips, and hook the cpufreq-policy
 * and PM notifiers. On failure the UIO device and per-cpu allocations
 * are torn down.
 */
static int msm_core_dev_probe(struct platform_device *pdev)
{
	int ret = 0;
	char *key = NULL;
	struct device_node *node;
	int cpu;
	struct uio_info *info;

	if (!pdev)
		return -ENODEV;

	msm_core_pdev = pdev;
	node = pdev->dev.of_node;
	if (!node)
		return -ENODEV;

	/* All DT tunables below are optional and fall back to defaults. */
	key = "qcom,low-hyst-temp";
	ret = of_property_read_u32(node, key, &low_hyst_temp);
	if (ret)
		low_hyst_temp = DEFAULT_LOW_HYST_TEMP;

	key = "qcom,high-hyst-temp";
	ret = of_property_read_u32(node, key, &high_hyst_temp);
	if (ret)
		high_hyst_temp = DEFAULT_HIGH_HYST_TEMP;

	key = "qcom,polling-interval";
	ret = of_property_read_u32(node, key, &poll_ms);
	if (ret)
		pr_info("msm-core initialized without polling period\n");

	key = "qcom,throttling-temp";
	ret = of_property_read_u32(node, key, &max_throttling_temp);

	ret = uio_init(pdev);
	if (ret)
		return ret;

	ret = msm_core_freq_init();
	if (ret)
		goto failed;

	/* NOTE(review): misc device is not deregistered on later failures
	 * in this function — confirm whether that is acceptable here. */
	ret = misc_register(&msm_core_device);
	if (ret) {
		pr_err("%s: Error registering device %d\n", __func__, ret);
		goto failed;
	}

	ret = msm_core_params_init(pdev);
	if (ret)
		goto failed;

	INIT_DEFERRABLE_WORK(&sampling_work, samplequeue_handle);
	ret = msm_core_task_init(&pdev->dev);
	if (ret)
		goto failed;

	for_each_possible_cpu(cpu)
		set_threshold(&activity[cpu]);

	/* Kick the first sample immediately. */
	schedule_delayed_work(&sampling_work, msecs_to_jiffies(0));
	cpufreq_register_notifier(&cpu_policy, CPUFREQ_POLICY_NOTIFIER);
	pm_notifier(system_suspend_handler, 0);
	return 0;
failed:
	info = dev_get_drvdata(&pdev->dev);
	uio_unregister_device(info);
	free_dyn_memory();
	return ret;
}
+
/*
 * Remove: unregister the UIO device, cancel all armed tsens trips,
 * free per-cpu state and drop the misc device.
 *
 * NOTE(review): the sampling kthread is not stopped, sampling_work is
 * not cancelled, and the cpufreq/PM notifiers registered in probe are
 * not unregistered here — presumably this driver is never unbound in
 * practice; confirm before relying on remove().
 */
static int msm_core_remove(struct platform_device *pdev)
{
	int cpu;
	struct uio_info *info = dev_get_drvdata(&pdev->dev);

	uio_unregister_device(info);

	for_each_possible_cpu(cpu) {
		if (activity[cpu].sensor_id < 0)
			continue;

		sensor_cancel_trip(activity[cpu].sensor_id,
				&activity[cpu].hi_threshold);
		sensor_cancel_trip(activity[cpu].sensor_id,
				&activity[cpu].low_threshold);
	}
	free_dyn_memory();
	misc_deregister(&msm_core_device);
	return 0;
}
+
+static struct of_device_id msm_core_match_table[] = {
+ {.compatible = "qcom,apss-core-ea"},
+ {},
+};
+
+static struct platform_driver msm_core_driver = {
+ .probe = msm_core_dev_probe,
+ .driver = {
+ .name = "msm_core",
+ .owner = THIS_MODULE,
+ .of_match_table = msm_core_match_table,
+ },
+ .remove = msm_core_remove,
+};
+
+static int __init msm_core_init(void)
+{
+ return platform_driver_register(&msm_core_driver);
+}
+late_initcall(msm_core_init);