Diffstat (limited to 'drivers/devfreq')
-rw-r--r--  drivers/devfreq/Kconfig | 150
-rw-r--r--  drivers/devfreq/Makefile | 16
-rw-r--r--  drivers/devfreq/arm-memlat-mon.c | 368
-rw-r--r--  drivers/devfreq/armbw-pm.c | 466
-rw-r--r--  drivers/devfreq/bimc-bwmon.c | 688
-rw-r--r--  drivers/devfreq/devfreq.c | 60
-rw-r--r--  drivers/devfreq/devfreq_devbw.c | 290
-rw-r--r--  drivers/devfreq/devfreq_simple_dev.c | 213
-rw-r--r--  drivers/devfreq/devfreq_spdm.c | 443
-rw-r--r--  drivers/devfreq/devfreq_spdm.h | 130
-rw-r--r--  drivers/devfreq/devfreq_spdm_debugfs.c | 848
-rw-r--r--  drivers/devfreq/devfreq_trace.h | 44
-rw-r--r--  drivers/devfreq/governor.h | 1
-rw-r--r--  drivers/devfreq/governor_bw_hwmon.c | 983
-rw-r--r--  drivers/devfreq/governor_bw_hwmon.h | 88
-rw-r--r--  drivers/devfreq/governor_bw_vbif.c | 142
-rw-r--r--  drivers/devfreq/governor_cache_hwmon.c | 429
-rw-r--r--  drivers/devfreq/governor_cache_hwmon.h | 71
-rw-r--r--  drivers/devfreq/governor_cpufreq.c | 712
-rw-r--r--  drivers/devfreq/governor_gpubw_mon.c | 255
-rw-r--r--  drivers/devfreq/governor_memlat.c | 414
-rw-r--r--  drivers/devfreq/governor_memlat.h | 89
-rw-r--r--  drivers/devfreq/governor_msm_adreno_tz.c | 660
-rw-r--r--  drivers/devfreq/governor_performance.c | 24
-rw-r--r--  drivers/devfreq/governor_powersave.c | 5
-rw-r--r--  drivers/devfreq/governor_simpleondemand.c | 29
-rw-r--r--  drivers/devfreq/governor_spdm_bw_hyp.c | 417
-rw-r--r--  drivers/devfreq/governor_userspace.c | 3
-rw-r--r--  drivers/devfreq/m4m-hwmon.c | 429
-rw-r--r--  drivers/devfreq/msmcci-hwmon.c | 627
30 files changed, 9069 insertions, 25 deletions
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 64281bb2f650..396f66bb28d1 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -64,6 +64,114 @@ config DEVFREQ_GOV_USERSPACE
Otherwise, the governor does not change the frequency
given at the initialization.
+config DEVFREQ_GOV_QCOM_ADRENO_TZ
+	tristate "Qualcomm Adreno Trustzone"
+ depends on QCOM_KGSL && QCOM_SCM
+ help
+	  Trustzone-based governor for the Adreno GPU.
+	  Sets the frequency using an "on-demand" algorithm.
+ This governor is unlikely to be useful for other devices.
+
+config DEVFREQ_GOV_CPUFREQ
+ tristate "CPUfreq"
+ depends on CPU_FREQ
+ help
+ Chooses frequency based on the online CPUs' current frequency and a
+ CPU frequency to device frequency mapping table(s). This governor
+ can be useful for controlling devices such as DDR, cache, CCI, etc.
+
+config QCOM_BIMC_BWMON
+ tristate "QCOM BIMC Bandwidth monitor hardware"
+ depends on ARCH_QCOM
+ help
+ The BIMC Bandwidth monitor hardware allows for monitoring the
+ traffic coming from each master port connected to the BIMC. It also
+ has the capability to raise an IRQ when the count exceeds a
+ programmable limit.
+
+config DEVFREQ_GOV_QCOM_GPUBW_MON
+ tristate "GPU BW voting governor"
+ depends on DEVFREQ_GOV_QCOM_ADRENO_TZ
+ help
+ QTI GPU governor for GPU bus bandwidth voting.
+	  This governor works together with the QTI Adreno Trustzone governor,
+	  and selects bus frequency votes using an "on-demand" algorithm.
+ This governor is unlikely to be useful for non-QTI devices.
+
+config ARMBW_HWMON
+ tristate "ARM PMU Bandwidth monitor hardware"
+ depends on ARCH_QCOM
+ help
+	  The PMU present on these ARM cores allows for the use of counters to
+	  monitor the traffic coming from each core to the bus. It also has the
+	  capability to raise an IRQ when the counter overflows, which can be
+	  used to get an IRQ when the count exceeds a certain value.
+
+config ARM_MEMLAT_MON
+ tristate "ARM CPU Memory Latency monitor hardware"
+ depends on ARCH_QCOM
+ help
+	  The PMU present on these ARM cores allows for the use of counters to
+ monitor the memory latency characteristics of an ARM CPU workload.
+ This driver uses these counters to implement the APIs needed by
+ the mem_latency devfreq governor.
+
+config QCOMCCI_HWMON
+ tristate "QCOM CCI Cache monitor hardware"
+ depends on ARCH_QCOM
+ help
+	  QCOM CCI has additional PMU counters that can be used to monitor
+	  cache requests. The QCOM CCI hardware monitor device configures these
+	  registers to monitor cache traffic and inform the governor. It can
+	  also raise an IRQ when the count exceeds a programmable limit.
+
+config QCOM_M4M_HWMON
+ tristate "QCOM M4M cache monitor hardware"
+ depends on ARCH_QCOM
+ help
+	  QCOM M4M has counters that can be used to monitor requests coming to
+	  M4M. The QCOM M4M hardware monitor device programs the corresponding
+	  registers to monitor the cache and inform the governor. It can also
+	  raise an IRQ when the count exceeds a programmable limit.
+
+config DEVFREQ_GOV_QCOM_BW_HWMON
+ tristate "HW monitor based governor for device BW"
+ depends on QCOM_BIMC_BWMON
+ help
+ HW monitor based governor for device to DDR bandwidth voting.
+ This governor sets the CPU BW vote by using BIMC counters to monitor
+	  the CPU's use of DDR. Since this uses target-specific counters, it
+	  can conflict with existing profiling tools. This governor is unlikely
+ to be useful for non-QCOM devices.
+
+config DEVFREQ_GOV_QCOM_CACHE_HWMON
+ tristate "HW monitor based governor for cache frequency"
+ help
+ HW monitor based governor for cache frequency scaling. This
+ governor sets the cache frequency by using PM counters to monitor the
+ CPU's use of cache. Since this governor uses some of the PM counters
+ it can conflict with existing profiling tools. This governor is
+ unlikely to be useful for other devices.
+
+config DEVFREQ_GOV_SPDM_HYP
+ bool "QCOM SPDM Hypervisor Governor"
+ depends on ARCH_QCOM
+ help
+ Hypervisor based governor for CPU bandwidth voting
+ for QCOM chipsets.
+	  Sets the frequency using an "on-demand" algorithm.
+ This governor is unlikely to be useful for other devices.
+
+config DEVFREQ_GOV_MEMLAT
+ tristate "HW monitor based governor for device BW"
+ depends on ARM_MEMLAT_MON
+ help
+ HW monitor based governor for device to DDR bandwidth voting.
+	  This governor sets the CPU BW vote based on stats obtained from the
+	  memlat monitor if it determines that a workload is memory latency
+	  bound. Since this uses target-specific counters, it can conflict with
+	  existing profiling tools.
+
comment "DEVFREQ Drivers"
config ARM_EXYNOS4_BUS_DEVFREQ
@@ -98,6 +206,48 @@ config ARM_TEGRA_DEVFREQ
It reads ACTMON counters of memory controllers and adjusts the
operating frequencies and voltages with OPP support.
+config DEVFREQ_SIMPLE_DEV
+ tristate "Device driver for simple clock device with no status info"
+ select DEVFREQ_GOV_PERFORMANCE
+ select DEVFREQ_GOV_POWERSAVE
+ select DEVFREQ_GOV_USERSPACE
+ select DEVFREQ_GOV_CPUFREQ
+ help
+ Device driver for simple devices that control their frequency using
+ clock APIs and don't have any form of status reporting.
+
+config QCOM_DEVFREQ_DEVBW
+ bool "QCOM DEVFREQ device for device master <-> slave IB/AB BW voting"
+ depends on ARCH_QCOM
+ select DEVFREQ_GOV_PERFORMANCE
+ select DEVFREQ_GOV_POWERSAVE
+ select DEVFREQ_GOV_USERSPACE
+ select DEVFREQ_GOV_CPUFREQ
+ default n
+ help
+ Different devfreq governors use this devfreq device to make CPU to
+ DDR IB/AB bandwidth votes. This driver provides a SoC topology
+	  agnostic interface so that some of the devfreq governors can be
+ shared across SoCs.
+
+config SPDM_SCM
+ bool "QCOM SPDM SCM based call support"
+ depends on DEVFREQ_SPDM
+ help
+	  The SPDM driver supports accessing the DCVS algorithm logic via
+	  SCM or HVC calls. This adds support for SPDM interaction with TZ
+	  via SCM-based calls. If not selected, hypervisor interaction will
+	  be used instead.
+
+config DEVFREQ_SPDM
+ bool "QCOM SPDM based bandwidth voting"
+ depends on ARCH_QCOM
+ select DEVFREQ_GOV_SPDM_HYP
+ help
+	  This adds support for SPDM-based bandwidth voting on QCOM chipsets.
+ This driver allows any SPDM based client to vote for bandwidth.
+ Used with the QCOM SPDM Hypervisor Governor.
+
source "drivers/devfreq/event/Kconfig"
endif # PM_DEVFREQ
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 5134f9ee983d..0fbb2b2c5296 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -4,11 +4,27 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
+obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ) += governor_cpufreq.o
+obj-$(CONFIG_QCOM_BIMC_BWMON) += bimc-bwmon.o
+obj-$(CONFIG_ARMBW_HWMON) += armbw-pm.o
+obj-$(CONFIG_ARM_MEMLAT_MON) += arm-memlat-mon.o
+obj-$(CONFIG_QCOMCCI_HWMON) += msmcci-hwmon.o
+obj-$(CONFIG_QCOM_M4M_HWMON) += m4m-hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON) += governor_bw_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON) += governor_cache_hwmon.o
+obj-$(CONFIG_DEVFREQ_GOV_SPDM_HYP) += governor_spdm_bw_hyp.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_gpubw_mon.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
+obj-$(CONFIG_DEVFREQ_GOV_MEMLAT) += governor_memlat.o
# DEVFREQ Drivers
obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos/
obj-$(CONFIG_ARM_EXYNOS5_BUS_DEVFREQ) += exynos/
obj-$(CONFIG_ARM_TEGRA_DEVFREQ) += tegra-devfreq.o
+obj-$(CONFIG_QCOM_DEVFREQ_DEVBW) += devfreq_devbw.o
+obj-$(CONFIG_DEVFREQ_SIMPLE_DEV) += devfreq_simple_dev.o
+obj-$(CONFIG_DEVFREQ_SPDM) += devfreq_spdm.o devfreq_spdm_debugfs.o
# DEVFREQ Event Drivers
obj-$(CONFIG_PM_DEVFREQ_EVENT) += event/
diff --git a/drivers/devfreq/arm-memlat-mon.c b/drivers/devfreq/arm-memlat-mon.c
new file mode 100644
index 000000000000..4fb0a5ffda50
--- /dev/null
+++ b/drivers/devfreq/arm-memlat-mon.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "arm-memlat-mon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include "governor.h"
+#include "governor_memlat.h"
+#include <linux/perf_event.h>
+
+enum ev_index {
+ INST_IDX,
+ L2DM_IDX,
+ CYC_IDX,
+ NUM_EVENTS
+};
+#define INST_EV 0x08
+#define L2DM_EV 0x17
+#define CYC_EV 0x11
+
+struct event_data {
+ struct perf_event *pevent;
+ unsigned long prev_count;
+};
+
+struct memlat_hwmon_data {
+ struct event_data events[NUM_EVENTS];
+ ktime_t prev_ts;
+ bool init_pending;
+};
+static DEFINE_PER_CPU(struct memlat_hwmon_data, pm_data);
+
+struct cpu_grp_info {
+ cpumask_t cpus;
+ struct memlat_hwmon hw;
+ struct notifier_block arm_memlat_cpu_notif;
+};
+
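+/*
+ * Convert the cycle count accumulated since the last read into an
+ * effective frequency: cycles / elapsed-us yields MHz.
+ */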
+static unsigned long compute_freq(struct memlat_hwmon_data *hw_data,
+ unsigned long cyc_cnt)
+{
+ ktime_t ts;
+ unsigned int diff;
+ unsigned long freq = 0;
+
+ ts = ktime_get();
+ diff = ktime_to_us(ktime_sub(ts, hw_data->prev_ts));
+ if (!diff)
+ diff = 1;
+ hw_data->prev_ts = ts;
+ freq = cyc_cnt;
+ do_div(freq, diff);
+
+ return freq;
+}
+
+#define MAX_COUNT_LIM 0xFFFFFFFFFFFFFFFF
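+/* Return the event delta since the previous read, allowing for wraparound. */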
+static inline unsigned long read_event(struct event_data *event)
+{
+ unsigned long ev_count;
+ u64 total, enabled, running;
+
+ total = perf_event_read_value(event->pevent, &enabled, &running);
+ if (total >= event->prev_count)
+ ev_count = total - event->prev_count;
+ else
+ ev_count = (MAX_COUNT_LIM - event->prev_count) + total;
+
+ event->prev_count = total;
+
+ return ev_count;
+}
+
+static void read_perf_counters(int cpu, struct cpu_grp_info *cpu_grp)
+{
+ int cpu_idx;
+ struct memlat_hwmon_data *hw_data = &per_cpu(pm_data, cpu);
+ struct memlat_hwmon *hw = &cpu_grp->hw;
+ unsigned long cyc_cnt;
+
+ if (hw_data->init_pending)
+ return;
+
+ cpu_idx = cpu - cpumask_first(&cpu_grp->cpus);
+
+ hw->core_stats[cpu_idx].inst_count =
+ read_event(&hw_data->events[INST_IDX]);
+
+ hw->core_stats[cpu_idx].mem_count =
+ read_event(&hw_data->events[L2DM_IDX]);
+
+ cyc_cnt = read_event(&hw_data->events[CYC_IDX]);
+ hw->core_stats[cpu_idx].freq = compute_freq(hw_data, cyc_cnt);
+}
+
+static unsigned long get_cnt(struct memlat_hwmon *hw)
+{
+ int cpu;
+ struct cpu_grp_info *cpu_grp = container_of(hw,
+ struct cpu_grp_info, hw);
+
+ for_each_cpu(cpu, &cpu_grp->cpus)
+ read_perf_counters(cpu, cpu_grp);
+
+ return 0;
+}
+
+static void delete_events(struct memlat_hwmon_data *hw_data)
+{
+ int i;
+
+ for (i = 0; i < NUM_EVENTS; i++) {
+ hw_data->events[i].prev_count = 0;
+ perf_event_release_kernel(hw_data->events[i].pevent);
+ }
+}
+
+static void stop_hwmon(struct memlat_hwmon *hw)
+{
+ int cpu, idx;
+ struct memlat_hwmon_data *hw_data;
+ struct cpu_grp_info *cpu_grp = container_of(hw,
+ struct cpu_grp_info, hw);
+
+ get_online_cpus();
+ for_each_cpu(cpu, &cpu_grp->cpus) {
+ hw_data = &per_cpu(pm_data, cpu);
+ if (hw_data->init_pending)
+ hw_data->init_pending = false;
+ else
+ delete_events(hw_data);
+
+ /* Clear governor data */
+ idx = cpu - cpumask_first(&cpu_grp->cpus);
+ hw->core_stats[idx].inst_count = 0;
+ hw->core_stats[idx].mem_count = 0;
+ hw->core_stats[idx].freq = 0;
+ }
+ put_online_cpus();
+
+ unregister_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
+}
+
+static struct perf_event_attr *alloc_attr(void)
+{
+ struct perf_event_attr *attr;
+
+ attr = kzalloc(sizeof(struct perf_event_attr), GFP_KERNEL);
+ if (!attr)
+ return ERR_PTR(-ENOMEM);
+
+ attr->type = PERF_TYPE_RAW;
+ attr->size = sizeof(struct perf_event_attr);
+ attr->pinned = 1;
+ attr->exclude_idle = 1;
+
+ return attr;
+}
+
+static int set_events(struct memlat_hwmon_data *hw_data, int cpu)
+{
+ struct perf_event *pevent;
+ struct perf_event_attr *attr;
+ int err;
+
+ /* Allocate an attribute for event initialization */
+ attr = alloc_attr();
+ if (IS_ERR(attr))
+ return PTR_ERR(attr);
+
+ attr->config = INST_EV;
+ pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+ if (IS_ERR(pevent))
+ goto err_out;
+ hw_data->events[INST_IDX].pevent = pevent;
+ perf_event_enable(hw_data->events[INST_IDX].pevent);
+
+ attr->config = L2DM_EV;
+ pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+ if (IS_ERR(pevent))
+ goto err_out;
+ hw_data->events[L2DM_IDX].pevent = pevent;
+ perf_event_enable(hw_data->events[L2DM_IDX].pevent);
+
+ attr->config = CYC_EV;
+ pevent = perf_event_create_kernel_counter(attr, cpu, NULL, NULL, NULL);
+ if (IS_ERR(pevent))
+ goto err_out;
+ hw_data->events[CYC_IDX].pevent = pevent;
+ perf_event_enable(hw_data->events[CYC_IDX].pevent);
+
+ kfree(attr);
+ return 0;
+
+err_out:
+ err = PTR_ERR(pevent);
+ kfree(attr);
+ return err;
+}
+
+static int arm_memlat_cpu_callback(struct notifier_block *nb,
+ unsigned long action, void *hcpu)
+{
+ unsigned long cpu = (unsigned long)hcpu;
+ struct memlat_hwmon_data *hw_data = &per_cpu(pm_data, cpu);
+
+ if ((action != CPU_ONLINE) || !hw_data->init_pending)
+ return NOTIFY_OK;
+
+ if (set_events(hw_data, cpu))
+ pr_warn("Failed to create perf event for CPU%lu\n", cpu);
+
+ hw_data->init_pending = false;
+
+ return NOTIFY_OK;
+}
+
+static int start_hwmon(struct memlat_hwmon *hw)
+{
+ int cpu, ret = 0;
+ struct memlat_hwmon_data *hw_data;
+ struct cpu_grp_info *cpu_grp = container_of(hw,
+ struct cpu_grp_info, hw);
+
+ register_cpu_notifier(&cpu_grp->arm_memlat_cpu_notif);
+
+ get_online_cpus();
+ for_each_cpu(cpu, &cpu_grp->cpus) {
+ hw_data = &per_cpu(pm_data, cpu);
+ ret = set_events(hw_data, cpu);
+ if (ret) {
+ if (!cpu_online(cpu)) {
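+				/*
+				 * CPU is offline: defer perf event creation
+				 * to the CPU_ONLINE hotplug callback.
+				 */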
+ hw_data->init_pending = true;
+ ret = 0;
+ } else {
+ pr_warn("Perf event init failed on CPU%d\n",
+ cpu);
+ break;
+ }
+ }
+ }
+
+ put_online_cpus();
+ return ret;
+}
+
+static int get_mask_from_dev_handle(struct platform_device *pdev,
+ cpumask_t *mask)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dev_phandle;
+ struct device *cpu_dev;
+ int cpu, i = 0;
+ int ret = -ENOENT;
+
+ dev_phandle = of_parse_phandle(dev->of_node, "qcom,cpulist", i++);
+ while (dev_phandle) {
+ for_each_possible_cpu(cpu) {
+ cpu_dev = get_cpu_device(cpu);
+ if (cpu_dev && cpu_dev->of_node == dev_phandle) {
+ cpumask_set_cpu(cpu, mask);
+ ret = 0;
+ break;
+ }
+ }
+ dev_phandle = of_parse_phandle(dev->of_node,
+ "qcom,cpulist", i++);
+ }
+
+ return ret;
+}
+
+static int arm_memlat_mon_driver_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct memlat_hwmon *hw;
+ struct cpu_grp_info *cpu_grp;
+ int cpu, ret;
+
+ cpu_grp = devm_kzalloc(dev, sizeof(*cpu_grp), GFP_KERNEL);
+ if (!cpu_grp)
+ return -ENOMEM;
+ cpu_grp->arm_memlat_cpu_notif.notifier_call = arm_memlat_cpu_callback;
+ hw = &cpu_grp->hw;
+
+ hw->dev = dev;
+ hw->of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+ if (!hw->of_node) {
+ dev_err(dev, "Couldn't find a target device\n");
+ return -ENODEV;
+ }
+
+ if (get_mask_from_dev_handle(pdev, &cpu_grp->cpus)) {
+ dev_err(dev, "CPU list is empty\n");
+ return -ENODEV;
+ }
+
+ hw->num_cores = cpumask_weight(&cpu_grp->cpus);
+ hw->core_stats = devm_kzalloc(dev, hw->num_cores *
+ sizeof(*(hw->core_stats)), GFP_KERNEL);
+ if (!hw->core_stats)
+ return -ENOMEM;
+
+ for_each_cpu(cpu, &cpu_grp->cpus)
+ hw->core_stats[cpu - cpumask_first(&cpu_grp->cpus)].id = cpu;
+
+ hw->start_hwmon = &start_hwmon;
+ hw->stop_hwmon = &stop_hwmon;
+ hw->get_cnt = &get_cnt;
+
+ ret = register_memlat(dev, hw);
+ if (ret) {
+ pr_err("Mem Latency Gov registration failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct of_device_id match_table[] = {
+ { .compatible = "qcom,arm-memlat-mon" },
+ {}
+};
+
+static struct platform_driver arm_memlat_mon_driver = {
+ .probe = arm_memlat_mon_driver_probe,
+ .driver = {
+ .name = "arm-memlat-mon",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init arm_memlat_mon_init(void)
+{
+ return platform_driver_register(&arm_memlat_mon_driver);
+}
+module_init(arm_memlat_mon_init);
+
+static void __exit arm_memlat_mon_exit(void)
+{
+ platform_driver_unregister(&arm_memlat_mon_driver);
+}
+module_exit(arm_memlat_mon_exit);
diff --git a/drivers/devfreq/armbw-pm.c b/drivers/devfreq/armbw-pm.c
new file mode 100644
index 000000000000..b5c05953eda3
--- /dev/null
+++ b/drivers/devfreq/armbw-pm.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "armbw-pm: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpu.h>
+#include "governor.h"
+#include "governor_bw_hwmon.h"
+
+
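+/*
+ * Generate mrc/mcr accessor helpers for the ARMv7 PMU registers in
+ * coprocessor 15 (PMSELR, PMXEVTYPER, PMXEVCNTR, PMOVSR, PMCR, ...).
+ */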
+#define DEFINE_CP15_READ(name, op1, n, m, op2) \
+static u32 read_##name(void) \
+{ \
+ u32 val; \
+ asm volatile ("mrc p15, " #op1 ", %0, c" #n ", c" #m ", " #op2 \
+ : "=r" (val)); \
+ return val; \
+}
+
+#define DEFINE_CP15_WRITE(name, op1, n, m, op2) \
+static void write_##name(u32 val) \
+{ \
+ asm volatile ("mcr p15, " #op1 ", %0, c" #n ", c" #m", "#op2 \
+ : : "r" (val)); \
+}
+
+#define DEFINE_CP15_RW(name, op1, n, m, op2) \
+DEFINE_CP15_READ(name, op1, n, m, op2) \
+DEFINE_CP15_WRITE(name, op1, n, m, op2)
+
+DEFINE_CP15_WRITE(pmselr, 0, 9, 12, 5)
+DEFINE_CP15_WRITE(pmcntenset, 0, 9, 12, 1)
+DEFINE_CP15_WRITE(pmcntenclr, 0, 9, 12, 2)
+DEFINE_CP15_RW(pmovsr, 0, 9, 12, 3)
+DEFINE_CP15_WRITE(pmxevtyper, 0, 9, 13, 1)
+DEFINE_CP15_RW(pmxevcntr, 0, 9, 13, 2)
+DEFINE_CP15_WRITE(pmintenset, 0, 9, 14, 1)
+DEFINE_CP15_WRITE(pmintenclr, 0, 9, 14, 2)
+DEFINE_CP15_WRITE(pmcr, 0, 9, 12, 0)
+
+struct bwmon_data {
+ int cpu;
+ u32 saved_evcntr;
+ unsigned long count;
+ u32 prev_rw_start_val;
+ u32 limit;
+};
+
+static DEFINE_SPINLOCK(bw_lock);
+static struct bw_hwmon *globalhw;
+static struct work_struct irqwork;
+static int bw_irq;
+static DEFINE_PER_CPU(struct bwmon_data, gov_data);
+static int use_cnt;
+static DEFINE_MUTEX(use_lock);
+static struct workqueue_struct *bw_wq;
+static u32 bytes_per_beat;
+
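+/* PMU event 0x19 (bus access) is counted on event counter 0 (RW_MON). */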
+#define RW_NUM 0x19
+#define RW_MON 0
+
+static void mon_enable(void *info)
+{
+ /* Clear previous overflow state for given counter*/
+ write_pmovsr(BIT(RW_MON));
+ /* Enable event counter n */
+ write_pmcntenset(BIT(RW_MON));
+}
+
+static void mon_disable(void *info)
+{
+ write_pmcntenclr(BIT(RW_MON));
+}
+
+static void mon_irq_enable(void *info)
+{
+ write_pmintenset(BIT(RW_MON));
+}
+
+static void mon_irq_disable(void *info)
+{
+ write_pmintenclr(BIT(RW_MON));
+}
+
+static void mon_set_counter(void *count)
+{
+ write_pmxevcntr(*(u32 *) count);
+}
+
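+/*
+ * Enable the PMU, select event counter RW_MON, and program its event
+ * type and starting count.
+ */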
+static void mon_bw_init(void *evcntrval)
+{
+ u32 count;
+
+ if (!evcntrval)
+ count = 0xFFFFFFFF;
+ else
+ count = *(u32 *) evcntrval;
+
+ write_pmcr(BIT(0));
+ write_pmselr(RW_MON);
+ write_pmxevtyper(RW_NUM);
+ write_pmxevcntr(count);
+}
+
+static void percpu_bwirq_enable(void *info)
+{
+ enable_percpu_irq(bw_irq, IRQ_TYPE_EDGE_RISING);
+}
+
+static void percpu_bwirq_disable(void *info)
+{
+ disable_percpu_irq(bw_irq);
+}
+
+static irqreturn_t mon_intr_handler(int irq, void *dev_id)
+{
+ queue_work(bw_wq, &irqwork);
+ return IRQ_HANDLED;
+}
+
+static void bwmon_work(struct work_struct *work)
+{
+ update_bw_hwmon(globalhw);
+}
+
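+/* Convert a beat count measured over "us" microseconds into MBps. */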
+static unsigned int beats_to_mbps(long long beats, unsigned int us)
+{
+ beats *= USEC_PER_SEC;
+ beats *= bytes_per_beat;
+ do_div(beats, us);
+ beats = DIV_ROUND_UP_ULL(beats, SZ_1M);
+
+ return beats;
+}
+
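+/*
+ * Convert an MBps target over a "ms" sample window, padded by
+ * tolerance_percent, into an equivalent beat count.
+ */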
+static unsigned int mbps_to_beats(unsigned long mbps, unsigned int ms,
+ unsigned int tolerance_percent)
+{
+ mbps *= (100 + tolerance_percent) * ms;
+ mbps /= 100;
+ mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+ mbps = mult_frac(mbps, SZ_1M, bytes_per_beat);
+ return mbps;
+}
+
+static long mon_get_bw_count(u32 start_val)
+{
+ u32 overflow, count;
+
+ count = read_pmxevcntr();
+ overflow = read_pmovsr();
+ if (overflow & BIT(RW_MON))
+ return 0xFFFFFFFF - start_val + count;
+ else
+ return count - start_val;
+}
+
+static void get_beat_count(void *arg)
+{
+ int cpu = smp_processor_id();
+ struct bwmon_data *data = &per_cpu(gov_data, cpu);
+
+ mon_disable(NULL);
+ data->count = mon_get_bw_count(data->prev_rw_start_val);
+}
+
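+/*
+ * Sum the per-CPU beat counts into a bandwidth figure and re-arm each
+ * counter so that it overflows (raising an IRQ) once the tolerated
+ * limit is exceeded.
+ */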
+static unsigned long measure_bw_and_set_irq(struct bw_hwmon *hw,
+ unsigned int tol, unsigned int us)
+{
+ unsigned long bw = 0;
+ unsigned long tempbw;
+ int cpu;
+ struct bwmon_data *data;
+ unsigned int sample_ms = hw->df->profile->polling_ms;
+
+ spin_lock(&bw_lock);
+ on_each_cpu(get_beat_count, NULL, true);
+ for_each_possible_cpu(cpu) {
+ data = &per_cpu(gov_data, cpu);
+
+ tempbw = beats_to_mbps(data->count, us);
+ data->limit = mbps_to_beats(tempbw, sample_ms, tol);
+ data->prev_rw_start_val = 0xFFFFFFFF - data->limit;
+ if (cpu_online(cpu))
+ smp_call_function_single(cpu, mon_set_counter,
+ &(data->prev_rw_start_val), true);
+ bw += tempbw;
+ data->count = 0;
+ }
+ on_each_cpu(mon_enable, NULL, true);
+ spin_unlock(&bw_lock);
+ return bw;
+}
+
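+/*
+ * CPU_DYING: park the monitor by saving the raw counter value and the
+ * beats accumulated so far; CPU_STARTING restores and re-arms it.
+ */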
+static void save_hotplugstate(void)
+{
+ int cpu = smp_processor_id();
+ struct bwmon_data *data;
+
+ data = &per_cpu(gov_data, cpu);
+ percpu_bwirq_disable(NULL);
+ mon_disable(NULL);
+ data->saved_evcntr = read_pmxevcntr();
+ data->count = mon_get_bw_count(data->prev_rw_start_val);
+}
+
+static void restore_hotplugstate(void)
+{
+ int cpu = smp_processor_id();
+ u32 count;
+ struct bwmon_data *data;
+
+ data = &per_cpu(gov_data, cpu);
+ percpu_bwirq_enable(NULL);
+ if (data->count != 0)
+ count = data->saved_evcntr;
+ else
+ count = data->prev_rw_start_val = 0xFFFFFFFF - data->limit;
+ mon_bw_init(&count);
+ mon_irq_enable(NULL);
+ mon_enable(NULL);
+}
+
+static void save_pmstate(void)
+{
+ int cpu = smp_processor_id();
+ struct bwmon_data *data;
+
+ data = &per_cpu(gov_data, cpu);
+ mon_disable(NULL);
+ data->saved_evcntr = read_pmxevcntr();
+}
+
+static void restore_pmstate(void)
+{
+ int cpu = smp_processor_id();
+ u32 count;
+ struct bwmon_data *data;
+
+ data = &per_cpu(gov_data, cpu);
+ count = data->saved_evcntr;
+ mon_bw_init(&count);
+ mon_irq_enable(NULL);
+ mon_enable(NULL);
+}
+
+static int pm_notif(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ switch (action) {
+ case CPU_PM_ENTER:
+ save_pmstate();
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ restore_pmstate();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block bwmon_cpu_pm_nb = {
+ .notifier_call = pm_notif,
+};
+
+static int hotplug_notif(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ switch (action) {
+ case CPU_DYING:
+ spin_lock(&bw_lock);
+ save_hotplugstate();
+ spin_unlock(&bw_lock);
+ break;
+ case CPU_STARTING:
+ spin_lock(&bw_lock);
+ restore_hotplugstate();
+ spin_unlock(&bw_lock);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_hotplug_nb = {
+ .notifier_call = hotplug_notif,
+};
+
+static int register_notifier(void)
+{
+ int ret = 0;
+
+ mutex_lock(&use_lock);
+ if (use_cnt == 0) {
+ ret = cpu_pm_register_notifier(&bwmon_cpu_pm_nb);
+ if (ret)
+ goto out;
+ ret = register_cpu_notifier(&cpu_hotplug_nb);
+ if (ret) {
+ cpu_pm_unregister_notifier(&bwmon_cpu_pm_nb);
+ goto out;
+ }
+ }
+ use_cnt++;
+out:
+ mutex_unlock(&use_lock);
+ return ret;
+}
+
+static void unregister_notifier(void)
+{
+ mutex_lock(&use_lock);
+ if (use_cnt == 1) {
+ unregister_cpu_notifier(&cpu_hotplug_nb);
+ cpu_pm_unregister_notifier(&bwmon_cpu_pm_nb);
+ } else if (use_cnt == 0) {
+ pr_warn("Notifier ref count unbalanced\n");
+ goto out;
+ }
+ use_cnt--;
+out:
+ mutex_unlock(&use_lock);
+}
+
+static void stop_bw_hwmon(struct bw_hwmon *hw)
+{
+ unregister_notifier();
+ on_each_cpu(mon_disable, NULL, true);
+ on_each_cpu(mon_irq_disable, NULL, true);
+ on_each_cpu(percpu_bwirq_disable, NULL, true);
+ free_percpu_irq(bw_irq, &gov_data);
+}
+
+static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+{
+ u32 limit;
+ int cpu;
+ struct bwmon_data *data;
+ struct device *dev = hw->df->dev.parent;
+ int ret;
+
+ ret = request_percpu_irq(bw_irq, mon_intr_handler,
+ "bw_hwmon", &gov_data);
+ if (ret) {
+ dev_err(dev, "Unable to register interrupt handler!\n");
+ return ret;
+ }
+
+ get_online_cpus();
+ on_each_cpu(mon_bw_init, NULL, true);
+ on_each_cpu(mon_disable, NULL, true);
+
+	ret = register_notifier();
+	if (ret) {
+		pr_err("Unable to register notifier\n");
+		put_online_cpus();
+		free_percpu_irq(bw_irq, &gov_data);
+		return ret;
+	}
+
+ limit = mbps_to_beats(mbps, hw->df->profile->polling_ms, 0);
+ limit /= num_online_cpus();
+
+ for_each_possible_cpu(cpu) {
+ data = &per_cpu(gov_data, cpu);
+ data->limit = limit;
+ data->prev_rw_start_val = 0xFFFFFFFF - data->limit;
+ }
+
+ INIT_WORK(&irqwork, bwmon_work);
+
+ on_each_cpu(percpu_bwirq_enable, NULL, true);
+ on_each_cpu(mon_irq_enable, NULL, true);
+ on_each_cpu(mon_enable, NULL, true);
+ put_online_cpus();
+ return 0;
+}
+
+static int armbw_pm_driver_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bw_hwmon *bw;
+ int ret;
+
+ bw = devm_kzalloc(dev, sizeof(*bw), GFP_KERNEL);
+ if (!bw)
+ return -ENOMEM;
+ bw->dev = dev;
+
+ bw_irq = platform_get_irq(pdev, 0);
+ if (bw_irq < 0) {
+ pr_err("Unable to get IRQ number\n");
+ return bw_irq;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "qcom,bytes-per-beat",
+ &bytes_per_beat);
+
+ if (ret) {
+ pr_err("Unable to read bytes per beat\n");
+ return ret;
+ }
+
+ bw->start_hwmon = &start_bw_hwmon;
+ bw->stop_hwmon = &stop_bw_hwmon;
+ bw->meas_bw_and_set_irq = &measure_bw_and_set_irq;
+ globalhw = bw;
+
+ ret = register_bw_hwmon(dev, bw);
+ if (ret) {
+ pr_err("CPUBW hwmon registration failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static struct of_device_id match_table[] = {
+ { .compatible = "qcom,armbw-pm" },
+ {}
+};
+
+static struct platform_driver armbw_pm_driver = {
+ .probe = armbw_pm_driver_probe,
+ .driver = {
+ .name = "armbw-pm",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init armbw_pm_init(void)
+{
+	bw_wq = alloc_workqueue("armbw-pm-bwmon", WQ_HIGHPRI, 2);
+	if (!bw_wq)
+		return -ENOMEM;
+	return platform_driver_register(&armbw_pm_driver);
+}
+module_init(armbw_pm_init);
+
+static void __exit armbw_pm_exit(void)
+{
+ platform_driver_unregister(&armbw_pm_driver);
+ destroy_workqueue(bw_wq);
+}
+module_exit(armbw_pm_exit);
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
new file mode 100644
index 000000000000..315d3a67e43e
--- /dev/null
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bimc-bwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include "governor_bw_hwmon.h"
+
+#define GLB_INT_STATUS(m) ((m)->global_base + 0x100)
+#define GLB_INT_CLR(m) ((m)->global_base + 0x108)
+#define GLB_INT_EN(m) ((m)->global_base + 0x10C)
+#define MON_INT_STATUS(m) ((m)->base + 0x100)
+#define MON_INT_CLR(m) ((m)->base + 0x108)
+#define MON_INT_EN(m) ((m)->base + 0x10C)
+#define MON_EN(m) ((m)->base + 0x280)
+#define MON_CLEAR(m) ((m)->base + 0x284)
+#define MON_CNT(m) ((m)->base + 0x288)
+#define MON_THRES(m) ((m)->base + 0x290)
+#define MON_MASK(m) ((m)->base + 0x298)
+#define MON_MATCH(m) ((m)->base + 0x29C)
+
+#define MON2_EN(m) ((m)->base + 0x2A0)
+#define MON2_CLEAR(m) ((m)->base + 0x2A4)
+#define MON2_SW(m) ((m)->base + 0x2A8)
+#define MON2_THRES_HI(m) ((m)->base + 0x2AC)
+#define MON2_THRES_MED(m) ((m)->base + 0x2B0)
+#define MON2_THRES_LO(m) ((m)->base + 0x2B4)
+#define MON2_ZONE_ACTIONS(m) ((m)->base + 0x2B8)
+#define MON2_ZONE_CNT_THRES(m) ((m)->base + 0x2BC)
+#define MON2_BYTE_CNT(m) ((m)->base + 0x2D0)
+#define MON2_WIN_TIMER(m) ((m)->base + 0x2D4)
+#define MON2_ZONE_CNT(m) ((m)->base + 0x2D8)
+#define MON2_ZONE_MAX(m, zone)	((m)->base + 0x2E0 + 0x4 * (zone))
+
+struct bwmon_spec {
+ bool wrap_on_thres;
+ bool overflow;
+ bool throt_adj;
+ bool hw_sampling;
+};
+
+struct bwmon {
+ void __iomem *base;
+ void __iomem *global_base;
+ unsigned int mport;
+ unsigned int irq;
+ const struct bwmon_spec *spec;
+ struct device *dev;
+ struct bw_hwmon hw;
+ u32 hw_timer_hz;
+ u32 throttle_adj;
+ u32 sample_size_ms;
+ u32 intr_status;
+};
+
+#define to_bwmon(ptr) container_of(ptr, struct bwmon, hw)
+#define has_hw_sampling(m) ((m)->spec->hw_sampling)
+
+#define ENABLE_MASK BIT(0)
+#define THROTTLE_MASK 0x1F
+#define THROTTLE_SHIFT 16
+#define INT_ENABLE_V1 0x1
+#define INT_STATUS_MASK 0x03
+#define INT_STATUS_MASK_HWS 0xF0
+
+static DEFINE_SPINLOCK(glb_lock);
+static void mon_enable(struct bwmon *m)
+{
+ if (has_hw_sampling(m))
+ writel_relaxed((ENABLE_MASK | m->throttle_adj), MON2_EN(m));
+ else
+ writel_relaxed((ENABLE_MASK | m->throttle_adj), MON_EN(m));
+}
+
+static void mon_disable(struct bwmon *m)
+{
+ if (has_hw_sampling(m))
+ writel_relaxed(m->throttle_adj, MON2_EN(m));
+ else
+ writel_relaxed(m->throttle_adj, MON_EN(m));
+ /*
+	 * Ensure mon_disable() completes before any following
+	 * mon_irq_clear(). If the IRQ clear happened first and the count
+	 * happened to trigger another interrupt, the IRQ line would be
+	 * left high with no one handling it.
+ */
+ mb();
+}
+
+#define MON_CLEAR_BIT 0x1
+#define MON_CLEAR_ALL_BIT 0x2
+static void mon_clear(struct bwmon *m, bool clear_all)
+{
+ if (!has_hw_sampling(m)) {
+ writel_relaxed(MON_CLEAR_BIT, MON_CLEAR(m));
+ goto out;
+ }
+
+ if (clear_all)
+ writel_relaxed(MON_CLEAR_ALL_BIT, MON2_CLEAR(m));
+ else
+ writel_relaxed(MON_CLEAR_BIT, MON2_CLEAR(m));
+
+ /*
+ * The counter clear and IRQ clear bits are not in the same 4KB
+ * region. So, we need to make sure the counter clear is completed
+ * before we try to clear the IRQ or do any other counter operations.
+ */
+out:
+ mb();
+}
+
+#define SAMPLE_WIN_LIM 0xFFFFF
+static void mon_set_hw_sampling_window(struct bwmon *m, unsigned int sample_ms)
+{
+ u32 rate;
+
+ if (unlikely(sample_ms != m->sample_size_ms)) {
+ rate = mult_frac(sample_ms, m->hw_timer_hz, MSEC_PER_SEC);
+ m->sample_size_ms = sample_ms;
+		if (unlikely(rate > SAMPLE_WIN_LIM)) {
+			pr_warn("Sample window %u larger than hw limit: %u\n",
+				rate, SAMPLE_WIN_LIM);
+			rate = SAMPLE_WIN_LIM;
+		}
+ writel_relaxed(rate, MON2_SW(m));
+ }
+}
+
+static void mon_irq_enable(struct bwmon *m)
+{
+ u32 val;
+
+ spin_lock(&glb_lock);
+ val = readl_relaxed(GLB_INT_EN(m));
+ val |= 1 << m->mport;
+ writel_relaxed(val, GLB_INT_EN(m));
+
+ val = readl_relaxed(MON_INT_EN(m));
+ val |= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_ENABLE_V1;
+ writel_relaxed(val, MON_INT_EN(m));
+ spin_unlock(&glb_lock);
+ /*
+	 * Make sure both the local and global IRQ enables have completed
+	 * to avoid racing with other monitor calls.
+ */
+ mb();
+}
+
+static void mon_irq_disable(struct bwmon *m)
+{
+ u32 val;
+
+ spin_lock(&glb_lock);
+ val = readl_relaxed(GLB_INT_EN(m));
+ val &= ~(1 << m->mport);
+ writel_relaxed(val, GLB_INT_EN(m));
+
+ val = readl_relaxed(MON_INT_EN(m));
+ val &= has_hw_sampling(m) ? ~INT_STATUS_MASK_HWS : ~INT_ENABLE_V1;
+ writel_relaxed(val, MON_INT_EN(m));
+ spin_unlock(&glb_lock);
+ /*
+	 * Make sure both the local and global IRQ disables have completed
+	 * to avoid racing with other monitor calls.
+ */
+ mb();
+}
+
+static unsigned int mon_irq_status(struct bwmon *m)
+{
+ u32 mval;
+
+ mval = readl_relaxed(MON_INT_STATUS(m));
+
+ dev_dbg(m->dev, "IRQ status p:%x, g:%x\n", mval,
+ readl_relaxed(GLB_INT_STATUS(m)));
+
+ mval &= has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
+
+ return mval;
+}
+
+static void mon_irq_clear(struct bwmon *m)
+{
+ u32 intclr;
+
+ intclr = has_hw_sampling(m) ? INT_STATUS_MASK_HWS : INT_STATUS_MASK;
+
+ writel_relaxed(intclr, MON_INT_CLR(m));
+ mb();
+ writel_relaxed(1 << m->mport, GLB_INT_CLR(m));
+ mb();
+}
+
+static int mon_set_throttle_adj(struct bw_hwmon *hw, uint adj)
+{
+ struct bwmon *m = to_bwmon(hw);
+
+ if (adj > THROTTLE_MASK)
+ return -EINVAL;
+
+ adj = (adj & THROTTLE_MASK) << THROTTLE_SHIFT;
+ m->throttle_adj = adj;
+
+ return 0;
+}
+
+static u32 mon_get_throttle_adj(struct bw_hwmon *hw)
+{
+ struct bwmon *m = to_bwmon(hw);
+
+ return m->throttle_adj >> THROTTLE_SHIFT;
+}
+
+#define ZONE1_SHIFT 8
+#define ZONE2_SHIFT 16
+#define ZONE3_SHIFT 24
+#define ZONE0_ACTION 0x01 /* Increment zone 0 count */
+#define ZONE1_ACTION 0x09 /* Increment zone 1 & clear lower zones */
+#define ZONE2_ACTION 0x25 /* Increment zone 2 & clear lower zones */
+#define ZONE3_ACTION 0x95 /* Increment zone 3 & clear lower zones */
+static u32 calc_zone_actions(void)
+{
+ u32 zone_actions;
+
+ zone_actions = ZONE0_ACTION;
+ zone_actions |= ZONE1_ACTION << ZONE1_SHIFT;
+ zone_actions |= ZONE2_ACTION << ZONE2_SHIFT;
+ zone_actions |= ZONE3_ACTION << ZONE3_SHIFT;
+
+ return zone_actions;
+}
+
+#define ZONE_CNT_LIM 0xFFU
+#define UP_CNT_1 1
+static u32 calc_zone_counts(struct bw_hwmon *hw)
+{
+ u32 zone_counts;
+
+ zone_counts = ZONE_CNT_LIM;
+ zone_counts |= min(hw->down_cnt, ZONE_CNT_LIM) << ZONE1_SHIFT;
+ zone_counts |= ZONE_CNT_LIM << ZONE2_SHIFT;
+ zone_counts |= UP_CNT_1 << ZONE3_SHIFT;
+
+ return zone_counts;
+}
+
+static unsigned int mbps_to_mb(unsigned long mbps, unsigned int ms)
+{
+ mbps *= ms;
+ mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+ return mbps;
+}
+
+/*
+ * Define the 4 zones using HI, MED & LO thresholds:
+ * Zone 0: byte count < THRES_LO
+ * Zone 1: THRES_LO < byte count < THRES_MED
+ * Zone 2: THRES_MED < byte count < THRES_HI
+ * Zone 3: byte count > THRES_HI
+ */
+#define THRES_LIM 0x7FFU
+static void set_zone_thres(struct bwmon *m, unsigned int sample_ms)
+{
+ struct bw_hwmon *hw = &(m->hw);
+ u32 hi, med, lo;
+
+ hi = mbps_to_mb(hw->up_wake_mbps, sample_ms);
+ med = mbps_to_mb(hw->down_wake_mbps, sample_ms);
+ lo = 0;
+
+ if (unlikely((hi > THRES_LIM) || (med > hi) || (lo > med))) {
+ pr_warn("Zone thres larger than hw limit: hi:%u med:%u lo:%u\n",
+ hi, med, lo);
+ hi = min(hi, THRES_LIM);
+ med = min(med, hi - 1);
+		lo = min(lo, med - 1);
+ }
+
+ writel_relaxed(hi, MON2_THRES_HI(m));
+ writel_relaxed(med, MON2_THRES_MED(m));
+ writel_relaxed(lo, MON2_THRES_LO(m));
+ dev_dbg(m->dev, "Thres: hi:%u med:%u lo:%u\n", hi, med, lo);
+}
+
+static void mon_set_zones(struct bwmon *m, unsigned int sample_ms)
+{
+ struct bw_hwmon *hw = &(m->hw);
+ u32 zone_cnt_thres = calc_zone_counts(hw);
+
+ mon_set_hw_sampling_window(m, sample_ms);
+ set_zone_thres(m, sample_ms);
+ /* Set the zone count thresholds for interrupts */
+ writel_relaxed(zone_cnt_thres, MON2_ZONE_CNT_THRES(m));
+
+ dev_dbg(m->dev, "Zone Count Thres: %0x\n", zone_cnt_thres);
+}
+
+static void mon_set_limit(struct bwmon *m, u32 count)
+{
+ writel_relaxed(count, MON_THRES(m));
+ dev_dbg(m->dev, "Thres: %08x\n", count);
+}
+
+static u32 mon_get_limit(struct bwmon *m)
+{
+ return readl_relaxed(MON_THRES(m));
+}
+
+#define THRES_HIT(status) (status & BIT(0))
+#define OVERFLOW(status) (status & BIT(1))
+static unsigned long mon_get_count(struct bwmon *m)
+{
+ unsigned long count, status;
+
+ count = readl_relaxed(MON_CNT(m));
+ status = mon_irq_status(m);
+
+ dev_dbg(m->dev, "Counter: %08lx\n", count);
+
+ if (OVERFLOW(status) && m->spec->overflow)
+ count += 0xFFFFFFFF;
+ if (THRES_HIT(status) && m->spec->wrap_on_thres)
+ count += mon_get_limit(m);
+
+ dev_dbg(m->dev, "Actual Count: %08lx\n", count);
+
+ return count;
+}
+
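+/*
+ * Find the highest zone that fired: prefer the latched IRQ status
+ * bits, fall back to scanning the zone count register.
+ */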
+static unsigned int get_zone(struct bwmon *m)
+{
+ u32 zone_counts;
+ u32 zone;
+
+ zone = get_bitmask_order((m->intr_status & INT_STATUS_MASK_HWS) >> 4);
+ if (zone) {
+ zone--;
+ } else {
+ zone_counts = readl_relaxed(MON2_ZONE_CNT(m));
+ if (zone_counts) {
+ zone = get_bitmask_order(zone_counts) - 1;
+ zone /= 8;
+ }
+ }
+
+ m->intr_status = 0;
+ return zone;
+}
+
+static unsigned long mon_get_zone_stats(struct bwmon *m)
+{
+ unsigned int zone;
+ unsigned long count = 0;
+
+ zone = get_zone(m);
+
+ count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
+ count *= SZ_1M;
+
+ dev_dbg(m->dev, "Zone%d Max byte count: %08lx\n", zone, count);
+
+ return count;
+}
+
+/* ********** CPUBW specific code ********** */
+
+/* Convert an MBps rate over a "ms" window, plus tolerance, into bytes. */
+static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms,
+ unsigned int tolerance_percent)
+{
+ mbps *= (100 + tolerance_percent) * ms;
+ mbps /= 100;
+ mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+ mbps *= SZ_1M;
+ return mbps;
+}
+
+static unsigned long get_bytes_and_clear(struct bw_hwmon *hw)
+{
+ struct bwmon *m = to_bwmon(hw);
+ unsigned long count;
+
+ mon_disable(m);
+ count = has_hw_sampling(m) ? mon_get_zone_stats(m) : mon_get_count(m);
+ mon_clear(m, false);
+ mon_irq_clear(m);
+ mon_enable(m);
+
+ return count;
+}
+
+static unsigned long set_thres(struct bw_hwmon *hw, unsigned long bytes)
+{
+ unsigned long count;
+ u32 limit;
+ struct bwmon *m = to_bwmon(hw);
+
+ mon_disable(m);
+ count = mon_get_count(m);
+ mon_clear(m, false);
+ mon_irq_clear(m);
+
+ if (likely(!m->spec->wrap_on_thres))
+ limit = bytes;
+ else
+ limit = max(bytes, 500000UL);
+
+ mon_set_limit(m, limit);
+ mon_enable(m);
+
+ return count;
+}
+
+static unsigned long set_hw_events(struct bw_hwmon *hw, unsigned sample_ms)
+{
+ struct bwmon *m = to_bwmon(hw);
+
+ mon_disable(m);
+ mon_clear(m, false);
+ mon_irq_clear(m);
+
+ mon_set_zones(m, sample_ms);
+ mon_enable(m);
+
+ return 0;
+}
+
+static irqreturn_t bwmon_intr_handler(int irq, void *dev)
+{
+ struct bwmon *m = dev;
+
+ m->intr_status = mon_irq_status(m);
+ if (!m->intr_status)
+ return IRQ_NONE;
+
+ if (bw_hwmon_sample_end(&m->hw) > 0)
+ return IRQ_WAKE_THREAD;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bwmon_intr_thread(int irq, void *dev)
+{
+ struct bwmon *m = dev;
+
+ update_bw_hwmon(&m->hw);
+ return IRQ_HANDLED;
+}
+
+static int start_bw_hwmon(struct bw_hwmon *hw, unsigned long mbps)
+{
+ struct bwmon *m = to_bwmon(hw);
+ u32 limit;
+ u32 zone_actions = calc_zone_actions();
+ int ret;
+
+ ret = request_threaded_irq(m->irq, bwmon_intr_handler,
+ bwmon_intr_thread,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(m->dev), m);
+ if (ret) {
+ dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+ ret);
+ return ret;
+ }
+
+ mon_disable(m);
+
+ mon_clear(m, true);
+ limit = mbps_to_bytes(mbps, hw->df->profile->polling_ms, 0);
+ if (has_hw_sampling(m)) {
+ mon_set_zones(m, hw->df->profile->polling_ms);
+ /* Set the zone actions to increment appropriate counters */
+ writel_relaxed(zone_actions, MON2_ZONE_ACTIONS(m));
+ } else {
+ mon_set_limit(m, limit);
+ }
+
+ mon_irq_clear(m);
+ mon_irq_enable(m);
+ mon_enable(m);
+
+ return 0;
+}
+
+static void stop_bw_hwmon(struct bw_hwmon *hw)
+{
+ struct bwmon *m = to_bwmon(hw);
+
+ mon_irq_disable(m);
+ free_irq(m->irq, m);
+ mon_disable(m);
+ mon_clear(m, true);
+ mon_irq_clear(m);
+}
+
+static int suspend_bw_hwmon(struct bw_hwmon *hw)
+{
+ struct bwmon *m = to_bwmon(hw);
+
+ mon_irq_disable(m);
+ free_irq(m->irq, m);
+ mon_disable(m);
+ mon_irq_clear(m);
+
+ return 0;
+}
+
+static int resume_bw_hwmon(struct bw_hwmon *hw)
+{
+ struct bwmon *m = to_bwmon(hw);
+ int ret;
+
+ mon_clear(m, false);
+ ret = request_threaded_irq(m->irq, bwmon_intr_handler,
+ bwmon_intr_thread,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(m->dev), m);
+ if (ret) {
+ dev_err(m->dev, "Unable to register interrupt handler! (%d)\n",
+ ret);
+ return ret;
+ }
+
+ mon_irq_enable(m);
+ mon_enable(m);
+
+ return 0;
+}
+
+/*************************************************************************/
+
+static const struct bwmon_spec spec[] = {
+ { .wrap_on_thres = true, .overflow = false, .throt_adj = false,
+ .hw_sampling = false},
+ { .wrap_on_thres = false, .overflow = true, .throt_adj = false,
+ .hw_sampling = false},
+ { .wrap_on_thres = false, .overflow = true, .throt_adj = true,
+ .hw_sampling = false},
+ { .wrap_on_thres = false, .overflow = true, .throt_adj = true,
+ .hw_sampling = true},
+};
+
+static struct of_device_id match_table[] = {
+ { .compatible = "qcom,bimc-bwmon", .data = &spec[0] },
+ { .compatible = "qcom,bimc-bwmon2", .data = &spec[1] },
+ { .compatible = "qcom,bimc-bwmon3", .data = &spec[2] },
+ { .compatible = "qcom,bimc-bwmon4", .data = &spec[3] },
+ {}
+};
+
+static int bimc_bwmon_driver_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct bwmon *m;
+ const struct of_device_id *id;
+ int ret;
+ u32 data;
+
+ m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+ m->dev = dev;
+
+ ret = of_property_read_u32(dev->of_node, "qcom,mport", &data);
+ if (ret) {
+ dev_err(dev, "mport not found!\n");
+ return ret;
+ }
+ m->mport = data;
+
+ id = of_match_device(match_table, dev);
+ if (!id) {
+ dev_err(dev, "Unknown device type!\n");
+ return -ENODEV;
+ }
+ m->spec = id->data;
+
+ if (has_hw_sampling(m)) {
+ ret = of_property_read_u32(dev->of_node,
+ "qcom,hw-timer-hz", &data);
+ if (ret) {
+ dev_err(dev, "HW sampling rate not specified!\n");
+ return ret;
+ }
+ m->hw_timer_hz = data;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+ if (!res) {
+ dev_err(dev, "base not found!\n");
+ return -EINVAL;
+ }
+ m->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!m->base) {
+		dev_err(dev, "Unable to map base!\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global_base");
+ if (!res) {
+ dev_err(dev, "global_base not found!\n");
+ return -EINVAL;
+ }
+ m->global_base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!m->global_base) {
+		dev_err(dev, "Unable to map global_base!\n");
+ return -ENOMEM;
+ }
+
+ m->irq = platform_get_irq(pdev, 0);
+ if (m->irq < 0) {
+ dev_err(dev, "Unable to get IRQ number\n");
+ return m->irq;
+ }
+
+ m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+ if (!m->hw.of_node)
+ return -EINVAL;
+ m->hw.start_hwmon = &start_bw_hwmon;
+ m->hw.stop_hwmon = &stop_bw_hwmon;
+ m->hw.suspend_hwmon = &suspend_bw_hwmon;
+ m->hw.resume_hwmon = &resume_bw_hwmon;
+ m->hw.get_bytes_and_clear = &get_bytes_and_clear;
+ m->hw.set_thres = &set_thres;
+ if (has_hw_sampling(m))
+ m->hw.set_hw_events = &set_hw_events;
+ if (m->spec->throt_adj) {
+ m->hw.set_throttle_adj = &mon_set_throttle_adj;
+ m->hw.get_throttle_adj = &mon_get_throttle_adj;
+ }
+
+ ret = register_bw_hwmon(dev, &m->hw);
+ if (ret) {
+ dev_err(dev, "Dev BW hwmon registration failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver bimc_bwmon_driver = {
+ .probe = bimc_bwmon_driver_probe,
+ .driver = {
+ .name = "bimc-bwmon",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init bimc_bwmon_init(void)
+{
+ return platform_driver_register(&bimc_bwmon_driver);
+}
+module_init(bimc_bwmon_init);
+
+static void __exit bimc_bwmon_exit(void)
+{
+ platform_driver_unregister(&bimc_bwmon_driver);
+}
+module_exit(bimc_bwmon_exit);
+
+MODULE_DESCRIPTION("BIMC bandwidth monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index ca848cc6a8fd..844a8ad666a9 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -69,11 +69,34 @@ static struct devfreq *find_device_devfreq(struct device *dev)
}
/**
+ * devfreq_set_freq_limits() - Set min and max frequency from freq_table
+ * @devfreq: the devfreq instance
+ */
+static void devfreq_set_freq_limits(struct devfreq *devfreq)
+{
+ int idx;
+ unsigned long min = ~0, max = 0;
+
+ if (!devfreq->profile->freq_table)
+ return;
+
+ for (idx = 0; idx < devfreq->profile->max_state; idx++) {
+ if (min > devfreq->profile->freq_table[idx])
+ min = devfreq->profile->freq_table[idx];
+ if (max < devfreq->profile->freq_table[idx])
+ max = devfreq->profile->freq_table[idx];
+ }
+
+ devfreq->min_freq = min;
+ devfreq->max_freq = max;
+}
+
+/**
* devfreq_get_freq_level() - Lookup freq_table for the frequency
* @devfreq: the devfreq instance
* @freq: the target frequency
*/
-static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
+int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
{
int lev;
@@ -83,6 +106,7 @@ static int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq)
return -EINVAL;
}
+EXPORT_SYMBOL(devfreq_get_freq_level);
/**
* devfreq_update_status() - Update statistics of devfreq behavior
@@ -172,7 +196,7 @@ int update_devfreq(struct devfreq *devfreq)
return -EINVAL;
/* Reevaluate the proper frequency */
- err = devfreq->governor->get_target_freq(devfreq, &freq);
+ err = devfreq->governor->get_target_freq(devfreq, &freq, &flags);
if (err)
return err;
@@ -486,6 +510,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->profile->max_state,
GFP_KERNEL);
devfreq->last_stat_updated = jiffies;
+ devfreq_set_freq_limits(devfreq);
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
@@ -536,7 +561,6 @@ int devfreq_remove_device(struct devfreq *devfreq)
return -EINVAL;
device_unregister(&devfreq->dev);
- put_device(&devfreq->dev);
return 0;
}
@@ -782,7 +806,7 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
struct devfreq *df = to_devfreq(dev);
int ret;
char str_governor[DEVFREQ_NAME_LEN + 1];
- struct devfreq_governor *governor;
+ const struct devfreq_governor *governor, *prev_gov;
ret = sscanf(buf, "%" __stringify(DEVFREQ_NAME_LEN) "s", str_governor);
if (ret != 1)
@@ -807,12 +831,21 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
goto out;
}
}
+ prev_gov = df->governor;
df->governor = governor;
strncpy(df->governor_name, governor->name, DEVFREQ_NAME_LEN);
ret = df->governor->event_handler(df, DEVFREQ_GOV_START, NULL);
- if (ret)
+ if (ret) {
dev_warn(dev, "%s: Governor %s not started(%d)\n",
__func__, df->governor->name, ret);
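+		/* Roll back to the previous governor if the new one fails to start. */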
+ if (prev_gov) {
+ df->governor = prev_gov;
+ strncpy(df->governor_name, prev_gov->name,
+ DEVFREQ_NAME_LEN);
+ df->governor->event_handler(df, DEVFREQ_GOV_START,
+ NULL);
+ }
+ }
out:
mutex_unlock(&devfreq_list_lock);
@@ -969,19 +1002,26 @@ static ssize_t available_frequencies_show(struct device *d,
struct devfreq *df = to_devfreq(d);
struct device *dev = df->dev.parent;
struct dev_pm_opp *opp;
+ unsigned int i = 0, max_state = df->profile->max_state;
+ bool use_opp;
ssize_t count = 0;
unsigned long freq = 0;
rcu_read_lock();
- do {
- opp = dev_pm_opp_find_freq_ceil(dev, &freq);
- if (IS_ERR(opp))
- break;
+ use_opp = dev_pm_opp_get_opp_count(dev) > 0;
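+	/* Prefer OPPs when present; otherwise walk the profile's freq_table. */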
+	while (use_opp || i < max_state) {
+ if (use_opp) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+ } else {
+ freq = df->profile->freq_table[i++];
+ }
count += scnprintf(&buf[count], (PAGE_SIZE - count - 2),
"%lu ", freq);
freq++;
- } while (1);
+ }
rcu_read_unlock();
/* Truncate the trailing space */
diff --git a/drivers/devfreq/devfreq_devbw.c b/drivers/devfreq/devfreq_devbw.c
new file mode 100644
index 000000000000..8a836d92e542
--- /dev/null
+++ b/drivers/devfreq/devfreq_devbw.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devbw: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <trace/events/power.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+
+/* Has to be ULL to prevent overflow where this macro is used. */
+#define MBYTE (1ULL << 20)
+#define MAX_PATHS 2
+#define DBL_BUF 2
+
+struct dev_data {
+ struct msm_bus_vectors vectors[MAX_PATHS * DBL_BUF];
+ struct msm_bus_paths bw_levels[DBL_BUF];
+ struct msm_bus_scale_pdata bw_data;
+ int num_paths;
+ u32 bus_client;
+ int cur_idx;
+ int cur_ab;
+ int cur_ib;
+ long gov_ab;
+ unsigned int ab_percent;
+ struct devfreq *df;
+ struct devfreq_dev_profile dp;
+};
+
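+/*
+ * Apply a new IB/AB vote by filling the inactive half of the
+ * double-buffered vector set and switching the bus client to it.
+ */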
+static int set_bw(struct device *dev, int new_ib, int new_ab)
+{
+ struct dev_data *d = dev_get_drvdata(dev);
+ int i, ret;
+
+ if (d->cur_ib == new_ib && d->cur_ab == new_ab)
+ return 0;
+
+ i = (d->cur_idx + 1) % DBL_BUF;
+
+ d->bw_levels[i].vectors[0].ib = new_ib * MBYTE;
+ d->bw_levels[i].vectors[0].ab = new_ab / d->num_paths * MBYTE;
+ d->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
+ d->bw_levels[i].vectors[1].ab = new_ab / d->num_paths * MBYTE;
+
+ dev_dbg(dev, "BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
+
+ ret = msm_bus_scale_client_update_request(d->bus_client, i);
+ if (ret) {
+ dev_err(dev, "bandwidth request failed (%d)\n", ret);
+ } else {
+ d->cur_idx = i;
+ d->cur_ib = new_ib;
+ d->cur_ab = new_ab;
+ }
+
+ return ret;
+}
+
+static unsigned int find_ab(struct dev_data *d, unsigned long *freq)
+{
+ return (d->ab_percent * (*freq)) / 100;
+}
+
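+/*
+ * Clamp *freq to the nearest supported table entry: the highest entry
+ * at or below *freq when DEVFREQ_FLAG_LEAST_UPPER_BOUND is set,
+ * otherwise the lowest entry at or above it.
+ */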
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+ u32 flags)
+{
+ int i;
+ unsigned long atmost, atleast, f;
+
+ atmost = p->freq_table[0];
+ atleast = p->freq_table[p->max_state-1];
+ for (i = 0; i < p->max_state; i++) {
+ f = p->freq_table[i];
+ if (f <= *freq)
+ atmost = max(f, atmost);
+ if (f >= *freq)
+ atleast = min(f, atleast);
+ }
+
+ if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+ *freq = atmost;
+ else
+ *freq = atleast;
+}
+
+static int devbw_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+ struct dev_data *d = dev_get_drvdata(dev);
+
+ find_freq(&d->dp, freq, flags);
+
+ if (!d->gov_ab)
+ return set_bw(dev, *freq, find_ab(d, freq));
+ else
+ return set_bw(dev, *freq, d->gov_ab);
+}
+
+static int devbw_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct dev_data *d = dev_get_drvdata(dev);
+
+ stat->private_data = &d->gov_ab;
+ return 0;
+}
+
+#define PROP_PORTS "qcom,src-dst-ports"
+#define PROP_TBL "qcom,bw-tbl"
+#define PROP_AB_PER "qcom,ab-percent"
+#define PROP_ACTIVE "qcom,active-only"
+
+int devfreq_add_devbw(struct device *dev)
+{
+ struct dev_data *d;
+ struct devfreq_dev_profile *p;
+ u32 *data, ports[MAX_PATHS * 2];
+ const char *gov_name;
+ int ret, len, i, num_paths;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ dev_set_drvdata(dev, d);
+
+ if (of_find_property(dev->of_node, PROP_PORTS, &len)) {
+ len /= sizeof(ports[0]);
+ if (len % 2 || len > ARRAY_SIZE(ports)) {
+ dev_err(dev, "Unexpected number of ports\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_array(dev->of_node, PROP_PORTS,
+ ports, len);
+ if (ret)
+ return ret;
+
+ num_paths = len / 2;
+ } else {
+ return -EINVAL;
+ }
+
+ d->bw_levels[0].vectors = &d->vectors[0];
+ d->bw_levels[1].vectors = &d->vectors[MAX_PATHS];
+ d->bw_data.usecase = d->bw_levels;
+ d->bw_data.num_usecases = ARRAY_SIZE(d->bw_levels);
+ d->bw_data.name = dev_name(dev);
+ d->bw_data.active_only = of_property_read_bool(dev->of_node,
+ PROP_ACTIVE);
+
+ for (i = 0; i < num_paths; i++) {
+ d->bw_levels[0].vectors[i].src = ports[2 * i];
+ d->bw_levels[0].vectors[i].dst = ports[2 * i + 1];
+ d->bw_levels[1].vectors[i].src = ports[2 * i];
+ d->bw_levels[1].vectors[i].dst = ports[2 * i + 1];
+ }
+ d->bw_levels[0].num_paths = num_paths;
+ d->bw_levels[1].num_paths = num_paths;
+ d->num_paths = num_paths;
+
+ p = &d->dp;
+ p->polling_ms = 50;
+ p->target = devbw_target;
+ p->get_dev_status = devbw_get_dev_status;
+
+ if (of_find_property(dev->of_node, PROP_TBL, &len)) {
+ len /= sizeof(*data);
+ data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ p->freq_table = devm_kzalloc(dev,
+ len * sizeof(*p->freq_table),
+ GFP_KERNEL);
+ if (!p->freq_table)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(dev->of_node, PROP_TBL,
+ data, len);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < len; i++)
+ p->freq_table[i] = data[i];
+ p->max_state = len;
+ }
+
+ if (of_find_property(dev->of_node, PROP_AB_PER, &len)) {
+ ret = of_property_read_u32(dev->of_node, PROP_AB_PER,
+ &d->ab_percent);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "ab-percent used %u\n", d->ab_percent);
+ }
+
+ d->bus_client = msm_bus_scale_register_client(&d->bw_data);
+ if (!d->bus_client) {
+ dev_err(dev, "Unable to register bus client\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_string(dev->of_node, "governor", &gov_name))
+ gov_name = "performance";
+
+ d->df = devfreq_add_device(dev, p, gov_name, NULL);
+ if (IS_ERR(d->df)) {
+ msm_bus_scale_unregister_client(d->bus_client);
+ return PTR_ERR(d->df);
+ }
+
+ return 0;
+}
+
+int devfreq_remove_devbw(struct device *dev)
+{
+ struct dev_data *d = dev_get_drvdata(dev);
+ msm_bus_scale_unregister_client(d->bus_client);
+ devfreq_remove_device(d->df);
+ return 0;
+}
+
+int devfreq_suspend_devbw(struct device *dev)
+{
+ struct dev_data *d = dev_get_drvdata(dev);
+ return devfreq_suspend_device(d->df);
+}
+
+int devfreq_resume_devbw(struct device *dev)
+{
+ struct dev_data *d = dev_get_drvdata(dev);
+ return devfreq_resume_device(d->df);
+}
+
+static int devfreq_devbw_probe(struct platform_device *pdev)
+{
+ return devfreq_add_devbw(&pdev->dev);
+}
+
+static int devfreq_devbw_remove(struct platform_device *pdev)
+{
+ return devfreq_remove_devbw(&pdev->dev);
+}
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "qcom,devbw" },
+ {}
+};
+
+static struct platform_driver devbw_driver = {
+ .probe = devfreq_devbw_probe,
+ .remove = devfreq_devbw_remove,
+ .driver = {
+ .name = "devbw",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init devbw_init(void)
+{
+	return platform_driver_register(&devbw_driver);
+}
+device_initcall(devbw_init);
+
+MODULE_DESCRIPTION("Device DDR bandwidth voting driver for MSM SoCs");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_simple_dev.c b/drivers/devfreq/devfreq_simple_dev.c
new file mode 100644
index 000000000000..ba562bdcd787
--- /dev/null
+++ b/drivers/devfreq/devfreq_simple_dev.c
@@ -0,0 +1,213 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "devfreq-simple-dev: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/devfreq.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <trace/events/power.h>
+
+struct dev_data {
+ struct clk *clk;
+ struct devfreq *df;
+ struct devfreq_dev_profile profile;
+};
+
+static void find_freq(struct devfreq_dev_profile *p, unsigned long *freq,
+ u32 flags)
+{
+ int i;
+ unsigned long atmost, atleast, f;
+
+ atmost = p->freq_table[0];
+ atleast = p->freq_table[p->max_state-1];
+ for (i = 0; i < p->max_state; i++) {
+ f = p->freq_table[i];
+ if (f <= *freq)
+ atmost = max(f, atmost);
+ if (f >= *freq)
+ atleast = min(f, atleast);
+ }
+
+ if (flags & DEVFREQ_FLAG_LEAST_UPPER_BOUND)
+ *freq = atmost;
+ else
+ *freq = atleast;
+}
+
+static int dev_target(struct device *dev, unsigned long *freq, u32 flags)
+{
+ struct dev_data *d = dev_get_drvdata(dev);
+ unsigned long rfreq;
+
+ find_freq(&d->profile, freq, flags);
+
+ rfreq = clk_round_rate(d->clk, *freq * 1000);
+ if (IS_ERR_VALUE(rfreq)) {
+ dev_err(dev, "devfreq: Cannot find matching frequency for %lu\n",
+ *freq);
+ return rfreq;
+ }
+
+ return clk_set_rate(d->clk, rfreq);
+}
+
+static int dev_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct dev_data *d = dev_get_drvdata(dev);
+ unsigned long f;
+
+ f = clk_get_rate(d->clk);
+ if (IS_ERR_VALUE(f))
+ return f;
+ *freq = f / 1000;
+ return 0;
+}
+
+#define PROP_TBL "freq-tbl-khz"
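+
+/*
+ * A minimal (hypothetical) devicetree node for this driver; the clock
+ * phandle and frequency values are illustrative only:
+ *
+ *	example_dev: qcom,example-dev {
+ *		compatible = "devfreq-simple-dev";
+ *		clock-names = "devfreq_clk";
+ *		clocks = <&clock_gcc EXAMPLE_CLK>;
+ *		freq-tbl-khz = <100000 200000 400000>;
+ *		polling-ms = <50>;
+ *		governor = "performance";
+ *	};
+ */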
+static int devfreq_clock_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct dev_data *d;
+ struct devfreq_dev_profile *p;
+ u32 *data, poll;
+ const char *gov_name;
+ int ret, len, i, j;
+ unsigned long f;
+
+ d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, d);
+
+ d->clk = devm_clk_get(dev, "devfreq_clk");
+ if (IS_ERR(d->clk))
+ return PTR_ERR(d->clk);
+
+ if (!of_find_property(dev->of_node, PROP_TBL, &len))
+ return -EINVAL;
+
+ len /= sizeof(*data);
+ data = devm_kzalloc(dev, len * sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ p = &d->profile;
+ p->freq_table = devm_kzalloc(dev, len * sizeof(*p->freq_table),
+ GFP_KERNEL);
+ if (!p->freq_table)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(dev->of_node, PROP_TBL, data, len);
+ if (ret)
+ return ret;
+
+ j = 0;
+ for (i = 0; i < len; i++) {
+ f = clk_round_rate(d->clk, data[i] * 1000);
+ if (IS_ERR_VALUE(f))
+			dev_warn(dev, "Unable to find dev rate for %d kHz\n",
+ data[i]);
+ else
+ p->freq_table[j++] = f / 1000;
+ }
+ p->max_state = j;
+ devm_kfree(dev, data);
+
+ if (p->max_state == 0) {
+ dev_err(dev, "Error parsing property %s!\n", PROP_TBL);
+ return -EINVAL;
+ }
+
+ p->target = dev_target;
+ p->get_cur_freq = dev_get_cur_freq;
+ ret = dev_get_cur_freq(dev, &p->initial_freq);
+ if (ret)
+ return ret;
+
+ p->polling_ms = 50;
+ if (!of_property_read_u32(dev->of_node, "polling-ms", &poll))
+ p->polling_ms = poll;
+
+ if (of_property_read_string(dev->of_node, "governor", &gov_name))
+ gov_name = "performance";
+
+ if (of_property_read_bool(dev->of_node, "qcom,prepare-clk")) {
+ ret = clk_prepare(d->clk);
+ if (ret)
+ return ret;
+ }
+
+ d->df = devfreq_add_device(dev, p, gov_name, NULL);
+ if (IS_ERR(d->df)) {
+ ret = PTR_ERR(d->df);
+ goto add_err;
+ }
+
+ return 0;
+add_err:
+ if (of_property_read_bool(dev->of_node, "qcom,prepare-clk"))
+ clk_unprepare(d->clk);
+ return ret;
+}
+
+static int devfreq_clock_remove(struct platform_device *pdev)
+{
+ struct dev_data *d = platform_get_drvdata(pdev);
+ devfreq_remove_device(d->df);
+ return 0;
+}
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "devfreq-simple-dev" },
+ {}
+};
+
+static struct platform_driver devfreq_clock_driver = {
+ .probe = devfreq_clock_probe,
+ .remove = devfreq_clock_remove,
+ .driver = {
+ .name = "devfreq-simple-dev",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init devfreq_clock_init(void)
+{
+	return platform_driver_register(&devfreq_clock_driver);
+}
+device_initcall(devfreq_clock_init);
+
+static void __exit devfreq_clock_exit(void)
+{
+ platform_driver_unregister(&devfreq_clock_driver);
+}
+module_exit(devfreq_clock_exit);
+
+MODULE_DESCRIPTION("Devfreq driver for setting generic device clock frequency");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_spdm.c b/drivers/devfreq/devfreq_spdm.c
new file mode 100644
index 000000000000..8e35570e0443
--- /dev/null
+++ b/drivers/devfreq/devfreq_spdm.c
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/devfreq.h>
+#include <linux/init.h>
+#include <linux/ipc_logging.h>
+#include <linux/gfp.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/msm-bus.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "governor.h"
+#include "devfreq_spdm.h"
+
+static void *spdm_ipc_log_ctxt;
+#define DEVFREQ_SPDM_DEFAULT_WINDOW_MS 100
+#define SPDM_IPC_LOG_PAGES 5
+
+#define SPDM_IPC_LOG(x...) do { \
+ pr_debug(x); \
+ if (spdm_ipc_log_ctxt) \
+ ipc_log_string(spdm_ipc_log_ctxt, x); \
+} while (0)
+
+#define COPY_SIZE(x, y) ((x) <= (y) ? (x) : (y))
+
+static int change_bw(struct device *dev, unsigned long *freq, u32 flags)
+{
+	struct spdm_data *data = NULL;
+ int i;
+ int next_idx;
+ int ret = 0;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (!dev || !freq)
+ return -EINVAL;
+
+ data = dev_get_drvdata(dev);
+ if (!data)
+ return -EINVAL;
+
+ if (data->devfreq->previous_freq == *freq)
+ goto update_thresholds;
+
+ next_idx = data->cur_idx + 1;
+ next_idx = next_idx % 2;
+
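+	/*
+	 * The two bus-scale usecases act as a double buffer: fill in the
+	 * inactive one, then switch to it. The shift by 6 (a factor of 64)
+	 * converts the devfreq "frequency" (a bandwidth figure) into the
+	 * units used for the AB vote; get_cur_bw() performs the inverse.
+	 */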
+ for (i = 0; i < data->pdata->usecase[next_idx].num_paths; i++)
+ data->pdata->usecase[next_idx].vectors[i].ab = (*freq) << 6;
+
+ data->cur_idx = next_idx;
+ ret = msm_bus_scale_client_update_request(data->bus_scale_client_id,
+ data->cur_idx);
+
+update_thresholds:
+ desc.arg[0] = SPDM_CMD_ENABLE;
+ desc.arg[1] = data->spdm_client;
+
+ if (data->cci_clk)
+ desc.arg[2] = (clk_get_rate(data->cci_clk)) / 1000;
+ else
+ desc.arg[2] = 0;
+
+ ext_status = spdm_ext_call(&desc, 3);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ return ret;
+}
+
+static int get_cur_bw(struct device *dev, unsigned long *freq)
+{
+	struct spdm_data *data = NULL;
+
+ if (!dev || !freq)
+ return -EINVAL;
+
+ data = dev_get_drvdata(dev);
+ if (!data)
+ return -EINVAL;
+
+ *freq = data->pdata->usecase[data->cur_idx].vectors[0].ab >> 6;
+
+ return 0;
+}
+
+static int get_dev_status(struct device *dev, struct devfreq_dev_status *status)
+{
+	struct spdm_data *data = NULL;
+ int ret;
+
+ if (!dev || !status)
+ return -EINVAL;
+
+ data = dev_get_drvdata(dev);
+ if (!data)
+ return -EINVAL;
+
+ /* determine if we want to go up or down based on the notification */
+ if (data->action == SPDM_UP)
+ status->busy_time = 255;
+ else
+ status->busy_time = 0;
+ status->total_time = 255;
+ ret = get_cur_bw(dev, &status->current_frequency);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
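+/*
+ * populate_config_data() expects a devicetree node along these lines
+ * (the values are hypothetical; see the property reads below for the
+ * full list of required properties):
+ *
+ *	qcom,devfreq_spdm {
+ *		compatible = "qcom,devfreq_spdm";
+ *		qcom,max-vote = <10000>;
+ *		qcom,bw-upstep = <1000>;
+ *		qcom,bw-dwnstep = <1000>;
+ *		qcom,alpha-up = <10>;
+ *		qcom,alpha-down = <15>;
+ *		qcom,bucket-size = <8>;
+ *		qcom,pl-freqs = <149999999 1200000000>;
+ *		...
+ *	};
+ */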
+static int populate_config_data(struct spdm_data *data,
+ struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ struct device_node *node = pdev->dev.of_node;
+	struct property *prop = NULL;
+
+ ret = of_property_read_u32(node, "qcom,max-vote",
+ &data->config_data.max_vote);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(node, "qcom,bw-upstep",
+ &data->config_data.upstep);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(node, "qcom,bw-dwnstep",
+ &data->config_data.downstep);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(node, "qcom,alpha-up",
+ &data->config_data.aup);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(node, "qcom,alpha-down",
+ &data->config_data.adown);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(node, "qcom,bucket-size",
+ &data->config_data.bucket_size);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(node, "qcom,pl-freqs",
+ data->config_data.pl_freqs,
+ SPDM_PL_COUNT - 1);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(node, "qcom,reject-rate",
+ data->config_data.reject_rate,
+ SPDM_PL_COUNT * 2);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(node, "qcom,response-time-us",
+ data->config_data.response_time_us,
+ SPDM_PL_COUNT * 2);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32_array(node, "qcom,cci-response-time-us",
+ data->config_data.cci_response_time_us,
+ SPDM_PL_COUNT * 2);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(node, "qcom,max-cci-freq",
+ &data->config_data.max_cci_freq);
+ if (ret)
+ return ret;
+ ret = of_property_read_u32(node, "qcom,up-step-multp",
+ &data->config_data.up_step_multp);
+ if (ret)
+ return ret;
+
+	prop = of_find_property(node, "qcom,ports", NULL);
+ if (!prop)
+ return -EINVAL;
+ data->config_data.num_ports = prop->length / sizeof(u32);
+ data->config_data.ports =
+ devm_kzalloc(&pdev->dev, prop->length, GFP_KERNEL);
+ if (!data->config_data.ports)
+ return -ENOMEM;
+ ret = of_property_read_u32_array(node, "qcom,ports",
+ data->config_data.ports,
+ data->config_data.num_ports);
+ if (ret) {
+ devm_kfree(&pdev->dev, data->config_data.ports);
+ data->config_data.ports = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int populate_spdm_data(struct spdm_data *data,
+ struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+ struct device_node *node = pdev->dev.of_node;
+
+ ret = populate_config_data(data, pdev);
+ if (ret)
+ return ret;
+
+ ret =
+ of_property_read_u32(node, "qcom,spdm-client", &data->spdm_client);
+ if (ret)
+ goto no_client;
+
+ ret = of_property_read_u32(node, "qcom,spdm-interval", &data->window);
+ if (ret)
+ data->window = DEVFREQ_SPDM_DEFAULT_WINDOW_MS;
+
+ data->pdata = msm_bus_cl_get_pdata(pdev);
+ if (!data->pdata) {
+ ret = -EINVAL;
+ goto no_pdata;
+ }
+
+ return 0;
+
+no_client:
+no_pdata:
+ devm_kfree(&pdev->dev, data->config_data.ports);
+ data->config_data.ports = NULL;
+ return ret;
+}
+
+int __spdm_hyp_call(struct spdm_args *args, int num_args)
+{
+ struct hvc_desc desc = { { 0 } };
+ int status;
+
+ memcpy(desc.arg, args->arg,
+ COPY_SIZE(sizeof(desc.arg), sizeof(args->arg)));
+ SPDM_IPC_LOG("hvc call fn:0x%x, cmd:%llu, num_args:%d\n",
+ HVC_FN_SIP(SPDM_HYP_FNID), desc.arg[0], num_args);
+
+ status = hvc(HVC_FN_SIP(SPDM_HYP_FNID), &desc);
+
+ memcpy(args->ret, desc.ret,
+ COPY_SIZE(sizeof(args->ret), sizeof(desc.ret)));
+ SPDM_IPC_LOG("hvc return fn:0x%x cmd:%llu Ret[0]:%llu Ret[1]:%llu\n",
+ HVC_FN_SIP(SPDM_HYP_FNID), desc.arg[0],
+ desc.ret[0], desc.ret[1]);
+ return status;
+}
+
+int __spdm_scm_call(struct spdm_args *args, int num_args)
+{
+ int status = 0;
+
+ SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,num_args:%d\n",
+ __func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID,
+ args->arg[0], num_args);
+
+ if (!is_scm_armv8()) {
+ status = scm_call(SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg,
+ sizeof(args->arg), args->ret,
+ sizeof(args->ret));
+ } else {
+ struct scm_desc desc = {0};
+ /*
+ * Need to hard code this, this is a requirement from TZ syscall
+ * interface.
+ */
+ desc.arginfo = SCM_ARGS(6);
+ memcpy(desc.args, args->arg,
+ COPY_SIZE(sizeof(desc.args), sizeof(args->arg)));
+
+ status = scm_call2(SCM_SIP_FNID(SPDM_SCM_SVC_ID,
+ SPDM_SCM_CMD_ID), &desc);
+
+ memcpy(args->ret, desc.ret,
+ COPY_SIZE(sizeof(args->ret), sizeof(desc.ret)));
+ }
+ SPDM_IPC_LOG("%s:svc_id:%d,cmd_id:%d,cmd:%llu,Ret[0]:%llu,Ret[1]:%llu\n"
+ , __func__, SPDM_SCM_SVC_ID, SPDM_SCM_CMD_ID, args->arg[0],
+ args->ret[0], args->ret[1]);
+ return status;
+}
+
+static int probe(struct platform_device *pdev)
+{
+	struct spdm_data *data = NULL;
+ int ret = -EINVAL;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->action = SPDM_DOWN;
+
+ platform_set_drvdata(pdev, data);
+
+ ret = populate_spdm_data(data, pdev);
+ if (ret)
+ goto bad_of;
+
+ desc.arg[0] = SPDM_CMD_GET_VERSION;
+ ext_status = spdm_ext_call(&desc, 1);
+ if (ext_status) {
+		pr_err("%s: External command %u failed with error %u\n",
+ __func__, (int)desc.arg[0], ext_status);
+ goto bad_of;
+ }
+
+ if (desc.ret[0] < SPDM_TZ_VERSION) {
+		pr_err("%s: Version mismatch expected 0x%x got 0x%x\n",
+		       __func__, SPDM_TZ_VERSION, (int)desc.ret[0]);
+ goto bad_of;
+ }
+
+ data->bus_scale_client_id = msm_bus_scale_register_client(data->pdata);
+ if (!data->bus_scale_client_id) {
+ ret = -EINVAL;
+ goto no_bus_scaling;
+ }
+
+ data->cci_clk = clk_get(&pdev->dev, "cci_clk");
+	if (IS_ERR(data->cci_clk))
+		data->cci_clk = NULL;
+
+ data->profile =
+ devm_kzalloc(&pdev->dev, sizeof(*(data->profile)), GFP_KERNEL);
+ if (!data->profile) {
+ ret = -ENOMEM;
+ goto no_profile;
+ }
+ data->profile->target = change_bw;
+ data->profile->get_dev_status = get_dev_status;
+ data->profile->get_cur_freq = get_cur_bw;
+ data->profile->polling_ms = data->window;
+
+ data->devfreq =
+ devfreq_add_device(&pdev->dev, data->profile, "spdm_bw_hyp", data);
+ if (IS_ERR(data->devfreq)) {
+ ret = PTR_ERR(data->devfreq);
+ goto no_spdm_device;
+ }
+
+ spdm_init_debugfs(&pdev->dev);
+ spdm_ipc_log_ctxt = ipc_log_context_create(SPDM_IPC_LOG_PAGES,
+ "devfreq_spdm", 0);
+
+ if (IS_ERR_OR_NULL(spdm_ipc_log_ctxt)) {
+ pr_err("%s: Failed to create IPC log context\n", __func__);
+ spdm_ipc_log_ctxt = NULL;
+ }
+
+ return 0;
+
+no_spdm_device:
+ devm_kfree(&pdev->dev, data->profile);
+no_profile:
+ msm_bus_scale_unregister_client(data->bus_scale_client_id);
+no_bus_scaling:
+ devm_kfree(&pdev->dev, data->config_data.ports);
+bad_of:
+ devm_kfree(&pdev->dev, data);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int remove(struct platform_device *pdev)
+{
+	struct spdm_data *data = NULL;
+
+ data = platform_get_drvdata(pdev);
+
+ spdm_remove_debugfs(data);
+
+ if (data->devfreq)
+ devfreq_remove_device(data->devfreq);
+
+ if (data->profile)
+ devm_kfree(&pdev->dev, data->profile);
+
+ if (data->bus_scale_client_id)
+ msm_bus_scale_unregister_client(data->bus_scale_client_id);
+
+ if (data->config_data.ports)
+ devm_kfree(&pdev->dev, data->config_data.ports);
+
+ devm_kfree(&pdev->dev, data);
+ platform_set_drvdata(pdev, NULL);
+
+ if (spdm_ipc_log_ctxt)
+ ipc_log_context_destroy(spdm_ipc_log_ctxt);
+
+ return 0;
+}
+
+static const struct of_device_id devfreq_spdm_match[] = {
+ {.compatible = "qcom,devfreq_spdm"},
+ {}
+};
+
+static struct platform_driver devfreq_spdm_drvr = {
+ .driver = {
+ .name = "devfreq_spdm",
+ .owner = THIS_MODULE,
+ .of_match_table = devfreq_spdm_match,
+ },
+ .probe = probe,
+ .remove = remove,
+};
+
+static int __init devfreq_spdm_init(void)
+{
+ return platform_driver_register(&devfreq_spdm_drvr);
+}
+
+module_init(devfreq_spdm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_spdm.h b/drivers/devfreq/devfreq_spdm.h
new file mode 100644
index 000000000000..c68f81cf8d4e
--- /dev/null
+++ b/drivers/devfreq/devfreq_spdm.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DEVFREQ_SPDM_H
+#define DEVFREQ_SPDM_H
+
+#include <linux/list.h>
+#include <soc/qcom/hvc.h>
+#include <soc/qcom/scm.h>
+
+enum pl_levels { SPDM_PL1, SPDM_PL2, SPDM_PL3, SPDM_PL_COUNT };
+enum actions { SPDM_UP, SPDM_DOWN };
+enum spdm_client { SPDM_CLIENT_CPU, SPDM_CLIENT_GPU, SPDM_CLIENT_COUNT };
+
+struct spdm_config_data {
+ /* in MB/s */
+ u32 upstep;
+ u32 downstep;
+ u32 up_step_multp;
+
+ u32 num_ports;
+ u32 *ports;
+ u32 aup;
+ u32 adown;
+ u32 bucket_size;
+
+	/*
+	 * If we define n PL levels, we need n-1 frequencies to tell
+	 * where to change from one PL to another.
+	 */
+ /* hz */
+ u32 pl_freqs[SPDM_PL_COUNT - 1];
+	/*
+	 * We have a low threshold and a high threshold for each PL to
+	 * support the two-port solution, so we need twice as many entries
+	 * as performance levels.
+	 */
+ /* in 100th's of a percent */
+ u32 reject_rate[SPDM_PL_COUNT * 2];
+ u32 response_time_us[SPDM_PL_COUNT * 2];
+ u32 cci_response_time_us[SPDM_PL_COUNT * 2];
+ /* hz */
+ u32 max_cci_freq;
+ /* in MB/s */
+ u32 max_vote;
+};
+
+struct spdm_data {
+ /* bus scaling data */
+ int cur_idx;
+ struct msm_bus_scale_pdata *pdata;
+ u32 bus_scale_client_id;
+	/* in MB/s */
+ u32 new_bw;
+
+ /* devfreq data */
+ struct devfreq *devfreq;
+ struct devfreq_dev_profile *profile;
+ unsigned long action;
+	u32 window;
+ struct clk *cci_clk;
+
+ /* spdm hw/gov data */
+ struct spdm_config_data config_data;
+
+ enum spdm_client spdm_client;
+ /* list used by governor to keep track of spdm devices */
+ struct list_head list;
+
+ struct dentry *debugfs_dir;
+
+ bool enabled;
+};
+
+extern void spdm_init_debugfs(struct device *dev);
+extern void spdm_remove_debugfs(struct spdm_data *data);
+
+#define SPDM_HYP_FNID 5
+#define SPDM_SCM_SVC_ID 0x9
+#define SPDM_SCM_CMD_ID 0x4
+#define SPDM_TZ_VERSION 0x20000 /* TZ SPDM driver version */
+/* SPDM CMD ID's for hypervisor/SCM */
+#define SPDM_CMD_GET_VERSION 0
+#define SPDM_CMD_GET_BW_ALL 1
+#define SPDM_CMD_GET_BW_SPECIFIC 2
+#define SPDM_CMD_ENABLE 3
+#define SPDM_CMD_DISABLE 4
+#define SPDM_CMD_CFG_PORTS 5
+#define SPDM_CMD_CFG_FLTR 6
+#define SPDM_CMD_CFG_PL 7
+#define SPDM_CMD_CFG_REJRATE_LOW 8
+#define SPDM_CMD_CFG_REJRATE_MED 9
+#define SPDM_CMD_CFG_REJRATE_HIGH 10
+#define SPDM_CMD_CFG_RESPTIME_LOW 11
+#define SPDM_CMD_CFG_RESPTIME_MED 12
+#define SPDM_CMD_CFG_RESPTIME_HIGH 13
+#define SPDM_CMD_CFG_CCIRESPTIME_LOW 14
+#define SPDM_CMD_CFG_CCIRESPTIME_MED 15
+#define SPDM_CMD_CFG_CCIRESPTIME_HIGH 16
+#define SPDM_CMD_CFG_MAXCCI 17
+#define SPDM_CMD_CFG_VOTES 18
+
+#define SPDM_MAX_ARGS 6
+#define SPDM_MAX_RETS 3
+
+struct spdm_args {
+ u64 arg[SPDM_MAX_ARGS];
+ u64 ret[SPDM_MAX_RETS];
+};
+
+extern int __spdm_hyp_call(struct spdm_args *args, int num_args);
+extern int __spdm_scm_call(struct spdm_args *args, int num_args);
+
+#ifdef CONFIG_SPDM_SCM
+#define spdm_ext_call __spdm_scm_call
+#else
+#define spdm_ext_call __spdm_hyp_call
+#endif
+#endif
diff --git a/drivers/devfreq/devfreq_spdm_debugfs.c b/drivers/devfreq/devfreq_spdm_debugfs.c
new file mode 100644
index 000000000000..94e94f3bbc1c
--- /dev/null
+++ b/drivers/devfreq/devfreq_spdm_debugfs.c
@@ -0,0 +1,848 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "devfreq_spdm.h"
+#include "governor.h"
+
+static int spdm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
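+/* Scratch buffer shared by all the debugfs read/write handlers below. */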
+static char buf[PAGE_SIZE];
+
+static ssize_t enable_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i;
+ int next_idx;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+		size = -EINVAL;
+		goto err;
+ }
+
+ if (sscanf(buf, "%u\n", &i) != 1) {
+ size = -EINVAL;
+ goto err;
+ }
+ i = !!i;
+
+ if (i == spdm_data->enabled)
+ goto out;
+
+ spdm_data->devfreq->governor->event_handler(spdm_data->devfreq,
+ i ? DEVFREQ_GOV_START :
+ DEVFREQ_GOV_STOP, NULL);
+
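+	/*
+	 * On disable, zero the AB votes in the inactive usecase and switch
+	 * to it so the bus request drops immediately rather than lingering
+	 * at the last governor-chosen level.
+	 */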
+ if (!i) {
+ next_idx = spdm_data->cur_idx + 1;
+ next_idx = next_idx % 2;
+
+ for (i = 0; i < spdm_data->pdata->usecase[next_idx].num_paths;
+ i++)
+ spdm_data->pdata->usecase[next_idx].vectors[i].ab = 0;
+
+ spdm_data->cur_idx = next_idx;
+ msm_bus_scale_client_update_request
+ (spdm_data->bus_scale_client_id, spdm_data->cur_idx);
+ }
+
+out:
+ *offset += size;
+err:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t enable_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int len = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ len = scnprintf(buf, size, "%u\n", spdm_data->enabled);
+ len = simple_read_from_buffer(data, size, offset, buf, len);
+
+ memset(buf, 0, sizeof(buf));
+ return len;
+}
+
+static const struct file_operations enable_fops = {
+ .open = spdm_open,
+ .write = enable_write,
+ .read = enable_read,
+};
+
+static ssize_t pl_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+ int i;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ if (sscanf(buf, "%u %u\n", &spdm_data->config_data.pl_freqs[0],
+ &spdm_data->config_data.pl_freqs[1]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_PL;
+ desc.arg[1] = spdm_data->spdm_client;
+ for (i = 0; i < SPDM_PL_COUNT - 1; i++)
+ desc.arg[i+2] = spdm_data->config_data.pl_freqs[i];
+ ext_status = spdm_ext_call(&desc, SPDM_PL_COUNT + 1);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t pl_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n", spdm_data->config_data.pl_freqs[0],
+ spdm_data->config_data.pl_freqs[1]);
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations pl_fops = {
+ .open = spdm_open,
+ .write = pl_write,
+ .read = pl_read,
+};
+
+static ssize_t rejrate_low_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[0],
+ &spdm_data->config_data.reject_rate[1]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_REJRATE_LOW;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.reject_rate[0];
+ desc.arg[3] = spdm_data->config_data.reject_rate[1];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t rejrate_low_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.reject_rate[0],
+ spdm_data->config_data.reject_rate[1]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations rrl_fops = {
+ .open = spdm_open,
+ .write = rejrate_low_write,
+ .read = rejrate_low_read,
+};
+
+static ssize_t rejrate_med_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[2],
+ &spdm_data->config_data.reject_rate[3]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_REJRATE_MED;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.reject_rate[2];
+ desc.arg[3] = spdm_data->config_data.reject_rate[3];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t rejrate_med_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.reject_rate[2],
+ spdm_data->config_data.reject_rate[3]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations rrm_fops = {
+ .open = spdm_open,
+ .write = rejrate_med_write,
+ .read = rejrate_med_read,
+};
+
+static ssize_t rejrate_high_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u\n", &spdm_data->config_data.reject_rate[4],
+ &spdm_data->config_data.reject_rate[5]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_REJRATE_HIGH;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.reject_rate[4];
+ desc.arg[3] = spdm_data->config_data.reject_rate[5];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t rejrate_high_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.reject_rate[4],
+ spdm_data->config_data.reject_rate[5]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations rrh_fops = {
+ .open = spdm_open,
+ .write = rejrate_high_write,
+ .read = rejrate_high_read,
+};
+
+static ssize_t resptime_low_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[0],
+ &spdm_data->config_data.response_time_us[1]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_RESPTIME_LOW;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.response_time_us[0];
+ desc.arg[3] = spdm_data->config_data.response_time_us[1];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t resptime_low_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.response_time_us[0],
+ spdm_data->config_data.response_time_us[1]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations rtl_fops = {
+ .open = spdm_open,
+ .write = resptime_low_write,
+ .read = resptime_low_read,
+};
+
+static ssize_t resptime_med_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[2],
+ &spdm_data->config_data.response_time_us[3]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_RESPTIME_MED;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.response_time_us[2];
+ desc.arg[3] = spdm_data->config_data.response_time_us[3];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t resptime_med_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.response_time_us[2],
+ spdm_data->config_data.response_time_us[3]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations rtm_fops = {
+ .open = spdm_open,
+ .write = resptime_med_write,
+ .read = resptime_med_read,
+};
+
+static ssize_t resptime_high_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u\n", &spdm_data->config_data.response_time_us[4],
+ &spdm_data->config_data.response_time_us[5]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_RESPTIME_HIGH;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.response_time_us[4];
+ desc.arg[3] = spdm_data->config_data.response_time_us[5];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t resptime_high_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.response_time_us[4],
+ spdm_data->config_data.response_time_us[5]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations rth_fops = {
+ .open = spdm_open,
+ .write = resptime_high_write,
+ .read = resptime_high_read,
+};
+
+static ssize_t cciresptime_low_write(struct file *file,
+ const char __user *data, size_t size,
+ loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u\n",
+ &spdm_data->config_data.cci_response_time_us[0],
+ &spdm_data->config_data.cci_response_time_us[1]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_LOW;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.cci_response_time_us[0];
+ desc.arg[3] = spdm_data->config_data.cci_response_time_us[1];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t cciresptime_low_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.cci_response_time_us[0],
+ spdm_data->config_data.cci_response_time_us[1]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations ccil_fops = {
+ .open = spdm_open,
+ .write = cciresptime_low_write,
+ .read = cciresptime_low_read,
+};
+
+static ssize_t cciresptime_med_write(struct file *file,
+ const char __user *data, size_t size,
+ loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u\n",
+ &spdm_data->config_data.cci_response_time_us[2],
+ &spdm_data->config_data.cci_response_time_us[3]) != 2) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_MED;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.cci_response_time_us[2];
+ desc.arg[3] = spdm_data->config_data.cci_response_time_us[3];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t cciresptime_med_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.cci_response_time_us[2],
+ spdm_data->config_data.cci_response_time_us[3]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations ccim_fops = {
+ .open = spdm_open,
+ .write = cciresptime_med_write,
+ .read = cciresptime_med_read,
+};
+
+static ssize_t cciresptime_high_write(struct file *file,
+ const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u\n",
+ &spdm_data->config_data.cci_response_time_us[4],
+ &spdm_data->config_data.cci_response_time_us[5]) != 2){
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_HIGH;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.cci_response_time_us[4];
+ desc.arg[3] = spdm_data->config_data.cci_response_time_us[5];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t cciresptime_high_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u\n",
+ spdm_data->config_data.cci_response_time_us[4],
+ spdm_data->config_data.cci_response_time_us[5]);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations ccih_fops = {
+ .open = spdm_open,
+ .write = cciresptime_high_write,
+ .read = cciresptime_high_read,
+};
+
+static ssize_t cci_max_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u\n", &spdm_data->config_data.max_cci_freq) != 1) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_MAXCCI;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.max_cci_freq;
+ ext_status = spdm_ext_call(&desc, 3);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t cci_max_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u\n", spdm_data->config_data.max_cci_freq);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations ccimax_fops = {
+ .open = spdm_open,
+ .write = cci_max_write,
+ .read = cci_max_read,
+};
+
+static ssize_t vote_cfg_write(struct file *file, const char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, data, size)) {
+ size = -EINVAL;
+ goto out;
+ }
+ if (sscanf(buf, "%u %u %u %u\n", &spdm_data->config_data.upstep,
+ &spdm_data->config_data.downstep,
+ &spdm_data->config_data.max_vote,
+ &spdm_data->config_data.up_step_multp) != 4) {
+ size = -EINVAL;
+ goto out;
+ }
+
+ desc.arg[0] = SPDM_CMD_CFG_VOTES;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.upstep;
+ desc.arg[3] = spdm_data->config_data.downstep;
+ desc.arg[4] = spdm_data->config_data.max_vote;
+ desc.arg[5] = spdm_data->config_data.up_step_multp;
+ ext_status = spdm_ext_call(&desc, 6);
+ if (ext_status)
+		pr_err("External command %u failed with error %u\n",
+ (int)desc.arg[0], ext_status);
+ *offset += size;
+out:
+ memset(buf, 0, sizeof(buf));
+ return size;
+}
+
+static ssize_t vote_cfg_read(struct file *file, char __user *data,
+ size_t size, loff_t *offset)
+{
+ struct spdm_data *spdm_data = file->private_data;
+ int i = 32;
+
+ if (size > sizeof(buf))
+ return -EINVAL;
+
+ i = scnprintf(buf, size, "%u %u %u %u\n",
+ spdm_data->config_data.upstep,
+ spdm_data->config_data.downstep,
+ spdm_data->config_data.max_vote,
+ spdm_data->config_data.up_step_multp);
+
+ i = simple_read_from_buffer(data, size, offset, buf, i);
+ memset(buf, 0, sizeof(buf));
+ return i;
+}
+
+static const struct file_operations vote_fops = {
+ .open = spdm_open,
+ .write = vote_cfg_write,
+ .read = vote_cfg_read,
+};
+
+void spdm_init_debugfs(struct device *dev)
+{
+	struct spdm_data *data = NULL;
+
+ data = dev_get_drvdata(dev);
+ data->debugfs_dir = debugfs_create_dir(dev_name(dev), NULL);
+
+ debugfs_create_file("enable", 0600, data->debugfs_dir, data,
+ &enable_fops);
+ debugfs_create_file("pl_freqs", 0600, data->debugfs_dir, data,
+ &pl_fops);
+ debugfs_create_file("rej_rate_low", 0600, data->debugfs_dir, data,
+ &rrl_fops);
+ debugfs_create_file("rej_rate_med", 0600, data->debugfs_dir, data,
+ &rrm_fops);
+ debugfs_create_file("rej_rate_high", 0600, data->debugfs_dir, data,
+ &rrh_fops);
+ debugfs_create_file("resp_time_low", 0600, data->debugfs_dir, data,
+ &rtl_fops);
+ debugfs_create_file("resp_time_med", 0600, data->debugfs_dir, data,
+ &rtm_fops);
+ debugfs_create_file("resp_time_high", 0600, data->debugfs_dir, data,
+ &rth_fops);
+ debugfs_create_file("cci_resp_time_low", 0600, data->debugfs_dir, data,
+ &ccil_fops);
+ debugfs_create_file("cci_resp_time_med", 0600, data->debugfs_dir, data,
+ &ccim_fops);
+ debugfs_create_file("cci_resp_time_high", 0600, data->debugfs_dir,
+ data, &ccih_fops);
+ debugfs_create_file("cci_max", 0600, data->debugfs_dir, data,
+ &ccimax_fops);
+ debugfs_create_file("vote_cfg", 0600, data->debugfs_dir, data,
+ &vote_fops);
+}
+
+void spdm_remove_debugfs(struct spdm_data *data)
+{
+ debugfs_remove_recursive(data->debugfs_dir);
+}
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/devfreq_trace.h b/drivers/devfreq/devfreq_trace.h
new file mode 100644
index 000000000000..865698336cc4
--- /dev/null
+++ b/drivers/devfreq/devfreq_trace.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_DEVFREQ_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DEVFREQ_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM devfreq
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE devfreq_trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(devfreq_msg,
+ TP_PROTO(const char *msg),
+ TP_ARGS(msg),
+ TP_STRUCT__entry(
+ __string(msg, msg)
+ ),
+ TP_fast_assign(
+ __assign_str(msg, msg);
+ ),
+ TP_printk(
+ "%s", __get_str(msg)
+ )
+);
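+
+/*
+ * TRACE_EVENT() above generates trace_devfreq_msg(); e.g. calling
+ * trace_devfreq_msg("bw decreased") emits under the devfreq trace system.
+ */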
+
+#endif /* _DEVFREQ_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/devfreq/governor.h b/drivers/devfreq/governor.h
index fad7d6321978..ebde695e99c7 100644
--- a/drivers/devfreq/governor.h
+++ b/drivers/devfreq/governor.h
@@ -38,4 +38,5 @@ extern void devfreq_interval_update(struct devfreq *devfreq,
extern int devfreq_add_governor(struct devfreq_governor *governor);
extern int devfreq_remove_governor(struct devfreq_governor *governor);
+extern int devfreq_get_freq_level(struct devfreq *devfreq, unsigned long freq);
#endif /* _GOVERNOR_H */
diff --git a/drivers/devfreq/governor_bw_hwmon.c b/drivers/devfreq/governor_bw_hwmon.c
new file mode 100644
index 000000000000..972de02ca549
--- /dev/null
+++ b/drivers/devfreq/governor_bw_hwmon.c
@@ -0,0 +1,983 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "bw-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include <trace/events/power.h>
+#include "governor.h"
+#include "governor_bw_hwmon.h"
+
+#define NUM_MBPS_ZONES 10
+struct hwmon_node {
+ unsigned int guard_band_mbps;
+ unsigned int decay_rate;
+ unsigned int io_percent;
+ unsigned int bw_step;
+ unsigned int sample_ms;
+ unsigned int up_scale;
+ unsigned int up_thres;
+ unsigned int down_thres;
+ unsigned int down_count;
+ unsigned int hist_memory;
+ unsigned int hyst_trigger_count;
+ unsigned int hyst_length;
+ unsigned int idle_mbps;
+ unsigned int low_power_ceil_mbps;
+ unsigned int low_power_io_percent;
+ unsigned int low_power_delay;
+ unsigned int mbps_zones[NUM_MBPS_ZONES];
+
+ unsigned long prev_ab;
+ unsigned long *dev_ab;
+ unsigned long resume_freq;
+ unsigned long resume_ab;
+ unsigned long bytes;
+ unsigned long max_mbps;
+ unsigned long hist_max_mbps;
+ unsigned long hist_mem;
+ unsigned long hyst_peak;
+ unsigned long hyst_mbps;
+ unsigned long hyst_trig_win;
+ unsigned long hyst_en;
+ unsigned long above_low_power;
+ unsigned long prev_req;
+ unsigned int wake;
+ unsigned int down_cnt;
+ ktime_t prev_ts;
+ ktime_t hist_max_ts;
+ bool sampled;
+ bool mon_started;
+ struct list_head list;
+ void *orig_data;
+ struct bw_hwmon *hw;
+ struct devfreq_governor *gov;
+ struct attribute_group *attr_grp;
+};
+
+#define UP_WAKE 1
+#define DOWN_WAKE 2
+static DEFINE_SPINLOCK(irq_lock);
+
+static LIST_HEAD(hwmon_list);
+static DEFINE_MUTEX(list_lock);
+static DEFINE_MUTEX(sync_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct hwmon_node *hw = df->data; \
+ return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, \
+ size_t count) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct hwmon_node *hw = df->data; \
+ int ret; \
+ unsigned int val; \
+ ret = sscanf(buf, "%u", &val); \
+ if (ret != 1) \
+ return -EINVAL; \
+ val = max(val, _min); \
+ val = min(val, _max); \
+ hw->name = val; \
+ return count; \
+}
+
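+/*
+ * gov_attr(foo, min, max) expands to show_foo()/store_foo() handlers that
+ * clamp writes to [min, max], plus a 0644 DEVICE_ATTR; e.g.
+ * gov_attr(sample_ms, MIN_MS, MAX_MS) creates a "sample_ms" sysfs node.
+ */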
+#define gov_attr(__attr, min, max) \
+show_attr(__attr) \
+store_attr(__attr, min, max) \
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+#define show_list_attr(name, n) \
+static ssize_t show_list_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct hwmon_node *hw = df->data; \
+ unsigned int i, cnt = 0; \
+ \
+ for (i = 0; i < n && hw->name[i]; i++) \
+		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%u ", \
+				hw->name[i]); \
+	cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n"); \
+ return cnt; \
+}
+
+#define store_list_attr(name, n, _min, _max) \
+static ssize_t store_list_##name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, \
+ size_t count) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct hwmon_node *hw = df->data; \
+ int ret; \
+ unsigned int i = 0, val; \
+ \
+ do { \
+ ret = sscanf(buf, "%u", &val); \
+ if (ret != 1) \
+ break; \
+ buf = strnchr(buf, PAGE_SIZE, ' '); \
+ if (buf) \
+ buf++; \
+ val = max(val, _min); \
+ val = min(val, _max); \
+ hw->name[i] = val; \
+ i++; \
+ } while (buf && i < n - 1); \
+ if (i < 1) \
+ return -EINVAL; \
+ hw->name[i] = 0; \
+ return count; \
+}
+
+#define gov_list_attr(__attr, n, min, max) \
+show_list_attr(__attr, n) \
+store_list_attr(__attr, n, min, max) \
+static DEVICE_ATTR(__attr, 0644, show_list_##__attr, store_list_##__attr)
+
+#define MIN_MS 10U
+#define MAX_MS 500U
+
+/* Returns MBps of read/writes for the sampling window. */
+static unsigned int bytes_to_mbps(long long bytes, unsigned int us)
+{
+ bytes *= USEC_PER_SEC;
+ do_div(bytes, us);
+ bytes = DIV_ROUND_UP_ULL(bytes, SZ_1M);
+ return bytes;
+}
+
+static unsigned int mbps_to_bytes(unsigned long mbps, unsigned int ms)
+{
+ mbps *= ms;
+ mbps = DIV_ROUND_UP(mbps, MSEC_PER_SEC);
+ mbps *= SZ_1M;
+ return mbps;
+}
+
+static int __bw_hwmon_sw_sample_end(struct bw_hwmon *hwmon)
+{
+ struct devfreq *df;
+ struct hwmon_node *node;
+ ktime_t ts;
+ unsigned long bytes, mbps;
+ unsigned int us;
+ int wake = 0;
+
+ df = hwmon->df;
+ node = df->data;
+
+ ts = ktime_get();
+ us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+
+ bytes = hwmon->get_bytes_and_clear(hwmon);
+ bytes += node->bytes;
+ node->bytes = 0;
+
+ mbps = bytes_to_mbps(bytes, us);
+ node->max_mbps = max(node->max_mbps, mbps);
+
+ /*
+ * If the measured bandwidth in a micro sample is greater than the
+ * wake up threshold, it indicates an increase in load that's non
+ * trivial. So, have the governor ignore historical idle time or low
+ * bandwidth usage and do the bandwidth calculation based on just
+ * this micro sample.
+ */
+ if (mbps > node->hw->up_wake_mbps) {
+ wake = UP_WAKE;
+ } else if (mbps < node->hw->down_wake_mbps) {
+ if (node->down_cnt)
+ node->down_cnt--;
+ if (node->down_cnt <= 0)
+ wake = DOWN_WAKE;
+ }
+
+ node->prev_ts = ts;
+ node->wake = wake;
+ node->sampled = true;
+
+ trace_bw_hwmon_meas(dev_name(df->dev.parent),
+ mbps,
+ us,
+ wake);
+
+ return wake;
+}
+
+static int __bw_hwmon_hw_sample_end(struct bw_hwmon *hwmon)
+{
+ struct devfreq *df;
+ struct hwmon_node *node;
+ unsigned long bytes, mbps;
+ int wake = 0;
+
+ df = hwmon->df;
+ node = df->data;
+
+ /*
+ * If this read is in response to an IRQ, the HW monitor should
+ * return the measurement in the micro sample that triggered the IRQ.
+ * Otherwise, it should return the maximum measured value in any
+ * micro sample since the last time we called get_bytes_and_clear()
+ */
+ bytes = hwmon->get_bytes_and_clear(hwmon);
+ mbps = bytes_to_mbps(bytes, node->sample_ms * USEC_PER_MSEC);
+ node->max_mbps = mbps;
+
+ if (mbps > node->hw->up_wake_mbps)
+ wake = UP_WAKE;
+ else if (mbps < node->hw->down_wake_mbps)
+ wake = DOWN_WAKE;
+
+ node->wake = wake;
+ node->sampled = true;
+
+ trace_bw_hwmon_meas(dev_name(df->dev.parent),
+ mbps,
+ node->sample_ms * USEC_PER_MSEC,
+ wake);
+
+ return 1;
+}
+
+static int __bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+ if (hwmon->set_hw_events)
+ return __bw_hwmon_hw_sample_end(hwmon);
+ else
+ return __bw_hwmon_sw_sample_end(hwmon);
+}
+
+int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+ unsigned long flags;
+ int wake;
+
+ spin_lock_irqsave(&irq_lock, flags);
+ wake = __bw_hwmon_sample_end(hwmon);
+ spin_unlock_irqrestore(&irq_lock, flags);
+
+ return wake;
+}
+
+static unsigned long to_mbps_zone(struct hwmon_node *node, unsigned long mbps)
+{
+ int i;
+
+ for (i = 0; i < NUM_MBPS_ZONES && node->mbps_zones[i]; i++)
+ if (node->mbps_zones[i] >= mbps)
+ return node->mbps_zones[i];
+
+ return node->hw->df->max_freq;
+}
+
+#define MIN_MBPS 500UL
+#define HIST_PEAK_TOL 60
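+/*
+ * Convert the latest bandwidth sample into a device vote: track historic
+ * and hysteresis peaks, over-request on an UP_WAKE to stay ahead of rising
+ * traffic, decay the AB vote as load falls, and re-arm the hardware (or
+ * software) wake thresholds around the value just requested.
+ */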
+static unsigned long get_bw_and_set_irq(struct hwmon_node *node,
+ unsigned long *freq, unsigned long *ab)
+{
+ unsigned long meas_mbps, thres, flags, req_mbps, adj_mbps;
+ unsigned long meas_mbps_zone;
+ unsigned long hist_lo_tol, hyst_lo_tol;
+ struct bw_hwmon *hw = node->hw;
+ unsigned int new_bw, io_percent;
+ ktime_t ts;
+ unsigned int ms = 0;
+
+ spin_lock_irqsave(&irq_lock, flags);
+
+ if (!hw->set_hw_events) {
+ ts = ktime_get();
+ ms = ktime_to_ms(ktime_sub(ts, node->prev_ts));
+ }
+ if (!node->sampled || ms >= node->sample_ms)
+ __bw_hwmon_sample_end(node->hw);
+ node->sampled = false;
+
+ req_mbps = meas_mbps = node->max_mbps;
+ node->max_mbps = 0;
+
+ hist_lo_tol = (node->hist_max_mbps * HIST_PEAK_TOL) / 100;
+ /* Remember historic peak in the past hist_mem decision windows. */
+ if (meas_mbps > node->hist_max_mbps || !node->hist_mem) {
+ /* If new max or no history */
+ node->hist_max_mbps = meas_mbps;
+ node->hist_mem = node->hist_memory;
+ } else if (meas_mbps >= hist_lo_tol) {
+ /*
+ * If subsequent peaks come close (within tolerance) to but
+ * less than the historic peak, then reset the history start,
+ * but not the peak value.
+ */
+ node->hist_mem = node->hist_memory;
+ } else {
+ /* Count down history expiration. */
+ if (node->hist_mem)
+ node->hist_mem--;
+ }
+
+ /* Keep track of whether we are in low power mode consistently. */
+ if (meas_mbps > node->low_power_ceil_mbps)
+ node->above_low_power = node->low_power_delay;
+ if (node->above_low_power)
+ node->above_low_power--;
+
+ if (node->above_low_power)
+ io_percent = node->io_percent;
+ else
+ io_percent = node->low_power_io_percent;
+
+ /*
+ * The AB value that corresponds to the lowest mbps zone greater than
+ * or equal to the "frequency" the current measurement will pick.
+ * This upper limit is useful for balancing out any prediction
+ * mechanisms to be power friendly.
+ */
+ meas_mbps_zone = (meas_mbps * 100) / io_percent;
+ meas_mbps_zone = to_mbps_zone(node, meas_mbps_zone);
+ meas_mbps_zone = (meas_mbps_zone * io_percent) / 100;
+ meas_mbps_zone = max(meas_mbps, meas_mbps_zone);
+
+ /*
+ * If this is a wake up due to BW increase, vote much higher BW than
+ * what we measure to stay ahead of increasing traffic and then set
+ * it up to vote for measured BW if we see down_count short sample
+ * windows of low traffic.
+ */
+ if (node->wake == UP_WAKE) {
+ req_mbps += ((meas_mbps - node->prev_req)
+ * node->up_scale) / 100;
+ /*
+ * However if the measured load is less than the historic
+ * peak, but the over request is higher than the historic
+ * peak, then we could limit the over requesting to the
+ * historic peak.
+ */
+ if (req_mbps > node->hist_max_mbps
+ && meas_mbps < node->hist_max_mbps)
+ req_mbps = node->hist_max_mbps;
+
+ req_mbps = min(req_mbps, meas_mbps_zone);
+ }
+
+ hyst_lo_tol = (node->hyst_mbps * HIST_PEAK_TOL) / 100;
+ if (meas_mbps > node->hyst_mbps && meas_mbps > MIN_MBPS) {
+ hyst_lo_tol = (meas_mbps * HIST_PEAK_TOL) / 100;
+ node->hyst_peak = 0;
+ node->hyst_trig_win = node->hyst_length;
+ node->hyst_mbps = meas_mbps;
+ }
+
+ /*
+ * Check node->max_mbps to avoid double counting peaks that cause
+ * early termination of a window.
+ */
+ if (meas_mbps >= hyst_lo_tol && meas_mbps > MIN_MBPS
+ && !node->max_mbps) {
+ node->hyst_peak++;
+ if (node->hyst_peak >= node->hyst_trigger_count
+ || node->hyst_en)
+ node->hyst_en = node->hyst_length;
+ }
+
+ if (node->hyst_trig_win)
+ node->hyst_trig_win--;
+ if (node->hyst_en)
+ node->hyst_en--;
+
+ if (!node->hyst_trig_win && !node->hyst_en) {
+ node->hyst_peak = 0;
+ node->hyst_mbps = 0;
+ }
+
+ if (node->hyst_en) {
+ if (meas_mbps > node->idle_mbps)
+ req_mbps = max(req_mbps, node->hyst_mbps);
+ }
+
+ /* Stretch the short sample window size, if the traffic is too low */
+ if (meas_mbps < MIN_MBPS) {
+ hw->up_wake_mbps = (max(MIN_MBPS, req_mbps)
+ * (100 + node->up_thres)) / 100;
+ hw->down_wake_mbps = 0;
+ hw->undo_over_req_mbps = 0;
+ thres = mbps_to_bytes(max(MIN_MBPS, req_mbps / 2),
+ node->sample_ms);
+ } else {
+		/*
+		 * The up wake and down wake thresholds are intentionally
+		 * percentages of req_mbps and meas_mbps, respectively, to
+		 * make sure the over-requesting phase is handled properly.
+		 * We only want to wake up and reduce the vote when the
+		 * measured mbps drops below the previous measurement that
+		 * caused the "over request".
+		 */
+ hw->up_wake_mbps = (req_mbps * (100 + node->up_thres)) / 100;
+ hw->down_wake_mbps = (meas_mbps * node->down_thres) / 100;
+ if (node->wake == UP_WAKE)
+ hw->undo_over_req_mbps = min(req_mbps, meas_mbps_zone);
+ else
+ hw->undo_over_req_mbps = 0;
+ thres = mbps_to_bytes(meas_mbps, node->sample_ms);
+ }
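+	/*
+	 * Worked example (illustrative numbers): with up_thres = 10 and
+	 * req_mbps = 1000, up_wake_mbps = 1000 * 110 / 100 = 1100; with
+	 * down_thres = 20 and meas_mbps = 900, down_wake_mbps =
+	 * 900 * 20 / 100 = 180.
+	 */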
+
+ if (hw->set_hw_events) {
+ hw->down_cnt = node->down_count;
+ hw->set_hw_events(hw, node->sample_ms);
+ } else {
+ node->down_cnt = node->down_count;
+ node->bytes = hw->set_thres(hw, thres);
+ }
+
+ node->wake = 0;
+ node->prev_req = req_mbps;
+
+ spin_unlock_irqrestore(&irq_lock, flags);
+
+ adj_mbps = req_mbps + node->guard_band_mbps;
+
+ if (adj_mbps > node->prev_ab) {
+ new_bw = adj_mbps;
+ } else {
+ new_bw = adj_mbps * node->decay_rate
+ + node->prev_ab * (100 - node->decay_rate);
+ new_bw /= 100;
+ }
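+	/*
+	 * Worked example (illustrative numbers): with decay_rate = 90,
+	 * adj_mbps = 500 and prev_ab = 1000, new_bw =
+	 * (500 * 90 + 1000 * 10) / 100 = 550, i.e. the vote decays towards
+	 * the lower request instead of dropping to it immediately.
+	 */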
+
+ node->prev_ab = new_bw;
+ if (ab)
+ *ab = roundup(new_bw, node->bw_step);
+
+ *freq = (new_bw * 100) / io_percent;
+ trace_bw_hwmon_update(dev_name(node->hw->df->dev.parent),
+ new_bw,
+ *freq,
+ hw->up_wake_mbps,
+ hw->down_wake_mbps);
+ return req_mbps;
+}
+
+static struct hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+ struct hwmon_node *node, *found = NULL;
+
+ mutex_lock(&list_lock);
+ list_for_each_entry(node, &hwmon_list, list)
+ if (node->hw->dev == df->dev.parent ||
+ node->hw->of_node == df->dev.parent->of_node ||
+ (!node->hw->dev && !node->hw->of_node &&
+ node->gov == df->governor)) {
+ found = node;
+ break;
+ }
+ mutex_unlock(&list_lock);
+
+ return found;
+}
+
+int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+ struct devfreq *df;
+ struct hwmon_node *node;
+ int ret;
+
+ if (!hwmon)
+ return -EINVAL;
+ df = hwmon->df;
+ if (!df)
+ return -ENODEV;
+ node = df->data;
+ if (!node)
+ return -ENODEV;
+
+ if (!node->mon_started)
+ return -EBUSY;
+
+ dev_dbg(df->dev.parent, "Got update request\n");
+ devfreq_monitor_stop(df);
+
+ mutex_lock(&df->lock);
+ ret = update_devfreq(df);
+ if (ret)
+ dev_err(df->dev.parent,
+ "Unable to update freq on request!\n");
+ mutex_unlock(&df->lock);
+
+ devfreq_monitor_start(df);
+
+ return 0;
+}
+
+static int start_monitor(struct devfreq *df, bool init)
+{
+ struct hwmon_node *node = df->data;
+ struct bw_hwmon *hw = node->hw;
+ struct device *dev = df->dev.parent;
+ unsigned long mbps;
+ int ret;
+
+ node->prev_ts = ktime_get();
+
+ if (init) {
+ node->prev_ab = 0;
+ node->resume_freq = 0;
+ node->resume_ab = 0;
+ mbps = (df->previous_freq * node->io_percent) / 100;
+ hw->up_wake_mbps = mbps;
+ hw->down_wake_mbps = MIN_MBPS;
+ hw->undo_over_req_mbps = 0;
+ ret = hw->start_hwmon(hw, mbps);
+ } else {
+ ret = hw->resume_hwmon(hw);
+ }
+
+ if (ret) {
+ dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
+ return ret;
+ }
+
+ if (init)
+ devfreq_monitor_start(df);
+ else
+ devfreq_monitor_resume(df);
+
+ node->mon_started = true;
+
+ return 0;
+}
+
+static void stop_monitor(struct devfreq *df, bool init)
+{
+ struct hwmon_node *node = df->data;
+ struct bw_hwmon *hw = node->hw;
+
+ node->mon_started = false;
+
+ if (init) {
+ devfreq_monitor_stop(df);
+ hw->stop_hwmon(hw);
+ } else {
+ devfreq_monitor_suspend(df);
+ hw->suspend_hwmon(hw);
+ }
+
+}
+
+static int gov_start(struct devfreq *df)
+{
+ int ret = 0;
+ struct device *dev = df->dev.parent;
+ struct hwmon_node *node;
+ struct bw_hwmon *hw;
+ struct devfreq_dev_status stat;
+
+ node = find_hwmon_node(df);
+ if (!node) {
+ dev_err(dev, "Unable to find HW monitor!\n");
+ return -ENODEV;
+ }
+ hw = node->hw;
+
+ stat.private_data = NULL;
+ if (df->profile->get_dev_status)
+ ret = df->profile->get_dev_status(df->dev.parent, &stat);
+ if (ret || !stat.private_data)
+ dev_warn(dev, "Device doesn't take AB votes!\n");
+ else
+ node->dev_ab = stat.private_data;
+
+ hw->df = df;
+ node->orig_data = df->data;
+ df->data = node;
+
+ if (start_monitor(df, true))
+ goto err_start;
+
+ ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
+ if (ret)
+ goto err_sysfs;
+
+ return 0;
+
+err_sysfs:
+ stop_monitor(df, true);
+err_start:
+ df->data = node->orig_data;
+ node->orig_data = NULL;
+ hw->df = NULL;
+ node->dev_ab = NULL;
+ return ret;
+}
+
+static void gov_stop(struct devfreq *df)
+{
+ struct hwmon_node *node = df->data;
+ struct bw_hwmon *hw = node->hw;
+
+ sysfs_remove_group(&df->dev.kobj, node->attr_grp);
+ stop_monitor(df, true);
+ df->data = node->orig_data;
+ node->orig_data = NULL;
+ hw->df = NULL;
+ /*
+ * Not all governors know about this additional extended device
+	 * configuration. To avoid leaving the extended configuration in a
+ * stale state, set it to 0 and let the next governor take it from
+ * there.
+ */
+ if (node->dev_ab)
+ *node->dev_ab = 0;
+ node->dev_ab = NULL;
+}
+
+static int gov_suspend(struct devfreq *df)
+{
+ struct hwmon_node *node = df->data;
+ unsigned long resume_freq = df->previous_freq;
+	unsigned long resume_ab = node->dev_ab ? *node->dev_ab : 0;
+
+ if (!node->hw->suspend_hwmon)
+ return -ENOSYS;
+
+ if (node->resume_freq) {
+ dev_warn(df->dev.parent, "Governor already suspended!\n");
+ return -EBUSY;
+ }
+
+ stop_monitor(df, false);
+
+ mutex_lock(&df->lock);
+ update_devfreq(df);
+ mutex_unlock(&df->lock);
+
+ node->resume_freq = resume_freq;
+ node->resume_ab = resume_ab;
+
+ return 0;
+}
+
+static int gov_resume(struct devfreq *df)
+{
+ struct hwmon_node *node = df->data;
+
+ if (!node->hw->resume_hwmon)
+ return -ENOSYS;
+
+ if (!node->resume_freq) {
+ dev_warn(df->dev.parent, "Governor already resumed!\n");
+ return -EBUSY;
+ }
+
+ mutex_lock(&df->lock);
+ update_devfreq(df);
+ mutex_unlock(&df->lock);
+
+ node->resume_freq = 0;
+ node->resume_ab = 0;
+
+ return start_monitor(df, false);
+}
+
+static int devfreq_bw_hwmon_get_freq(struct devfreq *df,
+ unsigned long *freq,
+ u32 *flag)
+{
+ struct hwmon_node *node = df->data;
+
+ /* Suspend/resume sequence */
+ if (!node->mon_started) {
+ *freq = node->resume_freq;
+		if (node->dev_ab)
+			*node->dev_ab = node->resume_ab;
+ return 0;
+ }
+
+ get_bw_and_set_irq(node, freq, node->dev_ab);
+
+ return 0;
+}
+
+static ssize_t store_throttle_adj(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct devfreq *df = to_devfreq(dev);
+ struct hwmon_node *node = df->data;
+ int ret;
+ unsigned int val;
+
+ if (!node->hw->set_throttle_adj)
+ return -ENOSYS;
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ ret = node->hw->set_throttle_adj(node->hw, val);
+
+ if (!ret)
+ return count;
+ else
+ return ret;
+}
+
+static ssize_t show_throttle_adj(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
+ struct hwmon_node *node = df->data;
+ unsigned int val;
+
+ if (!node->hw->get_throttle_adj)
+ val = 0;
+ else
+ val = node->hw->get_throttle_adj(node->hw);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", val);
+}
+
+static DEVICE_ATTR(throttle_adj, 0644, show_throttle_adj,
+ store_throttle_adj);
+
+gov_attr(guard_band_mbps, 0U, 2000U);
+gov_attr(decay_rate, 0U, 100U);
+gov_attr(io_percent, 1U, 100U);
+gov_attr(bw_step, 50U, 1000U);
+gov_attr(sample_ms, 1U, 50U);
+gov_attr(up_scale, 0U, 500U);
+gov_attr(up_thres, 1U, 100U);
+gov_attr(down_thres, 0U, 90U);
+gov_attr(down_count, 0U, 90U);
+gov_attr(hist_memory, 0U, 90U);
+gov_attr(hyst_trigger_count, 0U, 90U);
+gov_attr(hyst_length, 0U, 90U);
+gov_attr(idle_mbps, 0U, 2000U);
+gov_attr(low_power_ceil_mbps, 0U, 2500U);
+gov_attr(low_power_io_percent, 1U, 100U);
+gov_attr(low_power_delay, 1U, 60U);
+gov_list_attr(mbps_zones, NUM_MBPS_ZONES, 0U, UINT_MAX);
+
+static struct attribute *dev_attr[] = {
+ &dev_attr_guard_band_mbps.attr,
+ &dev_attr_decay_rate.attr,
+ &dev_attr_io_percent.attr,
+ &dev_attr_bw_step.attr,
+ &dev_attr_sample_ms.attr,
+ &dev_attr_up_scale.attr,
+ &dev_attr_up_thres.attr,
+ &dev_attr_down_thres.attr,
+ &dev_attr_down_count.attr,
+ &dev_attr_hist_memory.attr,
+ &dev_attr_hyst_trigger_count.attr,
+ &dev_attr_hyst_length.attr,
+ &dev_attr_idle_mbps.attr,
+ &dev_attr_low_power_ceil_mbps.attr,
+ &dev_attr_low_power_io_percent.attr,
+ &dev_attr_low_power_delay.attr,
+ &dev_attr_mbps_zones.attr,
+ &dev_attr_throttle_adj.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .name = "bw_hwmon",
+ .attrs = dev_attr,
+};
+
+static int devfreq_bw_hwmon_ev_handler(struct devfreq *df,
+ unsigned int event, void *data)
+{
+ int ret;
+ unsigned int sample_ms;
+ struct hwmon_node *node;
+ struct bw_hwmon *hw;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ sample_ms = df->profile->polling_ms;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ df->profile->polling_ms = sample_ms;
+
+ ret = gov_start(df);
+ if (ret)
+ return ret;
+
+ dev_dbg(df->dev.parent,
+ "Enabled dev BW HW monitor governor\n");
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ gov_stop(df);
+ dev_dbg(df->dev.parent,
+ "Disabled dev BW HW monitor governor\n");
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ mutex_lock(&sync_lock);
+ sample_ms = *(unsigned int *)data;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+		/*
+		 * Suspend/resume the HW monitor around the interval update
+		 * to prevent the HW monitor IRQ handler from trying to
+		 * stop/start the delayed workqueue while the interval update
+		 * is in progress.
+		 */
+ node = df->data;
+ hw = node->hw;
+ hw->suspend_hwmon(hw);
+ devfreq_interval_update(df, &sample_ms);
+ ret = hw->resume_hwmon(hw);
+		if (ret) {
+			dev_err(df->dev.parent,
+				"Unable to resume HW monitor (%d)\n", ret);
+			mutex_unlock(&sync_lock);
+			return ret;
+		}
+		mutex_unlock(&sync_lock);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ ret = gov_suspend(df);
+ if (ret) {
+ dev_err(df->dev.parent,
+ "Unable to suspend BW HW mon governor (%d)\n",
+ ret);
+ return ret;
+ }
+
+ dev_dbg(df->dev.parent, "Suspended BW HW mon governor\n");
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ ret = gov_resume(df);
+ if (ret) {
+ dev_err(df->dev.parent,
+ "Unable to resume BW HW mon governor (%d)\n",
+ ret);
+ return ret;
+ }
+
+ dev_dbg(df->dev.parent, "Resumed BW HW mon governor\n");
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_gov_bw_hwmon = {
+ .name = "bw_hwmon",
+ .get_target_freq = devfreq_bw_hwmon_get_freq,
+ .event_handler = devfreq_bw_hwmon_ev_handler,
+};
+
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon)
+{
+ int ret = 0;
+ struct hwmon_node *node;
+ struct attribute_group *attr_grp;
+
+ if (!hwmon->gov && !hwmon->dev && !hwmon->of_node)
+ return -EINVAL;
+
+ node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ dev_err(dev, "Unable to register gov. Out of memory!\n");
+ return -ENOMEM;
+ }
+
+ if (hwmon->gov) {
+ attr_grp = devm_kzalloc(dev, sizeof(*attr_grp), GFP_KERNEL);
+ if (!attr_grp)
+ return -ENOMEM;
+
+ hwmon->gov->get_target_freq = devfreq_bw_hwmon_get_freq;
+ hwmon->gov->event_handler = devfreq_bw_hwmon_ev_handler;
+ attr_grp->name = hwmon->gov->name;
+ attr_grp->attrs = dev_attr;
+
+ node->gov = hwmon->gov;
+ node->attr_grp = attr_grp;
+ } else {
+ node->gov = &devfreq_gov_bw_hwmon;
+ node->attr_grp = &dev_attr_group;
+ }
+
+ node->guard_band_mbps = 100;
+ node->decay_rate = 90;
+ node->io_percent = 16;
+ node->low_power_ceil_mbps = 0;
+ node->low_power_io_percent = 16;
+ node->low_power_delay = 60;
+ node->bw_step = 190;
+ node->sample_ms = 50;
+ node->up_scale = 0;
+ node->up_thres = 10;
+ node->down_thres = 0;
+ node->down_count = 3;
+ node->hist_memory = 0;
+ node->hyst_trigger_count = 3;
+ node->hyst_length = 0;
+ node->idle_mbps = 400;
+ node->mbps_zones[0] = 0;
+ node->hw = hwmon;
+
+ mutex_lock(&list_lock);
+ list_add_tail(&node->list, &hwmon_list);
+ mutex_unlock(&list_lock);
+
+ if (hwmon->gov) {
+ ret = devfreq_add_governor(hwmon->gov);
+ } else {
+ mutex_lock(&state_lock);
+ if (!use_cnt)
+ ret = devfreq_add_governor(&devfreq_gov_bw_hwmon);
+ if (!ret)
+ use_cnt++;
+ mutex_unlock(&state_lock);
+ }
+
+ if (!ret)
+ dev_info(dev, "BW HWmon governor registered.\n");
+ else
+ dev_err(dev, "BW HWmon governor registration failed!\n");
+
+ return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based dev DDR bandwidth voting driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_bw_hwmon.h b/drivers/devfreq/governor_bw_hwmon.h
new file mode 100644
index 000000000000..7578399cfb88
--- /dev/null
+++ b/drivers/devfreq/governor_bw_hwmon.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_BW_HWMON_H
+#define _GOVERNOR_BW_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+/**
+ * struct bw_hwmon - dev BW HW monitor info
+ * @start_hwmon: Start the HW monitoring of the dev BW
+ * @stop_hwmon: Stop the HW monitoring of dev BW
+ * @set_thres: Set the count threshold to generate an IRQ
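+ * @suspend_hwmon:		Suspend the HW monitoring of dev BW
+ * @resume_hwmon:		Resume the HW monitoring of dev BW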
+ * @get_bytes_and_clear: Get the bytes transferred since the last call
+ * and reset the counter to start over.
+ * @set_throttle_adj: Set throttle adjust field to the given value
+ * @get_throttle_adj: Get the value written to throttle adjust field
+ * @dev: Pointer to device that this HW monitor can
+ * monitor.
+ * @of_node: OF node of device that this HW monitor can
+ * monitor.
+ * @gov: devfreq_governor struct that should be used
+ * when registering this HW monitor with devfreq.
+ * Only the name field is expected to be
+ * initialized.
+ * @df: Devfreq node that this HW monitor is being
+ * used for. NULL when not actively in use and
+ * non-NULL when in use.
+ *
+ * One of dev, of_node or gov needs to be specified for a
+ * successful registration.
+ *
+ */
+struct bw_hwmon {
+ int (*start_hwmon)(struct bw_hwmon *hw, unsigned long mbps);
+ void (*stop_hwmon)(struct bw_hwmon *hw);
+ int (*suspend_hwmon)(struct bw_hwmon *hw);
+ int (*resume_hwmon)(struct bw_hwmon *hw);
+ unsigned long (*set_thres)(struct bw_hwmon *hw, unsigned long bytes);
+ unsigned long (*set_hw_events)(struct bw_hwmon *hw,
+ unsigned int sample_ms);
+ unsigned long (*get_bytes_and_clear)(struct bw_hwmon *hw);
+ int (*set_throttle_adj)(struct bw_hwmon *hw, uint adj);
+ u32 (*get_throttle_adj)(struct bw_hwmon *hw);
+ struct device *dev;
+ struct device_node *of_node;
+ struct devfreq_governor *gov;
+
+ unsigned long up_wake_mbps;
+ unsigned long undo_over_req_mbps;
+ unsigned long down_wake_mbps;
+ unsigned int down_cnt;
+
+ struct devfreq *df;
+};
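+
+/*
+ * Illustrative registration sketch (the my_* callback names are
+ * hypothetical): a backend fills in the ops and one identifying field
+ * (dev, of_node or gov), then registers:
+ *
+ *	static struct bw_hwmon my_hwmon = {
+ *		.start_hwmon = my_start_hwmon,
+ *		.stop_hwmon = my_stop_hwmon,
+ *		.suspend_hwmon = my_suspend_hwmon,
+ *		.resume_hwmon = my_resume_hwmon,
+ *		.set_thres = my_set_thres,
+ *		.get_bytes_and_clear = my_get_bytes_and_clear,
+ *	};
+ *
+ *	my_hwmon.dev = &pdev->dev;
+ *	ret = register_bw_hwmon(&pdev->dev, &my_hwmon);
+ */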
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON
+int register_bw_hwmon(struct device *dev, struct bw_hwmon *hwmon);
+int update_bw_hwmon(struct bw_hwmon *hwmon);
+int bw_hwmon_sample_end(struct bw_hwmon *hwmon);
+#else
+static inline int register_bw_hwmon(struct device *dev,
+ struct bw_hwmon *hwmon)
+{
+ return 0;
+}
+static inline int update_bw_hwmon(struct bw_hwmon *hwmon)
+{
+ return 0;
+}
+static inline int bw_hwmon_sample_end(struct bw_hwmon *hwmon)
+{
+ return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_BW_HWMON_H */
diff --git a/drivers/devfreq/governor_bw_vbif.c b/drivers/devfreq/governor_bw_vbif.c
new file mode 100644
index 000000000000..33e144b653d0
--- /dev/null
+++ b/drivers/devfreq/governor_bw_vbif.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include "governor.h"
+
+static unsigned long (*extern_get_bw)(void);
+static unsigned long *dev_ab;
+static unsigned long dev_ib;
+
+static DEFINE_MUTEX(df_lock);
+static struct devfreq *df;
+
+/*
+ * This function is the 'get_target_freq' API of the governor. It just
+ * calls an external function that should be registered by the KGSL
+ * driver to get and return a frequency value.
+ */
+static int devfreq_vbif_get_freq(struct devfreq *df,
+ unsigned long *freq,
+ u32 *flag)
+{
+ /* If the IB isn't set yet, check if it should be non-zero. */
+ if (!dev_ib && extern_get_bw) {
+ dev_ib = extern_get_bw();
+ if (dev_ab)
+ *dev_ab = dev_ib / 4;
+ }
+
+ *freq = dev_ib;
+ return 0;
+}
+
+/*
+ * Registers a function to be used to request a frequency value from
+ * the legacy VBIF based bus bandwidth governor. This function is
+ * called by the KGSL driver.
+ */
+void devfreq_vbif_register_callback(void *p)
+{
+ extern_get_bw = p;
+}
+
+int devfreq_vbif_update_bw(unsigned long ib, unsigned long ab)
+{
+ int ret = 0;
+
+ mutex_lock(&df_lock);
+ if (df) {
+ mutex_lock(&df->lock);
+ dev_ib = ib;
+		if (dev_ab)
+			*dev_ab = ab;
+ ret = update_devfreq(df);
+ mutex_unlock(&df->lock);
+ }
+ mutex_unlock(&df_lock);
+ return ret;
+}
+
+static int devfreq_vbif_ev_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ int ret;
+ struct devfreq_dev_status stat;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ mutex_lock(&df_lock);
+ df = devfreq;
+ if (df->profile->get_dev_status &&
+ !df->profile->get_dev_status(df->dev.parent, &stat) &&
+ stat.private_data)
+ dev_ab = stat.private_data;
+		else {
+			dev_ab = NULL;
+			pr_warn("Device doesn't take AB votes!\n");
+		}
+
+ mutex_unlock(&df_lock);
+
+ ret = devfreq_vbif_update_bw(0, 0);
+ if (ret) {
+ pr_err("Unable to update BW! Gov start failed!\n");
+ return ret;
+ }
+		/*
+		 * Normally at this point governors start polling with
+		 * devfreq_monitor_start(df). This governor doesn't poll;
+		 * instead, it expects external calls to its
+		 * devfreq_vbif_update_bw() function.
+		 */
+ pr_debug("Enabled MSM VBIF governor\n");
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ mutex_lock(&df_lock);
+ df = NULL;
+ mutex_unlock(&df_lock);
+
+ pr_debug("Disabled MSM VBIF governor\n");
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_vbif = {
+ .name = "bw_vbif",
+ .get_target_freq = devfreq_vbif_get_freq,
+ .event_handler = devfreq_vbif_ev_handler,
+};
+
+static int __init devfreq_vbif_init(void)
+{
+ return devfreq_add_governor(&devfreq_vbif);
+}
+subsys_initcall(devfreq_vbif_init);
+
+static void __exit devfreq_vbif_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_vbif);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+}
+module_exit(devfreq_vbif_exit);
+
+MODULE_DESCRIPTION("VBIF based GPU bus BW voting governor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_cache_hwmon.c b/drivers/devfreq/governor_cache_hwmon.c
new file mode 100644
index 000000000000..a2167e011a3d
--- /dev/null
+++ b/drivers/devfreq/governor_cache_hwmon.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cache-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include <trace/events/power.h>
+#include "governor.h"
+#include "governor_cache_hwmon.h"
+
+struct cache_hwmon_node {
+ unsigned int cycles_per_low_req;
+ unsigned int cycles_per_med_req;
+ unsigned int cycles_per_high_req;
+ unsigned int min_busy;
+ unsigned int max_busy;
+ unsigned int tolerance_mrps;
+ unsigned int guard_band_mhz;
+ unsigned int decay_rate;
+ unsigned long prev_mhz;
+ ktime_t prev_ts;
+ bool mon_started;
+ struct list_head list;
+ void *orig_data;
+ struct cache_hwmon *hw;
+ struct attribute_group *attr_grp;
+};
+
+static LIST_HEAD(cache_hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(register_lock);
+
+static DEFINE_MUTEX(monitor_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct cache_hwmon_node *hw = df->data; \
+ return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, \
+ size_t count) \
+{ \
+ int ret; \
+ unsigned int val; \
+ struct devfreq *df = to_devfreq(dev); \
+ struct cache_hwmon_node *hw = df->data; \
+ ret = sscanf(buf, "%u", &val); \
+ if (ret != 1) \
+ return -EINVAL; \
+ val = max(val, _min); \
+ val = min(val, _max); \
+ hw->name = val; \
+ return count; \
+}
+
+#define gov_attr(__attr, min, max) \
+show_attr(__attr) \
+store_attr(__attr, min, max) \
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+#define MIN_MS 10U
+#define MAX_MS 500U
+
+static struct cache_hwmon_node *find_hwmon_node(struct devfreq *df)
+{
+ struct cache_hwmon_node *node, *found = NULL;
+
+ mutex_lock(&list_lock);
+ list_for_each_entry(node, &cache_hwmon_list, list)
+ if (node->hw->dev == df->dev.parent ||
+ node->hw->of_node == df->dev.parent->of_node) {
+ found = node;
+ break;
+ }
+ mutex_unlock(&list_lock);
+
+ return found;
+}
+
+static unsigned long measure_mrps_and_set_irq(struct cache_hwmon_node *node,
+ struct mrps_stats *stat)
+{
+ ktime_t ts;
+ unsigned int us;
+ struct cache_hwmon *hw = node->hw;
+
+	/*
+	 * Since we are stopping the counters, we don't want this short
+	 * stretch of work to be preempted by other tasks, which would
+	 * skew the measurements. Interrupts are left enabled to avoid
+	 * hurting interrupt latency; interrupt handlers should be short
+	 * anyway, since they run in atomic context.
+	 */
+ preempt_disable();
+
+ ts = ktime_get();
+ us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+ if (!us)
+ us = 1;
+
+ hw->meas_mrps_and_set_irq(hw, node->tolerance_mrps, us, stat);
+ node->prev_ts = ts;
+
+ preempt_enable();
+
+ trace_cache_hwmon_meas(dev_name(hw->df->dev.parent), stat->mrps[HIGH],
+ stat->mrps[MED], stat->mrps[LOW],
+ stat->busy_percent, us);
+ return 0;
+}
+
+static void compute_cache_freq(struct cache_hwmon_node *node,
+ struct mrps_stats *mrps, unsigned long *freq)
+{
+ unsigned long new_mhz;
+ unsigned int busy;
+
+ new_mhz = mrps->mrps[HIGH] * node->cycles_per_high_req
+ + mrps->mrps[MED] * node->cycles_per_med_req
+ + mrps->mrps[LOW] * node->cycles_per_low_req;
+
+ busy = max(node->min_busy, mrps->busy_percent);
+ busy = min(node->max_busy, busy);
+
+ new_mhz *= 100;
+ new_mhz /= busy;
+
+ if (new_mhz < node->prev_mhz) {
+ new_mhz = new_mhz * node->decay_rate + node->prev_mhz
+ * (100 - node->decay_rate);
+ new_mhz /= 100;
+ }
+ node->prev_mhz = new_mhz;
+
+ new_mhz += node->guard_band_mhz;
+ *freq = new_mhz * 1000;
+ trace_cache_hwmon_update(dev_name(node->hw->df->dev.parent), *freq);
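+	/*
+	 * Worked example (illustrative numbers): mrps[HIGH] = 10 with
+	 * cycles_per_high_req = 35 needs 350 MHz worth of cycles; at
+	 * busy = 70 that becomes 350 * 100 / 70 = 500 MHz, and (assuming
+	 * no decay applies) guard_band_mhz = 100 yields a 600 MHz
+	 * (600000 kHz) vote.
+	 */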
+}
+
+#define TOO_SOON_US (1 * USEC_PER_MSEC)
+int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+ struct cache_hwmon_node *node;
+ struct devfreq *df;
+ ktime_t ts;
+ unsigned int us;
+ int ret;
+
+ if (!hwmon)
+ return -EINVAL;
+ df = hwmon->df;
+ if (!df)
+ return -ENODEV;
+ node = df->data;
+ if (!node)
+ return -ENODEV;
+
+ mutex_lock(&monitor_lock);
+ if (!node->mon_started) {
+ mutex_unlock(&monitor_lock);
+ return -EBUSY;
+ }
+
+ dev_dbg(df->dev.parent, "Got update request\n");
+ devfreq_monitor_stop(df);
+
+	/*
+	 * Don't recalculate the cache freq if the interrupt comes right
+	 * after a previous cache freq calculation. This is done for two
+	 * reasons:
+	 *
+	 * 1. Sampling the cache requests over a very short duration can
+	 *    result in a very inaccurate measurement due to short bursts.
+	 * 2. This can only happen if the limit was hit very close to the
+	 *    end of the previous sample period, which means the current
+	 *    cache request estimate is not far off and doesn't need to
+	 *    be readjusted.
+	 */
+ ts = ktime_get();
+ us = ktime_to_us(ktime_sub(ts, node->prev_ts));
+ if (us > TOO_SOON_US) {
+ mutex_lock(&df->lock);
+ ret = update_devfreq(df);
+ if (ret)
+ dev_err(df->dev.parent,
+ "Unable to update freq on request!\n");
+ mutex_unlock(&df->lock);
+ }
+
+ devfreq_monitor_start(df);
+
+ mutex_unlock(&monitor_lock);
+ return 0;
+}
+
+static int devfreq_cache_hwmon_get_freq(struct devfreq *df,
+ unsigned long *freq,
+ u32 *flag)
+{
+ struct mrps_stats stat;
+ struct cache_hwmon_node *node = df->data;
+
+ memset(&stat, 0, sizeof(stat));
+ measure_mrps_and_set_irq(node, &stat);
+ compute_cache_freq(node, &stat, freq);
+
+ return 0;
+}
+
+gov_attr(cycles_per_low_req, 1U, 100U);
+gov_attr(cycles_per_med_req, 1U, 100U);
+gov_attr(cycles_per_high_req, 1U, 100U);
+gov_attr(min_busy, 1U, 100U);
+gov_attr(max_busy, 1U, 100U);
+gov_attr(tolerance_mrps, 0U, 100U);
+gov_attr(guard_band_mhz, 0U, 500U);
+gov_attr(decay_rate, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+ &dev_attr_cycles_per_low_req.attr,
+ &dev_attr_cycles_per_med_req.attr,
+ &dev_attr_cycles_per_high_req.attr,
+ &dev_attr_min_busy.attr,
+ &dev_attr_max_busy.attr,
+ &dev_attr_tolerance_mrps.attr,
+ &dev_attr_guard_band_mhz.attr,
+ &dev_attr_decay_rate.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .name = "cache_hwmon",
+ .attrs = dev_attr,
+};
+
+static int start_monitoring(struct devfreq *df)
+{
+ int ret;
+ struct mrps_stats mrps;
+ struct device *dev = df->dev.parent;
+ struct cache_hwmon_node *node;
+ struct cache_hwmon *hw;
+
+ node = find_hwmon_node(df);
+ if (!node) {
+ dev_err(dev, "Unable to find HW monitor!\n");
+ return -ENODEV;
+ }
+ hw = node->hw;
+ hw->df = df;
+ node->orig_data = df->data;
+ df->data = node;
+
+ node->prev_ts = ktime_get();
+ node->prev_mhz = 0;
+ mrps.mrps[HIGH] = (df->previous_freq / 1000) - node->guard_band_mhz;
+ mrps.mrps[HIGH] /= node->cycles_per_high_req;
+ mrps.mrps[MED] = mrps.mrps[LOW] = 0;
+
+ ret = hw->start_hwmon(hw, &mrps);
+ if (ret) {
+ dev_err(dev, "Unable to start HW monitor!\n");
+ goto err_start;
+ }
+
+ mutex_lock(&monitor_lock);
+ devfreq_monitor_start(df);
+ node->mon_started = true;
+ mutex_unlock(&monitor_lock);
+
+ ret = sysfs_create_group(&df->dev.kobj, &dev_attr_group);
+ if (ret) {
+ dev_err(dev, "Error creating sys entries!\n");
+ goto sysfs_fail;
+ }
+
+ return 0;
+
+sysfs_fail:
+ mutex_lock(&monitor_lock);
+ node->mon_started = false;
+ devfreq_monitor_stop(df);
+ mutex_unlock(&monitor_lock);
+ hw->stop_hwmon(hw);
+err_start:
+ df->data = node->orig_data;
+ node->orig_data = NULL;
+ hw->df = NULL;
+ return ret;
+}
+
+static void stop_monitoring(struct devfreq *df)
+{
+ struct cache_hwmon_node *node = df->data;
+ struct cache_hwmon *hw = node->hw;
+
+ sysfs_remove_group(&df->dev.kobj, &dev_attr_group);
+ mutex_lock(&monitor_lock);
+ node->mon_started = false;
+ devfreq_monitor_stop(df);
+ mutex_unlock(&monitor_lock);
+ hw->stop_hwmon(hw);
+ df->data = node->orig_data;
+ node->orig_data = NULL;
+ hw->df = NULL;
+}
+
+static int devfreq_cache_hwmon_ev_handler(struct devfreq *df,
+ unsigned int event, void *data)
+{
+ int ret;
+ unsigned int sample_ms;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ sample_ms = df->profile->polling_ms;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ df->profile->polling_ms = sample_ms;
+
+ ret = start_monitoring(df);
+ if (ret)
+ return ret;
+
+ dev_dbg(df->dev.parent, "Enabled Cache HW monitor governor\n");
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ stop_monitoring(df);
+ dev_dbg(df->dev.parent, "Disabled Cache HW monitor governor\n");
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ sample_ms = *(unsigned int *)data;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ devfreq_interval_update(df, &sample_ms);
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_cache_hwmon = {
+ .name = "cache_hwmon",
+ .get_target_freq = devfreq_cache_hwmon_get_freq,
+ .event_handler = devfreq_cache_hwmon_ev_handler,
+};
+
+int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon)
+{
+ int ret = 0;
+ struct cache_hwmon_node *node;
+
+ if (!hwmon->dev && !hwmon->of_node)
+ return -EINVAL;
+
+ node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ dev_err(dev, "Unable to register gov. Out of memory!\n");
+ return -ENOMEM;
+ }
+
+ node->cycles_per_med_req = 20;
+ node->cycles_per_high_req = 35;
+ node->min_busy = 100;
+ node->max_busy = 100;
+ node->tolerance_mrps = 5;
+ node->guard_band_mhz = 100;
+ node->decay_rate = 90;
+ node->hw = hwmon;
+ node->attr_grp = &dev_attr_group;
+
+ mutex_lock(&register_lock);
+ if (!use_cnt) {
+ ret = devfreq_add_governor(&devfreq_cache_hwmon);
+ if (!ret)
+ use_cnt++;
+ }
+ mutex_unlock(&register_lock);
+
+ if (!ret) {
+ dev_info(dev, "Cache HWmon governor registered.\n");
+ } else {
+ dev_err(dev, "Failed to add Cache HWmon governor\n");
+ return ret;
+ }
+
+ mutex_lock(&list_lock);
+ list_add_tail(&node->list, &cache_hwmon_list);
+ mutex_unlock(&list_lock);
+
+ return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based cache freq driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_cache_hwmon.h b/drivers/devfreq/governor_cache_hwmon.h
new file mode 100644
index 000000000000..01b5a7556eac
--- /dev/null
+++ b/drivers/devfreq/governor_cache_hwmon.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_CACHE_HWMON_H
+#define _GOVERNOR_CACHE_HWMON_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+enum request_group {
+ HIGH,
+ MED,
+ LOW,
+ MAX_NUM_GROUPS,
+};
+
+struct mrps_stats {
+ unsigned long mrps[MAX_NUM_GROUPS];
+ unsigned int busy_percent;
+};
+
+/**
+ * struct cache_hwmon - devfreq Cache HW monitor info
+ * @start_hwmon: Start the HW monitoring
+ * @stop_hwmon: Stop the HW monitoring
+ * @meas_mrps_and_set_irq: Return the measured count and set up the
+ * IRQ to fire if usage exceeds current
+ * measurement by @tol percent.
+ * @dev: device that this HW monitor can monitor.
+ * @of_node: OF node of device that this HW monitor can monitor.
+ * @df:			Devfreq node that this HW monitor is being used
+ * for. NULL when not actively in use, and non-NULL
+ * when in use.
+ */
+struct cache_hwmon {
+ int (*start_hwmon)(struct cache_hwmon *hw, struct mrps_stats *mrps);
+ void (*stop_hwmon)(struct cache_hwmon *hw);
+ unsigned long (*meas_mrps_and_set_irq)(struct cache_hwmon *hw,
+ unsigned int tol, unsigned int us,
+ struct mrps_stats *mrps);
+ struct device *dev;
+ struct device_node *of_node;
+ struct devfreq *df;
+};
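+
+/*
+ * Illustrative registration sketch (the my_* callback names are
+ * hypothetical):
+ *
+ *	static struct cache_hwmon my_hwmon = {
+ *		.start_hwmon = my_start_hwmon,
+ *		.stop_hwmon = my_stop_hwmon,
+ *		.meas_mrps_and_set_irq = my_meas_mrps_and_set_irq,
+ *	};
+ *
+ *	my_hwmon.of_node = pdev->dev.of_node;
+ *	ret = register_cache_hwmon(&pdev->dev, &my_hwmon);
+ */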
+
+#ifdef CONFIG_DEVFREQ_GOV_QCOM_CACHE_HWMON
+int register_cache_hwmon(struct device *dev, struct cache_hwmon *hwmon);
+int update_cache_hwmon(struct cache_hwmon *hwmon);
+#else
+static inline int register_cache_hwmon(struct device *dev,
+ struct cache_hwmon *hwmon)
+{
+ return 0;
+}
+static inline int update_cache_hwmon(struct cache_hwmon *hwmon)
+{
+ return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_CACHE_HWMON_H */
diff --git a/drivers/devfreq/governor_cpufreq.c b/drivers/devfreq/governor_cpufreq.c
new file mode 100644
index 000000000000..97c200b18ae2
--- /dev/null
+++ b/drivers/devfreq/governor_cpufreq.c
@@ -0,0 +1,712 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dev-cpufreq: " fmt
+
+#include <linux/devfreq.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include "governor.h"
+
+struct cpu_state {
+ unsigned int freq;
+ unsigned int min_freq;
+ unsigned int max_freq;
+ bool on;
+ unsigned int first_cpu;
+};
+static struct cpu_state *state[NR_CPUS];
+static int cpufreq_cnt;
+
+struct freq_map {
+ unsigned int cpu_khz;
+ unsigned int target_freq;
+};
+
+struct devfreq_node {
+ struct devfreq *df;
+ void *orig_data;
+ struct device *dev;
+ struct device_node *of_node;
+ struct list_head list;
+ struct freq_map **map;
+ struct freq_map *common_map;
+ unsigned int timeout;
+ struct delayed_work dwork;
+ bool drop;
+ unsigned long prev_tgt;
+};
+static LIST_HEAD(devfreq_list);
+static DEFINE_MUTEX(state_lock);
+static DEFINE_MUTEX(cpufreq_reg_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct devfreq_node *n = df->data; \
+ return snprintf(buf, PAGE_SIZE, "%u\n", n->name); \
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, \
+ size_t count) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct devfreq_node *n = df->data; \
+ int ret; \
+ unsigned int val; \
+ ret = sscanf(buf, "%u", &val); \
+ if (ret != 1) \
+ return -EINVAL; \
+ val = max(val, _min); \
+ val = min(val, _max); \
+ n->name = val; \
+ return count; \
+}
+
+#define gov_attr(__attr, min, max) \
+show_attr(__attr) \
+store_attr(__attr, min, max) \
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+static int update_node(struct devfreq_node *node)
+{
+ int ret;
+ struct devfreq *df = node->df;
+
+ if (!df)
+ return 0;
+
+ cancel_delayed_work_sync(&node->dwork);
+
+ mutex_lock(&df->lock);
+ node->drop = false;
+ ret = update_devfreq(df);
+ if (ret) {
+ dev_err(df->dev.parent, "Unable to update frequency\n");
+ goto out;
+ }
+
+ if (!node->timeout)
+ goto out;
+
+ if (df->previous_freq <= df->min_freq)
+ goto out;
+
+ schedule_delayed_work(&node->dwork,
+ msecs_to_jiffies(node->timeout));
+out:
+ mutex_unlock(&df->lock);
+ return ret;
+}
+
+static void update_all_devfreqs(void)
+{
+ struct devfreq_node *node;
+
+ list_for_each_entry(node, &devfreq_list, list) {
+ update_node(node);
+ }
+}
+
+static void do_timeout(struct work_struct *work)
+{
+ struct devfreq_node *node = container_of(to_delayed_work(work),
+ struct devfreq_node, dwork);
+ struct devfreq *df = node->df;
+
+ mutex_lock(&df->lock);
+ node->drop = true;
+ update_devfreq(df);
+ mutex_unlock(&df->lock);
+}
+
+static struct devfreq_node *find_devfreq_node(struct device *dev)
+{
+ struct devfreq_node *node;
+
+ list_for_each_entry(node, &devfreq_list, list)
+ if (node->dev == dev || node->of_node == dev->of_node)
+ return node;
+
+ return NULL;
+}
+
+/* ==================== cpufreq part ==================== */
+static void add_policy(struct cpufreq_policy *policy)
+{
+ struct cpu_state *new_state;
+ unsigned int cpu, first_cpu;
+
+ if (state[policy->cpu]) {
+ state[policy->cpu]->freq = policy->cur;
+ state[policy->cpu]->on = true;
+ } else {
+ new_state = kzalloc(sizeof(struct cpu_state), GFP_KERNEL);
+ if (!new_state)
+ return;
+
+ first_cpu = cpumask_first(policy->related_cpus);
+ new_state->first_cpu = first_cpu;
+ new_state->freq = policy->cur;
+ new_state->min_freq = policy->cpuinfo.min_freq;
+ new_state->max_freq = policy->cpuinfo.max_freq;
+ new_state->on = true;
+
+ for_each_cpu(cpu, policy->related_cpus)
+ state[cpu] = new_state;
+ }
+}
+
+static int cpufreq_policy_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_policy *policy = data;
+
+ switch (event) {
+ case CPUFREQ_CREATE_POLICY:
+ mutex_lock(&state_lock);
+ add_policy(policy);
+ update_all_devfreqs();
+ mutex_unlock(&state_lock);
+ break;
+
+ case CPUFREQ_REMOVE_POLICY:
+ mutex_lock(&state_lock);
+ if (state[policy->cpu]) {
+ state[policy->cpu]->on = false;
+ update_all_devfreqs();
+ }
+ mutex_unlock(&state_lock);
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block cpufreq_policy_nb = {
+ .notifier_call = cpufreq_policy_notifier
+};
+
+static int cpufreq_trans_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpu_state *s;
+
+ if (event != CPUFREQ_POSTCHANGE)
+ return 0;
+
+ mutex_lock(&state_lock);
+
+ s = state[freq->cpu];
+ if (!s)
+ goto out;
+
+ if (s->freq != freq->new) {
+ s->freq = freq->new;
+ update_all_devfreqs();
+ }
+
+out:
+ mutex_unlock(&state_lock);
+ return 0;
+}
+
+static struct notifier_block cpufreq_trans_nb = {
+ .notifier_call = cpufreq_trans_notifier
+};
+
+static int register_cpufreq(void)
+{
+ int ret = 0;
+ unsigned int cpu;
+ struct cpufreq_policy *policy;
+
+ mutex_lock(&cpufreq_reg_lock);
+
+ if (cpufreq_cnt)
+ goto cnt_not_zero;
+
+ get_online_cpus();
+ ret = cpufreq_register_notifier(&cpufreq_policy_nb,
+ CPUFREQ_POLICY_NOTIFIER);
+ if (ret)
+ goto out;
+
+ ret = cpufreq_register_notifier(&cpufreq_trans_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ if (ret) {
+ cpufreq_unregister_notifier(&cpufreq_policy_nb,
+ CPUFREQ_POLICY_NOTIFIER);
+ goto out;
+ }
+
+ for_each_online_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (policy) {
+ add_policy(policy);
+ cpufreq_cpu_put(policy);
+ }
+ }
+out:
+ put_online_cpus();
+cnt_not_zero:
+ if (!ret)
+ cpufreq_cnt++;
+ mutex_unlock(&cpufreq_reg_lock);
+ return ret;
+}
+
+static int unregister_cpufreq(void)
+{
+ int ret = 0;
+ int cpu;
+
+ mutex_lock(&cpufreq_reg_lock);
+
+ if (cpufreq_cnt > 1)
+ goto out;
+
+ cpufreq_unregister_notifier(&cpufreq_policy_nb,
+ CPUFREQ_POLICY_NOTIFIER);
+ cpufreq_unregister_notifier(&cpufreq_trans_nb,
+ CPUFREQ_TRANSITION_NOTIFIER);
+
+ for (cpu = ARRAY_SIZE(state) - 1; cpu >= 0; cpu--) {
+ if (!state[cpu])
+ continue;
+ if (state[cpu]->first_cpu == cpu)
+ kfree(state[cpu]);
+ state[cpu] = NULL;
+ }
+
+out:
+ cpufreq_cnt--;
+ mutex_unlock(&cpufreq_reg_lock);
+ return ret;
+}
+
+/* ==================== devfreq part ==================== */
+
+static unsigned int interpolate_freq(struct devfreq *df, unsigned int cpu)
+{
+ unsigned int *freq_table = df->profile->freq_table;
+ unsigned int cpu_min = state[cpu]->min_freq;
+ unsigned int cpu_max = state[cpu]->max_freq;
+ unsigned int cpu_freq = state[cpu]->freq;
+ unsigned int dev_min, dev_max, cpu_percent;
+
+ if (freq_table) {
+ dev_min = freq_table[0];
+ dev_max = freq_table[df->profile->max_state - 1];
+ } else {
+ if (df->max_freq <= df->min_freq)
+ return 0;
+ dev_min = df->min_freq;
+ dev_max = df->max_freq;
+ }
+
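+	/*
+	 * Worked example (illustrative numbers): cpu_min = 300000,
+	 * cpu_max = 1500000 and cpu_freq = 900000 gives cpu_percent = 50,
+	 * so the device is voted to the halfway point between dev_min and
+	 * dev_max.
+	 */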
+ cpu_percent = ((cpu_freq - cpu_min) * 100) / (cpu_max - cpu_min);
+ return dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);
+}
+
+static unsigned int cpu_to_dev_freq(struct devfreq *df, unsigned int cpu)
+{
+ struct freq_map *map = NULL;
+ unsigned int cpu_khz = 0, freq;
+ struct devfreq_node *n = df->data;
+
+ if (!state[cpu] || !state[cpu]->on || state[cpu]->first_cpu != cpu) {
+ freq = 0;
+ goto out;
+ }
+
+ if (n->common_map)
+ map = n->common_map;
+ else if (n->map)
+ map = n->map[cpu];
+
+ cpu_khz = state[cpu]->freq;
+
+ if (!map) {
+ freq = interpolate_freq(df, cpu);
+ goto out;
+ }
+
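+	/*
+	 * Example (illustrative table): for {300000 -> 100, 900000 -> 200,
+	 * 0} and cpu_khz = 600000, the walk below stops at the 900000 entry
+	 * and picks 200; for any cpu_khz above 900000 it hits the
+	 * terminating entry and steps back to the last real one.
+	 */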
+ while (map->cpu_khz && map->cpu_khz < cpu_khz)
+ map++;
+ if (!map->cpu_khz)
+ map--;
+ freq = map->target_freq;
+
+out:
+ dev_dbg(df->dev.parent, "CPU%u: %d -> dev: %u\n", cpu, cpu_khz, freq);
+ return freq;
+}
+
+static int devfreq_cpufreq_get_freq(struct devfreq *df,
+ unsigned long *freq,
+ u32 *flag)
+{
+ unsigned int cpu, tgt_freq = 0;
+ struct devfreq_node *node;
+
+ node = df->data;
+ if (!node) {
+ pr_err("Unable to find devfreq node!\n");
+ return -ENODEV;
+ }
+
+ if (node->drop) {
+ *freq = 0;
+ return 0;
+ }
+
+ for_each_possible_cpu(cpu)
+ tgt_freq = max(tgt_freq, cpu_to_dev_freq(df, cpu));
+
+ if (node->timeout && tgt_freq < node->prev_tgt)
+ *freq = 0;
+ else
+ *freq = tgt_freq;
+
+ node->prev_tgt = tgt_freq;
+
+ return 0;
+}
+
+static unsigned int show_table(char *buf, unsigned int len,
+ struct freq_map *map)
+{
+ unsigned int cnt = 0;
+
+ cnt += snprintf(buf + cnt, len - cnt, "CPU freq\tDevice freq\n");
+
+ while (map->cpu_khz && cnt < len) {
+ cnt += snprintf(buf + cnt, len - cnt, "%8u\t%11u\n",
+ map->cpu_khz, map->target_freq);
+ map++;
+ }
+ if (cnt < len)
+ cnt += snprintf(buf + cnt, len - cnt, "\n");
+
+ return cnt;
+}
+
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
+ struct devfreq_node *n = df->data;
+ struct freq_map *map;
+ unsigned int cnt = 0, cpu;
+
+ mutex_lock(&state_lock);
+ if (n->common_map) {
+ map = n->common_map;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "Common table for all CPUs:\n");
+ cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+ } else if (n->map) {
+ for_each_possible_cpu(cpu) {
+ map = n->map[cpu];
+ if (!map)
+ continue;
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "CPU %u:\n", cpu);
+ if (cnt >= PAGE_SIZE)
+ break;
+ cnt += show_table(buf + cnt, PAGE_SIZE - cnt, map);
+ if (cnt >= PAGE_SIZE)
+ break;
+ }
+ } else {
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "Device freq interpolated based on CPU freq\n");
+ }
+ mutex_unlock(&state_lock);
+
+ return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+gov_attr(timeout, 0U, 100U);
+
+static struct attribute *dev_attr[] = {
+ &dev_attr_freq_map.attr,
+ &dev_attr_timeout.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .name = "cpufreq",
+ .attrs = dev_attr,
+};
+
+static int devfreq_cpufreq_gov_start(struct devfreq *devfreq)
+{
+ int ret = 0;
+ struct devfreq_node *node;
+ bool alloc = false;
+
+ ret = register_cpufreq();
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_group(&devfreq->dev.kobj, &dev_attr_group);
+ if (ret) {
+ unregister_cpufreq();
+ return ret;
+ }
+
+ mutex_lock(&state_lock);
+
+ node = find_devfreq_node(devfreq->dev.parent);
+ if (node == NULL) {
+ node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+ if (!node) {
+ pr_err("Out of memory!\n");
+ ret = -ENOMEM;
+ goto alloc_fail;
+ }
+ alloc = true;
+ node->dev = devfreq->dev.parent;
+ list_add_tail(&node->list, &devfreq_list);
+ }
+
+ INIT_DELAYED_WORK(&node->dwork, do_timeout);
+
+ node->df = devfreq;
+ node->orig_data = devfreq->data;
+ devfreq->data = node;
+
+ ret = update_node(node);
+ if (ret)
+ goto update_fail;
+
+ mutex_unlock(&state_lock);
+ return 0;
+
+update_fail:
+ devfreq->data = node->orig_data;
+ if (alloc) {
+ list_del(&node->list);
+ kfree(node);
+ }
+alloc_fail:
+ mutex_unlock(&state_lock);
+ sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+ unregister_cpufreq();
+ return ret;
+}
+
+static void devfreq_cpufreq_gov_stop(struct devfreq *devfreq)
+{
+ struct devfreq_node *node = devfreq->data;
+
+ cancel_delayed_work_sync(&node->dwork);
+
+ mutex_lock(&state_lock);
+ devfreq->data = node->orig_data;
+ if (node->map || node->common_map) {
+ node->df = NULL;
+ } else {
+ list_del(&node->list);
+ kfree(node);
+ }
+ mutex_unlock(&state_lock);
+
+ sysfs_remove_group(&devfreq->dev.kobj, &dev_attr_group);
+ unregister_cpufreq();
+}
+
+static int devfreq_cpufreq_ev_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ int ret;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+
+ ret = devfreq_cpufreq_gov_start(devfreq);
+ if (ret) {
+ pr_err("Governor start failed!\n");
+ return ret;
+ }
+ pr_debug("Enabled dev CPUfreq governor\n");
+ break;
+
+ case DEVFREQ_GOV_STOP:
+
+ devfreq_cpufreq_gov_stop(devfreq);
+ pr_debug("Disabled dev CPUfreq governor\n");
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_cpufreq = {
+ .name = "cpufreq",
+ .get_target_freq = devfreq_cpufreq_get_freq,
+ .event_handler = devfreq_cpufreq_ev_handler,
+};
+
+#define NUM_COLS 2
+static struct freq_map *read_tbl(struct device_node *of_node, char *prop_name)
+{
+ int len, nf, i, j;
+ u32 data;
+ struct freq_map *tbl;
+
+ if (!of_find_property(of_node, prop_name, &len))
+ return NULL;
+ len /= sizeof(data);
+
+ if (len % NUM_COLS || len == 0)
+ return NULL;
+ nf = len / NUM_COLS;
+
+ tbl = kzalloc((nf + 1) * sizeof(*tbl), GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ for (i = 0, j = 0; i < nf; i++, j += 2) {
+ of_property_read_u32_index(of_node, prop_name, j, &data);
+ tbl[i].cpu_khz = data;
+
+ of_property_read_u32_index(of_node, prop_name, j + 1, &data);
+ tbl[i].target_freq = data;
+ }
+ tbl[i].cpu_khz = 0;
+
+ return tbl;
+}
+
+#define PROP_TARGET "target-dev"
+#define PROP_TABLE "cpu-to-dev-map"
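+/*
+ * Illustrative DT snippet (node names and values are hypothetical)
+ * matching the parsing below:
+ *
+ *	devfreq-cpufreq {
+ *		cache-map {
+ *			target-dev = <&cache>;
+ *			cpu-to-dev-map =
+ *				< 300000 300000 >,
+ *				< 1500000 600000 >;
+ *		};
+ *	};
+ */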
+static int add_table_from_of(struct device_node *of_node)
+{
+ struct device_node *target_of_node;
+ struct devfreq_node *node;
+ struct freq_map *common_tbl;
+ struct freq_map **tbl_list = NULL;
+ static char prop_name[] = PROP_TABLE "-999999";
+ int cpu, ret, cnt = 0, prop_sz = ARRAY_SIZE(prop_name);
+
+ target_of_node = of_parse_phandle(of_node, PROP_TARGET, 0);
+ if (!target_of_node)
+ return -EINVAL;
+
+ node = kzalloc(sizeof(struct devfreq_node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ common_tbl = read_tbl(of_node, PROP_TABLE);
+ if (!common_tbl) {
+		tbl_list = kzalloc(sizeof(*tbl_list) * NR_CPUS, GFP_KERNEL);
+		if (!tbl_list) {
+			kfree(node);
+			return -ENOMEM;
+		}
+
+ for_each_possible_cpu(cpu) {
+ ret = snprintf(prop_name, prop_sz, "%s-%d",
+ PROP_TABLE, cpu);
+ if (ret >= prop_sz) {
+ pr_warn("More CPUs than I can handle!\n");
+ pr_warn("Skipping rest of the tables!\n");
+ break;
+ }
+ tbl_list[cpu] = read_tbl(of_node, prop_name);
+ if (tbl_list[cpu])
+ cnt++;
+ }
+ }
+	if (!common_tbl && !cnt) {
+		kfree(tbl_list);
+		kfree(node);
+		return -EINVAL;
+	}
+
+ mutex_lock(&state_lock);
+ node->of_node = target_of_node;
+ node->map = tbl_list;
+ node->common_map = common_tbl;
+ list_add_tail(&node->list, &devfreq_list);
+ mutex_unlock(&state_lock);
+
+ return 0;
+}
+
+static int __init devfreq_cpufreq_init(void)
+{
+ int ret;
+ struct device_node *of_par, *of_child;
+
+ of_par = of_find_node_by_name(NULL, "devfreq-cpufreq");
+ if (of_par) {
+ for_each_child_of_node(of_par, of_child) {
+ ret = add_table_from_of(of_child);
+ if (ret)
+ pr_err("Parsing %s failed!\n", of_child->name);
+ else
+ pr_debug("Parsed %s.\n", of_child->name);
+ }
+ of_node_put(of_par);
+ } else {
+ pr_info("No tables parsed from DT.\n");
+ }
+
+ ret = devfreq_add_governor(&devfreq_cpufreq);
+ if (ret) {
+ pr_err("Governor add failed!\n");
+ return ret;
+ }
+
+ return 0;
+}
+subsys_initcall(devfreq_cpufreq_init);
+
+static void __exit devfreq_cpufreq_exit(void)
+{
+ int ret, cpu;
+ struct devfreq_node *node, *tmp;
+
+ ret = devfreq_remove_governor(&devfreq_cpufreq);
+ if (ret)
+ pr_err("Governor remove failed!\n");
+
+ mutex_lock(&state_lock);
+ list_for_each_entry_safe(node, tmp, &devfreq_list, list) {
+		kfree(node->common_map);
+		if (node->map) {
+			for_each_possible_cpu(cpu)
+				kfree(node->map[cpu]);
+			kfree(node->map);
+		}
+ list_del(&node->list);
+ kfree(node);
+ }
+ mutex_unlock(&state_lock);
+}
+module_exit(devfreq_cpufreq_exit);
+
+MODULE_DESCRIPTION("CPU freq based generic governor for devfreq devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
new file mode 100644
index 000000000000..8234d30dc644
--- /dev/null
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -0,0 +1,255 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include <linux/msm_adreno_devfreq.h>
+#include <linux/slab.h>
+
+#include "devfreq_trace.h"
+#include "governor.h"
+
+#define MIN_BUSY 1000
+#define LONG_FLOOR 50000
+#define HIST 5
+#define TARGET 80
+#define CAP 75
+/* The AB vote is in multiples of BW_STEP megabytes */
+#define BW_STEP 160
+
+static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv,
+ unsigned int norm_max)
+{
+ int i;
+
+ priv->bus.max = norm_max;
+ for (i = 0; i < priv->bus.num; i++) {
+ priv->bus.up[i] = priv->bus.p_up[i] * norm_max / 100;
+ priv->bus.down[i] = priv->bus.p_down[i] * norm_max / 100;
+ }
+}
+
+static int devfreq_gpubw_get_target(struct devfreq *df,
+ unsigned long *freq,
+ u32 *flag)
+{
+
+ struct devfreq_msm_adreno_tz_data *priv = df->data;
+ struct msm_busmon_extended_profile *bus_profile = container_of(
+ (df->profile),
+ struct msm_busmon_extended_profile,
+ profile);
+ struct devfreq_dev_status stats;
+ struct xstats b;
+ int result;
+ int level = 0;
+ int act_level;
+ int norm_cycles;
+ int gpu_percent;
+ /*
+ * Normalized AB should at max usage be the gpu_bimc frequency in MHz.
+ * Start with a reasonable value and let the system push it up to max.
+ */
+ static int norm_ab_max = 300;
+ int norm_ab;
+ unsigned long ab_mbytes = 0;
+
+ if (priv == NULL)
+ return 0;
+
+ stats.private_data = &b;
+
+ result = df->profile->get_dev_status(df->dev.parent, &stats);
+
+ *freq = stats.current_frequency;
+
+ priv->bus.total_time += stats.total_time;
+ priv->bus.gpu_time += stats.busy_time;
+ priv->bus.ram_time += b.ram_time;
+ priv->bus.ram_wait += b.ram_wait;
+
+ level = devfreq_get_freq_level(df, stats.current_frequency);
+
+ if (priv->bus.total_time < LONG_FLOOR)
+ return result;
+
+ norm_cycles = (unsigned int)(priv->bus.ram_time + priv->bus.ram_wait) /
+ (unsigned int) priv->bus.total_time;
+ gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
+ (unsigned int) priv->bus.total_time;
+
+ /*
+ * If there's a new high watermark, update the cutoffs and send the
+ * FAST hint. Otherwise check the current value against the current
+ * cutoffs.
+ */
+ if (norm_cycles > priv->bus.max) {
+ _update_cutoff(priv, norm_cycles);
+ bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
+ } else {
+		/* The GPU votes for IB, not AB, so don't under-vote the system */
+ norm_cycles = (100 * norm_cycles) / TARGET;
+ act_level = priv->bus.index[level] + b.mod;
+ act_level = (act_level < 0) ? 0 : act_level;
+ act_level = (act_level >= priv->bus.num) ?
+ (priv->bus.num - 1) : act_level;
+ if (norm_cycles > priv->bus.up[act_level] &&
+ gpu_percent > CAP)
+ bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
+ else if (norm_cycles < priv->bus.down[act_level] && level)
+ bus_profile->flag = DEVFREQ_FLAG_SLOW_HINT;
+ }
+
+ /* Calculate the AB vote based on bus width if defined */
+ if (priv->bus.width) {
+ norm_ab = (unsigned int)priv->bus.ram_time /
+ (unsigned int) priv->bus.total_time;
+		/* Calculate AB in megabytes and round up to a multiple of BW_STEP */
+ ab_mbytes = (norm_ab * priv->bus.width * 1000000ULL) >> 20;
+ bus_profile->ab_mbytes = roundup(ab_mbytes, BW_STEP);
+ } else if (bus_profile->flag) {
+ /* Re-calculate the AB percentage for a new IB vote */
+ norm_ab = (unsigned int)priv->bus.ram_time /
+ (unsigned int) priv->bus.total_time;
+ if (norm_ab > norm_ab_max)
+ norm_ab_max = norm_ab;
+ bus_profile->percent_ab = (100 * norm_ab) / norm_ab_max;
+ }
+
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ priv->bus.ram_wait = 0;
+
+ return result;
+}
+
+static int gpubw_start(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv;
+
+ struct msm_busmon_extended_profile *bus_profile = container_of(
+ (devfreq->profile),
+ struct msm_busmon_extended_profile,
+ profile);
+ unsigned int t1, t2 = 2 * HIST;
+ int i, bus_size;
+
+
+ devfreq->data = bus_profile->private_data;
+ priv = devfreq->data;
+
+ bus_size = sizeof(u32) * priv->bus.num;
+ priv->bus.up = kzalloc(bus_size, GFP_KERNEL);
+ priv->bus.down = kzalloc(bus_size, GFP_KERNEL);
+ priv->bus.p_up = kzalloc(bus_size, GFP_KERNEL);
+ priv->bus.p_down = kzalloc(bus_size, GFP_KERNEL);
+ if (priv->bus.up == NULL || priv->bus.down == NULL ||
+ priv->bus.p_up == NULL || priv->bus.p_down == NULL)
+ return -ENOMEM;
+
+ /* Set up the cut-over percentages for the bus calculation. */
+ for (i = 0; i < priv->bus.num; i++) {
+ t1 = (u32)(100 * priv->bus.ib[i]) /
+ (u32)priv->bus.ib[priv->bus.num - 1];
+ priv->bus.p_up[i] = t1 - HIST;
+ priv->bus.p_down[i] = t2 - 2 * HIST;
+ t2 = t1;
+ }
+ /* Set the upper-most and lower-most bounds correctly. */
+ priv->bus.p_down[0] = 0;
+	if (priv->bus.num >= 2)
+		priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
+			priv->bus.p_down[1] : (2 * HIST);
+ if (priv->bus.num >= 1)
+ priv->bus.p_up[priv->bus.num - 1] = 100;
+ _update_cutoff(priv, priv->bus.max);
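+	/*
+	 * Worked example (illustrative numbers): for ib = {100, 200, 400}
+	 * the loop computes t1 = 25, 50, 100, giving p_up = {20, 45, 95}
+	 * and p_down = {0, 15, 40}; the bounds fix-ups above then force
+	 * p_down[0] = 0 and p_up[2] = 100.
+	 */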
+
+ return 0;
+}
+
+static int gpubw_stop(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ if (priv) {
+ kfree(priv->bus.up);
+ kfree(priv->bus.down);
+ kfree(priv->bus.p_up);
+ kfree(priv->bus.p_down);
+ }
+ devfreq->data = NULL;
+ return 0;
+}
+
+static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
+ unsigned int event, void *data)
+{
+ int result = 0;
+ unsigned long freq;
+
+ mutex_lock(&devfreq->lock);
+ freq = devfreq->previous_freq;
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ result = gpubw_start(devfreq);
+ break;
+ case DEVFREQ_GOV_STOP:
+ result = gpubw_stop(devfreq);
+ break;
+ case DEVFREQ_GOV_RESUME:
+ /* TODO ..... */
+ /* ret = update_devfreq(devfreq); */
+ break;
+ case DEVFREQ_GOV_SUSPEND:
+ {
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ }
+ break;
+ default:
+ result = 0;
+ break;
+ }
+ mutex_unlock(&devfreq->lock);
+ return result;
+}
+
+static struct devfreq_governor devfreq_gpubw = {
+ .name = "gpubw_mon",
+ .get_target_freq = devfreq_gpubw_get_target,
+ .event_handler = devfreq_gpubw_event_handler,
+};
+
+static int __init devfreq_gpubw_init(void)
+{
+ return devfreq_add_governor(&devfreq_gpubw);
+}
+subsys_initcall(devfreq_gpubw_init);
+
+static void __exit devfreq_gpubw_exit(void)
+{
+ int ret;
+
+ ret = devfreq_remove_governor(&devfreq_gpubw);
+ if (ret)
+ pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+}
+module_exit(devfreq_gpubw_exit);
+
+MODULE_DESCRIPTION("GPU bus bandwidth voting driver. Uses VBIF counters");
+MODULE_LICENSE("GPL v2");
+
diff --git a/drivers/devfreq/governor_memlat.c b/drivers/devfreq/governor_memlat.c
new file mode 100644
index 000000000000..a3c826e152e1
--- /dev/null
+++ b/drivers/devfreq/governor_memlat.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "mem_lat: " fmt
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ktime.h>
+#include <linux/time.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/devfreq.h>
+#include "governor.h"
+#include "governor_memlat.h"
+
+#include <trace/events/power.h>
+
+struct memlat_node {
+ unsigned int ratio_ceil;
+ bool mon_started;
+ struct list_head list;
+ void *orig_data;
+ struct memlat_hwmon *hw;
+ struct devfreq_governor *gov;
+ struct attribute_group *attr_grp;
+};
+
+static LIST_HEAD(memlat_list);
+static DEFINE_MUTEX(list_lock);
+
+static int use_cnt;
+static DEFINE_MUTEX(state_lock);
+
+#define show_attr(name) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct memlat_node *hw = df->data; \
+ return snprintf(buf, PAGE_SIZE, "%u\n", hw->name); \
+}
+
+#define store_attr(name, _min, _max) \
+static ssize_t store_##name(struct device *dev, \
+ struct device_attribute *attr, const char *buf, \
+ size_t count) \
+{ \
+ struct devfreq *df = to_devfreq(dev); \
+ struct memlat_node *hw = df->data; \
+ int ret; \
+ unsigned int val; \
+ ret = kstrtouint(buf, 10, &val); \
+ if (ret) \
+ return ret; \
+ val = max(val, _min); \
+ val = min(val, _max); \
+ hw->name = val; \
+ return count; \
+}
+
+#define gov_attr(__attr, min, max) \
+show_attr(__attr) \
+store_attr(__attr, min, max) \
+static DEVICE_ATTR(__attr, 0644, show_##__attr, store_##__attr)
+
+static ssize_t show_map(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct devfreq *df = to_devfreq(dev);
+ struct memlat_node *n = df->data;
+ struct core_dev_map *map = n->hw->freq_map;
+ unsigned int cnt = 0;
+
+ cnt += snprintf(buf, PAGE_SIZE, "Core freq (MHz)\tDevice BW\n");
+
+ while (map->core_mhz && cnt < PAGE_SIZE) {
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "%15u\t%9u\n",
+ map->core_mhz, map->target_freq);
+ map++;
+ }
+ if (cnt < PAGE_SIZE)
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+
+ return cnt;
+}
+
+static DEVICE_ATTR(freq_map, 0444, show_map, NULL);
+
+static unsigned long core_to_dev_freq(struct memlat_node *node,
+ unsigned long coref)
+{
+ struct memlat_hwmon *hw = node->hw;
+ struct core_dev_map *map = hw->freq_map;
+ unsigned long freq = 0;
+
+ if (!map)
+ goto out;
+
+ while (map->core_mhz && map->core_mhz < coref)
+ map++;
+ if (!map->core_mhz)
+ map--;
+ freq = map->target_freq;
+
+out:
+ pr_debug("freq: %lu -> dev: %lu\n", coref, freq);
+ return freq;
+}
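+
+/*
+ * Example (hypothetical table): with freq_map = {{1000, 762},
+ * {2000, 1525}, {0, 0}}, a core frequency of 1500 MHz stops at the
+ * first entry with core_mhz >= 1500 and returns a device vote of
+ * 1525; core frequencies above the last entry fall back to it.
+ */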
+
+static struct memlat_node *find_memlat_node(struct devfreq *df)
+{
+ struct memlat_node *node, *found = NULL;
+
+ mutex_lock(&list_lock);
+ list_for_each_entry(node, &memlat_list, list)
+ if (node->hw->dev == df->dev.parent ||
+ node->hw->of_node == df->dev.parent->of_node) {
+ found = node;
+ break;
+ }
+ mutex_unlock(&list_lock);
+
+ return found;
+}
+
+static int start_monitor(struct devfreq *df)
+{
+ struct memlat_node *node = df->data;
+ struct memlat_hwmon *hw = node->hw;
+ struct device *dev = df->dev.parent;
+ int ret;
+
+ ret = hw->start_hwmon(hw);
+
+ if (ret) {
+ dev_err(dev, "Unable to start HW monitor! (%d)\n", ret);
+ return ret;
+ }
+
+ devfreq_monitor_start(df);
+
+ node->mon_started = true;
+
+ return 0;
+}
+
+static void stop_monitor(struct devfreq *df)
+{
+ struct memlat_node *node = df->data;
+ struct memlat_hwmon *hw = node->hw;
+
+ node->mon_started = false;
+
+ devfreq_monitor_stop(df);
+ hw->stop_hwmon(hw);
+}
+
+static int gov_start(struct devfreq *df)
+{
+ int ret = 0;
+ struct device *dev = df->dev.parent;
+ struct memlat_node *node;
+ struct memlat_hwmon *hw;
+
+ node = find_memlat_node(df);
+ if (!node) {
+ dev_err(dev, "Unable to find HW monitor!\n");
+ return -ENODEV;
+ }
+ hw = node->hw;
+
+ hw->df = df;
+ node->orig_data = df->data;
+ df->data = node;
+
+ if (start_monitor(df))
+ goto err_start;
+
+ ret = sysfs_create_group(&df->dev.kobj, node->attr_grp);
+ if (ret)
+ goto err_sysfs;
+
+ return 0;
+
+err_sysfs:
+ stop_monitor(df);
+err_start:
+ df->data = node->orig_data;
+ node->orig_data = NULL;
+ hw->df = NULL;
+ return ret;
+}
+
+static void gov_stop(struct devfreq *df)
+{
+ struct memlat_node *node = df->data;
+ struct memlat_hwmon *hw = node->hw;
+
+ sysfs_remove_group(&df->dev.kobj, node->attr_grp);
+ stop_monitor(df);
+ df->data = node->orig_data;
+ node->orig_data = NULL;
+ hw->df = NULL;
+}
+
+static int devfreq_memlat_get_freq(struct devfreq *df,
+ unsigned long *freq,
+ u32 *flag)
+{
+	int i, lat_dev = 0;
+ struct memlat_node *node = df->data;
+ struct memlat_hwmon *hw = node->hw;
+ unsigned long max_freq = 0;
+ unsigned int ratio;
+
+ hw->get_cnt(hw);
+
+ for (i = 0; i < hw->num_cores; i++) {
+ ratio = hw->core_stats[i].inst_count;
+
+ if (hw->core_stats[i].mem_count)
+ ratio /= hw->core_stats[i].mem_count;
+
+ trace_memlat_dev_meas(dev_name(df->dev.parent),
+ hw->core_stats[i].id,
+ hw->core_stats[i].inst_count,
+ hw->core_stats[i].mem_count,
+ hw->core_stats[i].freq, ratio);
+
+ if (ratio && ratio <= node->ratio_ceil
+ && hw->core_stats[i].freq > max_freq) {
+ lat_dev = i;
+ max_freq = hw->core_stats[i].freq;
+ }
+ }
+
+ if (max_freq) {
+ max_freq = core_to_dev_freq(node, max_freq);
+ trace_memlat_dev_update(dev_name(df->dev.parent),
+ hw->core_stats[lat_dev].id,
+ hw->core_stats[lat_dev].inst_count,
+ hw->core_stats[lat_dev].mem_count,
+ hw->core_stats[lat_dev].freq,
+ max_freq);
+ }
+
+ *freq = max_freq;
+ return 0;
+}
+
+gov_attr(ratio_ceil, 1U, 10000U);
+
+static struct attribute *dev_attr[] = {
+ &dev_attr_ratio_ceil.attr,
+ &dev_attr_freq_map.attr,
+ NULL,
+};
+
+static struct attribute_group dev_attr_group = {
+ .name = "mem_latency",
+ .attrs = dev_attr,
+};
+
+#define MIN_MS 10U
+#define MAX_MS 500U
+static int devfreq_memlat_ev_handler(struct devfreq *df,
+ unsigned int event, void *data)
+{
+ int ret;
+ unsigned int sample_ms;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ sample_ms = df->profile->polling_ms;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ df->profile->polling_ms = sample_ms;
+
+ ret = gov_start(df);
+ if (ret)
+ return ret;
+
+ dev_dbg(df->dev.parent,
+ "Enabled Memory Latency governor\n");
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ gov_stop(df);
+ dev_dbg(df->dev.parent,
+ "Disabled Memory Latency governor\n");
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ sample_ms = *(unsigned int *)data;
+ sample_ms = max(MIN_MS, sample_ms);
+ sample_ms = min(MAX_MS, sample_ms);
+ devfreq_interval_update(df, &sample_ms);
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor devfreq_gov_memlat = {
+ .name = "mem_latency",
+ .get_target_freq = devfreq_memlat_get_freq,
+ .event_handler = devfreq_memlat_ev_handler,
+};
+
+#define NUM_COLS 2
+static struct core_dev_map *init_core_dev_map(struct device *dev,
+ char *prop_name)
+{
+ int len, nf, i, j;
+ u32 data;
+ struct core_dev_map *tbl;
+ int ret;
+
+ if (!of_find_property(dev->of_node, prop_name, &len))
+ return NULL;
+ len /= sizeof(data);
+
+ if (len % NUM_COLS || len == 0)
+ return NULL;
+ nf = len / NUM_COLS;
+
+ tbl = devm_kzalloc(dev, (nf + 1) * sizeof(struct core_dev_map),
+ GFP_KERNEL);
+ if (!tbl)
+ return NULL;
+
+ for (i = 0, j = 0; i < nf; i++, j += 2) {
+ ret = of_property_read_u32_index(dev->of_node, prop_name, j,
+ &data);
+ if (ret)
+ return NULL;
+ tbl[i].core_mhz = data / 1000;
+
+ ret = of_property_read_u32_index(dev->of_node, prop_name, j + 1,
+ &data);
+ if (ret)
+ return NULL;
+ tbl[i].target_freq = data;
+ pr_debug("Entry%d CPU:%u, Dev:%u\n", i, tbl[i].core_mhz,
+ tbl[i].target_freq);
+ }
+ tbl[i].core_mhz = 0;
+
+ return tbl;
+}
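+
+/*
+ * The property is parsed as <core-freq-khz device-freq> pairs. A
+ * hypothetical example:
+ *
+ *	qcom,core-dev-table = <1000000 762>, <2000000 1525>;
+ *
+ * Core frequencies are given in kHz and converted to MHz above; the
+ * device frequency is in whatever unit the target device uses.
+ */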
+
+int register_memlat(struct device *dev, struct memlat_hwmon *hw)
+{
+ int ret = 0;
+ struct memlat_node *node;
+
+ if (!hw->dev && !hw->of_node)
+ return -EINVAL;
+
+ node = devm_kzalloc(dev, sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ node->gov = &devfreq_gov_memlat;
+ node->attr_grp = &dev_attr_group;
+
+ node->ratio_ceil = 10;
+ node->hw = hw;
+
+ hw->freq_map = init_core_dev_map(dev, "qcom,core-dev-table");
+ if (!hw->freq_map) {
+ dev_err(dev, "Couldn't find the core-dev freq table!\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&list_lock);
+ list_add_tail(&node->list, &memlat_list);
+ mutex_unlock(&list_lock);
+
+ mutex_lock(&state_lock);
+ if (!use_cnt)
+ ret = devfreq_add_governor(&devfreq_gov_memlat);
+ if (!ret)
+ use_cnt++;
+ mutex_unlock(&state_lock);
+
+ if (!ret)
+ dev_info(dev, "Memory Latency governor registered.\n");
+ else
+ dev_err(dev, "Memory Latency governor registration failed!\n");
+
+ return ret;
+}
+
+MODULE_DESCRIPTION("HW monitor based memory latency governor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_memlat.h b/drivers/devfreq/governor_memlat.h
new file mode 100644
index 000000000000..a0e52a0997ad
--- /dev/null
+++ b/drivers/devfreq/governor_memlat.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GOVERNOR_MEMLAT_H
+#define _GOVERNOR_MEMLAT_H
+
+#include <linux/kernel.h>
+#include <linux/devfreq.h>
+
+/**
+ * struct dev_stats - Device stats
+ * @id:		ID of the core these stats belong to.
+ * @inst_count:	Number of instructions executed.
+ * @mem_count:	Number of memory accesses made.
+ * @freq:	Effective frequency of the device in the
+ *		last interval.
+ */
+struct dev_stats {
+ int id;
+ unsigned long inst_count;
+ unsigned long mem_count;
+ unsigned long freq;
+};
+
+struct core_dev_map {
+ unsigned int core_mhz;
+ unsigned int target_freq;
+};
+
+/**
+ * struct memlat_hwmon - Memory Latency HW monitor info
+ * @start_hwmon: Start the HW monitoring
+ * @stop_hwmon: Stop the HW monitoring
+ * @get_cnt:		Return the number of instructions executed,
+ * memory accesses and effective frequency
+ * @dev: Pointer to device that this HW monitor can
+ * monitor.
+ * @of_node: OF node of device that this HW monitor can
+ * monitor.
+ * @df: Devfreq node that this HW monitor is being
+ * used for. NULL when not actively in use and
+ * non-NULL when in use.
+ * @num_cores: Number of cores that are monitored by the
+ * hardware monitor.
+ * @core_stats: Array containing instruction count, memory
+ * accesses and effective frequency for each core.
+ *
+ * One of dev or of_node needs to be specified for a successful registration.
+ *
+ */
+struct memlat_hwmon {
+ int (*start_hwmon)(struct memlat_hwmon *hw);
+ void (*stop_hwmon)(struct memlat_hwmon *hw);
+ unsigned long (*get_cnt)(struct memlat_hwmon *hw);
+ struct device *dev;
+ struct device_node *of_node;
+
+ unsigned int num_cores;
+ struct dev_stats *core_stats;
+
+ struct devfreq *df;
+ struct core_dev_map *freq_map;
+};
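+
+/*
+ * Minimal registration sketch (hypothetical driver): allocate a
+ * memlat_hwmon, fill in start_hwmon/stop_hwmon/get_cnt, num_cores
+ * and core_stats, set either dev or of_node, then call
+ * register_memlat() from the hwmon driver's probe function.
+ */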
+
+#ifdef CONFIG_DEVFREQ_GOV_MEMLAT
+int register_memlat(struct device *dev, struct memlat_hwmon *hw);
+int update_memlat(struct memlat_hwmon *hw);
+#else
+static inline int register_memlat(struct device *dev,
+ struct memlat_hwmon *hw)
+{
+ return 0;
+}
+static inline int update_memlat(struct memlat_hwmon *hw)
+{
+ return 0;
+}
+#endif
+
+#endif /* _GOVERNOR_MEMLAT_H */
diff --git a/drivers/devfreq/governor_msm_adreno_tz.c b/drivers/devfreq/governor_msm_adreno_tz.c
new file mode 100644
index 000000000000..f31089d63e0c
--- /dev/null
+++ b/drivers/devfreq/governor_msm_adreno_tz.c
@@ -0,0 +1,660 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/devfreq.h>
+#include <linux/math64.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ftrace.h>
+#include <linux/mm.h>
+#include <linux/msm_adreno_devfreq.h>
+#include <asm/cacheflush.h>
+#include <soc/qcom/scm.h>
+#include "governor.h"
+
+static DEFINE_SPINLOCK(tz_lock);
+static DEFINE_SPINLOCK(sample_lock);
+static DEFINE_SPINLOCK(suspend_lock);
+/*
+ * FLOOR is 5msec to capture up to 3 re-draws
+ * per frame for 60fps content.
+ */
+#define FLOOR 5000
+/*
+ * MIN_BUSY is 1 msec for the sample to be sent
+ */
+#define MIN_BUSY 1000
+#define MAX_TZ_VERSION 0
+
+/*
+ * CEILING is 50msec, larger than any standard
+ * frame length, but less than the idle timer.
+ */
+#define CEILING 50000
+#define TZ_RESET_ID 0x3
+#define TZ_UPDATE_ID 0x4
+#define TZ_INIT_ID 0x6
+
+#define TZ_RESET_ID_64 0x7
+#define TZ_UPDATE_ID_64 0x8
+#define TZ_INIT_ID_64 0x9
+
+#define TZ_V2_UPDATE_ID_64 0xA
+#define TZ_V2_INIT_ID_64 0xB
+#define TZ_V2_INIT_CA_ID_64 0xC
+#define TZ_V2_UPDATE_WITH_CA_ID_64 0xD
+
+#define TAG "msm_adreno_tz: "
+
+static u64 suspend_time;
+static u64 suspend_start;
+static unsigned long acc_total, acc_relative_busy;
+
+static struct msm_adreno_extended_profile *partner_gpu_profile;
+static void do_partner_start_event(struct work_struct *work);
+static void do_partner_stop_event(struct work_struct *work);
+static void do_partner_suspend_event(struct work_struct *work);
+static void do_partner_resume_event(struct work_struct *work);
+
+static struct workqueue_struct *workqueue;
+
+/*
+ * Returns GPU suspend time in milliseconds.
+ */
+u64 suspend_time_ms(void)
+{
+ u64 suspend_sampling_time;
+ u64 time_diff = 0;
+
+ if (suspend_start == 0)
+ return 0;
+
+ suspend_sampling_time = (u64)ktime_to_ms(ktime_get());
+ time_diff = suspend_sampling_time - suspend_start;
+ /* Update the suspend_start sample again */
+ suspend_start = suspend_sampling_time;
+ return time_diff;
+}
+
+static ssize_t gpu_load_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ unsigned long sysfs_busy_perc = 0;
+ /*
+	 * Average out the samples taken since the last read.
+	 * This keeps the average value in sync with the
+	 * client's sampling duration.
+ */
+ spin_lock(&sample_lock);
+ if (acc_total)
+ sysfs_busy_perc = (acc_relative_busy * 100) / acc_total;
+
+ /* Reset the parameters */
+ acc_total = 0;
+ acc_relative_busy = 0;
+ spin_unlock(&sample_lock);
+ return snprintf(buf, PAGE_SIZE, "%lu\n", sysfs_busy_perc);
+}
+
+/*
+ * Returns the time in ms for which gpu was in suspend state
+ * since last time the entry is read.
+ */
+static ssize_t suspend_time_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 time_diff = 0;
+
+ spin_lock(&suspend_lock);
+ time_diff = suspend_time_ms();
+ /*
+	 * Also add the previously accumulated suspend time, as the
+	 * GPU can enter and exit suspend between reads and the
+	 * total suspend time since the last read is reported.
+ */
+ time_diff += suspend_time;
+ suspend_time = 0;
+ spin_unlock(&suspend_lock);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", time_diff);
+}
+
+static DEVICE_ATTR(gpu_load, 0444, gpu_load_show, NULL);
+
+static DEVICE_ATTR(suspend_time, 0444,
+ suspend_time_show,
+ NULL);
+
+static const struct device_attribute *adreno_tz_attr_list[] = {
+ &dev_attr_gpu_load,
+ &dev_attr_suspend_time,
+ NULL
+};
+
+void compute_work_load(struct devfreq_dev_status *stats,
+ struct devfreq_msm_adreno_tz_data *priv,
+ struct devfreq *devfreq)
+{
+ spin_lock(&sample_lock);
+ /*
+	 * Keep collecting the stats until the client reads
+	 * them. The samples are averaged and the accumulators
+	 * reset when the sysfs entry is read.
+ */
+ acc_total += stats->total_time;
+ acc_relative_busy += (stats->busy_time * stats->current_frequency) /
+ devfreq->profile->freq_table[0];
+ spin_unlock(&sample_lock);
+}
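+
+/*
+ * Note: busy time is normalized against freq_table[0] (the highest
+ * frequency), so, for example, 10 ms of busy time at half the top
+ * frequency accumulates as 5 ms of relative busy time.
+ */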
+
+/* Trap into the TrustZone, and call funcs there. */
+static int __secure_tz_reset_entry2(unsigned int *scm_data, u32 size_scm_data,
+ bool is_64)
+{
+ int ret;
+ /* sync memory before sending the commands to tz */
+ __iowmb();
+
+ if (!is_64) {
+ spin_lock(&tz_lock);
+ ret = scm_call_atomic2(SCM_SVC_IO, TZ_RESET_ID, scm_data[0],
+ scm_data[1]);
+ spin_unlock(&tz_lock);
+ } else {
+ if (is_scm_armv8()) {
+ struct scm_desc desc = {0};
+ desc.arginfo = 0;
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS,
+ TZ_RESET_ID_64), &desc);
+ } else {
+ ret = scm_call(SCM_SVC_DCVS, TZ_RESET_ID_64, scm_data,
+ size_scm_data, NULL, 0);
+ }
+ }
+ return ret;
+}
+
+static int __secure_tz_update_entry3(unsigned int *scm_data, u32 size_scm_data,
+ int *val, u32 size_val, struct devfreq_msm_adreno_tz_data *priv)
+{
+ int ret;
+ /* sync memory before sending the commands to tz */
+ __iowmb();
+
+ if (!priv->is_64) {
+ spin_lock(&tz_lock);
+ ret = scm_call_atomic3(SCM_SVC_IO, TZ_UPDATE_ID,
+ scm_data[0], scm_data[1], scm_data[2]);
+ spin_unlock(&tz_lock);
+ *val = ret;
+ } else {
+ if (is_scm_armv8()) {
+ unsigned int cmd_id;
+ struct scm_desc desc = {0};
+ desc.args[0] = scm_data[0];
+ desc.args[1] = scm_data[1];
+ desc.args[2] = scm_data[2];
+
+ if (!priv->ctxt_aware_enable) {
+ desc.arginfo = SCM_ARGS(3);
+ cmd_id = TZ_V2_UPDATE_ID_64;
+ } else {
+				/* Add context count information to update */
+ desc.args[3] = scm_data[3];
+ desc.arginfo = SCM_ARGS(4);
+ cmd_id = TZ_V2_UPDATE_WITH_CA_ID_64;
+ }
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, cmd_id),
+ &desc);
+ *val = desc.ret[0];
+ } else {
+ ret = scm_call(SCM_SVC_DCVS, TZ_UPDATE_ID_64, scm_data,
+ size_scm_data, val, size_val);
+ }
+ }
+ return ret;
+}
+
+static int tz_init_ca(struct devfreq_msm_adreno_tz_data *priv)
+{
+ unsigned int tz_ca_data[2];
+ struct scm_desc desc = {0};
+ u8 *tz_buf;
+ int ret;
+
+ /* Set data for TZ */
+ tz_ca_data[0] = priv->bin.ctxt_aware_target_pwrlevel;
+ tz_ca_data[1] = priv->bin.ctxt_aware_busy_penalty;
+
+ tz_buf = kzalloc(PAGE_ALIGN(sizeof(tz_ca_data)), GFP_KERNEL);
+ if (!tz_buf)
+ return -ENOMEM;
+
+ memcpy(tz_buf, tz_ca_data, sizeof(tz_ca_data));
+ /* Ensure memcpy completes execution */
+ mb();
+ dmac_flush_range(tz_buf,
+ tz_buf + PAGE_ALIGN(sizeof(tz_ca_data)));
+
+ desc.args[0] = virt_to_phys(tz_buf);
+ desc.args[1] = sizeof(tz_ca_data);
+ desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS,
+ TZ_V2_INIT_CA_ID_64),
+ &desc);
+
+ kzfree(tz_buf);
+
+ return ret;
+}
+
+static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
+ unsigned int *tz_pwrlevels, u32 size_pwrlevels,
+ unsigned int *version, u32 size_version)
+{
+ int ret;
+	/* Make sure all CMD IDs are available */
+ if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID)) {
+ ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
+ size_pwrlevels, NULL, 0);
+ *version = 0;
+
+ } else if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID_64) &&
+ scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
+ scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
+ struct scm_desc desc = {0};
+ u8 *tz_buf;
+
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64,
+ tz_pwrlevels, size_pwrlevels,
+ version, size_version);
+ if (!ret)
+ priv->is_64 = true;
+ return ret;
+ }
+
+ tz_buf = kzalloc(PAGE_ALIGN(size_pwrlevels), GFP_KERNEL);
+ if (!tz_buf)
+ return -ENOMEM;
+ memcpy(tz_buf, tz_pwrlevels, size_pwrlevels);
+ /* Ensure memcpy completes execution */
+ mb();
+ dmac_flush_range(tz_buf, tz_buf + PAGE_ALIGN(size_pwrlevels));
+
+ desc.args[0] = virt_to_phys(tz_buf);
+ desc.args[1] = size_pwrlevels;
+ desc.arginfo = SCM_ARGS(2, SCM_RW, SCM_VAL);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_DCVS, TZ_V2_INIT_ID_64),
+ &desc);
+ *version = desc.ret[0];
+ if (!ret)
+ priv->is_64 = true;
+ kzfree(tz_buf);
+ } else
+ ret = -EINVAL;
+
+ /* Initialize context aware feature, if enabled. */
+ if (!ret && priv->ctxt_aware_enable) {
+ if (priv->is_64 &&
+ (scm_is_call_available(SCM_SVC_DCVS,
+ TZ_V2_INIT_CA_ID_64)) &&
+ (scm_is_call_available(SCM_SVC_DCVS,
+ TZ_V2_UPDATE_WITH_CA_ID_64))) {
+ ret = tz_init_ca(priv);
+ /*
+			 * If context aware feature initialization fails,
+ * just print an error message and return
+ * success as normal DCVS will still work.
+ */
+ if (ret) {
+ pr_err(TAG "tz: context aware DCVS init failed\n");
+ priv->ctxt_aware_enable = false;
+ return 0;
+ }
+ } else {
+ pr_warn(TAG "tz: context aware DCVS not supported\n");
+ priv->ctxt_aware_enable = false;
+ }
+ }
+
+ return ret;
+}
+
+static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq,
+ u32 *flag)
+{
+ int result = 0;
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+ struct devfreq_dev_status stats;
+ int val, level = 0;
+ unsigned int scm_data[4];
+ int context_count = 0;
+
+ /* keeps stats.private_data == NULL */
+ result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
+ if (result) {
+ pr_err(TAG "get_status failed %d\n", result);
+ return result;
+ }
+
+ *freq = stats.current_frequency;
+ priv->bin.total_time += stats.total_time;
+ priv->bin.busy_time += stats.busy_time;
+
+ if (stats.private_data)
+ context_count = *((int *)stats.private_data);
+
+ /* Update the GPU load statistics */
+ compute_work_load(&stats, priv, devfreq);
+ /*
+ * Do not waste CPU cycles running this algorithm if
+ * the GPU just started, or if less than FLOOR time
+ * has passed since the last run or the gpu hasn't been
+ * busier than MIN_BUSY.
+ */
+ if ((stats.total_time == 0) ||
+ (priv->bin.total_time < FLOOR) ||
+ (unsigned int) priv->bin.busy_time < MIN_BUSY) {
+ return 0;
+ }
+
+ level = devfreq_get_freq_level(devfreq, stats.current_frequency);
+ if (level < 0) {
+		pr_err(TAG "bad freq %lu\n", stats.current_frequency);
+ return level;
+ }
+
+ /*
+ * If there is an extended block of busy processing,
+ * increase frequency. Otherwise run the normal algorithm.
+ */
+ if (!priv->disable_busy_time_burst &&
+ priv->bin.busy_time > CEILING) {
+ val = -1 * level;
+ } else {
+
+ scm_data[0] = level;
+ scm_data[1] = priv->bin.total_time;
+ scm_data[2] = priv->bin.busy_time;
+ scm_data[3] = context_count;
+ __secure_tz_update_entry3(scm_data, sizeof(scm_data),
+ &val, sizeof(val), priv);
+ }
+ priv->bin.total_time = 0;
+ priv->bin.busy_time = 0;
+
+ /*
+ * If the decision is to move to a different level, make sure the GPU
+ * frequency changes.
+ */
+ if (val) {
+ level += val;
+ level = max(level, 0);
+ level = min_t(int, level, devfreq->profile->max_state - 1);
+ }
+
+ *freq = devfreq->profile->freq_table[level];
+ return 0;
+}
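+
+/*
+ * The freq_table used by this governor is ordered highest frequency
+ * first, so a TZ decision of val = -1 at level 2 moves the GPU to
+ * level 1, the next higher frequency; val = 0 keeps the current level.
+ */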
+
+static int tz_notify(struct notifier_block *nb, unsigned long type, void *devp)
+{
+ int result = 0;
+ struct devfreq *devfreq = devp;
+
+ switch (type) {
+ case ADRENO_DEVFREQ_NOTIFY_IDLE:
+ case ADRENO_DEVFREQ_NOTIFY_RETIRE:
+ mutex_lock(&devfreq->lock);
+ result = update_devfreq(devfreq);
+ mutex_unlock(&devfreq->lock);
+		/* Notify the partner bus governor, if any */
+ if (partner_gpu_profile && partner_gpu_profile->bus_devfreq) {
+ mutex_lock(&partner_gpu_profile->bus_devfreq->lock);
+ update_devfreq(partner_gpu_profile->bus_devfreq);
+ mutex_unlock(&partner_gpu_profile->bus_devfreq->lock);
+ }
+ break;
+ /* ignored by this governor */
+ case ADRENO_DEVFREQ_NOTIFY_SUBMIT:
+ default:
+ break;
+ }
+ return notifier_from_errno(result);
+}
+
+static int tz_start(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv;
+ unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
+ int i, out, ret;
+ unsigned int version;
+
+ struct msm_adreno_extended_profile *gpu_profile = container_of(
+ (devfreq->profile),
+ struct msm_adreno_extended_profile,
+ profile);
+
+ /*
+ * Assuming that we have only one instance of the adreno device
+ * connected to this governor,
+ * can safely restore the pointer to the governor private data
+ * from the container of the device profile
+ */
+ devfreq->data = gpu_profile->private_data;
+ partner_gpu_profile = gpu_profile;
+
+ priv = devfreq->data;
+ priv->nb.notifier_call = tz_notify;
+
+ out = 1;
+ if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) {
+ for (i = 0; i < devfreq->profile->max_state; i++)
+ tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
+ tz_pwrlevels[0] = i;
+ } else {
+ pr_err(TAG "tz_pwrlevels[] is too short\n");
+ return -EINVAL;
+ }
+
+ INIT_WORK(&gpu_profile->partner_start_event_ws,
+ do_partner_start_event);
+ INIT_WORK(&gpu_profile->partner_stop_event_ws,
+ do_partner_stop_event);
+ INIT_WORK(&gpu_profile->partner_suspend_event_ws,
+ do_partner_suspend_event);
+ INIT_WORK(&gpu_profile->partner_resume_event_ws,
+ do_partner_resume_event);
+
+ ret = tz_init(priv, tz_pwrlevels, sizeof(tz_pwrlevels), &version,
+ sizeof(version));
+	if (ret != 0 || version > MAX_TZ_VERSION) {
+		pr_err(TAG "tz_init failed\n");
+		return ret ? ret : -EINVAL;
+	}
+
+ for (i = 0; adreno_tz_attr_list[i] != NULL; i++)
+ device_create_file(&devfreq->dev, adreno_tz_attr_list[i]);
+
+ return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
+}
+
+static int tz_stop(struct devfreq *devfreq)
+{
+ int i;
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+ kgsl_devfreq_del_notifier(devfreq->dev.parent, &priv->nb);
+
+ for (i = 0; adreno_tz_attr_list[i] != NULL; i++)
+ device_remove_file(&devfreq->dev, adreno_tz_attr_list[i]);
+
+ flush_workqueue(workqueue);
+
+ /* leaving the governor and cleaning the pointer to private data */
+ devfreq->data = NULL;
+ partner_gpu_profile = NULL;
+ return 0;
+}
+
+static int tz_suspend(struct devfreq *devfreq)
+{
+ struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+ unsigned int scm_data[2] = {0, 0};
+ __secure_tz_reset_entry2(scm_data, sizeof(scm_data), priv->is_64);
+
+ priv->bin.total_time = 0;
+ priv->bin.busy_time = 0;
+ return 0;
+}
+
+static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data)
+{
+ int result;
+
+ struct msm_adreno_extended_profile *gpu_profile = container_of(
+ (devfreq->profile),
+ struct msm_adreno_extended_profile,
+ profile);
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ result = tz_start(devfreq);
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ /* Queue the stop work before the TZ is stopped */
+ if (partner_gpu_profile && partner_gpu_profile->bus_devfreq)
+ queue_work(workqueue,
+ &gpu_profile->partner_stop_event_ws);
+ spin_lock(&suspend_lock);
+ suspend_start = 0;
+ spin_unlock(&suspend_lock);
+ result = tz_stop(devfreq);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ result = tz_suspend(devfreq);
+ if (!result) {
+ spin_lock(&suspend_lock);
+ /* Collect the start sample for suspend time */
+ suspend_start = (u64)ktime_to_ms(ktime_get());
+ spin_unlock(&suspend_lock);
+ }
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ spin_lock(&suspend_lock);
+ suspend_time += suspend_time_ms();
+ /* Reset the suspend_start when gpu resumes */
+ suspend_start = 0;
+ spin_unlock(&suspend_lock);
+		/* fall through */
+
+ case DEVFREQ_GOV_INTERVAL:
+ /* ignored, this governor doesn't use polling */
+ default:
+ result = 0;
+ break;
+ }
+
+ if (partner_gpu_profile && partner_gpu_profile->bus_devfreq)
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ queue_work(workqueue,
+ &gpu_profile->partner_start_event_ws);
+ break;
+ case DEVFREQ_GOV_SUSPEND:
+ queue_work(workqueue,
+ &gpu_profile->partner_suspend_event_ws);
+ break;
+ case DEVFREQ_GOV_RESUME:
+ queue_work(workqueue,
+ &gpu_profile->partner_resume_event_ws);
+ break;
+ }
+
+ return result;
+}
+
+static void _do_partner_event(struct work_struct *work, unsigned int event)
+{
+ struct devfreq *bus_devfreq;
+
+ if (partner_gpu_profile == NULL)
+ return;
+
+ bus_devfreq = partner_gpu_profile->bus_devfreq;
+
+ if (bus_devfreq != NULL &&
+ bus_devfreq->governor &&
+ bus_devfreq->governor->event_handler)
+ bus_devfreq->governor->event_handler(bus_devfreq, event, NULL);
+}
+
+static void do_partner_start_event(struct work_struct *work)
+{
+ _do_partner_event(work, DEVFREQ_GOV_START);
+}
+
+static void do_partner_stop_event(struct work_struct *work)
+{
+ _do_partner_event(work, DEVFREQ_GOV_STOP);
+}
+
+static void do_partner_suspend_event(struct work_struct *work)
+{
+ _do_partner_event(work, DEVFREQ_GOV_SUSPEND);
+}
+
+static void do_partner_resume_event(struct work_struct *work)
+{
+ _do_partner_event(work, DEVFREQ_GOV_RESUME);
+}
+
+
+static struct devfreq_governor msm_adreno_tz = {
+ .name = "msm-adreno-tz",
+ .get_target_freq = tz_get_target_freq,
+ .event_handler = tz_handler,
+};
+
+static int __init msm_adreno_tz_init(void)
+{
+ workqueue = create_freezable_workqueue("governor_msm_adreno_tz_wq");
+ if (workqueue == NULL)
+ return -ENOMEM;
+
+ return devfreq_add_governor(&msm_adreno_tz);
+}
+subsys_initcall(msm_adreno_tz_init);
+
+static void __exit msm_adreno_tz_exit(void)
+{
+ int ret = devfreq_remove_governor(&msm_adreno_tz);
+ if (ret)
+ pr_err(TAG "failed to remove governor %d\n", ret);
+
+ if (workqueue != NULL)
+ destroy_workqueue(workqueue);
+}
+
+module_exit(msm_adreno_tz_exit);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_performance.c b/drivers/devfreq/governor_performance.c
index c72f942f30a8..74ae3bb277c7 100644
--- a/drivers/devfreq/governor_performance.c
+++ b/drivers/devfreq/governor_performance.c
@@ -14,7 +14,8 @@
#include "governor.h"
static int devfreq_performance_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
/*
* target callback should be able to get floor value as
@@ -31,13 +32,26 @@ static int devfreq_performance_handler(struct devfreq *devfreq,
unsigned int event, void *data)
{
int ret = 0;
+ unsigned long freq;
- if (event == DEVFREQ_GOV_START) {
- mutex_lock(&devfreq->lock);
+ mutex_lock(&devfreq->lock);
+ freq = devfreq->previous_freq;
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ devfreq->profile->target(devfreq->dev.parent,
+ &freq,
+ DEVFREQ_FLAG_WAKEUP_MAXFREQ);
+ /* fall through */
+ case DEVFREQ_GOV_RESUME:
ret = update_devfreq(devfreq);
- mutex_unlock(&devfreq->lock);
+ break;
+ case DEVFREQ_GOV_SUSPEND:
+ devfreq->profile->target(devfreq->dev.parent,
+ &freq,
+ DEVFREQ_FLAG_WAKEUP_MAXFREQ);
+ break;
}
-
+ mutex_unlock(&devfreq->lock);
return ret;
}
diff --git a/drivers/devfreq/governor_powersave.c b/drivers/devfreq/governor_powersave.c
index 0c6bed567e6d..57f3738a0b9d 100644
--- a/drivers/devfreq/governor_powersave.c
+++ b/drivers/devfreq/governor_powersave.c
@@ -14,7 +14,8 @@
#include "governor.h"
static int devfreq_powersave_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
/*
* target callback should be able to get ceiling value as
@@ -29,7 +30,7 @@ static int devfreq_powersave_handler(struct devfreq *devfreq,
{
int ret = 0;
- if (event == DEVFREQ_GOV_START) {
+ if (event == DEVFREQ_GOV_START || event == DEVFREQ_GOV_RESUME) {
mutex_lock(&devfreq->lock);
ret = update_devfreq(devfreq);
mutex_unlock(&devfreq->lock);
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index ae72ba5e78df..6200e14ccc19 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -19,7 +19,8 @@
#define DFSO_UPTHRESHOLD (90)
#define DFSO_DOWNDIFFERENCTIAL (5)
static int devfreq_simple_ondemand_func(struct devfreq *df,
- unsigned long *freq)
+ unsigned long *freq,
+ u32 *flag)
{
int err;
struct devfreq_dev_status *stat;
@@ -28,6 +29,7 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
struct devfreq_simple_ondemand_data *data = df->data;
unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
+ unsigned long min = (df->min_freq) ? df->min_freq : 0;
err = devfreq_update_stats(df);
if (err)
@@ -45,18 +47,31 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
dfso_upthreshold < dfso_downdifferential)
return -EINVAL;
- /* Assume MAX if it is going to be divided by zero */
- if (stat->total_time == 0) {
- *freq = max;
- return 0;
- }
-
/* Prevent overflow */
if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
stat->busy_time >>= 7;
stat->total_time >>= 7;
}
+ if (data && data->simple_scaling) {
+ if (stat->busy_time * 100 >
+ stat->total_time * dfso_upthreshold)
+ *freq = max;
+ else if (stat->busy_time * 100 <
+ stat->total_time *
+ (dfso_upthreshold - dfso_downdifferential))
+ *freq = min;
+ else
+ *freq = df->previous_freq;
+ return 0;
+ }
+
+ /* Assume MAX if it is going to be divided by zero */
+ if (stat->total_time == 0) {
+ *freq = max;
+ return 0;
+ }
+
/* Set MAX if it's busy enough */
if (stat->busy_time * 100 >
stat->total_time * dfso_upthreshold) {
diff --git a/drivers/devfreq/governor_spdm_bw_hyp.c b/drivers/devfreq/governor_spdm_bw_hyp.c
new file mode 100644
index 000000000000..f18b72af5fc4
--- /dev/null
+++ b/drivers/devfreq/governor_spdm_bw_hyp.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/devfreq.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/rpm-smd.h>
+#include "governor.h"
+#include "devfreq_spdm.h"
+
+enum msm_spdm_rt_res {
+ SPDM_RES_ID = 1,
+ SPDM_RES_TYPE = 0x63707362,
+ SPDM_KEY = 0x00006e65,
+ SPDM_SIZE = 4,
+};
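+
+/*
+ * SPDM_RES_TYPE and SPDM_KEY appear to be little-endian ASCII tags
+ * ("bspc" and "en"), following the usual RPM resource naming
+ * convention.
+ */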
+
+static LIST_HEAD(devfreqs);
+static DEFINE_MUTEX(devfreqs_lock);
+
+static int enable_clocks(void)
+{
+ struct msm_rpm_request *rpm_req;
+ int id;
+ const int one = 1;
+ rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
+ SPDM_RES_ID, 1);
+ if (!rpm_req)
+ return -ENODEV;
+ msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&one,
+ sizeof(int));
+ id = msm_rpm_send_request(rpm_req);
+ msm_rpm_wait_for_ack(id);
+ msm_rpm_free_request(rpm_req);
+
+ return 0;
+}
+
+static int disable_clocks(void)
+{
+ struct msm_rpm_request *rpm_req;
+ int id;
+ const int zero = 0;
+ rpm_req = msm_rpm_create_request(MSM_RPM_CTX_ACTIVE_SET, SPDM_RES_TYPE,
+ SPDM_RES_ID, 1);
+ if (!rpm_req)
+ return -ENODEV;
+ msm_rpm_add_kvp_data(rpm_req, SPDM_KEY, (const uint8_t *)&zero,
+ sizeof(int));
+ id = msm_rpm_send_request(rpm_req);
+ msm_rpm_wait_for_ack(id);
+ msm_rpm_free_request(rpm_req);
+
+ return 0;
+}
+
+static irqreturn_t threaded_isr(int irq, void *dev_id)
+{
+ struct spdm_data *data;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+
+ /* call hyp to get bw_vote */
+ desc.arg[0] = SPDM_CMD_GET_BW_ALL;
+ ext_status = spdm_ext_call(&desc, 1);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ mutex_lock(&devfreqs_lock);
+ list_for_each_entry(data, &devfreqs, list) {
+ if (data == NULL || data->devfreq == NULL) {
+ pr_err("Spurious interrupts\n");
+ break;
+ }
+ if (data->spdm_client == desc.ret[0]) {
+ devfreq_monitor_suspend(data->devfreq);
+ mutex_lock(&data->devfreq->lock);
+ data->action = SPDM_UP;
+			data->new_bw = (desc.ret[1] * 1000) >> 6;
+ update_devfreq(data->devfreq);
+ data->action = SPDM_DOWN;
+ mutex_unlock(&data->devfreq->lock);
+ devfreq_monitor_resume(data->devfreq);
+ break;
+ }
+ }
+ mutex_unlock(&devfreqs_lock);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t isr(int irq, void *dev_id)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static int gov_spdm_hyp_target_bw(struct devfreq *devfreq, unsigned long *freq,
+ u32 *flag)
+{
+ struct devfreq_dev_status status;
+ int ret = -EINVAL;
+ int usage;
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+ u64 bw_ret;
+
+ if (!devfreq || !devfreq->profile || !devfreq->profile->get_dev_status)
+ return ret;
+
+ ret = devfreq->profile->get_dev_status(devfreq->dev.parent, &status);
+ if (ret)
+ return ret;
+
+ usage = (status.busy_time * 100) / status.total_time;
+
+ if (usage > 0) {
+		/*
+		 * The up vote was already handled in the hypervisor ISR
+		 * path, so just use the already stored value.
+		 */
+ *freq = ((struct spdm_data *)devfreq->data)->new_bw;
+ } else {
+ desc.arg[0] = SPDM_CMD_GET_BW_SPECIFIC;
+ desc.arg[1] = ((struct spdm_data *)devfreq->data)->spdm_client;
+ ext_status = spdm_ext_call(&desc, 2);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ bw_ret = desc.ret[0] * 1000;
+ *freq = bw_ret >> 6;
+ }
+
+ return 0;
+}
+
+static int gov_spdm_hyp_eh(struct devfreq *devfreq, unsigned int event,
+ void *data)
+{
+ struct spdm_args desc = { { 0 } };
+ int ext_status = 0;
+ struct spdm_data *spdm_data = (struct spdm_data *)devfreq->data;
+ int i;
+
+ switch (event) {
+ case DEVFREQ_GOV_START:
+ mutex_lock(&devfreqs_lock);
+ list_add(&spdm_data->list, &devfreqs);
+ mutex_unlock(&devfreqs_lock);
+ /* call hyp with config data */
+ desc.arg[0] = SPDM_CMD_CFG_PORTS;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.num_ports;
+ for (i = 0; i < spdm_data->config_data.num_ports; i++)
+ desc.arg[i+3] = spdm_data->config_data.ports[i];
+ ext_status = spdm_ext_call(&desc,
+ spdm_data->config_data.num_ports + 3);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+
+ desc.arg[0] = SPDM_CMD_CFG_FLTR;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.aup;
+ desc.arg[3] = spdm_data->config_data.adown;
+ desc.arg[4] = spdm_data->config_data.bucket_size;
+ ext_status = spdm_ext_call(&desc, 5);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+
+ desc.arg[0] = SPDM_CMD_CFG_PL;
+ desc.arg[1] = spdm_data->spdm_client;
+ for (i = 0; i < SPDM_PL_COUNT - 1; i++)
+ desc.arg[i+2] = spdm_data->config_data.pl_freqs[i];
+ ext_status = spdm_ext_call(&desc, SPDM_PL_COUNT + 1);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+
+ desc.arg[0] = SPDM_CMD_CFG_REJRATE_LOW;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.reject_rate[0];
+ desc.arg[3] = spdm_data->config_data.reject_rate[1];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ desc.arg[0] = SPDM_CMD_CFG_REJRATE_MED;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.reject_rate[2];
+ desc.arg[3] = spdm_data->config_data.reject_rate[3];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ desc.arg[0] = SPDM_CMD_CFG_REJRATE_HIGH;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.reject_rate[4];
+ desc.arg[3] = spdm_data->config_data.reject_rate[5];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+
+ desc.arg[0] = SPDM_CMD_CFG_RESPTIME_LOW;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.response_time_us[0];
+ desc.arg[3] = spdm_data->config_data.response_time_us[1];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ desc.arg[0] = SPDM_CMD_CFG_RESPTIME_MED;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.response_time_us[2];
+ desc.arg[3] = spdm_data->config_data.response_time_us[3];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ desc.arg[0] = SPDM_CMD_CFG_RESPTIME_HIGH;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.response_time_us[4];
+ desc.arg[3] = spdm_data->config_data.response_time_us[5];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+
+ desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_LOW;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.cci_response_time_us[0];
+ desc.arg[3] = spdm_data->config_data.cci_response_time_us[1];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_MED;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.cci_response_time_us[2];
+ desc.arg[3] = spdm_data->config_data.cci_response_time_us[3];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ desc.arg[0] = SPDM_CMD_CFG_CCIRESPTIME_HIGH;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.cci_response_time_us[4];
+ desc.arg[3] = spdm_data->config_data.cci_response_time_us[5];
+ ext_status = spdm_ext_call(&desc, 4);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+
+ desc.arg[0] = SPDM_CMD_CFG_MAXCCI;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.max_cci_freq;
+ ext_status = spdm_ext_call(&desc, 3);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+
+ desc.arg[0] = SPDM_CMD_CFG_VOTES;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = spdm_data->config_data.upstep;
+ desc.arg[3] = spdm_data->config_data.downstep;
+ desc.arg[4] = spdm_data->config_data.max_vote;
+ desc.arg[5] = spdm_data->config_data.up_step_multp;
+ ext_status = spdm_ext_call(&desc, 6);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+
+ /* call hyp enable/commit */
+ desc.arg[0] = SPDM_CMD_ENABLE;
+ desc.arg[1] = spdm_data->spdm_client;
+ desc.arg[2] = 0;
+ ext_status = spdm_ext_call(&desc, 3);
+ if (ext_status) {
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ mutex_lock(&devfreqs_lock);
+			/*
+			 * The spdm device probe will fail, so remove it
+			 * from the list to prevent accessing a deleted
+			 * pointer in the future.
+			 */
+ list_del(&spdm_data->list);
+ mutex_unlock(&devfreqs_lock);
+ return -EINVAL;
+ }
+ spdm_data->enabled = true;
+ devfreq_monitor_start(devfreq);
+ break;
+
+ case DEVFREQ_GOV_STOP:
+ devfreq_monitor_stop(devfreq);
+ /* find devfreq in list and remove it */
+ mutex_lock(&devfreqs_lock);
+ list_del(&spdm_data->list);
+ mutex_unlock(&devfreqs_lock);
+
+		/* call hypervisor to disable */
+ desc.arg[0] = SPDM_CMD_DISABLE;
+ desc.arg[1] = spdm_data->spdm_client;
+ ext_status = spdm_ext_call(&desc, 2);
+ if (ext_status)
+ pr_err("External command %u failed with error %u",
+ (int)desc.arg[0], ext_status);
+ spdm_data->enabled = false;
+ break;
+
+ case DEVFREQ_GOV_INTERVAL:
+ devfreq_interval_update(devfreq, (unsigned int *)data);
+ break;
+
+ case DEVFREQ_GOV_SUSPEND:
+ devfreq_monitor_suspend(devfreq);
+ break;
+
+ case DEVFREQ_GOV_RESUME:
+ devfreq_monitor_resume(devfreq);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct devfreq_governor spdm_hyp_gov = {
+ .name = "spdm_bw_hyp",
+ .get_target_freq = gov_spdm_hyp_target_bw,
+ .event_handler = gov_spdm_hyp_eh,
+};
+
+static int probe(struct platform_device *pdev)
+{
+ int ret = -EINVAL;
+	int *irq;
+
+ irq = devm_kzalloc(&pdev->dev, sizeof(int), GFP_KERNEL);
+ if (!irq)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, irq);
+
+ ret = devfreq_add_governor(&spdm_hyp_gov);
+ if (ret)
+ goto nogov;
+
+	*irq = platform_get_irq_byname(pdev, "spdm-irq");
+	if (*irq < 0) {
+		ret = *irq;
+		goto no_irq;
+	}
+ ret = request_threaded_irq(*irq, isr, threaded_isr,
+ IRQF_ONESHOT,
+ spdm_hyp_gov.name, pdev);
+ if (ret)
+ goto no_irq;
+
+ enable_clocks();
+ return 0;
+
+no_irq:
+ devfreq_remove_governor(&spdm_hyp_gov);
+nogov:
+ devm_kfree(&pdev->dev, irq);
+ return ret;
+}
+
+static int remove(struct platform_device *pdev)
+{
+	int *irq;
+
+ disable_clocks();
+ irq = platform_get_drvdata(pdev);
+ free_irq(*irq, pdev);
+ devfreq_remove_governor(&spdm_hyp_gov);
+ devm_kfree(&pdev->dev, irq);
+ return 0;
+}
+
+static const struct of_device_id gov_spdm_match[] = {
+ {.compatible = "qcom,gov_spdm_hyp"},
+ {}
+};
+
+static struct platform_driver gov_spdm_hyp_drvr = {
+ .driver = {
+ .name = "gov_spdm_hyp",
+ .owner = THIS_MODULE,
+ .of_match_table = gov_spdm_match,
+ },
+ .probe = probe,
+ .remove = remove,
+};
+
+static int __init governor_spdm_bw_hyp(void)
+{
+ return platform_driver_register(&gov_spdm_hyp_drvr);
+}
+
+module_init(governor_spdm_bw_hyp);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c
index 35de6e83c1fe..4fbde042e9dd 100644
--- a/drivers/devfreq/governor_userspace.c
+++ b/drivers/devfreq/governor_userspace.c
@@ -22,7 +22,8 @@ struct userspace_data {
bool valid;
};
-static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq)
+static int devfreq_userspace_func(struct devfreq *df, unsigned long *freq,
+ u32 *flag)
{
struct userspace_data *data = df->data;
diff --git a/drivers/devfreq/m4m-hwmon.c b/drivers/devfreq/m4m-hwmon.c
new file mode 100644
index 000000000000..51b077119cf3
--- /dev/null
+++ b/drivers/devfreq/m4m-hwmon.c
@@ -0,0 +1,429 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "m4m-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/spinlock.h>
+#include "governor_cache_hwmon.h"
+
+#define cntr_offset(idx) (sizeof(u32) * (idx))
+
+/* register offsets from base address */
+#define DCVS_VERSION(m) ((m)->base + 0x0)
+#define GLOBAL_CR_CTL(m) ((m)->base + 0x8)
+#define GLOBAL_CR_RESET(m) ((m)->base + 0xC)
+#define OVSTAT(m) ((m)->base + 0x30)
+#define OVCLR(m) ((m)->base + 0x34)
+#define OVSET(m) ((m)->base + 0x3C) /* unused */
+#define EVCNTR(m, x) ((m)->base + 0x40 + cntr_offset(x))
+#define CNTCTL(m, x) ((m)->base + 0x100 + cntr_offset(x))
+/* counters 0 and 1 do not have an event type control */
+#define EVTYPER_START 2
+#define EVTYPER(m, x) ((m)->base + 0x140 + cntr_offset(x))
+
+/* bitmasks for GLOBAL_CR_CTL and CNTCTLx */
+#define CNT_EN BIT(0)
+#define IRQ_EN BIT(1)
+
+/* non-configurable counters */
+#define CYC_CNTR_IDX 0
+#define WASTED_CYC_CNTR_IDX 1
+
+/* counter is 28-bit */
+#define CNT_MAX 0x0FFFFFFFU
+
+struct m4m_counter {
+ int idx;
+ u32 event_mask;
+ unsigned int last_start;
+};
+
+struct m4m_hwmon {
+ void __iomem *base;
+ struct m4m_counter cntr[MAX_NUM_GROUPS];
+ int num_cntr;
+ int irq;
+ struct cache_hwmon hw;
+ struct device *dev;
+};
+
+#define to_mon(ptr) container_of(ptr, struct m4m_hwmon, hw)
+
+static DEFINE_SPINLOCK(init_lock);
+
+/* Should only be called once while HW is in POR state */
+static inline void mon_global_init(struct m4m_hwmon *m)
+{
+ writel_relaxed(CNT_EN | IRQ_EN, GLOBAL_CR_CTL(m));
+}
+
+static inline void _mon_disable_cntr_and_irq(struct m4m_hwmon *m, int cntr_idx)
+{
+ writel_relaxed(0, CNTCTL(m, cntr_idx));
+}
+
+static inline void _mon_enable_cntr_and_irq(struct m4m_hwmon *m, int cntr_idx)
+{
+ writel_relaxed(CNT_EN | IRQ_EN, CNTCTL(m, cntr_idx));
+}
+
+static void mon_disable(struct m4m_hwmon *m)
+{
+ int i;
+
+ for (i = 0; i < m->num_cntr; i++)
+ _mon_disable_cntr_and_irq(m, m->cntr[i].idx);
+ /* make sure all counter/irq are indeed disabled */
+ mb();
+}
+
+static void mon_enable(struct m4m_hwmon *m)
+{
+ int i;
+
+ for (i = 0; i < m->num_cntr; i++)
+ _mon_enable_cntr_and_irq(m, m->cntr[i].idx);
+}
+
+static inline void _mon_ov_clear(struct m4m_hwmon *m, int cntr_idx)
+{
+ writel_relaxed(BIT(cntr_idx), OVCLR(m));
+}
+
+static void mon_ov_clear(struct m4m_hwmon *m, enum request_group grp)
+{
+ _mon_ov_clear(m, m->cntr[grp].idx);
+}
+
+static inline u32 mon_irq_status(struct m4m_hwmon *m)
+{
+ return readl_relaxed(OVSTAT(m));
+}
+
+static bool mon_is_ovstat_set(struct m4m_hwmon *m)
+{
+ int i;
+ u32 status = mon_irq_status(m);
+
+ for (i = 0; i < m->num_cntr; i++)
+ if (status & BIT(m->cntr[i].idx))
+ return true;
+ return false;
+}
+
+/* counter must be stopped first */
+static unsigned long _mon_get_count(struct m4m_hwmon *m,
+ int cntr_idx, unsigned int start)
+{
+ unsigned long cnt;
+ u32 cur_cnt = readl_relaxed(EVCNTR(m, cntr_idx));
+ u32 ov = readl_relaxed(OVSTAT(m)) & BIT(cntr_idx);
+
+ if (!ov && cur_cnt < start) {
+ dev_warn(m->dev, "Counter%d overflowed but not detected\n",
+ cntr_idx);
+ ov = 1;
+ }
+
+ if (ov)
+ cnt = CNT_MAX - start + cur_cnt;
+ else
+ cnt = cur_cnt - start;
+
+ return cnt;
+}
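+
+/*
+ * Example: if the counter was preloaded with start = CNT_MAX - 0x100
+ * and reads 0x10 after an overflow, the computed delta is
+ * CNT_MAX - start + cur = 0x100 + 0x10 = 0x110 events.
+ */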
+
+static unsigned long mon_get_count(struct m4m_hwmon *m,
+ enum request_group grp)
+{
+ return _mon_get_count(m, m->cntr[grp].idx, m->cntr[grp].last_start);
+}
+
+static inline void mon_set_limit(struct m4m_hwmon *m, enum request_group grp,
+ unsigned int limit)
+{
+ u32 start;
+
+ if (limit >= CNT_MAX)
+ limit = CNT_MAX;
+ start = CNT_MAX - limit;
+
+ writel_relaxed(start, EVCNTR(m, m->cntr[grp].idx));
+ m->cntr[grp].last_start = start;
+}
+
+static inline void mon_enable_cycle_cntr(struct m4m_hwmon *m)
+{
+ writel_relaxed(CNT_EN, CNTCTL(m, CYC_CNTR_IDX));
+}
+
+static inline void mon_disable_cycle_cntr(struct m4m_hwmon *m)
+{
+ _mon_disable_cntr_and_irq(m, CYC_CNTR_IDX);
+}
+
+static inline unsigned long mon_get_cycle_count(struct m4m_hwmon *m)
+{
+ return _mon_get_count(m, CYC_CNTR_IDX, 0);
+}
+
+static inline void mon_clear_cycle_cntr(struct m4m_hwmon *m)
+{
+ writel_relaxed(0, EVCNTR(m, CYC_CNTR_IDX));
+ _mon_ov_clear(m, CYC_CNTR_IDX);
+}
+
+static void mon_init(struct m4m_hwmon *m)
+{
+ static bool mon_inited;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&init_lock, flags);
+ if (!mon_inited)
+ mon_global_init(m);
+ spin_unlock_irqrestore(&init_lock, flags);
+
+ /* configure counter events */
+ for (i = 0; i < m->num_cntr; i++)
+		writel_relaxed(m->cntr[i].event_mask,
+			       EVTYPER(m, m->cntr[i].idx));
+}
+
+static irqreturn_t m4m_hwmon_intr_handler(int irq, void *dev)
+{
+ struct m4m_hwmon *m = dev;
+
+ if (mon_is_ovstat_set(m)) {
+ update_cache_hwmon(&m->hw);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static int count_to_mrps(unsigned long count, unsigned int us)
+{
+ do_div(count, us);
+ count++;
+ return count;
+}
+
+static unsigned int mrps_to_count(unsigned int mrps, unsigned int ms,
+ unsigned int tolerance)
+{
+ mrps += tolerance;
+ mrps *= ms * USEC_PER_MSEC;
+ return mrps;
+}
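+
+/*
+ * count_to_mrps() divides the raw event count by the sample window in
+ * microseconds (plus one), giving millions of requests per second:
+ * 50,000 events over a 10,000 us window yields 6 MRPS. Conversely,
+ * mrps_to_count(5, 10, 1) programs a limit of (5 + 1) * 10 * 1000 =
+ * 60,000 events.
+ */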
+
+static unsigned long m4m_meas_mrps_and_set_irq(struct cache_hwmon *hw,
+ unsigned int tol, unsigned int us, struct mrps_stats *mrps)
+{
+ struct m4m_hwmon *m = to_mon(hw);
+ unsigned long count, cyc_count;
+ unsigned long f = hw->df->previous_freq;
+ unsigned int sample_ms = hw->df->profile->polling_ms;
+ int i;
+ u32 limit;
+
+ mon_disable(m);
+ mon_disable_cycle_cntr(m);
+
+ /* calculate mrps and set limit */
+ for (i = 0; i < m->num_cntr; i++) {
+ count = mon_get_count(m, i);
+ mrps->mrps[i] = count_to_mrps(count, us);
+ limit = mrps_to_count(mrps->mrps[i], sample_ms, tol);
+ mon_ov_clear(m, i);
+ mon_set_limit(m, i, limit);
+ dev_dbg(m->dev, "Counter[%d] count 0x%lx, limit 0x%x\n",
+ m->cntr[i].idx, count, limit);
+ }
+
+ /* get cycle count and calculate busy percent */
+ cyc_count = mon_get_cycle_count(m);
+ mrps->busy_percent = mult_frac(cyc_count, 1000, us) * 100 / f;
+ mon_clear_cycle_cntr(m);
+ dev_dbg(m->dev, "Cycle count 0x%lx\n", cyc_count);
+
+ /* re-enable monitor */
+ mon_enable(m);
+ mon_enable_cycle_cntr(m);
+
+ return 0;
+}
+
+static int m4m_start_hwmon(struct cache_hwmon *hw, struct mrps_stats *mrps)
+{
+ struct m4m_hwmon *m = to_mon(hw);
+ unsigned int sample_ms = hw->df->profile->polling_ms;
+ int ret, i;
+ u32 limit;
+
+ ret = request_threaded_irq(m->irq, NULL, m4m_hwmon_intr_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(m->dev), m);
+ if (ret) {
+ dev_err(m->dev, "Unable to register for irq\n");
+ return ret;
+ }
+
+ mon_init(m);
+ mon_disable(m);
+ mon_disable_cycle_cntr(m);
+ for (i = 0; i < m->num_cntr; i++) {
+ mon_ov_clear(m, i);
+ limit = mrps_to_count(mrps->mrps[i], sample_ms, 0);
+ mon_set_limit(m, i, limit);
+ }
+ mon_clear_cycle_cntr(m);
+ mon_enable(m);
+ mon_enable_cycle_cntr(m);
+
+ return 0;
+}
+
+static void m4m_stop_hwmon(struct cache_hwmon *hw)
+{
+ struct m4m_hwmon *m = to_mon(hw);
+ int i;
+
+ mon_disable(m);
+ free_irq(m->irq, m);
+ for (i = 0; i < m->num_cntr; i++)
+ mon_ov_clear(m, i);
+}
+
+/* device probe functions */
+static const struct of_device_id match_table[] = {
+ { .compatible = "qcom,m4m-hwmon" },
+ {}
+};
+
+static int m4m_hwmon_parse_cntr(struct device *dev,
+ struct m4m_hwmon *m)
+{
+ u32 *data;
+ const char *prop_name = "qcom,counter-event-sel";
+ int ret, len, i;
+
+ if (!of_find_property(dev->of_node, prop_name, &len))
+ return -EINVAL;
+ len /= sizeof(*data);
+
+ if (len % 2 || len > MAX_NUM_GROUPS * 2)
+ return -EINVAL;
+
+ data = devm_kcalloc(dev, len, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ ret = of_property_read_u32_array(dev->of_node, prop_name, data, len);
+ if (ret)
+ return ret;
+
+ len /= 2;
+ m->num_cntr = len;
+ for (i = 0; i < len; i++) {
+ /* disallow non-configurable counters */
+ if (data[i * 2] < EVTYPER_START)
+ return -EINVAL;
+ m->cntr[i].idx = data[i * 2];
+ m->cntr[i].event_mask = data[i * 2 + 1];
+ }
+
+ devm_kfree(dev, data);
+ return 0;
+}
+
+static int m4m_hwmon_driver_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct m4m_hwmon *m;
+ int ret;
+
+ m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+ m->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev, "base not found!\n");
+ return -EINVAL;
+ }
+ m->base = devm_ioremap(dev, res->start, resource_size(res));
+ if (!m->base)
+ return -ENOMEM;
+
+ m->irq = platform_get_irq(pdev, 0);
+ if (m->irq < 0) {
+ dev_err(dev, "Unable to get IRQ number\n");
+ return m->irq;
+ }
+
+ ret = m4m_hwmon_parse_cntr(dev, m);
+ if (ret) {
+ dev_err(dev, "Unable to parse counter events\n");
+ return ret;
+ }
+
+ m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+ if (!m->hw.of_node)
+ return -EINVAL;
+ m->hw.start_hwmon = &m4m_start_hwmon;
+ m->hw.stop_hwmon = &m4m_stop_hwmon;
+ m->hw.meas_mrps_and_set_irq = &m4m_meas_mrps_and_set_irq;
+
+ ret = register_cache_hwmon(dev, &m->hw);
+ if (ret) {
+ dev_err(dev, "Dev BW hwmon registration failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver m4m_hwmon_driver = {
+ .probe = m4m_hwmon_driver_probe,
+ .driver = {
+ .name = "m4m-hwmon",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init m4m_hwmon_init(void)
+{
+ return platform_driver_register(&m4m_hwmon_driver);
+}
+module_init(m4m_hwmon_init);
+
+static void __exit m4m_hwmon_exit(void)
+{
+ platform_driver_unregister(&m4m_hwmon_driver);
+}
+module_exit(m4m_hwmon_exit);
+
+MODULE_DESCRIPTION("M4M hardware monitor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/devfreq/msmcci-hwmon.c b/drivers/devfreq/msmcci-hwmon.c
new file mode 100644
index 000000000000..e226c7b1ad0d
--- /dev/null
+++ b/drivers/devfreq/msmcci-hwmon.c
@@ -0,0 +1,627 @@
+/*
+ * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "msmcci-hwmon: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/mutex.h>
+#include <linux/math64.h>
+#include <linux/time.h>
+#include <linux/cpu_pm.h>
+#include <soc/qcom/scm.h>
+#include "governor_cache_hwmon.h"
+
+#define EVNT_SEL 0x0
+#define EVNT_CNT_MATCH_VAL 0x18
+#define MATCH_FLG 0x30
+#define MATCH_FLG_CLR 0x48
+#define OVR_FLG 0x60
+#define OVR_FLG_CLR 0x78
+#define CNT_CTRL 0x94
+#define CNT_VALUE 0xAC
+
+#define ENABLE_OVR_FLG BIT(4)
+#define ENABLE_MATCH_FLG BIT(5)
+#define ENABLE_EVNT_CNT BIT(0)
+#define RESET_EVNT_CNT BIT(1)
+
+#define CNT_DISABLE (ENABLE_OVR_FLG | ENABLE_MATCH_FLG)
+#define CNT_RESET_CLR (ENABLE_OVR_FLG | ENABLE_MATCH_FLG)
+#define CNT_ENABLE (ENABLE_OVR_FLG | ENABLE_MATCH_FLG | ENABLE_EVNT_CNT)
+#define CNT_RESET (ENABLE_OVR_FLG | ENABLE_MATCH_FLG | RESET_EVNT_CNT)
+
+struct msmcci_hwmon {
+ struct list_head list;
+
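+	/*
+	 * Counter registers are reached through scm_io_read()/
+	 * scm_io_write() on phys_base[] when "qcom,secure-io" is set,
+	 * and through a regular ioremap() mapping otherwise. Only one
+	 * of the two bases is ever used, hence the union.
+	 */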
+ union {
+ phys_addr_t phys_base[MAX_NUM_GROUPS];
+		void __iomem *virt_base[MAX_NUM_GROUPS];
+ };
+ int irq[MAX_NUM_GROUPS];
+ u32 event_sel[MAX_NUM_GROUPS];
+ int num_counters;
+
+ /*
+ * Multiple interrupts might fire together for one device.
+ * In that case, only one re-evaluation needs to be done.
+ */
+ struct mutex update_lock;
+
+ /* For counter state save and restore */
+ unsigned long cur_limit[MAX_NUM_GROUPS];
+ unsigned long cur_count[MAX_NUM_GROUPS];
+ bool mon_enabled;
+
+ struct cache_hwmon hw;
+ struct device *dev;
+ bool secure_io;
+ bool irq_shared;
+};
+
+#define to_mon(ptr) container_of(ptr, struct msmcci_hwmon, hw)
+
+static LIST_HEAD(msmcci_hwmon_list);
+static DEFINE_MUTEX(list_lock);
+
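+/*
+ * A single cpu_pm notifier serves every monitor instance on
+ * msmcci_hwmon_list; use_cnt counts the active monitors so that the
+ * notifier is registered on first use and unregistered on last.
+ */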
+static int use_cnt;
+static DEFINE_MUTEX(notifier_reg_lock);
+
+static inline int write_mon_reg(struct msmcci_hwmon *m, int idx,
+ unsigned long offset, u32 value)
+{
+ int ret = 0;
+
+ if (m->secure_io)
+ ret = scm_io_write(m->phys_base[idx] + offset, value);
+ else
+ writel_relaxed(value, m->virt_base[idx] + offset);
+
+ return ret;
+}
+
+static inline u32 read_mon_reg(struct msmcci_hwmon *m, int idx,
+ unsigned long offset)
+{
+ if (m->secure_io)
+ return scm_io_read(m->phys_base[idx] + offset);
+ else
+ return readl_relaxed(m->virt_base[idx] + offset);
+}
+
+static int mon_init(struct msmcci_hwmon *m)
+{
+ int ret, i;
+
+ for (i = 0; i < m->num_counters; i++) {
+ ret = write_mon_reg(m, i, EVNT_SEL, m->event_sel[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static void mon_enable(struct msmcci_hwmon *m)
+{
+ int i;
+
+ for (i = 0; i < m->num_counters; i++)
+ write_mon_reg(m, i, CNT_CTRL, CNT_ENABLE);
+}
+
+static void mon_disable(struct msmcci_hwmon *m)
+{
+ int i;
+
+ for (i = 0; i < m->num_counters; i++)
+ write_mon_reg(m, i, CNT_CTRL, CNT_DISABLE);
+}
+
+static bool mon_is_match_flag_set(struct msmcci_hwmon *m, int idx)
+{
+ return (bool)read_mon_reg(m, idx, MATCH_FLG);
+}
+
+/* mon_clear_single() can only be called when monitor is disabled */
+static void mon_clear_single(struct msmcci_hwmon *m, int idx)
+{
+ write_mon_reg(m, idx, CNT_CTRL, CNT_RESET);
+ write_mon_reg(m, idx, CNT_CTRL, CNT_RESET_CLR);
+ /* reset counter before match/overflow flags are cleared */
+ mb();
+ write_mon_reg(m, idx, MATCH_FLG_CLR, 1);
+ write_mon_reg(m, idx, MATCH_FLG_CLR, 0);
+ write_mon_reg(m, idx, OVR_FLG_CLR, 1);
+ write_mon_reg(m, idx, OVR_FLG_CLR, 0);
+}
+
+static void mon_set_limit_single(struct msmcci_hwmon *m, int idx, u32 limit)
+{
+ write_mon_reg(m, idx, EVNT_CNT_MATCH_VAL, limit);
+}
+
+static irqreturn_t msmcci_hwmon_shared_intr_handler(int irq, void *dev)
+{
+ struct msmcci_hwmon *m = dev;
+ int idx = -1, i;
+
+ for (i = 0; i < m->num_counters; i++) {
+ if (mon_is_match_flag_set(m, i)) {
+ idx = i;
+ break;
+ }
+ }
+ if (idx == -1)
+ return IRQ_NONE;
+
+ update_cache_hwmon(&m->hw);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t msmcci_hwmon_intr_handler(int irq, void *dev)
+{
+ struct msmcci_hwmon *m = dev;
+ int idx = -1, i;
+
+ for (i = 0; i < m->num_counters; i++) {
+ if (m->irq[i] == irq) {
+ idx = i;
+ break;
+ }
+ }
+ BUG_ON(idx == -1);
+
+	/*
+	 * Multiple independent interrupts could fire together and trigger
+	 * update_cache_hwmon() for the same device. Without this lock,
+	 * we could end up calling devfreq_monitor_start/stop()
+	 * concurrently, corrupting the timer/workqueue objects. We also
+	 * can't re-evaluate several times back to back, because such a
+	 * short window wouldn't be representative. Since
+	 * update_cache_hwmon() clears the match flags for all counters,
+	 * interrupts for other counters can simply return once their
+	 * match flags have been cleared.
+	 */
+ mutex_lock(&m->update_lock);
+ if (mon_is_match_flag_set(m, idx))
+ update_cache_hwmon(&m->hw);
+ mutex_unlock(&m->update_lock);
+ return IRQ_HANDLED;
+}
+
+static unsigned long mon_read_count_single(struct msmcci_hwmon *m, int idx)
+{
+ unsigned long count, ovr;
+
+ count = read_mon_reg(m, idx, CNT_VALUE);
+ ovr = read_mon_reg(m, idx, OVR_FLG);
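+	/*
+	 * OVR_FLG only records that at least one 32-bit wrap happened,
+	 * so compensate for a single wrap and warn; multiple wraps
+	 * within one sample window are indistinguishable and leave the
+	 * returned count as a lower bound.
+	 */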
+ if (ovr == 1) {
+ count += 0xFFFFFFFFUL;
+ dev_warn(m->dev, "Counter[%d]: overflowed\n", idx);
+ }
+ return count;
+}
+
+static int count_to_mrps(unsigned long count, unsigned int us)
+{
+	u64 cnt = count;
+
+	/* do_div() requires a 64-bit dividend */
+	do_div(cnt, us);
+	/* requests/us == millions of requests/s; the +1 rounds up */
+	return cnt + 1;
+}
+
+static unsigned int mrps_to_count(unsigned int mrps, unsigned int ms,
+ unsigned int tolerance)
+{
+ mrps += tolerance;
+ mrps *= ms * USEC_PER_MSEC;
+ return mrps;
+}
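+
+/*
+ * Worked example with made-up numbers: over a us = 50000 (50 ms)
+ * window, count = 5000000 gives count_to_mrps() = 5000000 / 50000 + 1
+ * = 101 MRPS. mrps_to_count(101, 50, 10) then re-arms the limit at
+ * (101 + 10) * 50 * 1000 = 5550000 events, so the IRQ fires within a
+ * window only if traffic rises above the tolerance band.
+ */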
+
+static unsigned long meas_mrps_and_set_irq(struct cache_hwmon *hw,
+ unsigned int tol, unsigned int us, struct mrps_stats *mrps)
+{
+ struct msmcci_hwmon *m = to_mon(hw);
+ unsigned long count;
+ unsigned int sample_ms = hw->df->profile->polling_ms;
+ int i;
+ u32 limit;
+
+ mon_disable(m);
+
+ /* calculate mrps and set limit */
+ for (i = 0; i < m->num_counters; i++) {
+ count = mon_read_count_single(m, i);
+		/*
+		 * When the CCI is power collapsed, the counters are
+		 * cleared. Add the saved count to the current reading,
+		 * then clear it so it is never applied twice.
+		 */
+ count += m->cur_count[i];
+ m->cur_count[i] = 0;
+
+ mrps->mrps[i] = count_to_mrps(count, us);
+ limit = mrps_to_count(mrps->mrps[i], sample_ms, tol);
+
+ mon_clear_single(m, i);
+ mon_set_limit_single(m, i, limit);
+ /* save current limit for restoring after power collapse */
+ m->cur_limit[i] = limit;
+
+ dev_dbg(m->dev, "Counter[%d] count 0x%lx, limit 0x%x\n",
+ i, count, limit);
+ }
+
+ /*
+ * There is no cycle counter for this device.
+ * Treat all cycles as busy.
+ */
+ mrps->busy_percent = 100;
+
+ /* re-enable monitor */
+ mon_enable(m);
+
+ return 0;
+}
+
+static void msmcci_hwmon_save_state(void)
+{
+ int i;
+ struct msmcci_hwmon *m;
+
+ list_for_each_entry(m, &msmcci_hwmon_list, list) {
+ if (!m->mon_enabled)
+ continue;
+ mon_disable(m);
+		/*
+		 * Power collapse might happen multiple times before the
+		 * next re-evaluation, so accumulate into the saved
+		 * count. Clear the counter after reading in case the
+		 * power collapse is aborted and the register values are
+		 * not wiped.
+		 */
+ for (i = 0; i < m->num_counters; i++) {
+ m->cur_count[i] += mon_read_count_single(m, i);
+ mon_clear_single(m, i);
+ }
+ }
+}
+
+static void msmcci_hwmon_restore_limit(struct msmcci_hwmon *m, int i)
+{
+ u32 new_limit;
+
+ if (m->cur_count[i] < m->cur_limit[i]) {
+ new_limit = m->cur_limit[i] - m->cur_count[i];
+ } else {
+		/*
+		 * If the count exceeded the limit, the interrupt should
+		 * have fired and prevented the power collapse. In case
+		 * that interrupt never arrives, restore the previous
+		 * limit so that one is still triggered at some point.
+		 */
+ new_limit = m->cur_limit[i];
+ }
+ mon_set_limit_single(m, i, new_limit);
+ dev_dbg(m->dev, "Counter[%d] restore limit to 0x%x, saved count 0x%lx\n",
+ i, new_limit, m->cur_count[i]);
+}
+
+static void msmcci_hwmon_restore_state(void)
+{
+ int i;
+ struct msmcci_hwmon *m;
+
+ list_for_each_entry(m, &msmcci_hwmon_list, list) {
+ if (!m->mon_enabled)
+ continue;
+ mon_init(m);
+ for (i = 0; i < m->num_counters; i++)
+ msmcci_hwmon_restore_limit(m, i);
+ mon_enable(m);
+ }
+}
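+
+/*
+ * Save/restore sequence around a CCI power collapse:
+ *
+ *   CPU_CLUSTER_PM_ENTER        -> msmcci_hwmon_save_state():
+ *                                  counters read into cur_count[]
+ *                                  and cleared
+ *   <register state lost in the collapse>
+ *   CPU_CLUSTER_PM_EXIT or
+ *   CPU_CLUSTER_PM_ENTER_FAILED -> msmcci_hwmon_restore_state():
+ *                                  event selectors reprogrammed and
+ *                                  the remaining limit re-armed
+ */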
+
+#define CCI_LEVEL 2
+static int msmcci_hwmon_pm_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ unsigned int level = (unsigned long) data;
+
+ if (level != CCI_LEVEL)
+ return NOTIFY_DONE;
+
+	/*
+	 * When the CCI power collapse callback runs, only the current
+	 * CPU is executing code, so there is no need to hold a mutex
+	 * or spinlock.
+	 */
+ switch (val) {
+ case CPU_CLUSTER_PM_ENTER:
+ msmcci_hwmon_save_state();
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_EXIT:
+ msmcci_hwmon_restore_state();
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block pm_notifier_block = {
+ .notifier_call = msmcci_hwmon_pm_callback,
+};
+
+static int register_pm_notifier(struct msmcci_hwmon *m)
+{
+ int ret;
+
+ mutex_lock(&notifier_reg_lock);
+ if (!use_cnt) {
+ ret = cpu_pm_register_notifier(&pm_notifier_block);
+ if (ret) {
+ dev_err(m->dev, "Failed to register for PM notification\n");
+ mutex_unlock(&notifier_reg_lock);
+ return ret;
+ }
+ }
+ use_cnt++;
+ mutex_unlock(&notifier_reg_lock);
+
+ return 0;
+}
+
+static void unregister_pm_notifier(void)
+{
+ mutex_lock(&notifier_reg_lock);
+ use_cnt--;
+ if (!use_cnt)
+ cpu_pm_unregister_notifier(&pm_notifier_block);
+ mutex_unlock(&notifier_reg_lock);
+}
+
+static int request_shared_interrupt(struct msmcci_hwmon *m)
+{
+ int ret;
+
+ ret = request_threaded_irq(m->irq[HIGH], NULL,
+ msmcci_hwmon_shared_intr_handler,
+ IRQF_ONESHOT | IRQF_SHARED,
+ dev_name(m->dev), m);
+ if (ret)
+ dev_err(m->dev, "Unable to register shared interrupt handler for irq %d\n",
+ m->irq[HIGH]);
+
+ return ret;
+}
+
+static int request_interrupts(struct msmcci_hwmon *m)
+{
+ int i, ret;
+
+ for (i = 0; i < m->num_counters; i++) {
+ ret = request_threaded_irq(m->irq[i], NULL,
+ msmcci_hwmon_intr_handler, IRQF_ONESHOT,
+ dev_name(m->dev), m);
+ if (ret) {
+ dev_err(m->dev, "Unable to register interrupt handler for irq %d\n",
+ m->irq[i]);
+ goto irq_failure;
+ }
+ }
+ return 0;
+
+irq_failure:
+	/* unwind every irq requested so far, including index 0 */
+	for (i--; i >= 0; i--) {
+ disable_irq(m->irq[i]);
+ free_irq(m->irq[i], m);
+ }
+ return ret;
+}
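+
+/*
+ * Two IRQ topologies are supported: with "qcom,shared-irq" a single
+ * line (the HIGH group's) covers all counters and the handler polls
+ * the match flags to find the source; otherwise each counter group
+ * has its own line and the handler maps the irq number back to its
+ * counter.
+ */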
+
+static int start_hwmon(struct cache_hwmon *hw, struct mrps_stats *mrps)
+{
+ struct msmcci_hwmon *m = to_mon(hw);
+ unsigned int sample_ms = hw->df->profile->polling_ms;
+ int ret, i;
+ u32 limit;
+
+ ret = register_pm_notifier(m);
+ if (ret)
+ return ret;
+
+ if (m->irq_shared)
+ ret = request_shared_interrupt(m);
+ else
+ ret = request_interrupts(m);
+
+ if (ret) {
+		unregister_pm_notifier();
+ return ret;
+ }
+ mon_init(m);
+ mon_disable(m);
+ for (i = 0; i < m->num_counters; i++) {
+ mon_clear_single(m, i);
+ limit = mrps_to_count(mrps->mrps[i], sample_ms, 0);
+ mon_set_limit_single(m, i, limit);
+ }
+ mon_enable(m);
+ m->mon_enabled = true;
+
+ return 0;
+}
+
+static void stop_hwmon(struct cache_hwmon *hw)
+{
+ struct msmcci_hwmon *m = to_mon(hw);
+ int i;
+
+ m->mon_enabled = false;
+ mon_disable(m);
+
+ for (i = 0; i < m->num_counters; i++) {
+ if (!m->irq_shared || i == HIGH) {
+ disable_irq(m->irq[i]);
+ free_irq(m->irq[i], m);
+ }
+ mon_clear_single(m, i);
+ }
+
+	unregister_pm_notifier();
+}
+
+static int msmcci_hwmon_parse_dt(struct platform_device *pdev,
+ struct msmcci_hwmon *m, int idx)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ u32 sel;
+ int ret;
+
+ if (idx >= MAX_NUM_GROUPS)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
+ if (!res)
+ return (idx == HIGH) ? -EINVAL : 0;
+
+ if (m->secure_io)
+ m->phys_base[idx] = res->start;
+ else {
+ m->virt_base[idx] = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!m->virt_base[idx]) {
+ dev_err(dev, "failed to ioremap\n");
+ return -ENOMEM;
+ }
+ }
+
+ ret = of_property_read_u32_index(pdev->dev.of_node,
+ "qcom,counter-event-sel", idx, &sel);
+ if (ret) {
+ dev_err(dev, "Counter[%d] failed to read event sel\n", idx);
+ return ret;
+ }
+ m->event_sel[idx] = sel;
+
+ if (!m->irq_shared || idx == HIGH) {
+ m->irq[idx] = platform_get_irq(pdev, idx);
+ if (m->irq[idx] < 0) {
+ dev_err(dev, "Counter[%d] failed to get IRQ number\n",
+ idx);
+ return m->irq[idx];
+ }
+ }
+ m->num_counters++;
+ return 0;
+}
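+
+/*
+ * Sketch of a matching devicetree node (the addresses, interrupt
+ * specifiers and event selectors below are illustrative placeholders,
+ * not values from any real board):
+ *
+ *	cci-hwmon {
+ *		compatible = "qcom,msmcci-hwmon";
+ *		reg = <0x1011000 0x1000>,   (HIGH, mandatory)
+ *		      <0x1012000 0x1000>,   (MED, optional)
+ *		      <0x1013000 0x1000>;   (LOW, optional)
+ *		interrupts = <0 98 4>, <0 99 4>, <0 100 4>;
+ *		qcom,counter-event-sel = <0x5 0x6 0x7>;
+ *		qcom,target-dev = <&cci>;
+ *	};
+ */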
+
+static int msmcci_hwmon_driver_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct msmcci_hwmon *m;
+ int ret;
+
+ m = devm_kzalloc(dev, sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return -ENOMEM;
+ m->dev = &pdev->dev;
+
+ m->secure_io = of_property_read_bool(pdev->dev.of_node,
+ "qcom,secure-io");
+
+ m->irq_shared = of_property_read_bool(pdev->dev.of_node,
+ "qcom,shared-irq");
+
+ ret = msmcci_hwmon_parse_dt(pdev, m, HIGH);
+ if (ret)
+ return ret;
+ ret = msmcci_hwmon_parse_dt(pdev, m, MED);
+ if (ret)
+ return ret;
+ ret = msmcci_hwmon_parse_dt(pdev, m, LOW);
+ if (ret)
+ return ret;
+
+ m->hw.of_node = of_parse_phandle(dev->of_node, "qcom,target-dev", 0);
+ if (!m->hw.of_node) {
+ dev_err(dev, "No target device specified\n");
+ return -EINVAL;
+ }
+ m->hw.start_hwmon = &start_hwmon;
+ m->hw.stop_hwmon = &stop_hwmon;
+ m->hw.meas_mrps_and_set_irq = &meas_mrps_and_set_irq;
+ mutex_init(&m->update_lock);
+
+	/*
+	 * Program the event selectors once here. With "qcom,secure-io"
+	 * this also verifies that secure register access works before
+	 * the monitor is registered with the governor.
+	 */
+ ret = mon_init(m);
+ if (ret) {
+ dev_err(dev, "Failed to config monitor. Cache hwmon not registered\n");
+ return ret;
+ }
+
+ ret = register_cache_hwmon(dev, &m->hw);
+ if (ret) {
+ dev_err(dev, "MSMCCI cache hwmon registration failed\n");
+ return ret;
+ }
+
+ mutex_lock(&list_lock);
+ list_add_tail(&m->list, &msmcci_hwmon_list);
+ mutex_unlock(&list_lock);
+
+ dev_info(dev, "MSMCCI cache hwmon registered\n");
+ return 0;
+}
+
+static const struct of_device_id match_table[] = {
+ { .compatible = "qcom,msmcci-hwmon" },
+ {}
+};
+
+static struct platform_driver msmcci_hwmon_driver = {
+ .probe = msmcci_hwmon_driver_probe,
+ .driver = {
+ .name = "msmcci-hwmon",
+ .of_match_table = match_table,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init msmcci_hwmon_init(void)
+{
+ return platform_driver_register(&msmcci_hwmon_driver);
+}
+module_init(msmcci_hwmon_init);
+
+static void __exit msmcci_hwmon_exit(void)
+{
+ platform_driver_unregister(&msmcci_hwmon_driver);
+}
+module_exit(msmcci_hwmon_exit);
+
+MODULE_DESCRIPTION("QTI CCI performance monitor driver");
+MODULE_LICENSE("GPL v2");