Diffstat (limited to 'drivers/cpufreq/cpu-boost.c')
-rw-r--r--  drivers/cpufreq/cpu-boost.c  369
1 file changed, 369 insertions(+), 0 deletions(-)
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
new file mode 100644
index 000000000000..9a181ee6ea8d
--- /dev/null
+++ b/drivers/cpufreq/cpu-boost.c
@@ -0,0 +1,369 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "cpu-boost: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/time.h>
+
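+/*
+ * Per-CPU boost state: input_boost_freq is the configured boost target and
+ * input_boost_min is the policy floor currently being enforced for the CPU.
+ */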
+struct cpu_sync {
+ int cpu;
+ unsigned int input_boost_min;
+ unsigned int input_boost_freq;
+};
+
+static DEFINE_PER_CPU(struct cpu_sync, sync_info);
+static struct workqueue_struct *cpu_boost_wq;
+
+static struct work_struct input_boost_work;
+static bool input_boost_enabled;
+
+static unsigned int input_boost_ms = 40;
+module_param(input_boost_ms, uint, 0644);
+
+static bool sched_boost_on_input;
+module_param(sched_boost_on_input, bool, 0644);
+
+static bool sched_boost_active;
+
+static struct delayed_work input_boost_rem;
+static u64 last_input_time;
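+/* Ignore input events that arrive within 150ms of the previous boost */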
+#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
+
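+/*
+ * Parse input_boost_freq: either a single frequency in kHz applied to all
+ * CPUs, or space-separated "cpu:freq" pairs. Boosting is enabled whenever
+ * any CPU has a non-zero boost frequency configured.
+ */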
+static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
+{
+ int i, ntokens = 0;
+ unsigned int val, cpu;
+ const char *cp = buf;
+ bool enabled = false;
+
+ while ((cp = strpbrk(cp + 1, " :")))
+ ntokens++;
+
+ /* single number: apply to all CPUs */
+ if (!ntokens) {
+ if (sscanf(buf, "%u\n", &val) != 1)
+ return -EINVAL;
+ for_each_possible_cpu(i)
+ per_cpu(sync_info, i).input_boost_freq = val;
+ goto check_enable;
+ }
+
+ /* Space-separated CPU:value pairs */
+ if (!(ntokens % 2))
+ return -EINVAL;
+
+ cp = buf;
+ for (i = 0; i < ntokens; i += 2) {
+ if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
+ return -EINVAL;
+ if (cpu >= num_possible_cpus())
+ return -EINVAL;
+
+ per_cpu(sync_info, cpu).input_boost_freq = val;
+ cp = strchr(cp, ' ');
+ if (!cp)
+ break;
+ cp++;
+ }
+
+check_enable:
+ for_each_possible_cpu(i) {
+ if (per_cpu(sync_info, i).input_boost_freq) {
+ enabled = true;
+ break;
+ }
+ }
+ input_boost_enabled = enabled;
+
+ return 0;
+}
+
+static int get_input_boost_freq(char *buf, const struct kernel_param *kp)
+{
+ int cnt = 0, cpu;
+ struct cpu_sync *s;
+
+ for_each_possible_cpu(cpu) {
+ s = &per_cpu(sync_info, cpu);
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
+ "%d:%u ", cpu, s->input_boost_freq);
+ }
+ cnt += snprintf(buf + cnt, PAGE_SIZE - cnt, "\n");
+ return cnt;
+}
+
+static const struct kernel_param_ops param_ops_input_boost_freq = {
+ .set = set_input_boost_freq,
+ .get = get_input_boost_freq,
+};
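+/* Exposed as /sys/module/cpu_boost/parameters/input_boost_freq */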
+module_param_cb(input_boost_freq, &param_ops_input_boost_freq, NULL, 0644);
+
+/*
+ * The CPUFREQ_ADJUST notifier is used to override the current policy min to
+ * make sure policy min >= boost_min. The cpufreq framework then does the job
+ * of enforcing the new policy.
+ *
+ * Boosts are applied and removed from workqueue context by calling
+ * cpufreq_update_policy() on each online CPU, which re-evaluates the policy
+ * and invokes this notifier.
+ */
+static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_policy *policy = data;
+ unsigned int cpu = policy->cpu;
+ struct cpu_sync *s = &per_cpu(sync_info, cpu);
+ unsigned int ib_min = s->input_boost_min;
+
+ switch (val) {
+ case CPUFREQ_ADJUST:
+ if (!ib_min)
+ break;
+
+ pr_debug("CPU%u policy min before boost: %u kHz\n",
+ cpu, policy->min);
+ pr_debug("CPU%u boost min: %u kHz\n", cpu, ib_min);
+
+ cpufreq_verify_within_limits(policy, ib_min, UINT_MAX);
+
+ pr_debug("CPU%u policy min after boost: %u kHz\n",
+ cpu, policy->min);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block boost_adjust_nb = {
+ .notifier_call = boost_adjust_notify,
+};
+
+static void update_policy_online(void)
+{
+ unsigned int i;
+
+ /* Re-evaluate policy to trigger adjust notifier for online CPUs */
+ get_online_cpus();
+ for_each_online_cpu(i) {
+ pr_debug("Updating policy for CPU%d\n", i);
+ cpufreq_update_policy(i);
+ }
+ put_online_cpus();
+}
+
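+/*
+ * Runs input_boost_ms after a boost: clear every CPU's boost floor and
+ * release the scheduler boost if it was taken.
+ */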
+static void do_input_boost_rem(struct work_struct *work)
+{
+ unsigned int i;
+ int ret;
+ struct cpu_sync *i_sync_info;
+
+ /* Reset the input_boost_min for all CPUs in the system */
+ pr_debug("Resetting input boost min for all CPUs\n");
+ for_each_possible_cpu(i) {
+ i_sync_info = &per_cpu(sync_info, i);
+ i_sync_info->input_boost_min = 0;
+ }
+
+ /* Update policies for all online CPUs */
+ update_policy_online();
+
+ if (sched_boost_active) {
+ ret = sched_set_boost(0);
+ if (ret)
+ pr_err("cpu-boost: HMP boost disable failed\n");
+ sched_boost_active = false;
+ }
+}
+
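+/*
+ * Apply a boost: raise every CPU's policy floor to its input_boost_freq,
+ * optionally enable scheduler boost, and queue the delayed removal work.
+ */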
+static void do_input_boost(struct work_struct *work)
+{
+ unsigned int i;
+ int ret;
+ struct cpu_sync *i_sync_info;
+
+ cancel_delayed_work_sync(&input_boost_rem);
+ if (sched_boost_active) {
+ sched_set_boost(0);
+ sched_boost_active = false;
+ }
+
+ /* Set the input_boost_min for all CPUs in the system */
+ pr_debug("Setting input boost min for all CPUs\n");
+ for_each_possible_cpu(i) {
+ i_sync_info = &per_cpu(sync_info, i);
+ i_sync_info->input_boost_min = i_sync_info->input_boost_freq;
+ }
+
+ /* Update policies for all online CPUs */
+ update_policy_online();
+
+ /* Enable scheduler boost to migrate tasks to big cluster */
+ if (sched_boost_on_input) {
+ ret = sched_set_boost(1);
+ if (ret)
+ pr_err("cpu-boost: HMP boost enable failed\n");
+ else
+ sched_boost_active = true;
+ }
+
+ queue_delayed_work(cpu_boost_wq, &input_boost_rem,
+ msecs_to_jiffies(input_boost_ms));
+}
+
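+/*
+ * Input event callback. Boosts are rate-limited: events that arrive within
+ * MIN_INPUT_INTERVAL of the previous boost, or while a boost is still
+ * pending, are ignored.
+ */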
+static void cpuboost_input_event(struct input_handle *handle,
+ unsigned int type, unsigned int code, int value)
+{
+ u64 now;
+
+ if (!input_boost_enabled)
+ return;
+
+ now = ktime_to_us(ktime_get());
+ if (now - last_input_time < MIN_INPUT_INTERVAL)
+ return;
+
+ if (work_pending(&input_boost_work))
+ return;
+
+ queue_work(cpu_boost_wq, &input_boost_work);
+ last_input_time = now;
+}
+
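+/* Attach to any input device that matches cpuboost_ids below */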
+static int cpuboost_input_connect(struct input_handler *handler,
+ struct input_dev *dev, const struct input_device_id *id)
+{
+ struct input_handle *handle;
+ int error;
+
+ handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
+ if (!handle)
+ return -ENOMEM;
+
+ handle->dev = dev;
+ handle->handler = handler;
+ handle->name = "cpufreq";
+
+ error = input_register_handle(handle);
+ if (error)
+ goto err2;
+
+ error = input_open_device(handle);
+ if (error)
+ goto err1;
+
+ return 0;
+err1:
+ input_unregister_handle(handle);
+err2:
+ kfree(handle);
+ return error;
+}
+
+static void cpuboost_input_disconnect(struct input_handle *handle)
+{
+ input_close_device(handle);
+ input_unregister_handle(handle);
+ kfree(handle);
+}
+
+static const struct input_device_id cpuboost_ids[] = {
+ /* multi-touch touchscreen */
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .evbit = { BIT_MASK(EV_ABS) },
+ .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
+ BIT_MASK(ABS_MT_POSITION_X) |
+ BIT_MASK(ABS_MT_POSITION_Y) },
+ },
+ /* touchpad */
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
+ INPUT_DEVICE_ID_MATCH_ABSBIT,
+ .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
+ .absbit = { [BIT_WORD(ABS_X)] =
+ BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
+ },
+ /* Keypad */
+ {
+ .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+ .evbit = { BIT_MASK(EV_KEY) },
+ },
+ { },
+};
+
+static struct input_handler cpuboost_input_handler = {
+ .event = cpuboost_input_event,
+ .connect = cpuboost_input_connect,
+ .disconnect = cpuboost_input_disconnect,
+ .name = "cpu-boost",
+ .id_table = cpuboost_ids,
+};
+
+static int cpu_boost_init(void)
+{
+ int cpu, ret;
+ struct cpu_sync *s;
+
+ cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
+ if (!cpu_boost_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&input_boost_work, do_input_boost);
+ INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);
+
+ for_each_possible_cpu(cpu) {
+ s = &per_cpu(sync_info, cpu);
+ s->cpu = cpu;
+ }
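+
+ /* Hook policy updates so boost_adjust_notify() can enforce the boost floor */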
+ cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
+ ret = input_register_handler(&cpuboost_input_handler);
+
+ return ret;
+}
+late_initcall(cpu_boost_init);