author		Rohit Gupta <rohgup@codeaurora.org>	2018-05-29 17:23:04 +0200
committer	Georg Veichtlbauer <georg@vware.at>	2023-07-26 21:01:08 +0200
commit		821cf3a1a8476001f02a27771dcd27ef8b913feb (patch)
tree		4449b2c129d09812d864d495bf8eaad44df7a248
parent		9f52618d15b8c2461ad3ade8440a2ac1e21d203c (diff)
BACKPORT: cpufreq: schedutil: Cache tunables on governor exit
Currently, when all the related CPUs of a policy go offline or the governor is
switched, the cpufreq framework calls sugov_exit(), which frees the governor
tunables. When any of the related CPUs comes back online, or the governor is
switched back to schedutil, sugov_init() is called and allocates a fresh set of
tunables initialized to default values. As a result, userspace settings for
those tunables are lost across governor switches or when an entire cluster is
hotplugged out.

To prevent this, save the tunable values on governor exit and restore them to
the newly allocated tunables on governor init.

Change-Id: I671d4d0e1a4e63e948bfddb0005367df33c0c249
Signed-off-by: Rohit Gupta <rohgup@codeaurora.org>
[Caching and restoring different tunables.]
Signed-off-by: joshuous <joshuous@gmail.com>
Change-Id: I852ae2d23f10c9337e7057a47adcc46fe0623c6a
Signed-off-by: joshuous <joshuous@gmail.com>
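For illustration only, the snippet below is a minimal user-space sketch of the
save/restore pattern this patch applies: copy the live tunable values into a
cached copy before the structure is freed, and copy them back into the freshly
allocated structure on the next init. All names and values here are
illustrative, not the kernel's; the actual implementation is in the diff below.

/* sketch of the cache-on-exit, restore-on-init pattern (not kernel code) */
#include <stdio.h>
#include <stdlib.h>

struct tunables {
	unsigned int up_rate_limit_us;
	unsigned int down_rate_limit_us;
};

static struct tunables *cached;	/* survives exit/init cycles */

static void tunables_save(const struct tunables *t)
{
	if (!cached) {
		cached = calloc(1, sizeof(*cached));
		if (!cached)
			return;		/* caching is best effort */
	}
	*cached = *t;			/* remember the userspace settings */
}

static void tunables_restore(struct tunables *t)
{
	if (!cached)
		return;			/* nothing saved yet: keep defaults */
	*t = *cached;
}

int main(void)
{
	struct tunables *t = calloc(1, sizeof(*t));

	t->up_rate_limit_us = 500;	/* values set by "userspace" */
	t->down_rate_limit_us = 20000;

	tunables_save(t);		/* "governor exit": cache, then free */
	free(t);

	t = calloc(1, sizeof(*t));	/* "governor init": defaults again */
	tunables_restore(t);		/* bring back the cached settings */

	printf("up=%u down=%u\n", t->up_rate_limit_us, t->down_rate_limit_us);
	free(t);
	free(cached);
	return 0;
}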
-rw-r--r--	kernel/sched/cpufreq_schedutil.c	49
1 file changed, 48 insertions, 1 deletion
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 580ab6e4a529..f4c7e6f81460 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -82,6 +82,7 @@ struct sugov_cpu {
};
static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
+static DEFINE_PER_CPU(struct sugov_tunables *, cached_tunables);
/************************ Governor internals ***********************/
@@ -640,6 +641,29 @@ static struct sugov_tunables *sugov_tunables_alloc(struct sugov_policy *sg_polic
return tunables;
}
+static void sugov_tunables_save(struct cpufreq_policy *policy,
+ struct sugov_tunables *tunables)
+{
+ int cpu;
+ struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);
+
+ if (!have_governor_per_policy())
+ return;
+
+ if (!cached) {
+ cached = kzalloc(sizeof(*tunables), GFP_KERNEL);
+ if (!cached) {
+ pr_warn("Couldn't allocate tunables for caching\n");
+ return;
+ }
+ for_each_cpu(cpu, policy->related_cpus)
+ per_cpu(cached_tunables, cpu) = cached;
+ }
+
+ cached->up_rate_limit_us = tunables->up_rate_limit_us;
+ cached->down_rate_limit_us = tunables->down_rate_limit_us;
+}
+
static void sugov_tunables_free(struct sugov_tunables *tunables)
{
if (!have_governor_per_policy())
@@ -648,6 +672,25 @@ static void sugov_tunables_free(struct sugov_tunables *tunables)
kfree(tunables);
}
+static void sugov_tunables_restore(struct cpufreq_policy *policy)
+{
+ struct sugov_policy *sg_policy = policy->governor_data;
+ struct sugov_tunables *tunables = sg_policy->tunables;
+ struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);
+
+ if (!cached)
+ return;
+
+ tunables->up_rate_limit_us = cached->up_rate_limit_us;
+ tunables->down_rate_limit_us = cached->down_rate_limit_us;
+ sg_policy->up_rate_delay_ns =
+ tunables->up_rate_limit_us * NSEC_PER_USEC;
+ sg_policy->down_rate_delay_ns =
+ tunables->down_rate_limit_us * NSEC_PER_USEC;
+ sg_policy->min_rate_limit_ns = min(sg_policy->up_rate_delay_ns,
+ sg_policy->down_rate_delay_ns);
+}
+
static int sugov_init(struct cpufreq_policy *policy)
{
struct sugov_policy *sg_policy;
@@ -710,6 +753,8 @@ static int sugov_init(struct cpufreq_policy *policy)
policy->governor_data = sg_policy;
sg_policy->tunables = tunables;
+ sugov_tunables_restore(policy);
+
ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
get_governor_parent_kobj(policy), "%s",
cpufreq_gov_schedutil.name);
@@ -749,8 +794,10 @@ static int sugov_exit(struct cpufreq_policy *policy)
count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
policy->governor_data = NULL;
- if (!count)
+ if (!count) {
+ sugov_tunables_save(policy, tunables);
sugov_tunables_free(tunables);
+ }
mutex_unlock(&global_tunables_lock);