author	Chris Redpath <chris.redpath@arm.com>	2017-04-25 10:37:58 +0100
committer	Chris Redpath <chris.redpath@arm.com>	2017-10-27 13:30:32 +0100
commit	fac311be26e5af64612c386f5a041984fe7c59a2 (patch)
tree	f710754397aa5e087c2ea7ac29ae087b79426d02
parent	c8bc3e3a3ede641b48360ab0bd35eb316168008b (diff)
cpufreq/sched: Consider max cpu capacity when choosing frequencies
When using schedfreq on cpus whose max capacity is significantly smaller than 1024, the tick update uses non-normalised capacities. This leads to selecting an incorrect OPP, since we were scaling the frequency as if the max achievable capacity were 1024 rather than the max for that particular cpu or group. As a result, a cpu could get stuck at the lowest OPP, unable to generate enough utilisation to climb back out.

Instead, normalise the capacity to the range 0-1024 in the tick so that when we later select a frequency, we get the correct one. Comments are also updated to be clearer about what is needed.

Change-Id: Id84391c7ac015311002ada21813a353ee13bee60
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
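As a worked illustration of the fix (a user-space sketch, not kernel code): only the formula util * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu) and SCHED_CAPACITY_SCALE = 1024 come from the patch; the normalize_util() helper and the LITTLE-cpu numbers below are hypothetical.

#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL	/* the kernel's 0-1024 capacity range */

/*
 * Mirror of the patch's scaling step: a fully loaded cpu reports 1024
 * regardless of how small its max capacity is.
 */
static unsigned long normalize_util(unsigned long util, unsigned long capacity_orig)
{
	return util * SCHED_CAPACITY_SCALE / capacity_orig;
}

int main(void)
{
	unsigned long capacity_orig = 430;	/* hypothetical LITTLE cpu */
	unsigned long raw_util = 400;		/* nearly saturated for its size */

	/*
	 * Raw: 400/1024 (~39%) looks mostly idle, so the lowest OPP is kept.
	 * Normalized: 400 * 1024 / 430 = 952 (~93%), so a high OPP is chosen.
	 */
	printf("raw=%lu normalized=%lu\n",
	       raw_util, normalize_util(raw_util, capacity_orig));
	return 0;
}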
-rw-r--r--	kernel/sched/core.c	4
-rw-r--r--	kernel/sched/fair.c	4
-rw-r--r--	kernel/sched/sched.h	4
3 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 83f7c682032b..9cf530a6123e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2987,7 +2987,9 @@ static void sched_freq_tick_pelt(int cpu)
 	 * utilization and to harm its performance the least, request
 	 * a jump to a higher OPP as soon as the margin of free capacity
 	 * is impacted (specified by capacity_margin).
+	 * Remember CPU utilization in sched_capacity_reqs should be normalised.
 	 */
+	cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
 	set_cfs_cpu_capacity(cpu, true, cpu_utilization);
 }
@@ -3014,7 +3016,9 @@ static void sched_freq_tick_walt(int cpu)
 	 * It is likely that the load is growing so we
 	 * keep the added margin in our request as an
 	 * extra boost.
+	 * Remember CPU utilization in sched_capacity_reqs should be normalised.
 	 */
+	cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
 	set_cfs_cpu_capacity(cpu, true, cpu_utilization);
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5cac6a77b2bc..e6b2461d07d6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4671,7 +4671,7 @@ static void update_capacity_of(int cpu)
 	if (!sched_freq())
 		return;
 
-	/* Convert scale-invariant capacity to cpu. */
+	/* Normalize scale-invariant capacity to cpu. */
 	req_cap = boosted_cpu_util(cpu);
 	req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
 	set_cfs_cpu_capacity(cpu, true, req_cap);
@@ -4864,7 +4864,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 		if (rq->cfs.nr_running)
 			update_capacity_of(cpu_of(rq));
 		else if (sched_freq())
-			set_cfs_cpu_capacity(cpu_of(rq), false, 0);
+			set_cfs_cpu_capacity(cpu_of(rq), false, 0); /* no normalization required for 0 */
 	}
 }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9f3d89faacdc..5256f05a26e8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1621,6 +1621,10 @@ static inline bool sched_freq(void)
 	return static_key_false(&__sched_freq);
 }
 
+/*
+ * sched_capacity_reqs expects capacity requests to be normalised.
+ * All capacities should sum to the range of 0-1024.
+ */
 DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
 void update_cpu_capacity_request(int cpu, bool request);
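For context on how a normalised request might be consumed downstream, here is a deliberately simplified sketch. It is not schedfreq's actual implementation (a real governor walks the OPP table rather than scaling linearly); capacity_to_freq_khz() and the frequencies used below are illustrative assumptions.

/*
 * Illustrative only: map a normalized capacity request in [0, 1024]
 * linearly onto a cpu's max frequency.
 */
static unsigned int capacity_to_freq_khz(unsigned long norm_cap,
					 unsigned int max_freq_khz)
{
	return (unsigned int)(norm_cap * max_freq_khz / 1024);
}

On a hypothetical 1,400,000 kHz LITTLE cpu, the normalised request of 952 from the earlier example maps to about 1,301,562 kHz (~1.30 GHz), while the un-normalised 400 would map to about 546,875 kHz (~547 MHz) — the stuck-at-a-low-OPP behaviour this patch removes.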