summaryrefslogtreecommitdiff
path: root/kernel/sched
diff options
context:
space:
mode:
authorJoonwoo Park <joonwoop@codeaurora.org>2016-12-08 16:12:12 -0800
committerJoonwoo Park <joonwoop@codeaurora.org>2017-09-01 17:20:59 -0700
commitee4cebd75ed7b77132c39c0093923f9ff1bcafaa (patch)
treefde67cb9845a167a6d30dc87ca10517ce46405f0 /kernel/sched
parent48f67ea85de468a9b3e47e723e7681cf7771dea6 (diff)
sched: EAS/WALT: use cr_avg instead of prev_runnable_sum
WALT accounts for two major statistics: CPU load and cumulative task demand. CPU load — the accumulated absolute execution time of each CPU — is used for CPU frequency guidance, whereas cumulative task demand — each CPU's instantaneous load, reflecting the CPU's load at a given time — is used for task placement decisions. Use cumulative task demand in cpu_util() for task placement, and introduce cpu_util_freq() for frequency guidance. Change-Id: Id928f01dbc8cb2a617cdadc584c1f658022565c5 Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c2
-rw-r--r--kernel/sched/fair.c4
-rw-r--r--kernel/sched/sched.h16
3 files changed, 18 insertions, 4 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9307827cc7b1..4f97de8e0b18 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2992,7 +2992,7 @@ static void sched_freq_tick_pelt(int cpu)
#ifdef CONFIG_SCHED_WALT
static void sched_freq_tick_walt(int cpu)
{
- unsigned long cpu_utilization = cpu_util(cpu);
+ unsigned long cpu_utilization = cpu_util_freq(cpu);
unsigned long capacity_curr = capacity_curr_of(cpu);
if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c193e9b1c38f..3641dad3d4cc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4659,7 +4659,7 @@ static inline void hrtick_update(struct rq *rq)
static bool cpu_overutilized(int cpu);
unsigned long boosted_cpu_util(int cpu);
#else
-#define boosted_cpu_util(cpu) cpu_util(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
#endif
#ifdef CONFIG_SMP
@@ -5937,7 +5937,7 @@ schedtune_task_margin(struct task_struct *task)
unsigned long
boosted_cpu_util(int cpu)
{
- unsigned long util = cpu_util(cpu);
+ unsigned long util = cpu_util_freq(cpu);
long margin = schedtune_cpu_margin(util, cpu);
trace_sched_boost_cpu(cpu, util, margin);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 029cf2bbeda2..73077f535e95 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1592,7 +1592,7 @@ static inline unsigned long __cpu_util(int cpu, int delta)
#ifdef CONFIG_SCHED_WALT
if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+ util = cpu_rq(cpu)->cumulative_runnable_avg << SCHED_LOAD_SHIFT;
do_div(util, walt_ravg_window);
}
#endif
@@ -1608,6 +1608,20 @@ static inline unsigned long cpu_util(int cpu)
return __cpu_util(cpu, 0);
}
+static inline unsigned long cpu_util_freq(int cpu)
+{
+ unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+ unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+ if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+ util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+ do_div(util, walt_ravg_window);
+ }
+#endif
+ return (util >= capacity) ? capacity : util;
+}
+
#endif
#ifdef CONFIG_CPU_FREQ_GOV_SCHED