Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c    2
-rw-r--r--  kernel/sched/fair.c    4
-rw-r--r--  kernel/sched/sched.h  16
3 files changed, 18 insertions, 4 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9307827cc7b1..4f97de8e0b18 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2992,7 +2992,7 @@ static void sched_freq_tick_pelt(int cpu)
 #ifdef CONFIG_SCHED_WALT
 static void sched_freq_tick_walt(int cpu)
 {
-	unsigned long cpu_utilization = cpu_util(cpu);
+	unsigned long cpu_utilization = cpu_util_freq(cpu);
 	unsigned long capacity_curr = capacity_curr_of(cpu);
 
 	if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c193e9b1c38f..3641dad3d4cc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4659,7 +4659,7 @@ static inline void hrtick_update(struct rq *rq)
 static bool cpu_overutilized(int cpu);
 unsigned long boosted_cpu_util(int cpu);
 #else
-#define boosted_cpu_util(cpu) cpu_util(cpu)
+#define boosted_cpu_util(cpu) cpu_util_freq(cpu)
 #endif
 
 #ifdef CONFIG_SMP
@@ -5937,7 +5937,7 @@ schedtune_task_margin(struct task_struct *task)
 unsigned long
 boosted_cpu_util(int cpu)
 {
-	unsigned long util = cpu_util(cpu);
+	unsigned long util = cpu_util_freq(cpu);
 	long margin = schedtune_cpu_margin(util, cpu);
 
 	trace_sched_boost_cpu(cpu, util, margin);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 029cf2bbeda2..73077f535e95 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1592,7 +1592,7 @@ static inline unsigned long __cpu_util(int cpu, int delta)
 
 #ifdef CONFIG_SCHED_WALT
 	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
-		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+		util = cpu_rq(cpu)->cumulative_runnable_avg << SCHED_LOAD_SHIFT;
 		do_div(util, walt_ravg_window);
 	}
 #endif
@@ -1608,6 +1608,20 @@ static inline unsigned long cpu_util(int cpu)
 	return __cpu_util(cpu, 0);
 }
 
+static inline unsigned long cpu_util_freq(int cpu)
+{
+	unsigned long util = cpu_rq(cpu)->cfs.avg.util_avg;
+	unsigned long capacity = capacity_orig_of(cpu);
+
+#ifdef CONFIG_SCHED_WALT
+	if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
+		util = cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+		do_div(util, walt_ravg_window);
+	}
+#endif
+	return (util >= capacity) ? capacity : util;
+}
+
 #endif
 
 #ifdef CONFIG_CPU_FREQ_GOV_SCHED
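
Note: the new cpu_util_freq() keeps the PELT path (cfs.avg.util_avg) but, under WALT, reports the previous window's busy time rescaled to capacity units — (prev_runnable_sum << SCHED_LOAD_SHIFT) / walt_ravg_window, clamped to capacity_orig_of(cpu). The frequency-side callers (sched_freq_tick_walt(), boosted_cpu_util()) switch to this helper, while __cpu_util(), used for task placement, switches to cumulative_runnable_avg. Below is a minimal standalone sketch of the WALT scaling arithmetic; the 20 ms window, the busy-time values, and SCHED_LOAD_SHIFT == 10 are illustrative assumptions, not values taken from this commit.

/*
 * Userspace sketch of the scaling done in cpu_util_freq()'s WALT branch:
 * util = (prev_runnable_sum << SCHED_LOAD_SHIFT) / walt_ravg_window,
 * clamped to the CPU's original capacity.  All values are illustrative.
 */
#include <stdio.h>

#define SCHED_LOAD_SHIFT	10	/* assumed default; SCHED_LOAD_SCALE == 1024 */

static unsigned long walt_util(unsigned long long prev_runnable_sum_ns,
			       unsigned long long window_ns,
			       unsigned long capacity)
{
	unsigned long long util;

	/* busy time within the window, rescaled to the 0..1024 range */
	util = (prev_runnable_sum_ns << SCHED_LOAD_SHIFT) / window_ns;

	/* same clamp as cpu_util_freq(): never report more than capacity */
	return (util >= capacity) ? capacity : (unsigned long)util;
}

int main(void)
{
	unsigned long long window = 20000000ULL;	/* assumed 20 ms WALT window */

	/* 5 ms busy in a 20 ms window on a capacity-1024 CPU -> 256 */
	printf("util = %lu\n", walt_util(5000000ULL, window, 1024));

	/* an over-full window clamps at capacity -> 1024 */
	printf("util = %lu\n", walt_util(25000000ULL, window, 1024));

	return 0;
}

With a shift of 10, a fully busy window maps to 1024, so the clamp only takes effect when the accumulated sum exceeds the window or the CPU's capacity is below 1024.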
