 kernel/sched/core.c  | 4 ++++
 kernel/sched/fair.c  | 4 ++--
 kernel/sched/sched.h | 4 ++++
 3 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 83f7c682032b..9cf530a6123e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2987,7 +2987,9 @@ static void sched_freq_tick_pelt(int cpu)
 	 * utilization and to harm its performance the least, request
 	 * a jump to a higher OPP as soon as the margin of free capacity
 	 * is impacted (specified by capacity_margin).
+	 * Remember CPU utilization in sched_capacity_reqs should be normalised.
 	 */
+	cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
 	set_cfs_cpu_capacity(cpu, true, cpu_utilization);
 }
 
@@ -3014,7 +3016,9 @@ static void sched_freq_tick_walt(int cpu)
 	 * It is likely that the load is growing so we
 	 * keep the added margin in our request as an
 	 * extra boost.
+	 * Remember CPU utilization in sched_capacity_reqs should be normalised.
 	 */
+	cpu_utilization = cpu_utilization * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
 	set_cfs_cpu_capacity(cpu, true, cpu_utilization);
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5cac6a77b2bc..e6b2461d07d6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4671,7 +4671,7 @@ static void update_capacity_of(int cpu)
 	if (!sched_freq())
 		return;
 
-	/* Convert scale-invariant capacity to cpu. */
+	/* Normalize scale-invariant capacity to cpu. */
 	req_cap = boosted_cpu_util(cpu);
 	req_cap = req_cap * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
 	set_cfs_cpu_capacity(cpu, true, req_cap);
@@ -4864,7 +4864,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			if (rq->cfs.nr_running)
 				update_capacity_of(cpu_of(rq));
 			else if (sched_freq())
-				set_cfs_cpu_capacity(cpu_of(rq), false, 0);
+				set_cfs_cpu_capacity(cpu_of(rq), false, 0); /* no normalization required for 0 */
 		}
 	}
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9f3d89faacdc..5256f05a26e8 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1621,6 +1621,10 @@ static inline bool sched_freq(void)
 	return static_key_false(&__sched_freq);
 }
 
+/*
+ * sched_capacity_reqs expects capacity requests to be normalised.
+ * All capacities should be in the range 0-1024.
+ */
 DECLARE_PER_CPU(struct sched_capacity_reqs, cpu_sched_capacity_reqs);
 
 void update_cpu_capacity_request(int cpu, bool request);
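
For illustration, below is a minimal user-space sketch (not part of the patch) of the normalisation the hunks above perform: a raw utilization value is scaled by SCHED_CAPACITY_SCALE / capacity_orig_of(cpu), so that requests from CPUs of different capacities land in the common 0-1024 range that sched_capacity_reqs expects. The capacity figures (430 for a little CPU, 1024 for a big one) are made-up example values, and normalize_util() is a hypothetical stand-in for the inline arithmetic in the patch.

/*
 * Standalone sketch of the normalisation added in sched_freq_tick_pelt()
 * and sched_freq_tick_walt(). Not kernel code: capacity values below are
 * hypothetical examples for a big.LITTLE system.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

/* Hypothetical stand-in for the kernel's capacity_orig_of(cpu). */
static unsigned long capacity_orig_of(int cpu)
{
	/* Assume CPUs 0-3 are little (capacity 430), CPUs 4-7 are big (1024). */
	return cpu < 4 ? 430UL : 1024UL;
}

/* Scale raw utilization into the 0-1024 range sched_capacity_reqs expects. */
static unsigned long normalize_util(int cpu, unsigned long raw_util)
{
	return raw_util * SCHED_CAPACITY_SCALE / capacity_orig_of(cpu);
}

int main(void)
{
	/* A little CPU running flat out (raw util == its capacity_orig)
	 * normalises to the full scale... */
	printf("little CPU, saturated: %lu\n", normalize_util(0, 430)); /* 1024 */
	/* ...while a big CPU at the same raw utilization reports much less. */
	printf("big CPU, same raw:     %lu\n", normalize_util(4, 430)); /* 430 */
	return 0;
}

Note that, as in the patch, the multiplication happens before the division, so integer truncation is deferred to the final step rather than compounding.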