| field | value |
|---|---|
| author | Vikram Mulukutla <markivx@codeaurora.org>, 2017-03-20 13:41:37 -0700 |
| committer | Michael Bestas <mkbestas@lineageos.org>, 2019-12-23 23:43:40 +0200 |
| commit | 0086bc47f8b54d30de0ced09e9bb45cb4fef47a8 (patch) |
| tree | b5a24f13f592ac20770ddabbc640c7b9b4ec1c32 /kernel/sched/core.c |
| parent | 5c43a2afb87ea44e4d52585989a76b1e6a2e6ce6 (diff) |
sched: cpufreq: HMP load reporting changes
Since HMP uses WALT, ensure that load is reported just once per window,
with the exception of intercluster migrations. Further, try to report load
whenever WALT stats are updated.
Change-Id: I6539f8c916f6f271cf26f03249de7f953d5b12c2
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
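The once-per-window policy described in the commit message can be illustrated with a small standalone sketch. This is not the WALT code itself: `demo_rq`, `demo_report_load`, and `WINDOW_SIZE_NS` are hypothetical names, and the 20 ms window size is an assumption made only for the example.

```c
/*
 * Illustrative sketch only -- not the actual WALT implementation.
 * Models "report load at most once per window, except on intercluster
 * migration". All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE_NS 20000000ULL	/* assumed 20 ms WALT window */

struct demo_rq {
	uint64_t window_start;	/* start of the current window */
	uint64_t last_report;	/* window_start at the last report */
};

/* Gate load reporting to once per window; migration bypasses the gate. */
static bool demo_report_load(struct demo_rq *rq, uint64_t wallclock,
			     bool intercluster_migration)
{
	/* Roll the window forward until it contains wallclock. */
	while (wallclock >= rq->window_start + WINDOW_SIZE_NS)
		rq->window_start += WINDOW_SIZE_NS;

	if (!intercluster_migration && rq->last_report == rq->window_start)
		return false;	/* already reported in this window */

	rq->last_report = rq->window_start;
	return true;	/* caller would now invoke cpufreq_update_util() */
}

int main(void)
{
	struct demo_rq rq = { .window_start = 0, .last_report = UINT64_MAX };

	printf("%d\n", demo_report_load(&rq,  5000000, false)); /* 1: first report */
	printf("%d\n", demo_report_load(&rq,  6000000, false)); /* 0: same window */
	printf("%d\n", demo_report_load(&rq,  7000000, true));  /* 1: migration bypass */
	printf("%d\n", demo_report_load(&rq, 25000000, false)); /* 1: new window */
	return 0;
}
```

Under these assumptions, only the intercluster-migration path can trigger a second report within one window, matching the exception called out in the commit message.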
Diffstat (limited to 'kernel/sched/core.c')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched/core.c | 6 |

1 file changed, 6 insertions, 0 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f6f8bb2f0d95..72e1ffe809f0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2132,6 +2132,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+	cpufreq_update_util(rq, 0);
 	raw_spin_unlock(&rq->lock);
 
 	rcu_read_lock();
@@ -2225,6 +2226,7 @@ static void try_to_wake_up_local(struct task_struct *p)
 
 		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
 		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
 		note_task_waking(p, wallclock);
 	}
@@ -3196,6 +3198,8 @@ void scheduler_tick(void)
 	calc_global_load_tick(rq);
 	wallclock = sched_ktime_clock();
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+
+	cpufreq_update_util(rq, 0);
 	early_notif = early_detection_notify(rq, wallclock);
 	raw_spin_unlock(&rq->lock);
 
@@ -3564,6 +3568,7 @@ static void __sched notrace __schedule(bool preempt)
 	if (likely(prev != next)) {
 		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
 		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 
 		if (!is_idle_task(prev) && !prev->on_rq)
 			update_avg_burst(prev);
@@ -3582,6 +3587,7 @@ static void __sched notrace __schedule(bool preempt)
 		cpu = cpu_of(rq);
 	} else {
 		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
+		cpufreq_update_util(rq, 0);
 		lockdep_unpin_lock(&rq->lock);
 		raw_spin_unlock_irq(&rq->lock);
 	}
```
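For context, each added `cpufreq_update_util(rq, 0)` call only fires whatever update-util hook a cpufreq governor has registered for that CPU. The sketch below shows that consumer side, assuming the mainline v4.9-style hook API (`struct update_util_data`, `cpufreq_add_update_util_hook()`, `cpufreq_remove_update_util_hook()`) is available in this tree; `my_governor_update` and `my_gov_data` are hypothetical names, not anything from this patch.

```c
/*
 * Hedged sketch of a governor consuming the cpufreq_update_util()
 * callbacks added above. Assumes the mainline v4.9-style hook API
 * has been backported to this tree; all my_* names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>

struct my_gov_data {
	struct update_util_data update_util;	/* embedded hook */
	u64 last_update;			/* example bookkeeping */
};

static DEFINE_PER_CPU(struct my_gov_data, my_gov_data);

/*
 * Runs from the scheduler paths patched above, with the rq lock held
 * and interrupts off, so a real governor defers heavy work (e.g. to a
 * kthread or irq_work) rather than changing frequency inline.
 */
static void my_governor_update(struct update_util_data *hook, u64 time,
			       unsigned int flags)
{
	struct my_gov_data *gd = container_of(hook, struct my_gov_data,
					      update_util);

	gd->last_update = time;
	/* A real governor would sample the (WALT) utilization here. */
}

static void my_governor_start(int cpu)
{
	cpufreq_add_update_util_hook(cpu,
				     &per_cpu(my_gov_data, cpu).update_util,
				     my_governor_update);
}

static void my_governor_stop(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
}
```

Placing the callbacks next to the `update_task_ravg()` calls means the governor is notified exactly where WALT statistics change, which is the reporting behavior the commit message describes.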