| author | Joonwoo Park <joonwoop@codeaurora.org> | 2015-07-30 10:44:13 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:02:30 -0700 |
| commit | 383ae6b29eb14d498e3a57f40fa61115d910646d (patch) | |
| tree | 73e9270e5c0509fa24aa9a6d39580a7d23fa854c /kernel/sched/sched.h | |
| parent | 446beddcd4aeb4246c6a1f391063138bd4e899ee (diff) | |
sched: clean up fixup_hmp_sched_stats()
The commit 392edf4969d20 ("sched: avoid stale cumulative_runnable_avg
HMP statistics") introduced the callback function fixup_hmp_sched_stats()
so that update_history() can avoid a decrement/increment pair on the HMP
stats.  However, the commit also made the fixup function do an obscure
p->ravg.demand update, which isn't the cleanest approach.
Revise fixup_hmp_sched_stats() so that the caller updates p->ravg.demand
directly and passes only the signed load delta.
Change-Id: Id54667d306495d2109c26362813f80f08a1385ad
[joonwoop@codeaurora.org: stripped out CONFIG_SCHED_QHMP.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched/sched.h')
| -rw-r--r-- | kernel/sched/sched.h | 22 |
1 file changed, 9 insertions, 13 deletions
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fa15ca43e312..cb9114208ed0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1012,7 +1012,6 @@ extern unsigned int sched_init_task_load_windows;
 extern unsigned int sched_heavy_task;
 extern unsigned int up_down_migrate_scale_factor;
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
-extern void fixup_nr_big_task(int cpu, int reset_stats);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr, u64 delta,
				 u64 wallclock);
@@ -1044,6 +1043,13 @@ static inline int max_poss_capacity(struct rq *rq)
	return rq->max_possible_capacity;
 }
 
+static inline unsigned int task_load(struct task_struct *p)
+{
+	if (sched_use_pelt)
+		return p->se.avg.runnable_avg_sum_scaled;
+
+	return p->ravg.demand;
+}
 
 static inline void
 inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
@@ -1079,18 +1085,12 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
 
 static inline void
 fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-			      struct task_struct *p, u32 new_task_load)
+			      struct task_struct *p, s64 task_load_delta)
 {
-	u32 task_load;
-
-	task_load = sched_use_pelt ?
-			p->se.avg.runnable_avg_sum_scaled : p->ravg.demand;
-	p->ravg.demand = new_task_load;
-
	if (!sched_enable_hmp || sched_disable_window_stats)
		return;
 
-	stats->cumulative_runnable_avg += ((s64)new_task_load - task_load);
+	stats->cumulative_runnable_avg += task_load_delta;
 
	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
 }
@@ -1130,10 +1130,6 @@ static inline int sched_cpu_high_irqload(int cpu)
 
 struct hmp_sched_stats;
 
-static inline void fixup_nr_big_task(int cpu, int reset_stats)
-{
-}
-
 static inline u64 scale_load_to_cpu(u64 load, int cpu)
 {
	return load;
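For context, here is a minimal caller-side sketch of the revised interface. It is not part of this diff (which only touches sched.h): the helper name set_task_demand() and the rq->hmp_stats field reference are illustrative assumptions, while task_load(), fixup_cumulative_runnable_avg(), and p->ravg.demand come from the header above. The point is that the caller now computes the delta and updates p->ravg.demand itself, instead of handing the new load to the fixup function.

/*
 * Illustrative sketch, not taken from this commit: a hypothetical caller
 * applying the revised fixup_cumulative_runnable_avg() interface.
 */
static void set_task_demand(struct rq *rq, struct task_struct *p,
			    u32 new_demand)
{
	/* delta against the load the HMP stats currently account for */
	s64 delta = (s64)new_demand - task_load(p);

	/* the caller now owns the p->ravg.demand update ... */
	p->ravg.demand = new_demand;

	/* ... and the fixup helper only adjusts the cumulative average
	 * (rq->hmp_stats is assumed to be the per-rq hmp_sched_stats). */
	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, delta);
}

With the old interface the same adjustment was hidden inside fixup_cumulative_runnable_avg(), which both recomputed the old load and wrote p->ravg.demand as a side effect; passing a plain s64 delta keeps the helper a pure stats adjustment.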
