author    | Joonwoo Park <joonwoop@codeaurora.org>  | 2015-07-30 10:44:13 -0700
committer | David Keitel <dkeitel@codeaurora.org>   | 2016-03-23 20:02:30 -0700
commit    | 383ae6b29eb14d498e3a57f40fa61115d910646d
tree      | 73e9270e5c0509fa24aa9a6d39580a7d23fa854c
parent    | 446beddcd4aeb4246c6a1f391063138bd4e899ee
sched: clean up fixup_hmp_sched_stats()
Commit 392edf4969d20 ("sched: avoid stale cumulative_runnable_avg HMP
statistics") introduced the callback fixup_hmp_sched_stats() so that
update_history() could avoid a decrement/increment pair on the HMP
stats.  However, that commit also made the fixup callbacks update
p->ravg.demand as an obscure side effect, which isn't the cleanest
approach.

Revise fixup_hmp_sched_stats() so the caller can update p->ravg.demand
directly (a minimal sketch of the resulting convention follows the
sign-off block below).
Change-Id: Id54667d306495d2109c26362813f80f08a1385ad
[joonwoop@codeaurora.org: stripped out CONFIG_SCHED_QHMP.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
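
For illustration only, here is a minimal, self-contained sketch (not kernel
code; the stub struct names and update_history_stub() are invented stand-ins)
of the convention this patch moves to: fixup_cumulative_runnable_avg() takes a
signed load delta and no longer touches p->ravg.demand, which the caller now
sets itself.

/*
 * Standalone sketch of the revised convention: the fixup helper adds a
 * signed delta to the cumulative stat and never writes p->ravg.demand;
 * the caller applies the new demand afterwards.  Struct layouts are
 * simplified stand-ins, not the real kernel types.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct task_stub {
        uint32_t demand;                /* stands in for p->ravg.demand */
};

struct hmp_stats_stub {
        int64_t cumulative_runnable_avg;
};

/* Mirrors the new fixup_cumulative_runnable_avg(): apply a signed delta. */
static void fixup_cumulative_runnable_avg(struct hmp_stats_stub *stats,
                                          int64_t task_load_delta)
{
        stats->cumulative_runnable_avg += task_load_delta;
        assert(stats->cumulative_runnable_avg >= 0);    /* BUG_ON() in the patch */
}

/* Mirrors the caller (update_history()): fix up stats, then set demand. */
static void update_history_stub(struct hmp_stats_stub *stats,
                                struct task_stub *p, uint32_t new_demand)
{
        int64_t delta = (int64_t)new_demand - p->demand;

        fixup_cumulative_runnable_avg(stats, delta);
        p->demand = new_demand;         /* the caller owns this update now */
}

int main(void)
{
        struct hmp_stats_stub stats = { .cumulative_runnable_avg = 100 };
        struct task_stub p = { .demand = 100 };

        update_history_stub(&stats, &p, 60);
        printf("cra=%lld demand=%u\n",
               (long long)stats.cumulative_runnable_avg, (unsigned)p.demand);
        return 0;
}

This only demonstrates the ordering; the real fair-class path additionally
walks the sched entities and fixes up nr_big_tasks, as the kernel/sched/fair.c
hunks below show.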
-rw-r--r-- | kernel/sched/core.c      |  6
-rw-r--r-- | kernel/sched/deadline.c  |  4
-rw-r--r-- | kernel/sched/fair.c      | 80
-rw-r--r-- | kernel/sched/rt.c        |  4
-rw-r--r-- | kernel/sched/sched.h     | 22
-rw-r--r-- | kernel/sched/stop_task.c |  4

6 files changed, 63 insertions, 57 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4074dd46bc29..1e638fc6ebce 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1877,8 +1877,8 @@ static void update_history(struct rq *rq, struct task_struct *p,
 	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
						!p->dl.dl_throttled))
 		p->sched_class->fixup_hmp_sched_stats(rq, p, demand);
-	else
-		p->ravg.demand = demand;
+
+	p->ravg.demand = demand;
 
 done:
 	trace_sched_update_history(rq, p, runtime, samples, event);
@@ -2261,8 +2261,6 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
 #endif
 		reset_cpu_hmp_stats(cpu, 1);
-
-		fixup_nr_big_task(cpu, 0);
 	}
 
 	if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index a4db4290f428..bbd3c632d545 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -871,7 +871,9 @@ static void
 fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
			 u32 new_task_load)
 {
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
 }
 
 #else /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f009c718fd82..118767ee9a1d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2677,14 +2677,6 @@ unsigned int __read_mostly sched_init_task_load_pelt;
 unsigned int __read_mostly sched_init_task_load_windows;
 unsigned int __read_mostly sysctl_sched_init_task_load_pct = 100;
 
-static inline unsigned int task_load(struct task_struct *p)
-{
-	if (sched_use_pelt)
-		return p->se.avg.runnable_avg_sum_scaled;
-
-	return p->ravg.demand;
-}
-
 unsigned int max_task_load(void)
 {
 	if (sched_use_pelt)
@@ -2871,17 +2863,19 @@ static inline int upmigrate_discouraged(struct task_struct *p)
 #endif
 
 /* Is a task "big" on its current cpu */
-static inline int is_big_task(struct task_struct *p)
+static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
 {
-	u64 load = task_load(p);
 	int nice = task_nice(p);
 
 	if (nice > sched_upmigrate_min_nice || upmigrate_discouraged(p))
 		return 0;
 
-	load = scale_load_to_cpu(load, task_cpu(p));
+	return scaled_load > sched_upmigrate;
+}
 
-	return load > sched_upmigrate;
+static inline int is_big_task(struct task_struct *p)
+{
+	return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p)));
 }
 
 static inline u64 cpu_load(int cpu)
@@ -3414,6 +3408,29 @@ void reset_cpu_hmp_stats(int cpu, int reset_cra)
 	reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra);
 }
 
+static void
+fixup_nr_big_tasks(struct hmp_sched_stats *stats, struct task_struct *p,
+		   s64 delta)
+{
+	u64 new_task_load;
+	u64 old_task_load;
+
+	if (!sched_enable_hmp || sched_disable_window_stats)
+		return;
+
+	old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
+	new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p));
+
+	if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load))
+		stats->nr_big_tasks--;
+	else if (!__is_big_task(p, old_task_load) &&
+		 __is_big_task(p, new_task_load))
+		stats->nr_big_tasks++;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+
+
 #ifdef CONFIG_CFS_BANDWIDTH
 
 static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
@@ -3490,29 +3507,23 @@ static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
 {
 	struct cfs_rq *cfs_rq;
 	struct sched_entity *se = &p->se;
-	u32 old_task_load = p->ravg.demand;
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 
-		dec_nr_big_task(&cfs_rq->hmp_stats, p);
 		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
-					      new_task_load);
-		inc_nr_big_task(&cfs_rq->hmp_stats, p);
+					      task_load_delta);
+		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
-
-		/*
-		 * fixup_cumulative_runnable_avg() sets p->ravg.demand to
-		 * new_task_load.
-		 */
-		p->ravg.demand = old_task_load;
 	}
 
 	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
 	if (!se) {
-		dec_nr_big_task(&rq->hmp_stats, p);
-		fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
-		inc_nr_big_task(&rq->hmp_stats, p);
+		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+					      task_load_delta);
+		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
 	}
 }
@@ -3533,14 +3544,14 @@ dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
 	dec_nr_big_task(&rq->hmp_stats, p);
 	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
 }
-
 static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
					u32 new_task_load)
 {
-	dec_nr_big_task(&rq->hmp_stats, p);
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
-	inc_nr_big_task(&rq->hmp_stats, p);
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
+	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
 }
 
 static inline int task_will_be_throttled(struct task_struct *p)
@@ -3559,18 +3570,13 @@ _inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p, int change_cra)
 /*
  * Walk runqueue of cpu and re-initialize 'nr_big_tasks' counters.
  */
-void fixup_nr_big_task(int cpu, int reset_stats)
+static void update_nr_big_tasks(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *p;
 
-	/* fixup_nr_big_task() is called from two functions. In one of
-	 * them stats are already reset, don't waste time resetting them again
-	 */
-	if (reset_stats) {
-		/* Do not reset cumulative_runnable_avg */
-		reset_cpu_hmp_stats(cpu, 0);
-	}
+	/* Do not reset cumulative_runnable_avg */
+	reset_cpu_hmp_stats(cpu, 0);
 
 	list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
 		_inc_hmp_sched_stats_fair(rq, p, 0);
@@ -3596,7 +3602,7 @@ void post_big_task_count_change(const struct cpumask *cpus)
 
 	/* Assumes local_irq_disable() keeps online cpumap stable */
 	for_each_cpu(i, cpus)
-		fixup_nr_big_task(i, 1);
+		update_nr_big_tasks(i);
 
 	for_each_cpu(i, cpus)
 		raw_spin_unlock(&cpu_rq(i)->lock);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index dbf8ea6dc535..4d490c90b03e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1200,7 +1200,9 @@ static void
 fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
			 u32 new_task_load)
 {
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
 }
 
 #else /* CONFIG_SCHED_HMP */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index fa15ca43e312..cb9114208ed0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1012,7 +1012,6 @@ extern unsigned int sched_init_task_load_windows;
 extern unsigned int sched_heavy_task;
 extern unsigned int up_down_migrate_scale_factor;
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
-extern void fixup_nr_big_task(int cpu, int reset_stats);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
				  u64 delta, u64 wallclock);
@@ -1044,6 +1043,13 @@ static inline int max_poss_capacity(struct rq *rq)
 	return rq->max_possible_capacity;
 }
 
+static inline unsigned int task_load(struct task_struct *p)
+{
+	if (sched_use_pelt)
+		return p->se.avg.runnable_avg_sum_scaled;
+
+	return p->ravg.demand;
+}
 
 static inline void
 inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
@@ -1079,18 +1085,12 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
 
 static inline void
 fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
-			      struct task_struct *p, u32 new_task_load)
+			      struct task_struct *p, s64 task_load_delta)
 {
-	u32 task_load;
-
-	task_load = sched_use_pelt ?
-		p->se.avg.runnable_avg_sum_scaled : p->ravg.demand;
-	p->ravg.demand = new_task_load;
-
 	if (!sched_enable_hmp || sched_disable_window_stats)
 		return;
 
-	stats->cumulative_runnable_avg += ((s64)new_task_load - task_load);
+	stats->cumulative_runnable_avg += task_load_delta;
 
 	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
 }
@@ -1130,10 +1130,6 @@ static inline int sched_cpu_high_irqload(int cpu)
 
 struct hmp_sched_stats;
 
-static inline void fixup_nr_big_task(int cpu, int reset_stats)
-{
-}
-
 static inline u64 scale_load_to_cpu(u64 load, int cpu)
 {
 	return load;
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 93aba3060d9a..1918c46ccca9 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -35,7 +35,9 @@ static void
 fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
			   u32 new_task_load)
 {
-	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, new_task_load);
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta);
 }
 
 #else /* CONFIG_SCHED_HMP */