| author    | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2015-01-16 11:27:31 +0530 |
|-----------|-------------------------------------------|---------------------------|
| committer | David Keitel <dkeitel@codeaurora.org>     | 2016-03-23 20:01:34 -0700 |
| commit    | 0a33ec2ea97b52f7cccbc5da42a243d6c9038c6f (patch) | |
| tree      | 2e1658af2cb66f497c23d6829629f545cc688140 /kernel/sched/core.c | |
| parent    | 207d78dd263114ccf6e04581f43e4dc99e7b068d (diff) | |
sched: Consolidate hmp stats into their own struct
Key HMP stats (nr_big_tasks, nr_small_tasks and
cumulative_runnable_avg) are currently maintained per-cpu in
'struct rq'. Merge these stats into their own structure (struct
hmp_sched_stats) and modify the affected functions to work with the
newly introduced structure. This cleanup is required for a subsequent
patch which fixes various issues with the use of the CFS_BANDWIDTH
feature in the HMP scheduler.
Change-Id: Ieffc10a3b82a102f561331bc385d042c15a33998
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed conflict in __update_load_avg().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
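For reference, a minimal sketch of the consolidated structure, reconstructed from the fields this patch moves under rq->hmp_stats; the authoritative definition lives in kernel/sched/sched.h of this series, and the exact types and field order here are assumptions:

```c
#include <linux/types.h>

/* Sketch only: field types are assumed, based on the accesses in this diff. */
struct hmp_sched_stats {
	int nr_big_tasks;		/* runnable "big" tasks on this CPU */
	int nr_small_tasks;		/* runnable "small" tasks on this CPU */
	u64 cumulative_runnable_avg;	/* sum of runnable tasks' windowed demand */
};

struct rq {
	/* ... existing runqueue fields ... */
	struct hmp_sched_stats hmp_stats;	/* replaces the loose per-rq HMP fields */
	/* ... */
};
```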
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c (-rw-r--r--) | 37
 1 file changed, 11 insertions(+), 26 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 494c6cc518b0..8ad6ea28b278 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -859,7 +859,6 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 	sched_info_queued(rq, p);
 	p->sched_class->enqueue_task(rq, p, flags);
 	trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
-	inc_cumulative_runnable_avg(rq, p);
 }
 
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -869,7 +868,6 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	sched_info_dequeued(rq, p);
 	p->sched_class->dequeue_task(rq, p, flags);
 	trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
-	dec_cumulative_runnable_avg(rq, p);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1684,12 +1682,8 @@ static void update_history(struct rq *rq, struct task_struct *p,
 	}
 	p->ravg.sum = 0;
 
-	if (p->on_rq) {
-		rq->cumulative_runnable_avg -= p->ravg.demand;
-		BUG_ON((s64)rq->cumulative_runnable_avg < 0);
-		if (p->sched_class == &fair_sched_class)
-			dec_nr_big_small_task(rq, p);
-	}
+	if (p->on_rq)
+		p->sched_class->dec_hmp_sched_stats(rq, p);
 
 	avg = div64_u64(sum, sched_ravg_hist_size);
 
@@ -1704,11 +1698,8 @@ static void update_history(struct rq *rq, struct task_struct *p,
 
 	p->ravg.demand = demand;
 
-	if (p->on_rq) {
-		rq->cumulative_runnable_avg += p->ravg.demand;
-		if (p->sched_class == &fair_sched_class)
-			inc_nr_big_small_task(rq, p);
-	}
+	if (p->on_rq)
+		p->sched_class->inc_hmp_sched_stats(rq, p);
 
 done:
 	trace_sched_update_history(rq, p, runtime, samples, event);
@@ -2094,7 +2085,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 #ifdef CONFIG_SCHED_FREQ_INPUT
 		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
 #endif
-		rq->cumulative_runnable_avg = 0;
+		rq->hmp_stats.cumulative_runnable_avg = 0;
 		fixup_nr_big_small_task(cpu);
 	}
 
@@ -2248,11 +2239,8 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 	 * reflect new demand. Restore load temporarily for such
 	 * task on its runqueue
 	 */
-	if (p->on_rq) {
-		inc_cumulative_runnable_avg(src_rq, p);
-		if (p->sched_class == &fair_sched_class)
-			inc_nr_big_small_task(src_rq, p);
-	}
+	if (p->on_rq)
+		p->sched_class->inc_hmp_sched_stats(src_rq, p);
 
 	update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
 
@@ -2261,11 +2249,8 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 	 * Remove task's load from rq as its now migrating to
 	 * another cpu.
 	 */
-	if (p->on_rq) {
-		dec_cumulative_runnable_avg(src_rq, p);
-		if (p->sched_class == &fair_sched_class)
-			dec_nr_big_small_task(src_rq, p);
-	}
+	if (p->on_rq)
+		p->sched_class->dec_hmp_sched_stats(src_rq, p);
 
 	if (p->ravg.curr_window) {
 		src_rq->curr_runnable_sum -= p->ravg.curr_window;
@@ -9174,12 +9159,12 @@ void __init sched_init(void)
 		rq->min_freq = 1;
 		rq->max_possible_freq = 1;
 		rq->max_possible_capacity = 0;
-		rq->cumulative_runnable_avg = 0;
+		rq->hmp_stats.cumulative_runnable_avg = 0;
 		rq->efficiency = 1024;
 		rq->capacity = 1024;
 		rq->load_scale_factor = 1024;
 		rq->window_start = 0;
-		rq->nr_small_tasks = rq->nr_big_tasks = 0;
+		rq->hmp_stats.nr_small_tasks = rq->hmp_stats.nr_big_tasks = 0;
 		rq->hmp_flags = 0;
 		rq->mostly_idle_load = pct_to_real(20);
 		rq->mostly_idle_nr_run = 3;
```
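The per-class hooks called above (inc_hmp_sched_stats/dec_hmp_sched_stats) are not part of this diffstat, which is limited to kernel/sched/core.c. A rough sketch of what the fair-class callbacks could look like, reconstructed from the open-coded logic removed in this patch; the _fair suffix and the exact wiring are assumptions:

```c
/*
 * Sketch only: reconstructed from the logic this patch removes from core.c.
 * The real implementations live in the class files (e.g. fair.c) of this
 * series and may differ.
 */
static void inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(rq, p);	/* hmp_stats.cumulative_runnable_avg += p->ravg.demand */
	inc_nr_big_small_task(rq, p);		/* classify p and bump nr_big_tasks/nr_small_tasks */
}

static void dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
{
	dec_cumulative_runnable_avg(rq, p);
	dec_nr_big_small_task(rq, p);
}

const struct sched_class fair_sched_class = {
	/* ... existing callbacks ... */
	.inc_hmp_sched_stats	= inc_hmp_sched_stats_fair,
	.dec_hmp_sched_stats	= dec_hmp_sched_stats_fair,
};
```

With the stats consolidated and the per-class hooks in place, core.c no longer needs to special-case the fair class when adjusting per-cpu load, which is what the hunks in update_history() and fixup_busy_time() above rely on.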