| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2015-01-16 11:27:31 +0530 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:01:34 -0700 |
| commit | 0a33ec2ea97b52f7cccbc5da42a243d6c9038c6f (patch) | |
| tree | 2e1658af2cb66f497c23d6829629f545cc688140 /kernel/sched/rt.c | |
| parent | 207d78dd263114ccf6e04581f43e4dc99e7b068d (diff) | |
sched: Consolidate hmp stats into their own struct
Key HMP stats (nr_big_tasks, nr_small_tasks and
cumulative_runnable_average) are currently maintained per-cpu in
'struct rq'. Merge those stats into their own structure (struct
hmp_sched_stats) and modify the impacted functions to deal with the
newly introduced structure. This cleanup is required for a subsequent
patch which fixes various issues with the use of the CFS_BANDWIDTH
feature in the HMP scheduler.
Change-Id: Ieffc10a3b82a102f561331bc385d042c15a33998
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed conflict in __update_load_avg().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
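
A minimal sketch of the consolidation described above, for readers without the HMP tree at hand. The definition of struct hmp_sched_stats is not part of this diff, so the field types and the rq embedding shown here are assumptions based on the stats named in the commit message; only the helper call shape is confirmed by the diff below.

	/* Sketch only: per-cpu HMP stats gathered into one struct (field types assumed). */
	struct hmp_sched_stats {
		int nr_big_tasks;
		int nr_small_tasks;
		u64 cumulative_runnable_avg;
	};

	struct rq {
		/* ... existing runqueue fields ... */
		struct hmp_sched_stats hmp_stats;	/* replaces the loose per-rq counters */
	};

	/* Helpers then operate on the stats struct rather than the whole rq,
	 * matching the call sites added in the diff: */
	void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats, struct task_struct *p);
	void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats, struct task_struct *p);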
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r-- | kernel/sched/rt.c | 38 |
1 file changed, 36 insertions, 2 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f20b6711a1bb..a817272a390b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1182,6 +1182,30 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
 static inline
 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
 {
@@ -1313,6 +1337,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 		rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
+	inc_hmp_sched_stats_rt(rq, p);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
@@ -1324,6 +1349,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se);
+	dec_hmp_sched_stats_rt(rq, p);
 
 	dequeue_pushable_task(rq, p);
 }
@@ -1670,12 +1696,15 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 	}
 	return best_cpu;
 }
-#else
+
+#else /* CONFIG_SCHED_HMP */
+
 static int find_lowest_rq_hmp(struct task_struct *task)
 {
 	return -1;
 }
-#endif
+
+#endif /* CONFIG_SCHED_HMP */
 
 static int find_lowest_rq(struct task_struct *task)
 {
@@ -2240,6 +2269,7 @@ void __init init_sched_rt_class(void)
 					GFP_KERNEL, cpu_to_node(i));
 	}
 }
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -2414,6 +2444,10 @@ const struct sched_class rt_sched_class = {
 	.switched_to		= switched_to_rt,
 
 	.update_curr		= update_curr_rt,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats	= inc_hmp_sched_stats_rt,
+	.dec_hmp_sched_stats	= dec_hmp_sched_stats_rt,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
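
The sched_class callbacks registered at the end of the diff suggest a dispatch pattern along the following lines; this wrapper is illustrative only, and its name and location are not taken from this commit.

	#ifdef CONFIG_SCHED_HMP
	/* Illustrative: generic scheduler code can route HMP accounting
	 * through whichever class currently owns the task. */
	static inline void inc_hmp_sched_stats(struct rq *rq, struct task_struct *p)
	{
		p->sched_class->inc_hmp_sched_stats(rq, p);
	}
	#endif /* CONFIG_SCHED_HMP */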