author    | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2015-01-16 11:27:31 +0530
committer | David Keitel <dkeitel@codeaurora.org>     | 2016-03-23 20:01:34 -0700
commit    | 0a33ec2ea97b52f7cccbc5da42a243d6c9038c6f
tree      | 2e1658af2cb66f497c23d6829629f545cc688140
parent    | 207d78dd263114ccf6e04581f43e4dc99e7b068d
sched: Consolidate hmp stats into their own struct
Key HMP stats (nr_big_tasks, nr_small_tasks and cumulative_runnable_avg)
are currently maintained per-cpu in 'struct rq'. Merge those stats into
their own structure (struct hmp_sched_stats) and modify the affected
functions to work with the newly introduced structure. This cleanup is
required for a subsequent patch that fixes various issues with the use of
the CFS_BANDWIDTH feature in the HMP scheduler.
Change-Id: Ieffc10a3b82a102f561331bc385d042c15a33998
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed conflict in __update_load_avg().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
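For readers skimming the diff, the shape of the change can be sketched as follows. This is a small, self-contained userspace sketch of the pattern the patch introduces, not kernel code: the per-CPU counters move into a dedicated struct hmp_sched_stats embedded in the runqueue, and callers update them through per-scheduling-class callbacks instead of poking rq fields directly. Field and hook names mirror the patch; the simplified task, rq and sched_class types are stand-ins for the real kernel structures.

```c
#include <assert.h>
#include <stdio.h>

/* Consolidated HMP stats, as introduced in kernel/sched/sched.h by the patch. */
struct hmp_sched_stats {
	int nr_big_tasks, nr_small_tasks;
	unsigned long long cumulative_runnable_avg;
};

/* Stand-ins for the kernel's task_struct, rq and sched_class. */
struct task {
	unsigned int demand;	/* models p->ravg.demand */
	int is_big;
};

struct sched_class;

struct rq {
	const struct sched_class *curr_class;
	struct hmp_sched_stats hmp_stats;	/* replaces the loose rq fields */
};

struct sched_class {
	/* The patch adds these two hooks so each class maintains its own stats. */
	void (*inc_hmp_sched_stats)(struct rq *rq, struct task *p);
	void (*dec_hmp_sched_stats)(struct rq *rq, struct task *p);
};

/* Generic helpers now take the stats struct, not the whole runqueue. */
static void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats, struct task *p)
{
	stats->cumulative_runnable_avg += p->demand;
}

static void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats, struct task *p)
{
	stats->cumulative_runnable_avg -= p->demand;
	assert((long long)stats->cumulative_runnable_avg >= 0);
}

/* "fair" class variant: also tracks the big-task count. */
static void inc_hmp_sched_stats_fair(struct rq *rq, struct task *p)
{
	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
	rq->hmp_stats.nr_big_tasks += p->is_big;
}

static void dec_hmp_sched_stats_fair(struct rq *rq, struct task *p)
{
	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
	rq->hmp_stats.nr_big_tasks -= p->is_big;
}

static const struct sched_class fair_sched_class = {
	.inc_hmp_sched_stats = inc_hmp_sched_stats_fair,
	.dec_hmp_sched_stats = dec_hmp_sched_stats_fair,
};

int main(void)
{
	struct rq rq = { .curr_class = &fair_sched_class };
	struct task p = { .demand = 100, .is_big = 1 };

	/* Callers (enqueue/dequeue, window-stats fixups) no longer touch rq
	 * fields directly; they go through the class hooks. */
	rq.curr_class->inc_hmp_sched_stats(&rq, &p);
	printf("big=%d avg=%llu\n", rq.hmp_stats.nr_big_tasks,
	       rq.hmp_stats.cumulative_runnable_avg);
	rq.curr_class->dec_hmp_sched_stats(&rq, &p);
	return 0;
}
```

Routing the updates through per-class callbacks is what lets the rt, stop and idle classes maintain only the cumulative runnable average, while the fair class also keeps the big/small task counts, as the diff below shows.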
-rw-r--r-- | include/trace/events/sched.h |  10
-rw-r--r-- | kernel/sched/core.c          |  37
-rw-r--r-- | kernel/sched/debug.c         |   4
-rw-r--r-- | kernel/sched/fair.c          | 103
-rw-r--r-- | kernel/sched/idle_task.c     |  18
-rw-r--r-- | kernel/sched/rt.c            |  38
-rw-r--r-- | kernel/sched/sched.h         |  82
-rw-r--r-- | kernel/sched/stop_task.c     |  30
8 files changed, 225 insertions, 97 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6f4a8f0d45e3..1ac6edf6f8e4 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -185,11 +185,11 @@ TRACE_EVENT(sched_cpu_load,
 		__entry->idle = idle;
 		__entry->mostly_idle = mostly_idle;
 		__entry->nr_running = rq->nr_running;
-		__entry->nr_big_tasks = rq->nr_big_tasks;
-		__entry->nr_small_tasks = rq->nr_small_tasks;
+		__entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+		__entry->nr_small_tasks = rq->hmp_stats.nr_small_tasks;
 		__entry->load_scale_factor = rq->load_scale_factor;
 		__entry->capacity = rq->capacity;
-		__entry->cumulative_runnable_avg = rq->cumulative_runnable_avg;
+		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
 		__entry->irqload = irqload;
 		__entry->cur_freq = rq->cur_freq;
 		__entry->max_freq = rq->max_freq;
@@ -323,8 +323,8 @@ TRACE_EVENT(sched_update_history,
 		__entry->demand = p->ravg.demand;
 		memcpy(__entry->hist, p->ravg.sum_history,
 					RAVG_HIST_SIZE_MAX * sizeof(u32));
-		__entry->nr_big_tasks = rq->nr_big_tasks;
-		__entry->nr_small_tasks = rq->nr_small_tasks;
+		__entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+		__entry->nr_small_tasks = rq->hmp_stats.nr_small_tasks;
 		__entry->cpu = rq->cpu;
 	),
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 494c6cc518b0..8ad6ea28b278 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -859,7 +859,6 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 	sched_info_queued(rq, p);
 	p->sched_class->enqueue_task(rq, p, flags);
 	trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
-	inc_cumulative_runnable_avg(rq, p);
 }
 
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -869,7 +868,6 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 	sched_info_dequeued(rq, p);
 	p->sched_class->dequeue_task(rq, p, flags);
 	trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
-	dec_cumulative_runnable_avg(rq, p);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1684,12 +1682,8 @@ static void update_history(struct rq *rq, struct task_struct *p,
 	}
 	p->ravg.sum = 0;
 
-	if (p->on_rq) {
-		rq->cumulative_runnable_avg -= p->ravg.demand;
-		BUG_ON((s64)rq->cumulative_runnable_avg < 0);
-		if (p->sched_class == &fair_sched_class)
-			dec_nr_big_small_task(rq, p);
-	}
+	if (p->on_rq)
+		p->sched_class->dec_hmp_sched_stats(rq, p);
 
 	avg = div64_u64(sum, sched_ravg_hist_size);
@@ -1704,11 +1698,8 @@ static void update_history(struct rq *rq, struct task_struct *p,
 	p->ravg.demand = demand;
 
-	if (p->on_rq) {
-		rq->cumulative_runnable_avg += p->ravg.demand;
-		if (p->sched_class == &fair_sched_class)
-			inc_nr_big_small_task(rq, p);
-	}
+	if (p->on_rq)
+		p->sched_class->inc_hmp_sched_stats(rq, p);
 
 done:
 	trace_sched_update_history(rq, p, runtime, samples, event);
@@ -2094,7 +2085,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 #ifdef CONFIG_SCHED_FREQ_INPUT
 		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
 #endif
-		rq->cumulative_runnable_avg = 0;
+		rq->hmp_stats.cumulative_runnable_avg = 0;
 		fixup_nr_big_small_task(cpu);
 	}
@@ -2248,11 +2239,8 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 		 * reflect new demand. Restore load temporarily for such
 		 * task on its runqueue
 		 */
-		if (p->on_rq) {
-			inc_cumulative_runnable_avg(src_rq, p);
-			if (p->sched_class == &fair_sched_class)
-				inc_nr_big_small_task(src_rq, p);
-		}
+		if (p->on_rq)
+			p->sched_class->inc_hmp_sched_stats(src_rq, p);
 
 		update_task_ravg(p, task_rq(p), TASK_MIGRATE, wallclock, 0);
@@ -2261,11 +2249,8 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
 		 * Remove task's load from rq as its now migrating to
 		 * another cpu.
 		 */
-		if (p->on_rq) {
-			dec_cumulative_runnable_avg(src_rq, p);
-			if (p->sched_class == &fair_sched_class)
-				dec_nr_big_small_task(src_rq, p);
-		}
+		if (p->on_rq)
+			p->sched_class->dec_hmp_sched_stats(src_rq, p);
 
 	if (p->ravg.curr_window) {
 		src_rq->curr_runnable_sum -= p->ravg.curr_window;
@@ -9174,12 +9159,12 @@ void __init sched_init(void)
 		rq->min_freq = 1;
 		rq->max_possible_freq = 1;
 		rq->max_possible_capacity = 0;
-		rq->cumulative_runnable_avg = 0;
+		rq->hmp_stats.cumulative_runnable_avg = 0;
 		rq->efficiency = 1024;
 		rq->capacity = 1024;
 		rq->load_scale_factor = 1024;
 		rq->window_start = 0;
-		rq->nr_small_tasks = rq->nr_big_tasks = 0;
+		rq->hmp_stats.nr_small_tasks = rq->hmp_stats.nr_big_tasks = 0;
 		rq->hmp_flags = 0;
 		rq->mostly_idle_load = pct_to_real(20);
 		rq->mostly_idle_nr_run = 3;
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 752a0de12871..f0d1e0b40195 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -320,8 +320,8 @@ do { \
 	P(max_freq);
 #endif
 #ifdef CONFIG_SCHED_HMP
-	P(nr_big_tasks);
-	P(nr_small_tasks);
+	P(hmp_stats.nr_big_tasks);
+	P(hmp_stats.nr_small_tasks);
 #endif
 #undef P
 #undef PN
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4da8b618232e..3b5f061ec020 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2590,7 +2590,7 @@ unsigned int __read_mostly sched_enable_hmp = 0;
 /* A cpu can no longer accomodate more tasks if:
  *
  *	rq->nr_running > sysctl_sched_spill_nr_run ||
- *	rq->cumulative_runnable_avg > sched_spill_load
+ *	rq->hmp_stats.cumulative_runnable_avg > sched_spill_load
  */
 
 unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
@@ -2836,7 +2836,7 @@ static inline u64 cpu_load(int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	return scale_load_to_cpu(rq->cumulative_runnable_avg, cpu);
+	return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu);
 }
 
 static inline u64 cpu_load_sync(int cpu, int sync)
@@ -2844,7 +2844,7 @@ static inline u64 cpu_load_sync(int cpu, int sync)
 	struct rq *rq = cpu_rq(cpu);
 	u64 load;
 
-	load = rq->cumulative_runnable_avg;
+	load = rq->hmp_stats.cumulative_runnable_avg;
 
 	/*
 	 * If load is being checked in a sync wakeup environment,
@@ -3469,28 +3469,42 @@ done:
 	return best_cpu;
 }
 
-void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
+static void
+inc_nr_big_small_task(struct hmp_sched_stats *stats, struct task_struct *p)
 {
 	if (!sched_enable_hmp || sched_disable_window_stats)
 		return;
 
 	if (is_big_task(p))
-		rq->nr_big_tasks++;
+		stats->nr_big_tasks++;
 	else if (is_small_task(p))
-		rq->nr_small_tasks++;
+		stats->nr_small_tasks++;
 }
 
-void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
+static void
+dec_nr_big_small_task(struct hmp_sched_stats *stats, struct task_struct *p)
 {
 	if (!sched_enable_hmp || sched_disable_window_stats)
 		return;
 
 	if (is_big_task(p))
-		rq->nr_big_tasks--;
+		stats->nr_big_tasks--;
 	else if (is_small_task(p))
-		rq->nr_small_tasks--;
+		stats->nr_small_tasks--;
+
+	BUG_ON(stats->nr_big_tasks < 0 || stats->nr_small_tasks < 0);
+}
 
-	BUG_ON(rq->nr_big_tasks < 0 || rq->nr_small_tasks < 0);
+static void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+	inc_nr_big_small_task(&rq->hmp_stats, p);
+}
+
+static void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+	dec_nr_big_small_task(&rq->hmp_stats, p);
 }
 
 /*
@@ -3502,10 +3516,10 @@ void fixup_nr_big_small_task(int cpu)
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *p;
 
-	rq->nr_big_tasks = 0;
-	rq->nr_small_tasks = 0;
+	rq->hmp_stats.nr_big_tasks = 0;
+	rq->hmp_stats.nr_small_tasks = 0;
 
 	list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
-		inc_nr_big_small_task(rq, p);
+		inc_nr_big_small_task(&rq->hmp_stats, p);
 }
 
 /* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
@@ -3840,7 +3854,7 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
 
 static inline int nr_big_tasks(struct rq *rq)
 {
-	return rq->nr_big_tasks;
+	return rq->hmp_stats.nr_big_tasks;
 }
 
 static inline int is_cpu_throttling_imminent(int cpu)
@@ -3873,6 +3887,19 @@ unsigned int cpu_temp(int cpu)
 	return 0;
 }
 
+static void
+inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+	inc_nr_big_small_task(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+	dec_nr_big_small_task(&rq->hmp_stats, p);
+}
 
 #else /* CONFIG_SCHED_HMP */
@@ -3945,6 +3972,18 @@ unsigned int cpu_temp(int cpu)
 	return 0;
 }
 
+static inline void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p) { }
+static inline void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void
+dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
+{
+}
 
 #endif /* CONFIG_SCHED_HMP */
@@ -4048,7 +4087,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 	if (!cfs_rq && weight) {
 		se = container_of(sa, struct sched_entity, avg);
 		if (entity_is_task(se) && se->on_rq)
-			dec_cumulative_runnable_avg(rq_of(cfs_rq), task_of(se));
+			dec_hmp_sched_stats_fair(rq_of(cfs_rq), task_of(se));
 	}
 
 	scale_freq = arch_scale_freq_capacity(NULL, cpu);
@@ -4116,7 +4155,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 	}
 
 	if (se && entity_is_task(se) && se->on_rq)
-		inc_cumulative_runnable_avg(rq_of(cfs_rq), task_of(se));
+		inc_hmp_sched_stats_fair(rq_of(cfs_rq), task_of(se));
 
 	if (running)
 		sa->util_sum += scaled_delta * scale_cpu;
@@ -4358,6 +4397,9 @@ static inline int idle_balance(struct rq *rq)
 	return 0;
 }
 
+static inline void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p) { }
+static inline void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p) { }
+
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_SCHED_HMP
@@ -5705,7 +5747,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!se) {
 		add_nr_running(rq, 1);
-		inc_nr_big_small_task(rq, p);
+		inc_rq_hmp_stats(rq, p);
 	}
 	hrtick_update(rq);
 }
@@ -5766,7 +5808,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 	if (!se) {
 		sub_nr_running(rq, 1);
-		dec_nr_big_small_task(rq, p);
+		dec_rq_hmp_stats(rq, p);
 	}
 	hrtick_update(rq);
 }
@@ -7974,8 +8016,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			*overload = true;
 
 #ifdef CONFIG_SCHED_HMP
-		sgs->sum_nr_big_tasks += rq->nr_big_tasks;
-		sgs->sum_nr_small_tasks += rq->nr_small_tasks;
+		sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
+		sgs->sum_nr_small_tasks += rq->hmp_stats.nr_small_tasks;
 		sgs->group_cpu_load += cpu_load(i);
 #endif
@@ -8481,18 +8523,20 @@ out_balanced:
 static struct rq *find_busiest_queue_hmp(struct lb_env *env,
 				struct sched_group *group)
 {
-	struct rq *busiest = NULL, *rq;
+	struct rq *busiest = NULL;
 	u64 max_runnable_avg = 0;
 	int i;
 
 	for_each_cpu(i, sched_group_cpus(group)) {
+		struct rq *rq = cpu_rq(i);
+		u64 cumulative_runnable_avg =
+				rq->hmp_stats.cumulative_runnable_avg;
+
 		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
-		rq = cpu_rq(i);
-
-		if (rq->cumulative_runnable_avg > max_runnable_avg) {
-			max_runnable_avg = rq->cumulative_runnable_avg;
+		if (cumulative_runnable_avg > max_runnable_avg) {
+			max_runnable_avg = cumulative_runnable_avg;
 			busiest = rq;
 		}
 	}
@@ -9616,8 +9660,9 @@ static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
 	    && rq->max_freq > rq->mostly_idle_freq)
 		return 0;
 
-	if (rq->nr_running >= 2 && (rq->nr_running - rq->nr_small_tasks >= 2 ||
-			rq->nr_running > rq->mostly_idle_nr_run ||
+	if (rq->nr_running >= 2 &&
+	    (rq->nr_running - rq->hmp_stats.nr_small_tasks >= 2 ||
+	     rq->nr_running > rq->mostly_idle_nr_run ||
 			cpu_load(cpu) > rq->mostly_idle_load)) {
 
 		if (rq->capacity == max_capacity)
@@ -10242,6 +10287,10 @@ const struct sched_class fair_sched_class = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.task_move_group = task_move_group_fair,
 #endif
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats = inc_hmp_sched_stats_fair,
+	.dec_hmp_sched_stats = dec_hmp_sched_stats_fair,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index c4ae0f1fdf9b..cccb9c97158e 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -79,6 +79,20 @@ static void update_curr_idle(struct rq *rq)
 {
 }
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p)
+{
+}
+
+static void
+dec_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p)
+{
+}
+
+#endif
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -107,4 +121,8 @@ const struct sched_class idle_sched_class = {
 	.prio_changed = prio_changed_idle,
 	.switched_to = switched_to_idle,
 	.update_curr = update_curr_idle,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats = inc_hmp_sched_stats_idle,
+	.dec_hmp_sched_stats = dec_hmp_sched_stats_idle,
+#endif
 };
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f20b6711a1bb..a817272a390b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1182,6 +1182,30 @@ void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
 static inline
 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
 {
@@ -1313,6 +1337,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
+	inc_hmp_sched_stats_rt(rq, p);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
@@ -1324,6 +1349,7 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se);
+	dec_hmp_sched_stats_rt(rq, p);
 
 	dequeue_pushable_task(rq, p);
 }
@@ -1670,12 +1696,15 @@ static int find_lowest_rq_hmp(struct task_struct *task)
 	}
 	return best_cpu;
 }
-#else
+
+#else /* CONFIG_SCHED_HMP */
+
 static int find_lowest_rq_hmp(struct task_struct *task)
 {
 	return -1;
 }
-#endif
+
+#endif /* CONFIG_SCHED_HMP */
 
 static int find_lowest_rq(struct task_struct *task)
 {
@@ -2240,6 +2269,7 @@ void __init init_sched_rt_class(void)
 					GFP_KERNEL, cpu_to_node(i));
 	}
 }
+
 #endif /* CONFIG_SMP */
 
 /*
@@ -2414,6 +2444,10 @@ const struct sched_class rt_sched_class = {
 	.switched_to = switched_to_rt,
 
 	.update_curr = update_curr_rt,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats = inc_hmp_sched_stats_rt,
+	.dec_hmp_sched_stats = dec_hmp_sched_stats_rt,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce26255ed810..d514d36e4685 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -345,6 +345,15 @@ struct cfs_bandwidth { };
 
 #endif /* CONFIG_CGROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HMP
+
+struct hmp_sched_stats {
+	int nr_big_tasks, nr_small_tasks;
+	u64 cumulative_runnable_avg;
+};
+
+#endif
+
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
@@ -652,7 +661,8 @@ struct rq {
 	unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
 	struct cpumask freq_domain_cpumask;
 
-	u64 cumulative_runnable_avg;
+	struct hmp_sched_stats hmp_stats;
+
 	int efficiency; /* Differentiate cpus with different IPC capability */
 	int load_scale_factor;
 	int capacity;
@@ -662,6 +672,7 @@ struct rq {
 	u32 mostly_idle_load;
 	int mostly_idle_nr_run;
 	int mostly_idle_freq;
+	unsigned long hmp_flags;
 
 	u64 cur_irqload;
 	u64 avg_irqload;
@@ -677,11 +688,6 @@ struct rq {
 	u64 prev_runnable_sum;
 #endif
 
-#ifdef CONFIG_SCHED_HMP
-	int nr_small_tasks, nr_big_tasks;
-	unsigned long hmp_flags;
-#endif
-
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	u64 prev_irq_time;
 #endif
@@ -967,6 +973,7 @@ extern struct mutex policy_mutex;
 extern unsigned int sched_ravg_window;
 extern unsigned int sched_use_pelt;
 extern unsigned int sched_disable_window_stats;
+extern unsigned int sched_enable_hmp;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
 extern unsigned int pct_task_load(struct task_struct *p);
@@ -1007,24 +1014,35 @@ static inline int capacity(struct rq *rq)
 }
 
 static inline void
-inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				struct task_struct *p)
 {
-	if (sched_use_pelt)
-		rq->cumulative_runnable_avg +=
-				p->se.avg.runnable_avg_sum_scaled;
-	else if (!sched_disable_window_stats)
-		rq->cumulative_runnable_avg += p->ravg.demand;
+	u32 task_load;
+
+	if (!sched_enable_hmp || sched_disable_window_stats)
+		return;
+
+	task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
+			(sched_disable_window_stats ? 0 : p->ravg.demand);
+
+	stats->cumulative_runnable_avg += task_load;
 }
 
 static inline void
-dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				struct task_struct *p)
 {
-	if (sched_use_pelt)
-		rq->cumulative_runnable_avg -=
-				p->se.avg.runnable_avg_sum_scaled;
-	else if (!sched_disable_window_stats)
-		rq->cumulative_runnable_avg -= p->ravg.demand;
-	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
+	u32 task_load;
+
+	if (!sched_enable_hmp || sched_disable_window_stats)
+		return;
+
+	task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
+			(sched_disable_window_stats ? 0 : p->ravg.demand);
+
+	stats->cumulative_runnable_avg -= task_load;
+
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
 }
 
 #define pct_to_real(tunable) \
@@ -1060,6 +1078,8 @@ static inline int sched_cpu_high_irqload(int cpu)
 
 #else /* CONFIG_SCHED_HMP */
 
+struct hmp_sched_stats;
+
 static inline int pct_task_load(struct task_struct *p) { return 0; }
 
 static inline int capacity(struct rq *rq)
@@ -1067,13 +1087,13 @@ static inline int capacity(struct rq *rq)
 	return SCHED_LOAD_SCALE;
 }
 
-static inline void
-inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
 {
 }
 
-static inline void
-dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
 {
 }
@@ -1150,14 +1170,10 @@ static inline void clear_reserved(int cpu)
 	clear_bit(CPU_RESERVED, &rq->hmp_flags);
 }
 
-extern unsigned int sched_enable_hmp;
-
 int mostly_idle_cpu(int cpu);
 extern void check_for_migration(struct rq *rq, struct task_struct *p);
 extern void pre_big_small_task_count_change(const struct cpumask *cpus);
 extern void post_big_small_task_count_change(const struct cpumask *cpus);
-extern void inc_nr_big_small_task(struct rq *rq, struct task_struct *p);
-extern void dec_nr_big_small_task(struct rq *rq, struct task_struct *p);
 extern void set_hmp_defaults(void);
 extern unsigned int power_cost_at_freq(int cpu, unsigned int freq);
 extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
@@ -1174,14 +1190,6 @@ static inline void pre_big_small_task_count_change(void) { }
 static inline void post_big_small_task_count_change(void) { }
 static inline void set_hmp_defaults(void) { }
 
-static inline void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
-{
-}
-
-static inline void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
-{
-}
-
 static inline void clear_reserved(int cpu) { }
 
 #define power_cost_at_freq(...) 0
@@ -1521,6 +1529,10 @@ struct sched_class {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_move_group) (struct task_struct *p);
 #endif
+#ifdef CONFIG_SCHED_HMP
+	void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+	void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+#endif
 };
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index cbc67da10954..f101bb39ee5e 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -17,6 +17,30 @@ select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
+
+#endif /* CONFIG_SCHED_HMP */
+
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -42,12 +66,14 @@ static void
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	add_nr_running(rq, 1);
+	inc_hmp_sched_stats_stop(rq, p);
 }
 
 static void
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	sub_nr_running(rq, 1);
+	dec_hmp_sched_stats_stop(rq, p);
 }
 
 static void yield_task_stop(struct rq *rq)
@@ -134,4 +160,8 @@ const struct sched_class stop_sched_class = {
 	.prio_changed = prio_changed_stop,
 	.switched_to = switched_to_stop,
 	.update_curr = update_curr_stop,
+#ifdef CONFIG_SCHED_HMP
+	.inc_hmp_sched_stats = inc_hmp_sched_stats_stop,
+	.dec_hmp_sched_stats = dec_hmp_sched_stats_stop,
+#endif
 };