Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--   kernel/sched/core.c   37
1 file changed, 11 insertions(+), 26 deletions(-)
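This patch moves the HMP load-tracking counters into an rq->hmp_stats container and replaces the open-coded cumulative_runnable_avg / nr_big_small_task bookkeeping in core.c with per-scheduling-class callbacks. For orientation, a minimal sketch of the interface the diff appears to rely on is shown here; the struct layout, hook signatures, and the fair-class helper are assumptions inferred from identifiers used in this diff, not the actual scheduler headers.

/*
 * Illustrative sketch only: field and hook names are taken from this
 * diff, but the real definitions live in the HMP scheduler patches
 * and may differ.
 */
struct hmp_sched_stats {
	u64	cumulative_runnable_avg;	/* sum of demand of runnable tasks */
	int	nr_big_tasks;			/* maintained by the fair class */
	int	nr_small_tasks;
};

struct sched_class {
	/* ... existing enqueue_task/dequeue_task/etc. callbacks ... */
	void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
	void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
};

/*
 * Hypothetical fair-class implementation mirroring the open-coded
 * logic removed from core.c below: the fair class updates both the
 * runnable average and the big/small task counts, while other classes
 * would only need to touch cumulative_runnable_avg.
 */
static void inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
{
	inc_cumulative_runnable_avg(rq, p);
	inc_nr_big_small_task(rq, p);
}

With hooks of this shape in place, call sites such as enqueue_task(), dequeue_task(), update_history() and fixup_busy_time() reduce to a single indirect call, e.g. p->sched_class->inc_hmp_sched_stats(rq, p), as the hunks below show.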
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 494c6cc518b0..8ad6ea28b278 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -859,7 +859,6 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
sched_info_queued(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
- inc_cumulative_runnable_avg(rq, p);
}
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -869,7 +868,6 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
- dec_cumulative_runnable_avg(rq, p);
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1684,12 +1682,8 @@ static void update_history(struct rq *rq, struct task_struct *p,
}
p->ravg.sum = 0;
- if (p->on_rq) {
- rq->cumulative_runnable_avg -= p->ravg.demand;
- BUG_ON((s64)rq->cumulative_runnable_avg < 0);
- if (p->sched_class == &fair_sched_class)
- dec_nr_big_small_task(rq, p);
- }
+ if (p->on_rq)
+ p->sched_class->dec_hmp_sched_stats(rq, p);
avg = div64_u64(sum, sched_ravg_hist_size);
@@ -1704,11 +1698,8 @@ static void update_history(struct rq *rq, struct task_struct *p,
p->ravg.demand = demand;
- if (p->on_rq) {
- rq->cumulative_runnable_avg += p->ravg.demand;
- if (p->sched_class == &fair_sched_class)
- inc_nr_big_small_task(rq, p);
- }
+ if (p->on_rq)
+ p->sched_class->inc_hmp_sched_stats(rq, p);
done:
trace_sched_update_history(rq, p, runtime, samples, event);
@@ -2094,7 +2085,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
#ifdef CONFIG_SCHED_FREQ_INPUT
rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
#endif
- rq->cumulative_runnable_avg = 0;
+ rq->hmp_stats.cumulative_runnable_avg = 0;
fixup_nr_big_small_task(cpu);
}
@@ -2248,11 +2239,8 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
* reflect new demand. Restore load temporarily for such
* task on its runqueue
*/
- if (p->on_rq) {
- inc_cumulative_runnable_avg(src_rq, p);
- if (p->sched_class == &fair_sched_class)
- inc_nr_big_small_task(src_rq, p);
- }
+ if (p->on_rq)
+ p->sched_class->inc_hmp_sched_stats(src_rq, p);
update_task_ravg(p, task_rq(p), TASK_MIGRATE,
wallclock, 0);
@@ -2261,11 +2249,8 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
* Remove task's load from rq as it's now migrating to
* another cpu.
*/
- if (p->on_rq) {
- dec_cumulative_runnable_avg(src_rq, p);
- if (p->sched_class == &fair_sched_class)
- dec_nr_big_small_task(src_rq, p);
- }
+ if (p->on_rq)
+ p->sched_class->dec_hmp_sched_stats(src_rq, p);
if (p->ravg.curr_window) {
src_rq->curr_runnable_sum -= p->ravg.curr_window;
@@ -9174,12 +9159,12 @@ void __init sched_init(void)
rq->min_freq = 1;
rq->max_possible_freq = 1;
rq->max_possible_capacity = 0;
- rq->cumulative_runnable_avg = 0;
+ rq->hmp_stats.cumulative_runnable_avg = 0;
rq->efficiency = 1024;
rq->capacity = 1024;
rq->load_scale_factor = 1024;
rq->window_start = 0;
- rq->nr_small_tasks = rq->nr_big_tasks = 0;
+ rq->hmp_stats.nr_small_tasks = rq->hmp_stats.nr_big_tasks = 0;
rq->hmp_flags = 0;
rq->mostly_idle_load = pct_to_real(20);
rq->mostly_idle_nr_run = 3;