Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/fair.c	63
-rw-r--r--	kernel/sched/rt.c	8
2 files changed, 38 insertions, 33 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee5f8e686a31..ef6046d3a016 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5951,6 +5951,25 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct sched_entity *se = &p->se;
 #ifdef CONFIG_SMP
 	int task_new = flags & ENQUEUE_WAKEUP_NEW;
+
+	/*
+	 * Update SchedTune accounting.
+	 *
+	 * We do it before updating the CPU capacity to ensure the
+	 * boost value of the current task is accounted for in the
+	 * selection of the OPP.
+	 *
+	 * We do it also in the case where we enqueue a throttled task;
+	 * we could argue that a throttled task should not boost a CPU,
+	 * however:
+	 * a) properly implementing CPU boosting considering throttled
+	 *    tasks will increase a lot the complexity of the solution
+	 * b) it's not easy to quantify the benefits introduced by
+	 *    such a more complex solution.
+	 * Thus, for the time being we go for the simple solution and boost
+	 * also for throttled RQs.
+	 */
+	schedtune_enqueue_task(p, cpu_of(rq));
 #endif
 
 	/*
@@ -6001,26 +6020,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	}
 
 #ifdef CONFIG_SMP
-
-	/*
-	 * Update SchedTune accounting.
-	 *
-	 * We do it before updating the CPU capacity to ensure the
-	 * boost value of the current task is accounted for in the
-	 * selection of the OPP.
-	 *
-	 * We do it also in the case where we enqueue a throttled task;
-	 * we could argue that a throttled task should not boost a CPU,
-	 * however:
-	 * a) properly implementing CPU boosting considering throttled
-	 *    tasks will increase a lot the complexity of the solution
-	 * b) it's not easy to quantify the benefits introduced by
-	 *    such a more complex solution.
-	 * Thus, for the time being we go for the simple solution and boost
-	 * also for throttled RQs.
-	 */
-	schedtune_enqueue_task(p, cpu_of(rq));
-
 	if (energy_aware() && !se) {
 		walt_inc_cumulative_runnable_avg(rq, p);
 		if (!task_new && !rq->rd->overutilized &&
@@ -6050,6 +6049,17 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (task_sleep && rq->nr_running == 1)
 		flags |= DEQUEUE_IDLE;
 
+#ifdef CONFIG_SMP
+	/*
+	 * Update SchedTune accounting
+	 *
+	 * We do it before updating the CPU capacity to ensure the
+	 * boost value of the current task is accounted for in the
+	 * selection of the OPP.
+	 */
+	schedtune_dequeue_task(p, cpu_of(rq));
+#endif
+
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		dequeue_entity(cfs_rq, se, flags);
@@ -6099,19 +6109,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			dec_rq_hmp_stats(rq, p, 1);
 	}
 
-#ifdef CONFIG_SMP
-
-	/*
-	 * Update SchedTune accounting
-	 *
-	 * We do it before updating the CPU capacity to ensure the
-	 * boost value of the current task is accounted for in the
-	 * selection of the OPP.
-	 */
-	schedtune_dequeue_task(p, cpu_of(rq));
-
-#endif /* CONFIG_SMP */
-
 	hrtick_update(rq);
 }
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2083a54cdd49..ac81704e14d9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1446,6 +1446,10 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
+#ifdef CONFIG_SMP
+	schedtune_enqueue_task(p, cpu_of(rq));
+#endif
+
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
 
@@ -1488,6 +1492,10 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
+#ifdef CONFIG_SMP
+	schedtune_dequeue_task(p, cpu_of(rq));
+#endif
+
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se, flags);
 	walt_dec_cumulative_runnable_avg(rq, p);
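The fair.c hunks are purely an ordering change: schedtune_enqueue_task() and schedtune_dequeue_task() now run at the top of enqueue_task_fair()/dequeue_task_fair(), before the per-entity loops that update utilization and can trigger an OPP (frequency) request, rather than after them. Below is a minimal, self-contained userspace sketch of why that ordering matters; cpu_boost, boost_enqueue() and select_opp() are illustrative stand-ins, not the kernel's schedtune implementation.

#include <stdio.h>

#define NR_CPUS 4

static int cpu_boost[NR_CPUS];	/* aggregate boost of runnable tasks, per CPU */

/* stand-in for schedtune_enqueue_task(): account the task's boost */
static void boost_enqueue(int cpu, int task_boost)
{
	cpu_boost[cpu] += task_boost;
}

/* stand-in for schedtune_dequeue_task() */
static void boost_dequeue(int cpu, int task_boost)
{
	cpu_boost[cpu] -= task_boost;
}

/* stand-in for the capacity/OPP request made during enqueue */
static void select_opp(int cpu, int util)
{
	printf("cpu%d: util=%d boost=%d -> request capacity %d\n",
	       cpu, util, cpu_boost[cpu], util + cpu_boost[cpu]);
}

static void enqueue_task(int cpu, int util, int task_boost)
{
	boost_enqueue(cpu, task_boost);	/* accounting first, as after this patch */
	select_opp(cpu, util);		/* the OPP request already sees the boost */
}

int main(void)
{
	enqueue_task(0, 300, 100);	/* requests 400; accounting last would request 300 */
	boost_dequeue(0, 100);
	return 0;
}

With the pre-patch ordering, the OPP request issued while enqueueing a boosted task could be computed from stale boost state and only reflect the boost on a later scheduler event.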
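Both rt.c call sites wrap the hooks in #ifdef CONFIG_SMP, mirroring the fair.c paths, since SchedTune's per-CPU accounting is only built on SMP. A common kernel idiom that would avoid the inline guards is to provide no-op stubs in a shared header; the following is a sketch under that assumption (the tree shown here keeps the explicit guards instead):

struct task_struct;

#ifdef CONFIG_SMP
void schedtune_enqueue_task(struct task_struct *p, int cpu);
void schedtune_dequeue_task(struct task_struct *p, int cpu);
#else
static inline void schedtune_enqueue_task(struct task_struct *p, int cpu) { }
static inline void schedtune_dequeue_task(struct task_struct *p, int cpu) { }
#endif

Callers could then invoke the hooks unconditionally, and the !CONFIG_SMP variants would compile away.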