Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 391ec29c71c0..ac81704e14d9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -12,6 +12,7 @@
 #include <linux/hrtimer.h>
 
 #include "tune.h"
+#include "walt.h"
 
 int sched_rr_timeslice = RR_TIMESLICE;
 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
@@ -1445,10 +1446,15 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
+#ifdef CONFIG_SMP
+	schedtune_enqueue_task(p, cpu_of(rq));
+#endif
+
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se, flags);
+	walt_inc_cumulative_runnable_avg(rq, p);
 	inc_hmp_sched_stats_rt(rq, p);
 
 	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
@@ -1486,8 +1492,13 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
+#ifdef CONFIG_SMP
+	schedtune_dequeue_task(p, cpu_of(rq));
+#endif
+
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se, flags);
+	walt_dec_cumulative_runnable_avg(rq, p);
 	dec_hmp_sched_stats_rt(rq, p);
 
 	dequeue_pushable_task(rq, p);
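
For readability, below is a sketch of how enqueue_task_rt() reads once this patch is applied, reconstructed from the second hunk above; dequeue_task_rt() changes the same way, with schedtune_dequeue_task() and walt_dec_cumulative_runnable_avg(). The comments on the two new calls describe their usual roles in Android's SchedTune and WALT code and are annotations added here, not part of the patch.

static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

#ifdef CONFIG_SMP
	/* Added: account the RT task in its SchedTune boost group on this CPU. */
	schedtune_enqueue_task(p, cpu_of(rq));
#endif

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags);
	/* Added: fold the task's demand into this rq's WALT cumulative runnable average. */
	walt_inc_cumulative_runnable_avg(rq, p);
	inc_hmp_sched_stats_rt(rq, p);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}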