Diffstat (limited to 'kernel/sched/fair.c')
-rw-r--r--  kernel/sched/fair.c | 11 +++++++----
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9c717c3be75d..736adab1a503 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4250,8 +4250,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
cpu_overutilized(rq->cpu))
rq->rd->overutilized = true;
- schedtune_enqueue_task(p, cpu_of(rq));
-
/*
* We want to potentially trigger a freq switch
* request only for tasks that are waking up; this is
@@ -4262,6 +4260,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (task_new || task_wakeup)
update_capacity_of(cpu_of(rq));
}
+
+ /* Update SchedTune accounting */
+ schedtune_enqueue_task(p, cpu_of(rq));
+
#endif /* CONFIG_SMP */
hrtick_update(rq);
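After this hunk, schedtune_enqueue_task() sits outside the freq-switch conditional, so it runs for every enqueue on the CONFIG_SMP path rather than only when the whole se hierarchy was enqueued. Below is a minimal, self-contained C sketch of the resulting control flow; the stub types and printf tracing stand in for the real kernel symbols and are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Stubs standing in for kernel symbols (illustrative only). */
struct rq { int cpu; };
struct task_struct { const char *comm; };

static int cpu_of(struct rq *rq) { return rq->cpu; }
static void update_capacity_of(int cpu) { printf("update_capacity_of(%d)\n", cpu); }
static void schedtune_enqueue_task(struct task_struct *p, int cpu)
{
	printf("schedtune_enqueue_task(%s, %d)\n", p->comm, cpu);
}
static void hrtick_update(struct rq *rq) { (void)rq; }

/* Shape of the enqueue tail after the patch: SchedTune accounting is no
 * longer nested under the wakeup/new-task branch. */
static void enqueue_tail(struct rq *rq, struct task_struct *p,
			 int task_new, int task_wakeup)
{
	if (task_new || task_wakeup)
		update_capacity_of(cpu_of(rq));

	/* Update SchedTune accounting: now unconditional on this path. */
	schedtune_enqueue_task(p, cpu_of(rq));

	hrtick_update(rq);
}

int main(void)
{
	struct rq rq = { .cpu = 0 };
	struct task_struct p = { .comm = "demo" };

	/* Neither new nor waking up: accounting still runs. */
	enqueue_tail(&rq, &p, 0, 0);
	return 0;
}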
@@ -4327,7 +4329,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
#ifdef CONFIG_SMP
if (!se) {
- schedtune_dequeue_task(p, cpu_of(rq));
/*
* We want to potentially trigger a freq switch
@@ -4345,6 +4346,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
}
}
+ /* Update SchedTune accounting */
+ schedtune_dequeue_task(p, cpu_of(rq));
+
#endif /* CONFIG_SMP */
hrtick_update(rq);
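The dequeue side moves for the same reason. The apparent rationale (an assumption drawn from the hunks above, not stated in the diff itself) is balance: SchedTune keeps a per-boost-group count of runnable tasks, and if accounting is gated on a branch such as !se, which can evaluate differently at enqueue and dequeue time for the same task, the count drifts. A hypothetical, self-contained C toy with illustrative counter and hook names:

#include <stdio.h>

/* Toy model, not kernel code: a per-boost-group runnable count like the
 * one SchedTune maintains. */
static int boost_group_tasks;

static void schedtune_enqueue(void) { boost_group_tasks++; }
static void schedtune_dequeue(void) { boost_group_tasks--; }

/* Old shape: accounting nested under the same branch as the freq-switch
 * logic ('fully' models the !se condition). */
static void enqueue_old(int fully) { if (fully) schedtune_enqueue(); }
static void dequeue_old(int fully) { if (fully) schedtune_dequeue(); }

/* New shape after this patch: accounting runs unconditionally. */
static void enqueue_new(int fully) { (void)fully; schedtune_enqueue(); }
static void dequeue_new(int fully) { (void)fully; schedtune_dequeue(); }

int main(void)
{
	/* One task: partial enqueue (!se false), full dequeue (!se true). */
	boost_group_tasks = 0;
	enqueue_old(0);
	dequeue_old(1);
	printf("old: %d\n", boost_group_tasks); /* -1: count drifted */

	boost_group_tasks = 0;
	enqueue_new(0);
	dequeue_new(1);
	printf("new: %d\n", boost_group_tasks); /* 0: balanced */
	return 0;
}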
@@ -5625,7 +5629,6 @@ static inline int find_best_target(struct task_struct *p)
* The target CPU can already be at a capacity level higher
* than the one required to boost the task.
*/
-
if (new_util > capacity_orig_of(i))
continue;