author	Chris Redpath <chris.redpath@arm.com>	2018-02-27 15:29:09 +0000
committer	Georg Veichtlbauer <georg@vware.at>	2023-07-16 13:06:24 +0200
commit	b775cb29f66382f04ba4c1e7ad385081a020269b (patch)
tree	e54efb426413902bdacb06c08639873325b25df0 /kernel/sched/rt.c
parent	ebdb82f7b34aeab34623d7a5e4dd673fc2807842 (diff)
ANDROID: Move schedtune en/dequeue before schedutil update triggers
CPU rq util updates happen when rq signals are updated as part of
enqueue and dequeue operations. Doing these updates triggers a call
to the registered util update handler, which takes schedtune boosting
into account.

Enqueueing the task in the correct schedtune group after this happens
means that we will potentially not see the boost for an entire
throttle period.

Move the enqueue/dequeue operations for schedtune before the signal
updates which can trigger OPP changes.

Change-Id: I4236e6b194bc5daad32ff33067d4be1987996780
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
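For illustration, below is a minimal user-space sketch of the ordering
problem the message describes. It is not kernel code: st_boost, st_tasks,
util_update_handler(), enqueue_old() and enqueue_new() are all hypothetical
stand-ins for the real schedtune accounting and the registered util update
handler, reduced to the point where the ordering effect is visible.

    #include <stdio.h>

    /* Hypothetical per-CPU schedtune state (boost in percent). */
    static int st_boost;   /* boost of the task's schedtune group      */
    static int st_tasks;   /* boosted tasks currently enqueued on CPU  */

    /* Stand-in for the registered util update handler: it derives an
     * OPP request from utilization plus the currently visible boost. */
    static void util_update_handler(int util)
    {
        int boost = st_tasks > 0 ? st_boost : 0;
        int freq = util + (util * boost) / 100;
        printf("OPP request: util=%d boost=%d%% -> freq=%d\n",
               util, boost, freq);
    }

    /* Old ordering: the signal update (and thus the handler) runs
     * before the task is accounted in its schedtune group, so the
     * boost is missed until the next update -- potentially a whole
     * throttle period. */
    static void enqueue_old(int util)
    {
        util_update_handler(util);  /* sees st_tasks == 0, no boost */
        st_tasks++;                 /* schedtune accounting, too late */
    }

    /* New ordering, as in this patch: schedtune accounting first,
     * so the handler already sees the boosted group. */
    static void enqueue_new(int util)
    {
        st_tasks++;                 /* schedtune accounting first   */
        util_update_handler(util);  /* boost applied immediately    */
    }

    int main(void)
    {
        st_boost = 50;

        st_tasks = 0;
        enqueue_old(400);   /* prints freq=400: boost missed   */

        st_tasks = 0;
        enqueue_new(400);   /* prints freq=600: boost applied  */
        return 0;
    }

Run as written, the old ordering requests a frequency without the boost
while the new ordering includes it, which is exactly why the patch moves
schedtune_enqueue_task()/schedtune_dequeue_task() ahead of the signal
updates that can trigger OPP changes.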
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	8
1 file changed, 8 insertions, 0 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2083a54cdd49..ac81704e14d9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1446,6 +1446,10 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
+#ifdef CONFIG_SMP
+	schedtune_enqueue_task(p, cpu_of(rq));
+#endif
+
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
@@ -1488,6 +1492,10 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
 
+#ifdef CONFIG_SMP
+	schedtune_dequeue_task(p, cpu_of(rq));
+#endif
+
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se, flags);
 	walt_dec_cumulative_runnable_avg(rq, p);