author	Chris Redpath <chris.redpath@arm.com>	2018-02-27 15:29:09 +0000
committer	Georg Veichtlbauer <georg@vware.at>	2023-07-16 13:06:24 +0200
commit	b775cb29f66382f04ba4c1e7ad385081a020269b (patch)
tree	e54efb426413902bdacb06c08639873325b25df0
parent	ebdb82f7b34aeab34623d7a5e4dd673fc2807842 (diff)
ANDROID: Move schedtune en/dequeue before schedutil update triggers
CPU rq util updates happen when rq signals are updated as part of enqueue
and dequeue operations. Doing these updates triggers a call to the
registered util update handler, which takes schedtune boosting into
account. Enqueueing the task in the correct schedtune group after this
happens means that we will potentially not see the boost for an entire
throttle period.

Move the enqueue/dequeue operations for schedtune before the signal
updates which can trigger OPP changes.

Change-Id: I4236e6b194bc5daad32ff33067d4be1987996780
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
-rw-r--r--	kernel/sched/fair.c	63
-rw-r--r--	kernel/sched/rt.c	8
2 files changed, 38 insertions(+), 33 deletions(-)
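The ordering problem described in the commit message can be illustrated outside the kernel. The stand-alone C sketch below is not part of the patch; every name in it (toy_util_update, toy_enqueue_old, toy_enqueue_new, the demo numbers) is hypothetical and only models the idea: if the boost-group accounting happens after the signal update that calls the registered util handler, the handler (and therefore the OPP request) sees the un-boosted utilization.

/*
 * Minimal stand-alone sketch (not kernel code): models why the schedtune
 * accounting must happen before the util-update hook fires.  All names and
 * values here are hypothetical.
 */
#include <stdio.h>

static int boost_pct;			/* models the rq's schedtune boost */
static int last_requested_capacity;	/* models the OPP picked by schedutil */

/* Models the registered util update handler: reads whatever boost is visible now. */
static void toy_util_update(int raw_util)
{
	last_requested_capacity = raw_util + (raw_util * boost_pct) / 100;
}

/* Old ordering: signal update (and OPP request) first, boost accounting after. */
static void toy_enqueue_old(int raw_util, int task_boost)
{
	toy_util_update(raw_util);	/* fires before the boost is visible */
	boost_pct = task_boost;
}

/* New ordering (as in this patch): account the boost first, then update signals. */
static void toy_enqueue_new(int raw_util, int task_boost)
{
	boost_pct = task_boost;
	toy_util_update(raw_util);	/* now sees the boosted value */
}

int main(void)
{
	boost_pct = 0;
	toy_enqueue_old(400, 25);
	printf("old ordering requests capacity %d\n", last_requested_capacity);	/* 400 */

	boost_pct = 0;
	toy_enqueue_new(400, 25);
	printf("new ordering requests capacity %d\n", last_requested_capacity);	/* 500 */
	return 0;
}

Built with a plain cc, this prints 400 for the old ordering and 500 for the new one, mirroring the boost that would otherwise be missed until the next util update.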
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee5f8e686a31..ef6046d3a016 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5951,6 +5951,25 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct sched_entity *se = &p->se;
#ifdef CONFIG_SMP
int task_new = flags & ENQUEUE_WAKEUP_NEW;
+
+ /*
+ * Update SchedTune accounting.
+ *
+ * We do it before updating the CPU capacity to ensure the
+ * boost value of the current task is accounted for in the
+ * selection of the OPP.
+ *
+ * We do it also in the case where we enqueue a throttled task;
+ * we could argue that a throttled task should not boost a CPU,
+ * however:
+ * a) properly implementing CPU boosting considering throttled
+ * tasks will increase a lot the complexity of the solution
+ * b) it's not easy to quantify the benefits introduced by
+ * such a more complex solution.
+ * Thus, for the time being we go for the simple solution and boost
+ * also for throttled RQs.
+ */
+ schedtune_enqueue_task(p, cpu_of(rq));
#endif
/*
@@ -6001,26 +6020,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
}
#ifdef CONFIG_SMP
-
- /*
- * Update SchedTune accounting.
- *
- * We do it before updating the CPU capacity to ensure the
- * boost value of the current task is accounted for in the
- * selection of the OPP.
- *
- * We do it also in the case where we enqueue a throttled task;
- * we could argue that a throttled task should not boost a CPU,
- * however:
- * a) properly implementing CPU boosting considering throttled
- * tasks will increase a lot the complexity of the solution
- * b) it's not easy to quantify the benefits introduced by
- * such a more complex solution.
- * Thus, for the time being we go for the simple solution and boost
- * also for throttled RQs.
- */
- schedtune_enqueue_task(p, cpu_of(rq));
-
if (energy_aware() && !se) {
walt_inc_cumulative_runnable_avg(rq, p);
if (!task_new && !rq->rd->overutilized &&
@@ -6050,6 +6049,17 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (task_sleep && rq->nr_running == 1)
flags |= DEQUEUE_IDLE;
+#ifdef CONFIG_SMP
+ /*
+ * Update SchedTune accounting
+ *
+ * We do it before updating the CPU capacity to ensure the
+ * boost value of the current task is accounted for in the
+ * selection of the OPP.
+ */
+ schedtune_dequeue_task(p, cpu_of(rq));
+#endif
+
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
@@ -6099,19 +6109,6 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
dec_rq_hmp_stats(rq, p, 1);
}
-#ifdef CONFIG_SMP
-
- /*
- * Update SchedTune accounting
- *
- * We do it before updating the CPU capacity to ensure the
- * boost value of the current task is accounted for in the
- * selection of the OPP.
- */
- schedtune_dequeue_task(p, cpu_of(rq));
-
-#endif /* CONFIG_SMP */
-
hrtick_update(rq);
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2083a54cdd49..ac81704e14d9 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1446,6 +1446,10 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
+#ifdef CONFIG_SMP
+ schedtune_enqueue_task(p, cpu_of(rq));
+#endif
+
if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
@@ -1488,6 +1492,10 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
+#ifdef CONFIG_SMP
+ schedtune_dequeue_task(p, cpu_of(rq));
+#endif
+
update_curr_rt(rq);
dequeue_rt_entity(rt_se, flags);
walt_dec_cumulative_runnable_avg(rq, p);