path: root/kernel/sched
author    Joel Fernandes <joelaf@google.com>    2017-07-20 23:46:56 -0700
committer Georg Veichtlbauer <georg@vware.at>   2023-07-16 13:06:17 +0200
commit    ebdb82f7b34aeab34623d7a5e4dd673fc2807842 (patch)
tree      c977a35914bcac8e246bde1596b36cd91c3403c5 /kernel/sched
parent    ff383d94478af0bb62f828bad550e42681a7176e (diff)
sched/fair: Skip frequency updates if CPU about to idle
If the CPU is about to idle, prevent a frequency update. With this,
schedutil governor wake ups are reduced by more than half on a test
playing bluetooth audio.

Test: sugov wake ups drop by more than half when playing music with
screen off (476 / 1092)
Bug: 64689959
Change-Id: I400026557b4134c0ac77f51c79610a96eb985b4a
Signed-off-by: Joel Fernandes <joelaf@google.com>
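For orientation, here is a minimal, compilable sketch of the flag plumbing
this patch introduces. Names such as dequeue_task() and cpufreq_update()
are simplified stand-ins for the kernel functions, not the real APIs; only
the flag values and the skip logic mirror the hunks below.

#include <stdio.h>

#define UPDATE_TG     0x1  /* update_load_avg() flags (fair.c hunk) */
#define SKIP_CPUFREQ  0x4  /* suppress the cpufreq callback */
#define DEQUEUE_SLEEP 0x01 /* dequeue flags (sched.h hunk) */
#define DEQUEUE_IDLE  0x80 /* the last dequeue before IDLE */

/* Stand-in for cpufreq_update_util(): the sugov wake up being avoided. */
static void cpufreq_update(void)
{
	puts("sugov wakeup");
}

/* Models update_load_avg()/update_cfs_rq_load_avg(): the governor is
 * only poked when the caller did not ask to skip the frequency update. */
static void update_load_avg(int flags)
{
	/* ... PELT accounting would happen here ... */
	if (!(flags & SKIP_CPUFREQ))
		cpufreq_update();
}

/* Models dequeue_task_fair(): a sleep that leaves the runqueue empty
 * means the CPU is about to idle, so the frequency update right before
 * idling is skipped as pointless work. */
static void dequeue_task(int nr_running, int flags)
{
	int update_flags = UPDATE_TG;

	if ((flags & DEQUEUE_SLEEP) && nr_running == 1)
		flags |= DEQUEUE_IDLE;

	if (flags & DEQUEUE_IDLE)
		update_flags |= SKIP_CPUFREQ;

	update_load_avg(update_flags);
}

int main(void)
{
	dequeue_task(2, DEQUEUE_SLEEP); /* others still runnable: wakeup */
	dequeue_task(1, DEQUEUE_SLEEP); /* last task sleeps: no wakeup */
	return 0;
}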
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/fair.c   18
-rw-r--r--  kernel/sched/sched.h   1
2 files changed, 16 insertions, 3 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 08e608a04f5b..ee5f8e686a31 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4332,6 +4332,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
*/
#define UPDATE_TG 0x1
#define SKIP_AGE_LOAD 0x2
+#define SKIP_CPUFREQ 0x4
/* Update task and its cfs_rq load average */
static inline void update_load_avg(struct sched_entity *se, int flags)
@@ -4352,7 +4353,7 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
cfs_rq->curr == se, NULL);
}
- decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
+ decayed = update_cfs_rq_load_avg(now, cfs_rq, !(flags & SKIP_CPUFREQ));
decayed |= propagate_entity_load_avg(se);
if (decayed && (flags & UPDATE_TG))
@@ -4528,6 +4529,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
+#define SKIP_CPUFREQ 0x0
static inline void update_load_avg(struct sched_entity *se, int not_used1){}
static inline void
@@ -4750,6 +4752,8 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
+ int update_flags;
+
/*
* Update run-time statistics of the 'current'.
*/
@@ -4763,7 +4767,12 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
- update_load_avg(se, UPDATE_TG);
+ update_flags = UPDATE_TG;
+
+ if (flags & DEQUEUE_IDLE)
+ update_flags |= SKIP_CPUFREQ;
+
+ update_load_avg(se, update_flags);
dequeue_entity_load_avg(cfs_rq, se);
update_stats_dequeue(cfs_rq, se);
@@ -6038,6 +6047,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct sched_entity *se = &p->se;
int task_sleep = flags & DEQUEUE_SLEEP;
+ if (task_sleep && rq->nr_running == 1)
+ flags |= DEQUEUE_IDLE;
+
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
@@ -6078,7 +6090,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (cfs_rq_throttled(cfs_rq))
break;
+ update_load_avg(se, UPDATE_TG | (flags & DEQUEUE_IDLE));
update_cfs_shares(se);
}
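One detail worth calling out (editor's note, not part of the patch):
DEQUEUE_IDLE (0x80) and SKIP_CPUFREQ (0x4) live in different flag
namespaces, so the dequeue-path bit has to be translated before it
reaches update_load_avg(), as both call sites above do. A tiny check
illustrating why a plain pass-through would silently do nothing:

#include <assert.h>

#define UPDATE_TG    0x1
#define SKIP_CPUFREQ 0x4
#define DEQUEUE_IDLE 0x80

int main(void)
{
	int flags = DEQUEUE_IDLE;

	/* Pass-through keeps the wrong bit: update_load_avg() only
	 * tests SKIP_CPUFREQ, never 0x80, so nothing would be skipped. */
	assert(((UPDATE_TG | (flags & DEQUEUE_IDLE)) & SKIP_CPUFREQ) == 0);

	/* Translating the bit sets the flag the callee actually reads. */
	int update_flags = UPDATE_TG;
	if (flags & DEQUEUE_IDLE)
		update_flags |= SKIP_CPUFREQ;
	assert(update_flags & SKIP_CPUFREQ);

	return 0;
}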
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 284cc86d3ad4..bafa2931c898 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2032,6 +2032,7 @@ static const u32 prio_to_wmult[40] = {
#define DEQUEUE_SLEEP 0x01
#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */
+#define DEQUEUE_IDLE 0x80 /* The last dequeue before IDLE */
#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
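A note on the value chosen (editor's reading, not stated in the patch):
the low DEQUEUE_* bits are kept numerically in step with their ENQUEUE_*
counterparts, per the "matches ENQUEUE_*" comments above, so the new flag
presumably takes the high bit 0x80 to stay clear of that paired range. A
quick sanity check of the layout:

#include <assert.h>

#define DEQUEUE_SLEEP 0x01
#define DEQUEUE_SAVE  0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE  0x04 /* matches ENQUEUE_MOVE */
#define DEQUEUE_IDLE  0x80 /* the last dequeue before IDLE */

int main(void)
{
	/* The new bit collides with no existing DEQUEUE_* flag and
	 * leaves 0x08..0x40 free for future paired flags. */
	assert((DEQUEUE_IDLE &
		(DEQUEUE_SLEEP | DEQUEUE_SAVE | DEQUEUE_MOVE)) == 0);
	return 0;
}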