author     Vikram Mulukutla <markivx@codeaurora.org>  2017-03-20 13:41:37 -0700
committer  Michael Bestas <mkbestas@lineageos.org>    2019-12-23 23:43:40 +0200
commit     0086bc47f8b54d30de0ced09e9bb45cb4fef47a8 (patch)
tree       b5a24f13f592ac20770ddabbc640c7b9b4ec1c32
parent     5c43a2afb87ea44e4d52585989a76b1e6a2e6ce6 (diff)
sched: cpufreq: HMP load reporting changes
Since HMP uses WALT, ensure that load is reported just once per window,
with the exception of intercluster migrations. Further, try to report
load whenever WALT stats are updated.

Change-Id: I6539f8c916f6f271cf26f03249de7f953d5b12c2
Signed-off-by: Vikram Mulukutla <markivx@codeaurora.org>
-rw-r--r--  include/linux/sched.h   1
-rw-r--r--  kernel/sched/core.c     6
-rw-r--r--  kernel/sched/hmp.c      5
-rw-r--r--  kernel/sched/sched.h    14
4 files changed, 26 insertions, 0 deletions
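The gating logic this patch adds can be illustrated in isolation. Below is a
minimal standalone sketch of the once-per-window idea; the names (demo_rq,
should_report, DEMO_INTERCLUSTER_MIG) are illustrative only, and the real
implementation is the cpufreq_update_util() hunk in kernel/sched/sched.h
further down:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the two rq fields the patch uses. */
struct demo_rq {
	uint64_t window_start;          /* start of the current WALT window */
	uint64_t load_reported_window;  /* window for which load was last reported */
};

#define DEMO_INTERCLUSTER_MIG (1U << 3) /* mirrors SCHED_CPUFREQ_INTERCLUSTER_MIG */

/* Report at most once per window, unless an inter-cluster migration
 * forces a fresh report. */
static bool should_report(struct demo_rq *rq, unsigned int flags)
{
	if (rq->load_reported_window == rq->window_start &&
	    !(flags & DEMO_INTERCLUSTER_MIG))
		return false;
	rq->load_reported_window = rq->window_start;
	return true;
}

int main(void)
{
	struct demo_rq rq = { .window_start = 100, .load_reported_window = 0 };

	printf("%d\n", should_report(&rq, 0));                     /* 1: first report this window */
	printf("%d\n", should_report(&rq, 0));                     /* 0: already reported */
	printf("%d\n", should_report(&rq, DEMO_INTERCLUSTER_MIG)); /* 1: migration overrides gate */
	rq.window_start = 120;                                     /* window rolls over */
	printf("%d\n", should_report(&rq, 0));                     /* 1: new window */
	return 0;
}

Note that the real code also checks sched_disable_window_stats: when window
statistics are turned off, the gate is disabled entirely and every call
reports.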
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a05e1e82ecf3..31cb433d7691 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -3719,6 +3719,7 @@ static inline unsigned long rlimit_max(unsigned int limit)
#define SCHED_CPUFREQ_RT (1U << 0)
#define SCHED_CPUFREQ_DL (1U << 1)
#define SCHED_CPUFREQ_IOWAIT (1U << 2)
+#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)
#ifdef CONFIG_CPU_FREQ
struct update_util_data {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f6f8bb2f0d95..72e1ffe809f0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2132,6 +2132,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+ cpufreq_update_util(rq, 0);
raw_spin_unlock(&rq->lock);
rcu_read_lock();
@@ -2225,6 +2226,7 @@ static void try_to_wake_up_local(struct task_struct *p)
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+ cpufreq_update_util(rq, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
note_task_waking(p, wallclock);
}
@@ -3196,6 +3198,8 @@ void scheduler_tick(void)
calc_global_load_tick(rq);
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+
+ cpufreq_update_util(rq, 0);
early_notif = early_detection_notify(rq, wallclock);
raw_spin_unlock(&rq->lock);
@@ -3564,6 +3568,7 @@ static void __sched notrace __schedule(bool preempt)
if (likely(prev != next)) {
update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+ cpufreq_update_util(rq, 0);
if (!is_idle_task(prev) && !prev->on_rq)
update_avg_burst(prev);
@@ -3582,6 +3587,7 @@ static void __sched notrace __schedule(bool preempt)
cpu = cpu_of(rq);
} else {
update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
+ cpufreq_update_util(rq, 0);
lockdep_unpin_lock(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
}
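Taken together, the kernel/sched/core.c hooks above place a
cpufreq_update_util() call at every point where update_task_ravg() refreshes
WALT statistics: both wakeup paths, the scheduler tick, and both branches of
__schedule() (task switch and no-switch). This is the "report load whenever
WALT stats are updated" half of the commit; the once-per-window gate below
keeps these frequent call sites from flooding the governor.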
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 5337ac7fcba1..ac9c93bc45fc 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -3649,6 +3649,11 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
migrate_top_tasks(p, src_rq, dest_rq);
+ if (!same_freq_domain(new_cpu, task_cpu(p))) {
+ cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+ cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+ }
+
if (p == src_rq->ed_task) {
src_rq->ed_task = NULL;
if (!dest_rq->ed_task)
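Inter-cluster migration is the one case where a second report within the same
window is allowed: fixup_busy_time() moves a task's demand between frequency
domains, so both the source and destination runqueues are poked with
SCHED_CPUFREQ_INTERCLUSTER_MIG, which bypasses the once-per-window gate (see
the sched.h hunk below) and lets both clusters re-evaluate frequency
immediately.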
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0c121d8bd4c5..2315fbfcd64d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -440,6 +440,7 @@ struct cpu_cycle {
#define for_each_sched_cluster(cluster) \
list_for_each_entry_rcu(cluster, &cluster_head, list)
+extern unsigned int sched_disable_window_stats;
#endif /* CONFIG_SCHED_HMP */
/* CFS-related fields in a runqueue */
@@ -792,6 +793,7 @@ struct rq {
int cstate, wakeup_latency, wakeup_energy;
u64 window_start;
+ u64 load_reported_window;
unsigned long hmp_flags;
u64 cur_irqload;
@@ -2852,6 +2854,18 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
struct update_util_data *data;
+#ifdef CONFIG_SCHED_HMP
+ /*
+ * Skip if we've already reported, but not if this is an inter-cluster
+ * migration
+ */
+ if (!sched_disable_window_stats &&
+ (rq->load_reported_window == rq->window_start) &&
+ !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+ return;
+ rq->load_reported_window = rq->window_start;
+#endif
+
data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
if (data)
data->func(data, rq_clock(rq), flags);
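On the receiving end, data->func points at whatever hook a cpufreq governor
registered for this CPU. A sketch of that side follows, assuming the upstream
cpufreq_add_update_util_hook()/cpufreq_remove_update_util_hook() API is
present in this tree; the handler body is a hypothetical stub, not schedutil
itself:

#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct update_util_data, demo_update_util);

/* Runs in scheduler context with the rq lock held; must not sleep. */
static void demo_util_handler(struct update_util_data *data, u64 time,
			      unsigned int flags)
{
	if (flags & SCHED_CPUFREQ_INTERCLUSTER_MIG) {
		/* A task just crossed frequency domains: both the source
		 * and destination CPUs receive this call, so the governor
		 * can re-evaluate each domain's frequency right away. */
	}
	/* Otherwise this fires at most once per WALT window per CPU. */
}

static void demo_register(int cpu)
{
	cpufreq_add_update_util_hook(cpu, &per_cpu(demo_update_util, cpu),
				     demo_util_handler);
}

static void demo_unregister(int cpu)
{
	cpufreq_remove_update_util_hook(cpu);
}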