Diffstat (limited to 'kernel/sched')
-rw-r--r-- kernel/sched/core.c  |  6 ++++++
-rw-r--r-- kernel/sched/hmp.c   |  5 +++++
-rw-r--r-- kernel/sched/sched.h | 14 ++++++++++++++
3 files changed, 25 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f6f8bb2f0d95..72e1ffe809f0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2132,6 +2132,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags,
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+ cpufreq_update_util(rq, 0);
raw_spin_unlock(&rq->lock);
rcu_read_lock();
@@ -2225,6 +2226,7 @@ static void try_to_wake_up_local(struct task_struct *p)
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+ cpufreq_update_util(rq, 0);
ttwu_activate(rq, p, ENQUEUE_WAKEUP);
note_task_waking(p, wallclock);
}
@@ -3196,6 +3198,8 @@ void scheduler_tick(void)
calc_global_load_tick(rq);
wallclock = sched_ktime_clock();
update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+
+ cpufreq_update_util(rq, 0);
early_notif = early_detection_notify(rq, wallclock);
raw_spin_unlock(&rq->lock);
@@ -3564,6 +3568,7 @@ static void __sched notrace __schedule(bool preempt)
if (likely(prev != next)) {
update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+ cpufreq_update_util(rq, 0);
if (!is_idle_task(prev) && !prev->on_rq)
update_avg_burst(prev);
@@ -3582,6 +3587,7 @@ static void __sched notrace __schedule(bool preempt)
cpu = cpu_of(rq);
} else {
update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
+ cpufreq_update_util(rq, 0);
lockdep_unpin_lock(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
}
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 5337ac7fcba1..ac9c93bc45fc 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -3649,6 +3649,11 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
migrate_top_tasks(p, src_rq, dest_rq);
+ if (!same_freq_domain(new_cpu, task_cpu(p))) {
+ cpufreq_update_util(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+ cpufreq_update_util(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
+ }
+
if (p == src_rq->ed_task) {
src_rq->ed_task = NULL;
if (!dest_rq->ed_task)
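Note on the hmp.c hunk above: when a task migrates between CPUs in different frequency domains, both the source and destination runqueues get an immediate governor update, tagged with SCHED_CPUFREQ_INTERCLUSTER_MIG so the once-per-window filter added in sched.h (below) cannot suppress it. A minimal standalone sketch of that trigger, where every name is an illustrative stand-in rather than a kernel API:

/* Illustrative stand-ins; not the kernel's actual types or helpers. */
#include <stdbool.h>
#include <stdio.h>

#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)	/* value is illustrative */

struct rq_stub { int freq_domain; };

/* Stand-in for same_freq_domain(): compare the CPUs' frequency domains. */
static bool same_freq_domain_stub(const struct rq_stub *a,
				  const struct rq_stub *b)
{
	return a->freq_domain == b->freq_domain;
}

static void cpufreq_update_util_stub(struct rq_stub *rq, unsigned int flags)
{
	/* The real hook would call into the cpufreq governor here. */
	printf("update domain %d, flags %#x\n", rq->freq_domain, flags);
}

/* Mirrors the fixup_busy_time() hunk: an inter-cluster move updates both. */
static void fixup_busy_time_sketch(struct rq_stub *src_rq,
				   struct rq_stub *dest_rq)
{
	if (!same_freq_domain_stub(src_rq, dest_rq)) {
		cpufreq_update_util_stub(dest_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
		cpufreq_update_util_stub(src_rq, SCHED_CPUFREQ_INTERCLUSTER_MIG);
	}
}

int main(void)
{
	struct rq_stub little = { .freq_domain = 0 }, big = { .freq_domain = 1 };

	fixup_busy_time_sketch(&little, &big);	/* crosses domains: both updated */
	fixup_busy_time_sketch(&big, &big);	/* same domain: no update */
	return 0;
}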
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0c121d8bd4c5..2315fbfcd64d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -440,6 +440,7 @@ struct cpu_cycle {
#define for_each_sched_cluster(cluster) \
list_for_each_entry_rcu(cluster, &cluster_head, list)
+extern unsigned int sched_disable_window_stats;
#endif /* CONFIG_SCHED_HMP */
/* CFS-related fields in a runqueue */
@@ -792,6 +793,7 @@ struct rq {
int cstate, wakeup_latency, wakeup_energy;
u64 window_start;
+ u64 load_reported_window;
unsigned long hmp_flags;
u64 cur_irqload;
@@ -2852,6 +2854,18 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
struct update_util_data *data;
+#ifdef CONFIG_SCHED_HMP
+ /*
+ * Skip if we've already reported, but not if this is an inter-cluster
+ * migration
+ */
+ if (!sched_disable_window_stats &&
+ (rq->load_reported_window == rq->window_start) &&
+ !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
+ return;
+ rq->load_reported_window = rq->window_start;
+#endif
+
data = rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data));
if (data)
data->func(data, rq_clock(rq), flags);
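
Taken together, the sched.h hunk rate-limits governor callbacks to at most one per HMP accounting window: a callback is dropped when the current rq->window_start has already been reported, except when window statistics are disabled (sched_disable_window_stats) or the SCHED_CPUFREQ_INTERCLUSTER_MIG flag forces it through. A minimal standalone sketch of that predicate, where all _stub names and the flag value are illustrative, not the kernel's:

/*
 * Standalone sketch of the once-per-window gating added above. The return
 * value marks whether the governor callback would fire.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCHED_CPUFREQ_INTERCLUSTER_MIG (1U << 3)	/* value is illustrative */

static unsigned int sched_disable_window_stats_stub;	/* 0: stats enabled */

struct rq_stub {
	uint64_t window_start;		/* current window, advanced by the tick */
	uint64_t load_reported_window;	/* last window reported to the governor */
};

static bool cpufreq_update_util_stub(struct rq_stub *rq, unsigned int flags)
{
	/*
	 * Skip if this window was already reported, unless window stats are
	 * disabled or an inter-cluster migration forces the update through.
	 */
	if (!sched_disable_window_stats_stub &&
	    rq->load_reported_window == rq->window_start &&
	    !(flags & SCHED_CPUFREQ_INTERCLUSTER_MIG))
		return false;
	rq->load_reported_window = rq->window_start;
	return true;	/* the real hook would call data->func() here */
}

int main(void)
{
	struct rq_stub rq = { .window_start = 100, .load_reported_window = 0 };

	printf("%d\n", cpufreq_update_util_stub(&rq, 0));	/* 1: first in window */
	printf("%d\n", cpufreq_update_util_stub(&rq, 0));	/* 0: suppressed */
	printf("%d\n", cpufreq_update_util_stub(&rq,
			SCHED_CPUFREQ_INTERCLUSTER_MIG));	/* 1: forced */
	rq.window_start = 120;					/* window rolls over */
	printf("%d\n", cpufreq_update_util_stub(&rq, 0));	/* 1: new window */
	return 0;
}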