diff options
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  |  1 -
-rw-r--r--  kernel/sched/hmp.c   | 24 +++++++++++++++++-------
-rw-r--r--  kernel/sched/sched.h |  2 +-
3 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 94bd3cffa5ac..a1626bdf8729 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7748,7 +7748,6 @@ void __init sched_init(void) rq->old_busy_time = 0; rq->old_estimated_time = 0; rq->old_busy_time_group = 0; - rq->notifier_sent = 0; rq->hmp_stats.pred_demands_sum = 0; #endif rq->max_idle_balance_cost = sysctl_sched_migration_cost; diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c index 898da9b83a72..8da0147b4f89 100644 --- a/kernel/sched/hmp.c +++ b/kernel/sched/hmp.c @@ -356,6 +356,7 @@ struct sched_cluster init_cluster = { .dstate_wakeup_energy = 0, .dstate_wakeup_latency = 0, .exec_scale_factor = 1024, + .notifier_sent = 0, }; static void update_all_clusters_stats(void) @@ -519,6 +520,7 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus) if (cluster->efficiency < min_possible_efficiency) min_possible_efficiency = cluster->efficiency; + cluster->notifier_sent = 0; return cluster; } @@ -1769,8 +1771,8 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups) } raw_spin_lock_irqsave(&rq->lock, flags); - if (!rq->notifier_sent) { - rq->notifier_sent = 1; + if (!rq->cluster->notifier_sent) { + rq->cluster->notifier_sent = 1; rc = 1; trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq, new_load); @@ -2938,7 +2940,7 @@ void sched_get_cpus_busy(struct sched_load *busy, u64 nload[cpus], ngload[cpus]; u64 pload[cpus]; unsigned int cur_freq[cpus], max_freq[cpus]; - int notifier_sent[cpus]; + int notifier_sent = 0; int early_detection[cpus]; int cpu, i = 0; unsigned int window_size; @@ -2979,9 +2981,17 @@ void sched_get_cpus_busy(struct sched_load *busy, max_busy_cpu = cpu; } - notifier_sent[i] = rq->notifier_sent; + /* + * sched_get_cpus_busy() is called for all CPUs in a + * frequency domain. So the notifier_sent flag per + * cluster works even when a frequency domain spans + * more than 1 cluster. 
+ */ + if (rq->cluster->notifier_sent) { + notifier_sent = 1; + rq->cluster->notifier_sent = 0; + } early_detection[i] = (rq->ed_task != NULL); - rq->notifier_sent = 0; cur_freq[i] = cpu_cur_freq(cpu); max_freq[i] = cpu_max_freq(cpu); i++; @@ -3005,7 +3015,7 @@ void sched_get_cpus_busy(struct sched_load *busy, goto skip_early; rq = cpu_rq(cpu); - if (!notifier_sent[i]) { + if (!notifier_sent) { if (cpu == max_busy_cpu) group_load_in_freq_domain( &rq->freq_domain_cpumask, @@ -3046,7 +3056,7 @@ skip_early: goto exit_early; } - if (!notifier_sent[i]) { + if (!notifier_sent) { load[i] = scale_load_to_freq(load[i], max_freq[i], cur_freq[i]); nload[i] = scale_load_to_freq(nload[i], max_freq[i], diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index cdfccdeb4eea..e31334d5f581 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -391,6 +391,7 @@ struct sched_cluster { bool freq_init_done; int dstate, dstate_wakeup_latency, dstate_wakeup_energy; unsigned int static_cluster_pwr_cost; + int notifier_sent; }; extern unsigned long all_cluster_ids[]; @@ -749,7 +750,6 @@ struct rq { struct task_struct *ed_task; struct cpu_cycle cc; u64 old_busy_time, old_busy_time_group; - int notifier_sent; u64 old_estimated_time; u64 curr_runnable_sum; u64 prev_runnable_sum; |
