Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c  94
1 file changed, 65 insertions(+), 29 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0c6508bebca5..6f47fa3dbf41 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2159,51 +2159,87 @@ scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
 	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
 }
 
-unsigned long sched_get_busy(int cpu)
+void sched_get_cpus_busy(unsigned long *busy, const struct cpumask *query_cpus)
 {
 	unsigned long flags;
-	struct rq *rq = cpu_rq(cpu);
-	u64 load;
+	struct rq *rq;
+	const int cpus = cpumask_weight(query_cpus);
+	u64 load[cpus];
+	unsigned int cur_freq[cpus], max_freq[cpus];
+	int notifier_sent[cpus];
+	int cpu, i = 0;
+	unsigned int window_size;
+
+	if (unlikely(cpus == 0))
+		return;
 
 	/*
 	 * This function could be called in timer context, and the
 	 * current task may have been executing for a long time. Ensure
 	 * that the window stats are current by doing an update.
 	 */
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
-	load = rq->old_busy_time = rq->prev_runnable_sum;
+	local_irq_save(flags);
+	for_each_cpu(cpu, query_cpus)
+		raw_spin_lock(&cpu_rq(cpu)->lock);
 
-	/*
-	 * Scale load in reference to rq->max_possible_freq.
-	 *
-	 * Note that scale_load_to_cpu() scales load in reference to
-	 * rq->max_freq
-	 */
-	load = scale_load_to_cpu(load, cpu);
+	window_size = sched_ravg_window;
 
-	if (!rq->notifier_sent) {
-		u64 load_at_cur_freq;
-
-		load_at_cur_freq = scale_load_to_freq(load, rq->max_freq,
-						      rq->cur_freq);
-		if (load_at_cur_freq > sched_ravg_window)
-			load_at_cur_freq = sched_ravg_window;
-		load = scale_load_to_freq(load_at_cur_freq,
-					  rq->cur_freq, rq->max_possible_freq);
-	} else {
-		load = scale_load_to_freq(load, rq->max_freq,
-					  rq->max_possible_freq);
+	for_each_cpu(cpu, query_cpus) {
+		rq = cpu_rq(cpu);
+
+		update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
+		load[i] = rq->old_busy_time = rq->prev_runnable_sum;
+		/*
+		 * Scale load in reference to rq->max_possible_freq.
+		 *
+		 * Note that scale_load_to_cpu() scales load in reference to
+		 * rq->max_freq.
+		 */
+		load[i] = scale_load_to_cpu(load[i], cpu);
+
+		notifier_sent[i] = rq->notifier_sent; rq->notifier_sent = 0;
+		cur_freq[i] = rq->cur_freq;
+		max_freq[i] = rq->max_freq;
+		i++;
 	}
 
-	load = div64_u64(load, NSEC_PER_USEC);
+	for_each_cpu(cpu, query_cpus)
+		raw_spin_unlock(&(cpu_rq(cpu))->lock);
+	local_irq_restore(flags);
 
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
+	i = 0;
+	for_each_cpu(cpu, query_cpus) {
+		rq = cpu_rq(cpu);
+
+		if (!notifier_sent[i]) {
+			load[i] = scale_load_to_freq(load[i], max_freq[i],
+						     cur_freq[i]);
+			if (load[i] > window_size)
+				load[i] = window_size;
+			load[i] = scale_load_to_freq(load[i], cur_freq[i],
+						     rq->max_possible_freq);
+		} else {
+			load[i] = scale_load_to_freq(load[i], max_freq[i],
+						     rq->max_possible_freq);
+		}
+
+		busy[i] = div64_u64(load[i], NSEC_PER_USEC);
+
+		trace_sched_get_busy(cpu, busy[i]);
+		i++;
+	}
+}
+
+unsigned long sched_get_busy(int cpu)
+{
+	struct cpumask query_cpu = CPU_MASK_NONE;
+	unsigned long busy;
 
-	trace_sched_get_busy(cpu, load);
+	cpumask_set_cpu(cpu, &query_cpu);
+	sched_get_cpus_busy(&busy, &query_cpu);
 
-	return load;
+	return busy;
 }
 
 void sched_set_io_is_busy(int val)
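
For context on how the new API is meant to be consumed: a caller that previously looped over sched_get_busy() once per CPU can now sample a whole set of CPUs in one pass, with every run queue locked at the same time, so the readings describe the same instant. Below is a minimal caller sketch, assuming a kernel with this patch applied; sample_cluster_busy() and its pr_debug() reporting are hypothetical illustrations, while sched_get_cpus_busy() comes from the patch and the cpumask helpers from the core kernel:

#include <linux/cpumask.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void sample_cluster_busy(const struct cpumask *cluster)
{
	/*
	 * sched_get_cpus_busy() fills one slot per set bit, in ascending
	 * CPU order. NR_CPUS is oversized for a sketch; a real caller
	 * would size this to cpumask_weight(cluster).
	 */
	unsigned long busy[NR_CPUS];
	int cpu, i = 0;

	if (cpumask_empty(cluster))
		return;

	/* All rq locks are held together, so the samples are coherent. */
	sched_get_cpus_busy(busy, cluster);

	for_each_cpu(cpu, cluster)
		pr_debug("cpu%d: %lu us busy in last window\n",
			 cpu, busy[i++]);
}

The locking scheme is the interesting part of the patch: every queried rq->lock is taken inside a single local_irq_save() section, always in ascending CPU order via for_each_cpu(), which gives a consistent snapshot without risking ABBA deadlock between concurrent callers. The frequency scaling is deferred to a second loop that runs after the locks are dropped, since it only needs the captured load[], cur_freq[] and max_freq[] values plus rq->max_possible_freq, which the patch treats as stable enough to read unlocked.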
