| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2015-05-13 16:04:50 +0530 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:02:07 -0700 |
| commit | eca78aaf849e531a94483c614ca5e93d8e575532 (patch) | |
| tree | d9e1906f6218c5832a1e653f3ebbda05dca666a0 /kernel | |
| parent | 371435451a69f4a178fbe1729c18b1820823e5c7 (diff) | |
sched: report loads greater than 100% only during load alert notifications
The busy time of CPUs is adjusted during task migrations. This can
result in a load greater than 100% being reported to the governor,
which causes direct jumps to higher frequencies during intra-cluster
migrations. Hence clip the load to 100% when reporting it at the end
of the window. The load is not clipped for load alert notifications,
which allows the frequency to ramp up faster for inter-cluster
migrations and heavy task wakeup scenarios, as the sketch below
illustrates.
Change-Id: I7347260aa476287ecfc706d4dd0877f4b75a1089
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
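
To make the clipping described in the commit message concrete, here is a minimal userspace sketch of the scaling math, assuming invented frequencies and a 10 ms window. The scale_load_to_freq() helper mirrors the one this patch adds, but nothing below is kernel code:

```c
/*
 * Standalone illustration of the load clipping in sched_get_busy().
 * Not kernel code; the frequencies and window length are made-up
 * example values chosen only to show the arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t scale_load_to_freq(uint64_t load, unsigned int src_freq,
				   unsigned int dst_freq)
{
	return load * src_freq / dst_freq;
}

int main(void)
{
	const uint64_t sched_ravg_window = 10000000;    /* 10 ms window, in ns */
	const unsigned int max_freq = 1500000;          /* cluster max, kHz */
	const unsigned int cur_freq = 1000000;          /* current freq, kHz */
	const unsigned int max_possible_freq = 2000000; /* system max, kHz */

	/* Busy time inflated past one window by a migration adjustment. */
	uint64_t load = 12000000; /* ns, normalized to max_freq */

	/* Express the busy time at the current frequency... */
	uint64_t at_cur = scale_load_to_freq(load, max_freq, cur_freq);
	/* ...clip it to 100% of the window... */
	if (at_cur > sched_ravg_window)
		at_cur = sched_ravg_window;
	/* ...then rescale into the rq->max_possible_freq reference. */
	load = scale_load_to_freq(at_cur, cur_freq, max_possible_freq);

	/*
	 * Prints 5000000 ns: a CPU fully busy at half the fastest
	 * possible frequency reports 50% load, never more.
	 */
	printf("reported load: %llu ns\n", (unsigned long long)load);
	return 0;
}
```

On the alert path the clamp is skipped, so the same 12 ms of busy time would be reported as 12 * 1500/2000 = 9 ms against the 10 ms window (90%), which is what lets the governor ramp straight to a high frequency.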
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 38 |
| -rw-r--r-- | kernel/sched/sched.h | 1 |
2 files changed, 36 insertions, 3 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aa32aed9ff91..0c6508bebca5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1342,6 +1342,8 @@ static inline unsigned int load_to_freq(struct rq *rq, u64 load)
 static int send_notification(struct rq *rq)
 {
 	unsigned int cur_freq, freq_required;
+	unsigned long flags;
+	int rc = 0;
 
 	if (!sched_enable_hmp)
 		return 0;
@@ -1352,7 +1354,14 @@ static int send_notification(struct rq *rq)
 	if (nearly_same_freq(cur_freq, freq_required))
 		return 0;
 
-	return 1;
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (!rq->notifier_sent) {
+		rq->notifier_sent = 1;
+		rc = 1;
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return rc;
 }
 
 /* Alert governor if there is a need to change frequency */
@@ -2144,6 +2153,12 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
 
 #ifdef CONFIG_SCHED_FREQ_INPUT
 
+static inline u64
+scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
+{
+	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
+}
+
 unsigned long sched_get_busy(int cpu)
 {
 	unsigned long flags;
@@ -2158,7 +2173,6 @@ unsigned long sched_get_busy(int cpu)
 	raw_spin_lock_irqsave(&rq->lock, flags);
 	update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_clock(), 0);
 	load = rq->old_busy_time = rq->prev_runnable_sum;
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	/*
 	 * Scale load in reference to rq->max_possible_freq.
@@ -2167,9 +2181,26 @@ unsigned long sched_get_busy(int cpu)
 	 * rq->max_freq
 	 */
 	load = scale_load_to_cpu(load, cpu);
-	load = div64_u64(load * (u64)rq->max_freq, (u64)rq->max_possible_freq);
+
+	if (!rq->notifier_sent) {
+		u64 load_at_cur_freq;
+
+		load_at_cur_freq = scale_load_to_freq(load, rq->max_freq,
+						      rq->cur_freq);
+		if (load_at_cur_freq > sched_ravg_window)
+			load_at_cur_freq = sched_ravg_window;
+		load = scale_load_to_freq(load_at_cur_freq,
+					  rq->cur_freq, rq->max_possible_freq);
+	} else {
+		load = scale_load_to_freq(load, rq->max_freq,
+					  rq->max_possible_freq);
+		rq->notifier_sent = 0;
+	}
+
 	load = div64_u64(load, NSEC_PER_USEC);
+
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 
 	trace_sched_get_busy(cpu, load);
 
 	return load;
@@ -9255,6 +9286,7 @@ void __init sched_init(void)
 #ifdef CONFIG_SCHED_FREQ_INPUT
 		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
 		rq->old_busy_time = 0;
+		rq->notifier_sent = 0;
 #endif
 #endif
 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c41b8ab9c7b0..568c3427ca0e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -688,6 +688,7 @@ struct rq {
 
 #ifdef CONFIG_SCHED_FREQ_INPUT
 	unsigned int old_busy_time;
+	int notifier_sent;
 #endif
 #endif
 
```
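
The notifier_sent flag added above acts as a one-shot token: send_notification() arms it at most once per alert, and the next sched_get_busy() call consumes it to deliver a single unclipped sample. A hedged pthread-based sketch of that handshake, with names borrowed from the patch and a mutex standing in for rq->lock (illustrative only, not the kernel implementation):

```c
/*
 * Userspace illustration of the notifier_sent one-shot handshake.
 * A mutex stands in for rq->lock; the real code uses a raw spinlock
 * with interrupts disabled.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int notifier_sent;

/* Returns 1 only for the first alert since the flag was last consumed. */
static int send_notification(void)
{
	int rc = 0;

	pthread_mutex_lock(&lock);
	if (!notifier_sent) {
		notifier_sent = 1;
		rc = 1;
	}
	pthread_mutex_unlock(&lock);
	return rc;
}

/* The reporting side: one unclipped sample per armed alert, then reset. */
static const char *get_busy_mode(void)
{
	const char *mode;

	pthread_mutex_lock(&lock);
	if (notifier_sent) {
		mode = "unclipped (alert path)";
		notifier_sent = 0;
	} else {
		mode = "clipped to 100% (periodic path)";
	}
	pthread_mutex_unlock(&lock);
	return mode;
}

int main(void)
{
	printf("notify #1 -> %d\n", send_notification()); /* 1: alert raised */
	printf("notify #2 -> %d\n", send_notification()); /* 0: already pending */
	printf("busy: %s\n", get_busy_mode());            /* consumes the flag */
	printf("busy: %s\n", get_busy_mode());            /* back to clipped */
	return 0;
}
```

Arming the flag under the lock prevents a flood of duplicate notifications while an alert is already pending, and resetting it in the reporting path re-enables clipping for the very next sample.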
