| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2014-03-29 17:19:50 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 19:58:59 -0700 |
| commit | 025dedac36fd96e36e04578ff3c055f4b1bc59a6 (patch) | |
| tree | f199783197b292bc089431dc3d8fdaa287c72b05 /kernel | |
| parent | 77fe8dd14da1d5f1cc32382a761206e8dd4ce6da (diff) | |
sched: Add scaled task load statistics
Scheduler-guided frequency selection as well as task placement on
heterogeneous systems require scaled task load statistics. This patch
adds a 'runnable_avg_sum_scaled' metric per task that is a scaled
derivative of 'runnable_avg_sum'. Load is scaled in reference to
"best" cpu, i.e one with best possible max_freq
Change-Id: Ie8ae450d0b02753e9927fb769aee734c6d33190f
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: incorporated with change 9d89c257df
("sched/fair: Rewrite runnable load and utilization average
tracking"). Used container_of() to get sched_entity.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
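
The heart of the patch is the frequency scaling done by the new add_to_scaled_stat() helper: a load delta accumulated while the CPU ran at cur_freq is rescaled against the best max_freq available anywhere in the system (max_possible_freq). The stand-alone C sketch below only mirrors that arithmetic; the frequencies, scale_delta() and main() are hypothetical, and the kernel version reads cur_freq/max_freq/max_possible_freq from the runqueue and a global, and divides with div64_u64() rather than plain C division.

```c
#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical stand-alone illustration of the scaling rule used by
 * add_to_scaled_stat() in the patch: a load delta accumulated at the CPU's
 * current frequency is rescaled against the best possible max frequency in
 * the system, so tasks on slower CPUs do not look artificially heavy or
 * light relative to the "best" CPU.
 */
static uint64_t scale_delta(uint64_t delta, unsigned int cur_freq,
                            unsigned int max_freq,
                            unsigned int cpu_max_possible_freq,
                            unsigned int max_possible_freq)
{
        /*
         * Mirror the patch's guard: if cur_freq looks bogus (above the
         * system-wide maximum), or the CPU is pegged at its own max_freq
         * while a faster CPU exists elsewhere, account the delta as if it
         * ran at max_possible_freq.
         */
        if (cur_freq > max_possible_freq ||
            (cur_freq == max_freq && max_freq < cpu_max_possible_freq))
                cur_freq = max_possible_freq;

        return delta * cur_freq / max_possible_freq;
}

int main(void)
{
        /* Hypothetical numbers: CPU at 1.2 GHz, best CPU tops out at 2.4 GHz */
        uint64_t delta = 1024;          /* raw runnable-time contribution */
        uint64_t scaled = scale_delta(delta, 1200000, 1800000,
                                      1800000, 2400000);

        /* 1024 * 1200000 / 2400000 = 512 */
        printf("raw delta %llu -> scaled delta %llu\n",
               (unsigned long long)delta, (unsigned long long)scaled);
        return 0;
}
```

With these numbers a task that ran for 1024 units on a CPU clocked at half the system's best possible frequency only adds 512 to runnable_avg_sum_scaled, which is what makes the per-task load comparable across CPUs of different speeds.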
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 56 |
1 file changed, 56 insertions, 0 deletions
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5c37d1952e67..e8a510a2187b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2530,6 +2530,9 @@ static u32 __compute_runnable_contrib(u64 n)
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+static void add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta);
+static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods);
+
 #if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
 #error "load tracking assumes 2^10 as unit"
 #endif
@@ -2572,6 +2575,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 	u32 contrib;
 	unsigned int delta_w, scaled_delta_w, decayed = 0;
 	unsigned long scale_freq, scale_cpu;
+	struct sched_entity *se = NULL;
 
 	delta = now - sa->last_update_time;
 	/*
@@ -2592,6 +2596,12 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 		return 0;
 	sa->last_update_time = now;
 
+	if (!cfs_rq && weight) {
+		se = container_of(sa, struct sched_entity, avg);
+		if (entity_is_task(se) && se->on_rq)
+			dec_cumulative_runnable_avg(rq_of(cfs_rq), task_of(se));
+	}
+
 	scale_freq = arch_scale_freq_capacity(NULL, cpu);
 	scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
 
@@ -2612,6 +2622,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 		scaled_delta_w = cap_scale(delta_w, scale_freq);
 		if (weight) {
 			sa->load_sum += weight * scaled_delta_w;
+			add_to_scaled_stat(cpu, sa, delta_w);
 			if (cfs_rq) {
 				cfs_rq->runnable_load_sum +=
 						weight * scaled_delta_w;
@@ -2638,6 +2649,7 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 		contrib = cap_scale(contrib, scale_freq);
 		if (weight) {
 			sa->load_sum += weight * contrib;
+			add_to_scaled_stat(cpu, sa, contrib);
 			if (cfs_rq)
 				cfs_rq->runnable_load_sum += weight * contrib;
 		}
@@ -2649,9 +2661,14 @@ __update_load_avg(u64 now, int cpu, struct sched_avg *sa,
 	scaled_delta = cap_scale(delta, scale_freq);
 	if (weight) {
 		sa->load_sum += weight * scaled_delta;
+		add_to_scaled_stat(cpu, sa, delta);
 		if (cfs_rq)
 			cfs_rq->runnable_load_sum += weight * scaled_delta;
 	}
+
+	if (se && entity_is_task(se) && se->on_rq)
+		inc_cumulative_runnable_avg(rq_of(cfs_rq), task_of(se));
+
 	if (running)
 		sa->util_sum += scaled_delta * scale_cpu;
 
@@ -2929,6 +2946,45 @@ void init_new_task_load(struct task_struct *p)
 		p->ravg.sum_history[i] = 0;
 }
 
+/*
+ * Add scaled version of 'delta' to runnable_avg_sum_scaled
+ * 'delta' is scaled in reference to "best" cpu
+ */
+static inline void
+add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
+{
+	struct rq *rq = cpu_rq(cpu);
+	int cur_freq = rq->cur_freq, max_freq = rq->max_freq;
+	int cpu_max_possible_freq = rq->max_possible_freq;
+	u64 scaled_delta;
+
+	if (unlikely(cur_freq > max_possible_freq ||
+		     (cur_freq == max_freq &&
+		      max_freq < cpu_max_possible_freq)))
+		cur_freq = max_possible_freq;
+
+	scaled_delta = div64_u64(delta * cur_freq, max_possible_freq);
+	sa->runnable_avg_sum_scaled += scaled_delta;
+}
+
+static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
+{
+	sa->runnable_avg_sum_scaled =
+		decay_load(sa->runnable_avg_sum_scaled,
+			   periods);
+}
+
+#else /* CONFIG_SCHED_FREQ_INPUT */
+
+static inline void
+add_to_scaled_stat(int cpu, struct sched_avg *sa, u64 delta)
+{
+}
+
+static inline void decay_scaled_stat(struct sched_avg *sa, u64 periods)
+{
+}
+
 #endif /* CONFIG_SCHED_FREQ_INPUT */
 
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
```
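
On the decay side, decay_scaled_stat() in the last hunk simply feeds runnable_avg_sum_scaled through the existing decay_load() helper, so the scaled sum ages at the same geometric rate as the other PELT sums (the decay factor y is chosen so that y^32 = 1/2). The floating-point approximation below is only a sketch of that behaviour; decay_scaled_approx() and the sample sum are hypothetical, and the kernel uses the precomputed runnable_avg_yN_inv table rather than pow().

```c
#include <stdio.h>
#include <stdint.h>
#include <math.h>

/*
 * Rough user-space approximation of decay_scaled_stat(): decay a scaled
 * PELT sum by 'periods' ~1ms windows using the geometric factor y, where
 * y^32 == 1/2. The real decay_load() does this with fixed-point math and
 * a lookup table instead of pow().
 */
static uint64_t decay_scaled_approx(uint64_t sum, unsigned int periods)
{
        const double y = pow(0.5, 1.0 / 32.0);  /* y^32 == 1/2 */

        return (uint64_t)(sum * pow(y, periods));
}

int main(void)
{
        uint64_t sum = 47742;   /* hypothetical near-maximal PELT sum */

        /* After 32 idle periods the contribution should roughly halve. */
        printf("after 32 periods: %llu\n",
               (unsigned long long)decay_scaled_approx(sum, 32));
        printf("after 64 periods: %llu\n",
               (unsigned long long)decay_scaled_approx(sum, 64));
        return 0;
}
```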
