| author | Steve Muckle <smuckle@codeaurora.org> | 2014-04-29 14:01:50 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 19:59:17 -0700 |
| commit | bb0b8e985911508db6c341b8b0bb2feff4ac603a | |
| tree | af43789179bed58b0d08f523b4ee7fdd65c58a2c /kernel | |
| parent | 9427c55650b4362c94bd14340c0782314699c2b8 | |
sched: window-stats: Add aggregated runqueue windowed stats
Add per-cpu counters to track each CPU's busy time in the most
recent window and in the window before that. These counters are
needed to maintain accurate per-cpu busy time in the face of
migrations: once a task migrates, its execution time in the current
window is migrated to the new CPU as well. The idle task's runtime
is not accounted, since it should not count towards runqueue busy
time.
Change-Id: I4014dd686f95dbbfaa4274269bc36ed716573421
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
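
The commit message compresses a fair amount of mechanism, so here is a minimal standalone sketch (not part of the patch; all names and the 10 ms window size are illustrative) of the two-counter bookkeeping it describes: each CPU tracks busy time for the current window and for the one before it, and when the window boundary advances, the current sum either becomes the previous window's sum (exactly one window elapsed) or both are discarded (no activity for several windows). This mirrors the `move_window_start()` hunk in the diff below.

```c
#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the per-runqueue fields added by this patch. */
struct fake_rq {
	uint64_t window_start;          /* start of the current window */
	unsigned int curr_runnable_sum; /* busy time in the current window */
	unsigned int prev_runnable_sum; /* busy time in the previous window */
};

#define WINDOW_SIZE 10000000ULL /* 10 ms in ns, chosen for illustration */

/*
 * Mirror of the rollover logic in move_window_start(): if exactly one
 * window elapsed, the current sum describes what is now the previous
 * window; if more than one elapsed, the previous window saw no samples,
 * so both counters restart from zero.
 */
static void roll_windows(struct fake_rq *rq, uint64_t wallclock)
{
	uint64_t nr_windows;

	if (wallclock < rq->window_start + WINDOW_SIZE)
		return; /* still inside the current window */

	nr_windows = (wallclock - rq->window_start) / WINDOW_SIZE;
	rq->window_start += nr_windows * WINDOW_SIZE;

	if (nr_windows == 1)
		rq->prev_runnable_sum = rq->curr_runnable_sum;
	else
		rq->prev_runnable_sum = 0;
	rq->curr_runnable_sum = 0;
}

int main(void)
{
	struct fake_rq rq = { .window_start = 0 };

	rq.curr_runnable_sum = 4000000; /* 4 ms of busy time accrues */
	roll_windows(&rq, 12000000);    /* one window boundary crossed */
	printf("prev=%u curr=%u\n", rq.prev_runnable_sum, rq.curr_runnable_sum);

	roll_windows(&rq, 45000000);    /* several idle windows elapse */
	printf("prev=%u curr=%u\n", rq.prev_runnable_sum, rq.curr_runnable_sum);
	return 0;
}
```

Running it prints `prev=4000000 curr=0` and then `prev=0 curr=0`, exercising both rollover cases.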
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 35 |
| -rw-r--r-- | kernel/sched/sched.h | 3 |

2 files changed, 38 insertions(+), 0 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ad17bd1e57b0..238dfe87ad80 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1182,6 +1182,15 @@ static inline void move_window_start(struct rq *rq, u64 wallclock)
 	nr_windows = div64_u64(delta, sched_ravg_window);
 	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
+
+	if (nr_windows) {
+		if (nr_windows == 1)
+			rq->prev_runnable_sum = rq->curr_runnable_sum;
+		else
+			rq->prev_runnable_sum = 0;
+
+		rq->curr_runnable_sum = 0;
+	}
 }
 
 void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
{
@@ -1191,6 +1200,8 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
 	u64 wallclock = sched_clock();
 	u64 mark_start = p->ravg.mark_start;
 	u64 window_start;
+	u32 prev_contrib = 0;
+	u32 curr_contrib = 0;
 
 	if (sched_use_pelt || !rq->window_start)
 		return;
@@ -1198,6 +1209,13 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
 	move_window_start(rq, wallclock);
 	window_start = rq->window_start;
 
+	/*
+	 * Don't bother accounting for idle task, also we would not want
+	 * to attribute its time to the aggregate RQ busy time
+	 */
+	if (is_idle_task(p))
+		return;
+
 	do {
 		s64 delta = 0;
 		int n = 0;
@@ -1205,8 +1223,10 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
 		u32 sum = 0;
 
 		new_window = 0;
+
 		if (window_start > mark_start) {
 			delta = window_start - mark_start;
+
 			n = div64_u64(delta, window_size);
 			window_start -= n * window_size;
 			now = window_start;
@@ -1232,6 +1252,9 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
 			p->ravg.sum += delta;
 			if (unlikely(p->ravg.sum > window_size))
 				p->ravg.sum = window_size;
+
+			prev_contrib = curr_contrib;
+			curr_contrib = delta;
 		}
 
 		if (!new_window)
@@ -1244,11 +1267,22 @@ void update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
 			if (update_sum)
 				sum = window_size;
 			update_history(rq, p, sum, n);
+
+			/*
+			 * We will always shift curr_contrib into
+			 * prev_contrib when tallying the remainder in
+			 * the current window on the next loop
+			 * iteration.
+			 */
+			curr_contrib = sum;
 		}
 		mark_start = window_start;
 	} while (new_window);
 
 	p->ravg.mark_start = wallclock;
+
+	rq->curr_runnable_sum += curr_contrib;
+	rq->prev_runnable_sum += prev_contrib;
 }
 
 unsigned long __weak arch_get_cpu_efficiency(int cpu)
@@ -8003,6 +8037,7 @@ void __init sched_init(void)
 #endif
 #ifdef CONFIG_SCHED_HMP
 		rq->nr_small_tasks = rq->nr_big_tasks = 0;
+		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index beb0cd2e97d6..499996ec8c0b 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -655,6 +655,9 @@ struct rq {
 	int load_scale_factor;
 	int capacity;
 	u64 window_start;
+
+	unsigned int curr_runnable_sum;
+	unsigned int prev_runnable_sum;
 #endif
 
 #ifdef CONFIG_SCHED_HMP
```
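
Note that this patch only adds the counters and their per-window rollover; the migration hand-off the message anticipates ("once a task migrates, its execution time in the current window is migrated to the new CPU as well") lands in later changes. As a hypothetical continuation of the sketch above (reusing its `struct fake_rq`; none of these names are from the kernel), the fix-up on migration could look like this:

```c
/*
 * Hypothetical illustration only, NOT part of this patch: how the
 * aggregate counters could be adjusted when a task moves between
 * CPUs, so each runqueue's sums remain an accurate busy-time total.
 */
struct fake_task {
	unsigned int curr_window; /* this task's busy time in the current window */
	unsigned int prev_window; /* ...and in the previous window */
};

static void migrate_task_contrib(struct fake_task *p,
				 struct fake_rq *src, struct fake_rq *dst)
{
	/* The task's windowed busy time follows it off the source CPU... */
	src->curr_runnable_sum -= p->curr_window;
	src->prev_runnable_sum -= p->prev_window;
	/* ...and is credited to the destination CPU's windows. */
	dst->curr_runnable_sum += p->curr_window;
	dst->prev_runnable_sum += p->prev_window;
}
```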
