diff options
| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2014-07-23 13:59:58 +0530 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:00:02 -0700 |
| commit | 0b210afc21c31a1bd28b721a70a54aced29eedb0 (patch) | |
| tree | 0039454805c1783a5ea37a8980918954a284fd2e /kernel | |
| parent | 730e262d6a58608572eec74ea7eb37c0e4d8d11f (diff) | |
sched: window-stats: Reset all window stats
Currently, a few of the window statistics for tasks are not reset when
the window size is changed. Fix this to completely reset all window
statistics for tasks and cpus. Move the reset code to a function,
which can be reused by a subsequent patch that resets the same
statistics upon policy change.
Change-Id: Ic626260245b89007c4d70b9a07ebd577e217f283
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 80 |
1 files changed, 48 insertions, 32 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1b6839ecdf1d..6c8d5076878a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1514,59 +1514,46 @@ unsigned long sched_get_busy(int cpu) NSEC_PER_USEC); } -int sched_set_window(u64 window_start, unsigned int window_size) +/* Called with IRQs disabled */ +void reset_all_window_stats(u64 window_start, unsigned int window_size) { int cpu; - u64 wallclock, ws, now; - int delta; - unsigned long flags; + u64 wallclock; struct task_struct *g, *p; - if (sched_use_pelt || - (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW)) - return -EINVAL; - - local_irq_save(flags); - - now = get_jiffies_64(); - if (time_after64(window_start, now)) { - delta = window_start - now; /* how many jiffies ahead */ - delta /= window_size; /* # of windows to roll back */ - delta += 1; - window_start -= (delta * window_size); - } - - ws = (window_start - sched_init_jiffy); /* jiffy difference */ - ws *= TICK_NSEC; - ws += sched_clock_at_init_jiffy; - - BUG_ON(sched_clock() < ws); - for_each_online_cpu(cpu) { struct rq *rq = cpu_rq(cpu); raw_spin_lock(&rq->lock); } - sched_ravg_window = window_size * TICK_NSEC; - set_hmp_defaults(); + if (window_size) { + sched_ravg_window = window_size * TICK_NSEC; + set_hmp_defaults(); + } wallclock = sched_clock(); read_lock(&tasklist_lock); do_each_thread(g, p) { - p->ravg.sum = p->ravg.prev_window = 0; + int i; + + p->ravg.sum = 0; + p->ravg.demand = 0; + p->ravg.partial_demand = 0; + p->ravg.prev_window = 0; + for (i = 0; i < RAVG_HIST_SIZE; ++i) + p->ravg.sum_history[i] = 0; + p->ravg.mark_start = wallclock; } while_each_thread(g, p); read_unlock(&tasklist_lock); for_each_online_cpu(cpu) { struct rq *rq = cpu_rq(cpu); - rq->window_start = ws; + if (window_start) + rq->window_start = window_start; rq->curr_runnable_sum = rq->prev_runnable_sum = 0; - if (!is_idle_task(rq->curr)) { - rq->curr->ravg.mark_start = wallclock; - rq->curr_runnable_sum += rq->curr->ravg.partial_demand; - } + 
rq->cumulative_runnable_avg = 0; fixup_nr_big_small_task(cpu); } @@ -1574,6 +1561,35 @@ int sched_set_window(u64 window_start, unsigned int window_size) struct rq *rq = cpu_rq(cpu); raw_spin_unlock(&rq->lock); } +} + +int sched_set_window(u64 window_start, unsigned int window_size) +{ + u64 ws, now; + int delta; + unsigned long flags; + + if (sched_use_pelt || + (window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW)) + return -EINVAL; + + local_irq_save(flags); + + now = get_jiffies_64(); + if (time_after64(window_start, now)) { + delta = window_start - now; /* how many jiffies ahead */ + delta /= window_size; /* # of windows to roll back */ + delta += 1; + window_start -= (delta * window_size); + } + + ws = (window_start - sched_init_jiffy); /* jiffy difference */ + ws *= TICK_NSEC; + ws += sched_clock_at_init_jiffy; + + BUG_ON(sched_clock() < ws); + + reset_all_window_stats(ws, window_size); local_irq_restore(flags); |
