Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c | 80
1 file changed, 48 insertions(+), 32 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1b6839ecdf1d..6c8d5076878a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1514,59 +1514,46 @@ unsigned long sched_get_busy(int cpu)
 				NSEC_PER_USEC);
 }
 
-int sched_set_window(u64 window_start, unsigned int window_size)
+/* Called with IRQs disabled */
+void reset_all_window_stats(u64 window_start, unsigned int window_size)
 {
 	int cpu;
-	u64 wallclock, ws, now;
-	int delta;
-	unsigned long flags;
+	u64 wallclock;
 	struct task_struct *g, *p;
 
-	if (sched_use_pelt ||
-		(window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW))
-		return -EINVAL;
-
-	local_irq_save(flags);
-
-	now = get_jiffies_64();
-	if (time_after64(window_start, now)) {
-		delta = window_start - now; /* how many jiffies ahead */
-		delta /= window_size; /* # of windows to roll back */
-		delta += 1;
-		window_start -= (delta * window_size);
-	}
-
-	ws = (window_start - sched_init_jiffy); /* jiffy difference */
-	ws *= TICK_NSEC;
-	ws += sched_clock_at_init_jiffy;
-
-	BUG_ON(sched_clock() < ws);
-
 	for_each_online_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
 
 		raw_spin_lock(&rq->lock);
 	}
 
-	sched_ravg_window = window_size * TICK_NSEC;
-	set_hmp_defaults();
+	if (window_size) {
+		sched_ravg_window = window_size * TICK_NSEC;
+		set_hmp_defaults();
+	}
 
 	wallclock = sched_clock();
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
-		p->ravg.sum = p->ravg.prev_window = 0;
+		int i;
+
+		p->ravg.sum = 0;
+		p->ravg.demand = 0;
+		p->ravg.partial_demand = 0;
+		p->ravg.prev_window = 0;
+		for (i = 0; i < RAVG_HIST_SIZE; ++i)
+			p->ravg.sum_history[i] = 0;
+		p->ravg.mark_start = wallclock;
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 
 	for_each_online_cpu(cpu) {
 		struct rq *rq = cpu_rq(cpu);
 
-		rq->window_start = ws;
+		if (window_start)
+			rq->window_start = window_start;
 		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
-		if (!is_idle_task(rq->curr)) {
-			rq->curr->ravg.mark_start = wallclock;
-			rq->curr_runnable_sum += rq->curr->ravg.partial_demand;
-		}
+		rq->cumulative_runnable_avg = 0;
 		fixup_nr_big_small_task(cpu);
 	}
@@ -1574,6 +1561,35 @@ int sched_set_window(u64 window_start, unsigned int window_size)
 		struct rq *rq = cpu_rq(cpu);
 
 		raw_spin_unlock(&rq->lock);
 	}
+}
+
+int sched_set_window(u64 window_start, unsigned int window_size)
+{
+	u64 ws, now;
+	int delta;
+	unsigned long flags;
+
+	if (sched_use_pelt ||
+		(window_size * TICK_NSEC < MIN_SCHED_RAVG_WINDOW))
+		return -EINVAL;
+
+	local_irq_save(flags);
+
+	now = get_jiffies_64();
+	if (time_after64(window_start, now)) {
+		delta = window_start - now; /* how many jiffies ahead */
+		delta /= window_size; /* # of windows to roll back */
+		delta += 1;
+		window_start -= (delta * window_size);
+	}
+
+	ws = (window_start - sched_init_jiffy); /* jiffy difference */
+	ws *= TICK_NSEC;
+	ws += sched_clock_at_init_jiffy;
+
+	BUG_ON(sched_clock() < ws);
+
+	reset_all_window_stats(ws, window_size);
 
 	local_irq_restore(flags);
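
The refactor splits the old sched_set_window() in two: the input validation and jiffy-to-sched_clock conversion stay in sched_set_window(), while the stat reset moves into reset_all_window_stats(), which can now also be called with window_start or window_size of zero to reset stats without rebasing the window or resizing it. For reference, the sketch below walks through the window_start normalization in user space; it is only an illustration, and TICK_NSEC, sched_init_jiffy, sched_clock_at_init_jiffy, and the sample inputs are invented values, not taken from the kernel.

	/* Minimal user-space sketch of sched_set_window()'s window_start
	 * normalization, assuming HZ=100 and made-up init constants. */
	#include <stdio.h>
	#include <stdint.h>

	#define TICK_NSEC 10000000ULL	/* 10 ms tick, i.e. HZ=100 (assumption) */

	static const uint64_t sched_init_jiffy = 1000;	/* hypothetical */
	static const uint64_t sched_clock_at_init_jiffy = 500000000ULL; /* 0.5 s, hypothetical */

	static uint64_t jiffies_to_sched_clock(uint64_t window_start,
					       unsigned int window_size,
					       uint64_t now)
	{
		/* Roll a future window_start back by whole windows, as the
		 * time_after64() branch above does. */
		if (window_start > now) {
			uint64_t delta = (window_start - now) / window_size + 1;
			window_start -= delta * window_size;
		}

		/* Jiffy difference since init, scaled to ns and rebased
		 * onto the sched_clock() timeline. */
		return (window_start - sched_init_jiffy) * TICK_NSEC +
		       sched_clock_at_init_jiffy;
	}

	int main(void)
	{
		uint64_t now = 2000;	/* current jiffies (hypothetical) */
		uint64_t ws = jiffies_to_sched_clock(2025, 10, now);

		/* 2025 is 25 jiffies ahead of now; with 10-jiffy windows it
		 * is rolled back 3 windows to 1995, giving
		 * (1995 - 1000) * 10 ms + 0.5 s = 10.45 s. */
		printf("window_start = %llu ns\n", (unsigned long long)ws);
		return 0;
	}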