author    Steve Muckle <smuckle@codeaurora.org>    2014-04-29 14:16:47 -0700
committer David Keitel <dkeitel@codeaurora.org>    2016-03-23 19:59:18 -0700
commit    cebda4b7a3455755bb20b8e2cee8049fdbeb416d (patch)
tree      64b5238f5c059e1794aab7bd85295136dcba1fbd /kernel
parent    bb0b8e985911508db6c341b8b0bb2feff4ac603a (diff)
sched: window-stats: adjust RQ curr, prev sums on task migration
Adjust a cpu's busy time in its recent and previous window upon task migration. This enables the scheduler to provide better input to the cpufreq governor on a cpu's busy time in a given window.

Change-Id: Idec2ca459382e9f46d882da3af53148412d631c6
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
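For illustration only, the following is a minimal standalone C sketch of the accounting idea behind this patch: when a task migrates, its windowed busy time (ravg.sum for the current window, ravg.prev_window for the previous one) is subtracted from the source runqueue's curr_runnable_sum/prev_runnable_sum and added to the destination's. The struct layouts and the migrate_task_busy_time() helper are hypothetical simplifications for this sketch, not the kernel code.

/*
 * Sketch only: field names mirror the patch; the types and the helper
 * below are assumptions, not kernel definitions.
 */
#include <stdio.h>

struct task_ravg {
	unsigned long sum;          /* task busy time in the current window */
	unsigned long prev_window;  /* task busy time in the previous window */
};

struct task {
	struct task_ravg ravg;
};

struct rq {
	unsigned long curr_runnable_sum;  /* per-cpu busy time, current window */
	unsigned long prev_runnable_sum;  /* per-cpu busy time, previous window */
};

/* Move a migrating task's windowed contribution from src cpu to dst cpu. */
static void migrate_task_busy_time(struct task *p, struct rq *src, struct rq *dst)
{
	src->curr_runnable_sum -= p->ravg.sum;
	src->prev_runnable_sum -= p->ravg.prev_window;
	dst->curr_runnable_sum += p->ravg.sum;
	dst->prev_runnable_sum += p->ravg.prev_window;
}

int main(void)
{
	struct rq cpu0 = { 300, 500 }, cpu1 = { 100, 200 };
	struct task p = { { 50, 80 } };

	migrate_task_busy_time(&p, &cpu0, &cpu1);
	printf("cpu0: curr=%lu prev=%lu  cpu1: curr=%lu prev=%lu\n",
	       cpu0.curr_runnable_sum, cpu0.prev_runnable_sum,
	       cpu1.curr_runnable_sum, cpu1.prev_runnable_sum);
	return 0;
}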
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c | 18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 238dfe87ad80..89247c82da3c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1623,8 +1623,24 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		p->se.nr_migrations++;
 		perf_event_task_migrate(p);
-		if (p->state == TASK_RUNNING)
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
+		if (p->on_rq) {
+			struct rq *src_rq = task_rq(p);
+			struct rq *dest_rq = cpu_rq(new_cpu);
+
+			p->on_rq = 0; /* Fixme */
 			update_task_ravg(p, task_rq(p), 0);
+			p->on_rq = 1; /* Fixme */
+			update_task_ravg(dest_rq->curr, dest_rq, 1);
+
+
+			src_rq->curr_runnable_sum -= p->ravg.sum;
+			src_rq->prev_runnable_sum -= p->ravg.prev_window;
+			dest_rq->curr_runnable_sum += p->ravg.sum;
+			dest_rq->prev_runnable_sum += p->ravg.prev_window;
+		}
+#endif
+
 	}
 	__set_task_cpu(p, new_cpu);