author		Srivatsa Vaddagiri <vatsa@codeaurora.org>	2014-09-01 13:26:53 +0530
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 20:00:50 -0700
commit		3a67b4ce87bf96312d4c5728047d830a66258854 (patch)
tree		bd392503fe5c0f2da5231f1865cc86fd8b7b0672 /kernel/sched/debug.c
parent		977dc392f77522559697cbedfb2d48ad7be96aec (diff)
sched: window-stats: Enhance cpu busy time accounting
rq->curr/prev_runnable_sum counters represent cpu demand from the various tasks that have run on a cpu. Any task that runs on a cpu has a representation in rq->curr_runnable_sum: its partial_demand value is included in that counter. Since partial_demand is derived from historical load samples for a task, rq->curr_runnable_sum could represent "inflated/un-realistic" cpu usage. As an example, let's say that a task with a partial_demand of 10ms runs for only 1ms on a cpu. What is included in rq->curr_runnable_sum is 10ms (and not the actual execution time of 1ms). This leads to cpu busy time being over-reported, causing frequency to stay higher than necessary.

This patch fixes the cpu busy time accounting scheme to strictly represent actual usage. It also provides for conditional fixup of busy time upon migration and upon heavy-task wakeup.

CRs-Fixed: 691443
Change-Id: Ic4092627668053934049af4dfef65d9b6b901e6b
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed conflict in init_task_load(); se.avg.decay_count has been deprecated.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
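As an aside for readers of this log, the accounting difference is easy to see in a toy calculation. The following self-contained C snippet is purely illustrative (it is not kernel code and not part of the patch; all names in it are invented for the example): it reproduces the 10ms-demand / 1ms-execution case above, contrasting the old demand-based credit with the fixed execution-time credit to the per-cpu counter.

/* Toy illustration only -- not part of the patch. It contrasts crediting a
 * task's historical partial_demand against crediting the time the task
 * actually ran, mirroring the 10ms vs 1ms example in the commit message. */
#include <stdio.h>
#include <stdint.h>

struct toy_task {
	uint64_t partial_demand_us;	/* historical demand estimate: 10ms */
	uint64_t exec_time_us;		/* time actually run this window: 1ms */
};

int main(void)
{
	struct toy_task t = { .partial_demand_us = 10000, .exec_time_us = 1000 };
	uint64_t curr_runnable_sum_old = 0;	/* old, inflated accounting */
	uint64_t curr_runnable_sum_new = 0;	/* fixed, actual-usage accounting */

	curr_runnable_sum_old += t.partial_demand_us;	/* credits 10ms */
	curr_runnable_sum_new += t.exec_time_us;	/* credits only 1ms */

	printf("old busy accounting: %llu us\n",
	       (unsigned long long)curr_runnable_sum_old);
	printf("new busy accounting: %llu us\n",
	       (unsigned long long)curr_runnable_sum_new);
	return 0;
}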
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r--	kernel/sched/debug.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 5290ec2a14bf..f9bb03279152 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -309,7 +309,7 @@ do { \
 #ifdef CONFIG_SMP
 	P(cpu_capacity);
 #endif
-#if defined(CONFIG_SCHED_HMP) || defined(CONFIG_SCHED_FREQ_INPUT)
+#ifdef CONFIG_SCHED_HMP
 	P(load_scale_factor);
 	P(capacity);
 	P(max_possible_capacity);
@@ -632,7 +632,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
 	__P(load_avg);
-#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
+#ifdef CONFIG_SCHED_HMP
 	P(ravg.demand);
 	P(se.avg.runnable_avg_sum_scaled);
 #endif
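For context, P() and __P() in the hunks above are local debug-print helpers defined in kernel/sched/debug.c; the diff only narrows which scheduler fields they dump to CONFIG_SCHED_HMP builds. Their shape is roughly the sketch below (format strings vary between kernel versions, so treat this as illustrative rather than a verbatim copy of this tree):

/* Approximate shape of the per-task debug print helpers in
 * kernel/sched/debug.c; SEQ_printf writes to the seq_file m. */
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)

P(ravg.demand) therefore prints the field name literally ("ravg.demand") followed by its value from the task_struct being inspected.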