path: root/kernel/sched/debug.c
author	Srivatsa Vaddagiri <vatsa@codeaurora.org>	2014-03-31 18:21:26 -0700
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 19:59:10 -0700
commit	bf863e333ff3704543c1300f837058b33c7bcb46 (patch)
tree	9124831c17a160973f199bb0ffec942d2af211e5	/kernel/sched/debug.c
parent	1bea4eae335b554359c67b2321093bf52abd9538 (diff)
sched: Provide scaled load information for tasks in /proc
Extend "sched" file in /proc for every task to provide information on scaled load statistics and percentage-scaled based load (load_avg) for a task. This will be valuable debug aid. Change-Id: I6ee0394b409c77c7f79f5b9ac560da03dc879758 Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
Diffstat (limited to 'kernel/sched/debug.c')
-rw-r--r--	kernel/sched/debug.c	11
1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index a89496c171c6..02319b46ad6c 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -571,6 +571,9 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
unsigned long nr_switches;
+ unsigned int load_avg;
+
+ load_avg = pct_task_load(p);
SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
get_nr_threads(p));
@@ -622,6 +625,14 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
P(se.statistics.nr_wakeups_passive);
P(se.statistics.nr_wakeups_idle);
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+ __P(load_avg);
+#if defined(CONFIG_SCHED_FREQ_INPUT) || defined(CONFIG_SCHED_HMP)
+ P(ravg.demand);
+ P(se.avg.runnable_avg_sum_scaled);
+#endif
+#endif
+
{
u64 avg_atom, avg_per_cpu;
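For context, P() and __P() are local helper macros in proc_sched_show_task():
P(F) prints a field of the task via p->F, while __P(F) prints a local
variable, which is why the pre-computed load_avg percentage from
pct_task_load() is emitted with __P(). The stand-alone sketch below mimics
that formatting outside the kernel; the mock_task struct, the example values
and the column widths are illustrative assumptions, not taken from this patch.

/* Self-contained illustration (not kernel code) of how the P()/__P()
 * helpers format their output: P(F) dereferences the task pointer p,
 * __P(F) prints a local variable.  The column layout only approximates
 * the kernel's SEQ_printf format string.
 */
#include <stdio.h>

struct mock_ravg { unsigned long long demand; };
struct mock_task { struct mock_ravg ravg; };

#define __P(F)	printf("%-45s:%21lld\n", #F, (long long)(F))
#define P(F)	printf("%-45s:%21lld\n", #F, (long long)(p->F))

int main(void)
{
	struct mock_task task = { .ravg = { .demand = 10000000ULL } };
	struct mock_task *p = &task;
	unsigned int load_avg = 37;	/* e.g. a pct_task_load()-style percentage */

	__P(load_avg);		/* local variable, as in the hunk above */
	P(ravg.demand);		/* per-task field, printed via p-> */
	return 0;
}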