Diffstat (limited to 'kernel/sched/debug.c')
 kernel/sched/debug.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 641511771ae6..c8c4272c61d8 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -227,6 +227,14 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->throttled);
 	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
 			cfs_rq->throttle_count);
+	SEQ_printf(m, "  .%-30s: %d\n", "runtime_enabled",
+			cfs_rq->runtime_enabled);
+#ifdef CONFIG_SCHED_HMP
+	SEQ_printf(m, "  .%-30s: %d\n", "nr_big_tasks",
+			cfs_rq->hmp_stats.nr_big_tasks);
+	SEQ_printf(m, "  .%-30s: %llu\n", "cumulative_runnable_avg",
+			cfs_rq->hmp_stats.cumulative_runnable_avg);
+#endif
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
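With the "  .%-30s" format used throughout print_cfs_rq(), the new fields line
up with the existing throttling statistics in the per-cfs_rq section of
/proc/sched_debug. The output would look roughly like this (values are
illustrative, not taken from a real run):

  .runtime_enabled               : 1
  .nr_big_tasks                  : 2
  .cumulative_runnable_avg       : 173440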
@@ -306,6 +314,23 @@ do { \
 	P(cpu_load[2]);
 	P(cpu_load[3]);
 	P(cpu_load[4]);
+#ifdef CONFIG_SMP
+	P(cpu_capacity);
+#endif
+#ifdef CONFIG_SCHED_HMP
+	P(static_cpu_pwr_cost);
+	P(cluster->static_cluster_pwr_cost);
+	P(cluster->load_scale_factor);
+	P(cluster->capacity);
+	P(cluster->max_possible_capacity);
+	P(cluster->efficiency);
+	P(cluster->cur_freq);
+	P(cluster->max_freq);
+	P(cluster->exec_scale_factor);
+	P(hmp_stats.nr_big_tasks);
+	SEQ_printf(m, "  .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
+			rq->hmp_stats.cumulative_runnable_avg);
+#endif
 #undef P
 #undef PN
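The "do { \" in this hunk header shows the additions land inside print_cpu(),
whose local P() helper stringifies its argument and reads that field off the
runqueue being printed. A sketch of the macro as defined in debug.c of this
era (reproduced from memory, so treat the exact casts as indicative):

#define P(x)							\
do {								\
	if (sizeof(rq->x) == 4)					\
		SEQ_printf(m, "  .%-30s: %ld\n", #x, (long)(rq->x)); \
	else							\
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x)); \
} while (0)

hmp_stats.cumulative_runnable_avg is printed with a raw SEQ_printf() instead,
presumably so the u64 comes out unsigned via %llu rather than through P()'s
signed cast.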
@@ -386,6 +411,15 @@ static void sched_debug_header(struct seq_file *m)
 	PN(sysctl_sched_wakeup_granularity);
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
+#ifdef CONFIG_SCHED_HMP
+	P(sched_upmigrate);
+	P(sched_downmigrate);
+	P(sched_init_task_load_windows);
+	P(min_capacity);
+	P(max_capacity);
+	P(sched_ravg_window);
+	P(sched_load_granule);
+#endif
 #undef PN
 #undef P
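sched_debug_header() uses a simpler P() that evaluates a bare expression
rather than an rq member; roughly (again indicative, from the era's debug.c):

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(x))

This means the HMP tunables added here (sched_upmigrate, sched_downmigrate,
sched_ravg_window, and so on) must be globals visible to kernel/sched/debug.c;
nothing in this hunk declares them.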
@@ -408,6 +442,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
 	return 0;
 }
 
+#ifdef CONFIG_SYSRQ_SCHED_DEBUG
 void sysrq_sched_debug_show(void)
 {
 	int cpu;
@@ -417,6 +452,7 @@ void sysrq_sched_debug_show(void)
 		print_cpu(NULL, cpu);
 }
+#endif
 
 /*
  * This iterator needs some explanation.
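Compiling out the definition of sysrq_sched_debug_show() behind
CONFIG_SYSRQ_SCHED_DEBUG implies its callers are dealt with elsewhere in the
series. A minimal companion declaration, assuming the usual kernel pattern
(the header placement and the no-op stub are hypothetical, not part of this
patch):

#ifdef CONFIG_SYSRQ_SCHED_DEBUG
extern void sysrq_sched_debug_show(void);
#else
static inline void sysrq_sched_debug_show(void) { }	/* hypothetical stub */
#endif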
@@ -547,6 +583,9 @@ static void sched_show_numa(struct task_struct *p, struct seq_file *m)
 void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
+	unsigned int load_avg;
+
+	load_avg = pct_task_load(p);
 
 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
 		get_nr_threads(p));
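pct_task_load() is an HMP helper defined outside this file. A plausible
sketch, assuming it scales the task's window-based demand to a 0-100 value
(task_load() and max_task_load() are assumed HMP helpers, not shown by this
diff):

/* Sketch only: a task's windowed load as a percentage of the maximum. */
static inline unsigned int pct_task_load(struct task_struct *p)
{
	return div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
}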
@@ -598,6 +637,13 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	P(se.statistics.nr_wakeups_passive);
 	P(se.statistics.nr_wakeups_idle);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+	__P(load_avg);
+#ifdef CONFIG_SCHED_HMP
+	P(ravg.demand);
+#endif
+#endif
+
 	{
 		u64 avg_atom, avg_per_cpu;
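load_avg is printed with __P() rather than P() because it is a local
variable. The per-task macros in proc_sched_show_task() expand roughly as
follows (indicative, from the era's debug.c): P(F) dereferences p->F, while
__P(F) prints a bare expression.

#define __P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
	SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)

Hence ravg.demand goes through P() (it reads p->ravg.demand) while the
precomputed load_avg uses __P().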