author	Joonwoo Park <joonwoop@codeaurora.org>	2015-03-02 11:25:17 -0800
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 20:01:50 -0700
commit	b40bf941f61756bcca03a818b4c8fa857612f8cd (patch)
tree	27f8532b5cebdc76f82afe08ecc60441ff751319 /kernel/sched
parent	8f90803a45d3aa349a4d0f1051b194767ece5e26 (diff)
sched: add scheduling latency tracking procfs node
Add a new procfs node /proc/sys/kernel/sched_max_latency_us to track the
worst scheduling latency. It provides an easier way to identify the
maximum scheduling latency seen across the CPUs.

Change-Id: I6e435bbf825c0a4dff2eded4a1256fb93f108d0e
[joonwoop@codeaurora.org: fixed conflict in update_stats_wait_end().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
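A minimal userspace sketch (not part of this commit) of how the node could be
exercised. The expected output format mirrors the snprintf() in
sched_max_latency_sysctl() below, and per the handler's write path, writing
any value resets the tracked per-CPU maxima:

/* read_max_latency.c: hypothetical usage example, not from this commit */
#include <stdio.h>

#define NODE "/proc/sys/kernel/sched_max_latency_us"

int main(void)
{
	char buf[256];
	FILE *f = fopen(NODE, "r");

	if (!f) {
		perror(NODE);
		return 1;
	}
	/* Expected format: "cpu%d comm=%s pid=%u latency=%u(us)" */
	if (fgets(buf, sizeof(buf), f))
		printf("%s\n", buf);
	fclose(f);

	/* Writing any value clears the tracked maxima on all CPUs. */
	f = fopen(NODE, "w");
	if (f) {
		fputs("0\n", f);
		fclose(f);
	}
	return 0;
}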
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/fair.c	63
1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1da6091b54f3..ec3ee54800a8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -126,6 +126,14 @@ unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL;
 #ifdef CONFIG_SCHEDSTATS
 unsigned int sysctl_sched_latency_panic_threshold;
 unsigned int sysctl_sched_latency_warn_threshold;
+
+struct sched_max_latency {
+	unsigned int latency_us;
+	char comm[TASK_COMM_LEN];
+	pid_t pid;
+};
+
+static DEFINE_PER_CPU(struct sched_max_latency, sched_max_latency);
 #endif /* CONFIG_SCHEDSTATS */

 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
@@ -756,6 +764,54 @@ static void update_curr_fair(struct rq *rq)
 }

 #ifdef CONFIG_SCHEDSTATS
+int sched_max_latency_sysctl(struct ctl_table *table, int write,
+			     void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret = 0;
+	int i, cpu = nr_cpu_ids;
+	char msg[256];
+	unsigned long flags;
+	struct rq *rq;
+	struct sched_max_latency max, *lat;
+
+	if (!write) {
+		max.latency_us = 0;
+		for_each_possible_cpu(i) {
+			rq = cpu_rq(i);
+			raw_spin_lock_irqsave(&rq->lock, flags);
+
+			lat = &per_cpu(sched_max_latency, i);
+			if (max.latency_us < lat->latency_us) {
+				max = *lat;
+				cpu = i;
+			}
+
+			raw_spin_unlock_irqrestore(&rq->lock, flags);
+		}
+
+		if (cpu != nr_cpu_ids) {
+			table->maxlen =
+				snprintf(msg, sizeof(msg),
+					 "cpu%d comm=%s pid=%u latency=%u(us)",
+					 cpu, max.comm, max.pid, max.latency_us);
+			table->data = msg;
+			ret = proc_dostring(table, write, buffer, lenp, ppos);
+		}
+	} else {
+		for_each_possible_cpu(i) {
+			rq = cpu_rq(i);
+			raw_spin_lock_irqsave(&rq->lock, flags);
+
+			memset(&per_cpu(sched_max_latency, i), 0,
+			       sizeof(struct sched_max_latency));
+
+			raw_spin_unlock_irqrestore(&rq->lock, flags);
+		}
+	}
+
+	return ret;
+}
+
 static inline void check_for_high_latency(struct task_struct *p, u64 latency_us)
 {
 	int do_warn, do_panic;
@@ -792,6 +848,7 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	struct task_struct *p;
 	u64 delta = rq_clock(rq_of(cfs_rq)) - se->statistics.wait_start;
+	struct sched_max_latency *max;

 	if (entity_is_task(se)) {
 		p = task_of(se);
@@ -805,6 +862,12 @@ update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
 			return;
 		}
 		trace_sched_stat_wait(p, delta);
+		max = this_cpu_ptr(&sched_max_latency);
+		if (max->latency_us < (delta >> 10)) {
+			max->latency_us = delta >> 10;
+			max->pid = task_of(se)->pid;
+			memcpy(max->comm, task_of(se)->comm, TASK_COMM_LEN);
+		}
 		check_for_high_latency(p, delta >> 10);
 	}
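Note: the diffstat is limited to kernel/sched, so the ctl_table registration
that actually exposes the handler is not shown in this diff. A hedged sketch
of what such a kernel/sysctl.c entry could look like (field values are
assumptions for illustration, not taken from this commit):

/* Hypothetical sysctl table entry wiring up the handler above. */
static char sched_max_latency_data[256];	/* assumed backing buffer */

static struct ctl_table sched_max_latency_entry = {
	.procname	= "sched_max_latency_us",
	.data		= sched_max_latency_data,
	.maxlen		= sizeof(sched_max_latency_data),
	.mode		= 0644,
	.proc_handler	= sched_max_latency_sysctl,
};

On reads the handler overwrites .data and .maxlen with the formatted per-CPU
maximum before calling proc_dostring(), so the backing buffer here mainly
satisfies the table shape.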