-rw-r--r--  include/trace/events/sched.h | 20
-rw-r--r--  kernel/sched/fair.c          | 17
2 files changed, 33 insertions, 4 deletions
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 33b1c719c9d7..8006e3a46723 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -888,6 +888,26 @@ TRACE_EVENT(sched_tune_filter,
 		__entry->payoff, __entry->region)
 );
 
+/*
+ * Tracepoint for system overutilized flag
+ */
+TRACE_EVENT(sched_overutilized,
+
+	TP_PROTO(bool overutilized),
+
+	TP_ARGS(overutilized),
+
+	TP_STRUCT__entry(
+		__field( bool, overutilized )
+	),
+
+	TP_fast_assign(
+		__entry->overutilized = overutilized;
+	),
+
+	TP_printk("overutilized=%d",
+		__entry->overutilized ? 1 : 0)
+);
 #ifdef CONFIG_SCHED_WALT
 struct rq;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e099ce747345..b84277a8530d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4242,8 +4242,10 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (!se) {
 		walt_inc_cumulative_runnable_avg(rq, p);
 		if (!task_new && !rq->rd->overutilized &&
-		    cpu_overutilized(rq->cpu))
+		    cpu_overutilized(rq->cpu)) {
 			rq->rd->overutilized = true;
+			trace_sched_overutilized(true);
+		}
 
 		/*
 		 * We want to potentially trigger a freq switch
@@ -7503,12 +7505,17 @@ next_group:
 		env->dst_rq->rd->overload = overload;
 
 		/* Update over-utilization (tipping point, U >= 0) indicator */
-		if (env->dst_rq->rd->overutilized != overutilized)
+		if (env->dst_rq->rd->overutilized != overutilized) {
 			env->dst_rq->rd->overutilized = overutilized;
+			trace_sched_overutilized(overutilized);
+		}
 	} else {
-		if (!env->dst_rq->rd->overutilized && overutilized)
+		if (!env->dst_rq->rd->overutilized && overutilized) {
 			env->dst_rq->rd->overutilized = true;
+			trace_sched_overutilized(true);
+		}
 	}
+
 }
 
 /**
@@ -8948,8 +8955,10 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 		task_tick_numa(rq, curr);
 
 #ifdef CONFIG_SMP
-	if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr)))
+	if (!rq->rd->overutilized && cpu_overutilized(task_cpu(curr))) {
 		rq->rd->overutilized = true;
+		trace_sched_overutilized(true);
+	}
 
 	rq->misfit_task = !task_fits_max(curr, rq->cpu);
 #endif
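
With the patch applied, the new event is exposed through tracefs like any other TRACE_EVENT in the sched subsystem, under events/sched/sched_overutilized. The sketch below is a hypothetical userspace reader, not part of this patch; it assumes tracefs is mounted at /sys/kernel/tracing (older setups expose it at /sys/kernel/debug/tracing). It enables the event and streams the formatted "overutilized=0|1" records from trace_pipe.

/*
 * Hypothetical example, not part of the patch: enable the
 * sched_overutilized event and stream its output from trace_pipe.
 * Assumes tracefs is mounted at /sys/kernel/tracing.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define TRACEFS "/sys/kernel/tracing"

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	/* Writing "1" to the per-event enable file turns the event on. */
	fd = open(TRACEFS "/events/sched/sched_overutilized/enable", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable sched_overutilized");
		return EXIT_FAILURE;
	}
	close(fd);

	/* trace_pipe yields formatted records ending in "overutilized=0|1". */
	fd = open(TRACEFS "/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("open trace_pipe");
		return EXIT_FAILURE;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return EXIT_SUCCESS;
}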