-rw-r--r--  include/trace/events/sched.h | 50
-rw-r--r--  kernel/sched/fair.c          | 14
-rw-r--r--  kernel/sched/rt.c            |  4
-rw-r--r--  kernel/sched/sched.h         |  3
4 files changed, 39 insertions(+), 32 deletions(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 1a215ba026b5..f279fcf33297 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -58,7 +58,7 @@ TRACE_EVENT(sched_kthread_stop_ret,
*/
TRACE_EVENT(sched_enq_deq_task,
- TP_PROTO(struct task_struct *p, int enqueue, unsigned int cpus_allowed),
+ TP_PROTO(struct task_struct *p, bool enqueue, unsigned int cpus_allowed),
TP_ARGS(p, enqueue, cpus_allowed),
@@ -67,14 +67,12 @@ TRACE_EVENT(sched_enq_deq_task,
__field( pid_t, pid )
__field( int, prio )
__field( int, cpu )
- __field( int, enqueue )
+ __field( bool, enqueue )
__field(unsigned int, nr_running )
__field(unsigned long, cpu_load )
__field(unsigned int, rt_nr_running )
__field(unsigned int, cpus_allowed )
#ifdef CONFIG_SCHED_HMP
- __field(unsigned int, sum_scaled )
- __field(unsigned int, period )
__field(unsigned int, demand )
#endif
),
@@ -90,15 +88,13 @@ TRACE_EVENT(sched_enq_deq_task,
__entry->rt_nr_running = task_rq(p)->rt.rt_nr_running;
__entry->cpus_allowed = cpus_allowed;
#ifdef CONFIG_SCHED_HMP
- __entry->sum_scaled = p->se.avg.runnable_avg_sum_scaled;
- __entry->period = p->se.avg.runnable_avg_period;
__entry->demand = p->ravg.demand;
#endif
),
TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
#ifdef CONFIG_SCHED_HMP
- " sum_scaled=%u period=%u demand=%u"
+ " demand=%u"
#endif
, __entry->cpu,
__entry->enqueue ? "enqueue" : "dequeue",
@@ -106,7 +102,7 @@ TRACE_EVENT(sched_enq_deq_task,
__entry->prio, __entry->nr_running,
__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
#ifdef CONFIG_SCHED_HMP
- , __entry->sum_scaled, __entry->period, __entry->demand
+ , __entry->demand
#endif
)
);
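With enqueue now a bool and the sum_scaled/period fields dropped, the record carries only the single HMP demand value. For context, a hypothetical call site (not part of this hunk; the affinity expression is illustrative) would now read:

	/* Report an enqueue of p; pass the low word of its affinity mask. */
	trace_sched_enq_deq_task(p, true, cpumask_bits(&p->cpus_allowed)[0]);

Callers that still pass 1/0 keep compiling, since int converts implicitly to bool; true/false simply reads better at the call site.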
@@ -115,22 +111,19 @@ TRACE_EVENT(sched_enq_deq_task,
TRACE_EVENT(sched_task_load,
- TP_PROTO(struct task_struct *p, int boost, int reason,
- int sync, int need_idle, int best_cpu),
+ TP_PROTO(struct task_struct *p, bool boost, int reason,
+ bool sync, bool need_idle, int best_cpu),
TP_ARGS(p, boost, reason, sync, need_idle, best_cpu),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
__field( pid_t, pid )
- __field(unsigned int, sum )
- __field(unsigned int, sum_scaled )
- __field(unsigned int, period )
__field(unsigned int, demand )
- __field( int, boost )
+ __field( bool, boost )
__field( int, reason )
- __field( int, sync )
- __field( int, need_idle )
+ __field( bool, sync )
+ __field( bool, need_idle )
__field( int, best_cpu )
__field( u64, latency )
),
@@ -138,9 +131,6 @@ TRACE_EVENT(sched_task_load,
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
- __entry->sum = p->se.avg.runnable_avg_sum;
- __entry->sum_scaled = p->se.avg.runnable_avg_sum_scaled;
- __entry->period = p->se.avg.runnable_avg_period;
__entry->demand = p->ravg.demand;
__entry->boost = boost;
__entry->reason = reason;
@@ -151,14 +141,13 @@ TRACE_EVENT(sched_task_load,
sched_clock() - p->ravg.mark_start : 0;
),
- TP_printk("%d (%s): sum=%u, sum_scaled=%u, period=%u demand=%u boost=%d reason=%d sync=%d need_idle=%d best_cpu=%d latency=%llu",
- __entry->pid, __entry->comm, __entry->sum,
- __entry->sum_scaled, __entry->period, __entry->demand,
+ TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d best_cpu=%d latency=%llu",
+ __entry->pid, __entry->comm, __entry->demand,
__entry->boost, __entry->reason, __entry->sync,
__entry->need_idle, __entry->best_cpu, __entry->latency)
);
-TRACE_EVENT(sched_cpu_load,
+DECLARE_EVENT_CLASS(sched_cpu_load,
TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
@@ -206,6 +195,21 @@ TRACE_EVENT(sched_cpu_load,
__entry->dstate, __entry->temp)
);
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+ TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+ TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
+ TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+ TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
TRACE_EVENT(sched_set_boost,
TP_PROTO(int ref_count),
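The sched_cpu_load conversion above is the core of this patch: TRACE_EVENT() is itself defined as DECLARE_EVENT_CLASS() plus a single DEFINE_EVENT(), so splitting it lets the wakeup, load-balance, and cgroup-throttle call sites each get a separately enable-able tracepoint while the record layout, fast-assign body, and printk format are emitted only once. A minimal sketch of the pattern (names here are illustrative, not from this patch):

	DECLARE_EVENT_CLASS(demo_cpu_load,

		TP_PROTO(int cpu, unsigned long load),

		TP_ARGS(cpu, load),

		TP_STRUCT__entry(
			__field(int,		cpu)
			__field(unsigned long,	load)
		),

		TP_fast_assign(
			__entry->cpu	= cpu;
			__entry->load	= load;
		),

		TP_printk("cpu=%d load=%lu", __entry->cpu, __entry->load)
	);

	/* Each DEFINE_EVENT stamps out a named tracepoint that reuses the
	 * class body; only the prototype and argument list are repeated. */
	DEFINE_EVENT(demo_cpu_load, demo_cpu_load_wakeup,
		TP_PROTO(int cpu, unsigned long load),
		TP_ARGS(cpu, load)
	);

	DEFINE_EVENT(demo_cpu_load, demo_cpu_load_lb,
		TP_PROTO(int cpu, unsigned long load),
		TP_ARGS(cpu, load)
	);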
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f6bba6dfdf9e..3813be29b6e8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3195,10 +3195,10 @@ static int select_best_cpu(struct task_struct *p, int target, int reason,
for_each_cpu(i, &search_cpus) {
struct rq *rq = cpu_rq(i);
- trace_sched_cpu_load(cpu_rq(i), idle_cpu(i), sched_irqload(i),
- power_cost(i, task_load(p) +
- cpu_cravg_sync(i, sync)),
- cpu_temp(i));
+ trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+ sched_irqload(i),
+ power_cost(i, task_load(p) + cpu_cravg_sync(i, sync)),
+ cpu_temp(i));
if (skip_freq_domain(trq, rq, reason)) {
cpumask_andnot(&search_cpus, &search_cpus,
@@ -5233,7 +5233,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
raw_spin_unlock(&cfs_b->lock);
/* Log effect on hmp stats after throttling */
- trace_sched_cpu_load(rq, idle_cpu(cpu_of(rq)),
+ trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
sched_irqload(cpu_of(rq)),
power_cost(cpu_of(rq), 0),
cpu_temp(cpu_of(rq)));
@@ -5290,7 +5290,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
resched_curr(rq);
/* Log effect on hmp stats after un-throttling */
- trace_sched_cpu_load(rq, idle_cpu(cpu_of(rq)),
+ trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
sched_irqload(cpu_of(rq)),
power_cost(cpu_of(rq), 0),
cpu_temp(cpu_of(rq)));
@@ -8068,7 +8068,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
- trace_sched_cpu_load(cpu_rq(i), idle_cpu(i),
+ trace_sched_cpu_load_lb(cpu_rq(i), idle_cpu(i),
sched_irqload(i),
power_cost(i, 0),
cpu_temp(i));
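A practical consequence of the split above: with tracefs mounted in the usual place, each sampling context can now be toggled on its own, e.g. events/sched/sched_cpu_load_lb/enable for just the load-balance samples, instead of the former all-or-nothing events/sched/sched_cpu_load/enable.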
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f0f3cf71c538..1fd22539a334 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1695,8 +1695,8 @@ static int find_lowest_rq_hmp(struct task_struct *task)
cpu_load = scale_load_to_cpu(
cpu_rq(i)->hmp_stats.cumulative_runnable_avg, i);
cpu_cost = power_cost(i, cpu_cravg_sync(i, 0));
- trace_sched_cpu_load(cpu_rq(i), idle_cpu(i), sched_irqload(i),
- cpu_cost, cpu_temp(i));
+ trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+ sched_irqload(i), cpu_cost, cpu_temp(i));
if (power_delta_exceeded(cpu_cost, min_cost)) {
if (cpu_cost > min_cost)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2045dec5e80c..85531d648cfb 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1292,6 +1292,9 @@ static inline void clear_reserved(int cpu) { }
#define power_cost(...) 0
#define trace_sched_cpu_load(...)
+#define trace_sched_cpu_load_lb(...)
+#define trace_sched_cpu_load_cgroup(...)
+#define trace_sched_cpu_load_wakeup(...)
#endif /* CONFIG_SCHED_HMP */
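The empty variadic stubs above mirror the existing trace_sched_cpu_load stub: with CONFIG_SCHED_HMP unset, each call expands to nothing at preprocessing time, so the argument expressions (power_cost(), cpu_temp(), and so on) are never evaluated either. A small, runnable userspace sketch of the same trick, with names loosely borrowed from the patch:

	#include <stdio.h>

	/* Flip this define to see the tracepoint and its arguments appear. */
	#ifdef CONFIG_SCHED_HMP
	#define trace_cpu_load(cpu, cost)  printf("cpu=%d cost=%u\n", cpu, cost)
	#else
	#define trace_cpu_load(...)	/* swallows every argument */
	#endif

	unsigned int power_cost(int cpu)
	{
		/* Never runs in the stubbed build: the whole call vanishes. */
		printf("power_cost(%d) evaluated\n", cpu);
		return 100u;
	}

	int main(void)
	{
		trace_cpu_load(0, power_cost(0));
		return 0;
	}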