author     Srivatsa Vaddagiri <vatsa@codeaurora.org>    2015-01-16 11:27:31 +0530
committer  David Keitel <dkeitel@codeaurora.org>        2016-03-23 20:01:34 -0700
commit     0a33ec2ea97b52f7cccbc5da42a243d6c9038c6f (patch)
tree       2e1658af2cb66f497c23d6829629f545cc688140 /include/trace
parent     207d78dd263114ccf6e04581f43e4dc99e7b068d (diff)
sched: Consolidate hmp stats into their own struct
Key HMP stats (nr_big_tasks, nr_small_tasks and cumulative_runnable_avg)
are currently maintained per-CPU in 'struct rq'. Merge those stats into
their own structure (struct hmp_sched_stats) and modify the impacted
functions to deal with the newly introduced structure. This cleanup is
required for a subsequent patch which fixes various issues with use of
the CFS_BANDWIDTH feature in the HMP scheduler.

Change-Id: Ieffc10a3b82a102f561331bc385d042c15a33998
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed conflict in __update_load_avg().]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
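For orientation, the fields referenced in this patch suggest roughly the
following shape for the new structure. This is only a sketch, not the actual
definition (which lives in the scheduler code proper, outside this
include/trace diff); the field types are assumptions inferred from the
trace-event assignments below.

/*
 * Hypothetical sketch of the consolidation described above.
 * Field types are assumptions, not the real kernel definition;
 * u64 is the kernel's unsigned 64-bit type.
 */
struct hmp_sched_stats {
	int nr_big_tasks;		/* runnable "big" tasks on this CPU */
	int nr_small_tasks;		/* runnable "small" tasks on this CPU */
	u64 cumulative_runnable_avg;	/* summed demand of runnable tasks */
};

struct rq {
	/* ... existing per-CPU runqueue fields ... */
	struct hmp_sched_stats hmp_stats;	/* replaces the loose per-rq counters */
	/* ... */
};

Code that previously read rq->nr_big_tasks now reads
rq->hmp_stats.nr_big_tasks, which is exactly the substitution made in the
trace events below.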
Diffstat (limited to 'include/trace')
-rw-r--r--  include/trace/events/sched.h  10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6f4a8f0d45e3..1ac6edf6f8e4 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -185,11 +185,11 @@ TRACE_EVENT(sched_cpu_load,
__entry->idle = idle;
__entry->mostly_idle = mostly_idle;
__entry->nr_running = rq->nr_running;
- __entry->nr_big_tasks = rq->nr_big_tasks;
- __entry->nr_small_tasks = rq->nr_small_tasks;
+ __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+ __entry->nr_small_tasks = rq->hmp_stats.nr_small_tasks;
__entry->load_scale_factor = rq->load_scale_factor;
__entry->capacity = rq->capacity;
- __entry->cumulative_runnable_avg = rq->cumulative_runnable_avg;
+ __entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
__entry->irqload = irqload;
__entry->cur_freq = rq->cur_freq;
__entry->max_freq = rq->max_freq;
@@ -323,8 +323,8 @@ TRACE_EVENT(sched_update_history,
__entry->demand = p->ravg.demand;
memcpy(__entry->hist, p->ravg.sum_history,
RAVG_HIST_SIZE_MAX * sizeof(u32));
- __entry->nr_big_tasks = rq->nr_big_tasks;
- __entry->nr_small_tasks = rq->nr_small_tasks;
+ __entry->nr_big_tasks = rq->hmp_stats.nr_big_tasks;
+ __entry->nr_small_tasks = rq->hmp_stats.nr_small_tasks;
__entry->cpu = rq->cpu;
),