diff options
| author | Joonwoo Park <joonwoop@codeaurora.org> | 2016-03-08 13:46:04 -0800 |
|---|---|---|
| committer | Kyle Yan <kyan@codeaurora.org> | 2016-04-27 19:13:05 -0700 |
| commit | 35f1d99e0a3ad7f1b15ca2085ca92fd545dd01de (patch) | |
| tree | a52e8cb3208ee558621098b70e3b41645a9c1ef3 /include | |
| parent | 343dcf1ecc085671982de5de6212dbad827bbf1a (diff) | |
sched: add support for CPU frequency estimation with cycle counter
At present the scheduler calculates a task's demand from the task's execution
time weighted by CPU frequency. The CPU frequency is obtained from the
governor's CPU frequency transition notification, but such a notification
may not be available.
Provide an API for the CPU clock driver to register callback functions so
that the scheduler can access the CPU's cycle counter and estimate the CPU's
frequency without relying on notifications. At this point the scheduler
assumes the cycle counter always increases, even when the cluster is idle,
which might not be true. This will be fixed by a subsequent change adding
more accurate I/O wait time accounting.
CRs-fixed: 1006303
Change-Id: I93b187efd7bc225db80da0184683694f5ab99738
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'include')
| -rw-r--r-- | include/linux/sched.h | 7 | ||||
| -rw-r--r-- | include/trace/events/sched.h | 44 |
2 files changed, 44 insertions, 7 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index 7ece18efd02b..5c2534a3b818 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1470,6 +1470,7 @@ struct task_struct { u64 last_switch_out_ts; struct related_thread_group *grp; struct list_head grp_list; + u64 cpu_cycles; #endif #ifdef CONFIG_CGROUP_SCHED struct task_group *sched_task_group; @@ -3358,4 +3359,10 @@ static inline unsigned long rlimit_max(unsigned int limit) return task_rlimit_max(current, limit); } +struct cpu_cycle_counter_cb { + u64 (*get_cpu_cycle_counter)(int cpu); + u32 (*get_cpu_cycles_max_per_us)(int cpu); +}; +int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb); + #endif diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 41ab6aee4d7f..f20e3b0b0eda 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -203,7 +203,6 @@ DECLARE_EVENT_CLASS(sched_cpu_load, __field(unsigned int, capacity ) __field( u64, cumulative_runnable_avg ) __field( u64, irqload ) - __field(unsigned int, cur_freq ) __field(unsigned int, max_freq ) __field(unsigned int, power_cost ) __field( int, cstate ) @@ -220,7 +219,6 @@ DECLARE_EVENT_CLASS(sched_cpu_load, __entry->capacity = cpu_capacity(rq->cpu); __entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg; __entry->irqload = irqload; - __entry->cur_freq = cpu_cur_freq(rq->cpu); __entry->max_freq = cpu_max_freq(rq->cpu); __entry->power_cost = power_cost; __entry->cstate = rq->cstate; @@ -228,10 +226,10 @@ DECLARE_EVENT_CLASS(sched_cpu_load, __entry->temp = temp; ), - TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fcur %u fmax %u power_cost %u cstate %d dstate %d temp %d", + TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d", __entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks, __entry->load_scale_factor, 
__entry->capacity, - __entry->cumulative_runnable_avg, __entry->irqload, __entry->cur_freq, + __entry->cumulative_runnable_avg, __entry->irqload, __entry->max_freq, __entry->power_cost, __entry->cstate, __entry->dstate, __entry->temp) ); @@ -271,9 +269,9 @@ TRACE_EVENT(sched_set_boost, TRACE_EVENT(sched_update_task_ravg, TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt, - u64 wallclock, u64 irqtime), + u64 wallclock, u64 irqtime, u32 cycles, u32 exec_time), - TP_ARGS(p, rq, evt, wallclock, irqtime), + TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) @@ -309,7 +307,8 @@ TRACE_EVENT(sched_update_task_ravg, __entry->evt = evt; __entry->cpu = rq->cpu; __entry->cur_pid = rq->curr->pid; - __entry->cur_freq = cpu_cur_freq(rq->cpu); + __entry->cur_freq = cpu_cycles_to_freq(rq->cpu, cycles, + exec_time); memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; __entry->mark_start = p->ravg.mark_start; @@ -1203,6 +1202,37 @@ TRACE_EVENT(sched_get_nr_running_avg, TP_printk("avg=%d big_avg=%d iowait_avg=%d", __entry->avg, __entry->big_avg, __entry->iowait_avg) ); + +TRACE_EVENT(sched_get_task_cpu_cycles, + + TP_PROTO(int cpu, int event, u64 cycles, u32 exec_time), + + TP_ARGS(cpu, event, cycles, exec_time), + + TP_STRUCT__entry( + __field(int, cpu ) + __field(int, event ) + __field(u64, cycles ) + __field(u64, exec_time ) + __field(u32, freq ) + __field(u32, legacy_freq ) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->event = event; + __entry->cycles = cycles; + __entry->exec_time = exec_time; + __entry->freq = cpu_cycles_to_freq(cpu, cycles, + exec_time); + __entry->legacy_freq = cpu_cur_freq(cpu); + ), + + TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u", + __entry->cpu, __entry->event, __entry->cycles, + __entry->exec_time, __entry->freq, __entry->legacy_freq) +); + #endif /* _TRACE_SCHED_H */ /* This part must be outside protection */ |
