| author | Srivatsa Vaddagiri <vatsa@codeaurora.org> | 2014-03-29 16:56:45 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 19:58:59 -0700 |
| commit | 77fe8dd14da1d5f1cc32382a761206e8dd4ce6da | |
| tree | 38b89b1478018fd3056b6fd75b92c89c65846adc /kernel/sched/sched.h | |
| parent | a25a5c1c307cde7a851c1d0dd99b90f0a9e1ea28 | |
sched: Introduce CONFIG_SCHED_FREQ_INPUT
Introduce a compile-time flag to enable scheduler guidance of
frequency selection. This flag is also used to turn the
window-based load stats feature on or off.
Having a compile-time flag lets platforms avoid any
overhead that may come with this scheduler feature.
Change-Id: Id8dec9839f90dcac82f58ef7e2bd0ccd0b6bd16c
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Port to msm-3.18]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
[joonwoop@codeaurora.org: fixed minor conflict around
sysctl_timer_migration.]
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched/sched.h')
| -rw-r--r-- | kernel/sched/sched.h | 29 |
1 file changed, 29 insertions, 0 deletions
```diff
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index dab9568c36c7..bb7f283b6dac 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -643,12 +643,14 @@ struct rq {
 	u64 max_idle_balance_cost;
 #endif
 
+#ifdef CONFIG_SCHED_FREQ_INPUT
 	/*
 	 * max_freq = user or thermal defined maximum
 	 * max_possible_freq = maximum supported by hardware
 	 */
 	unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
 	u64 cumulative_runnable_avg;
+#endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	u64 prev_irq_time;
@@ -915,6 +917,8 @@ static inline void sched_ttwu_pending(void) { }
 #include "stats.h"
 #include "auto_group.h"
 
+#ifdef CONFIG_SCHED_FREQ_INPUT
+
 extern unsigned int sched_ravg_window;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
@@ -934,6 +938,24 @@ dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
 	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
 }
 
+#else	/* CONFIG_SCHED_FREQ_INPUT */
+
+static inline int pct_task_load(struct task_struct *p) { return 0; }
+
+static inline void
+inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void
+dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void init_new_task_load(struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_FREQ_INPUT */
+
 #ifdef CONFIG_CGROUP_SCHED
 
 /*
@@ -1267,8 +1289,15 @@ struct sched_class {
 #endif
 };
 
+#ifdef CONFIG_SCHED_FREQ_INPUT
 extern void update_task_ravg(struct task_struct *p, struct rq *rq,
 						int update_sum);
+#else	/* CONFIG_SCHED_FREQ_INPUT */
+static inline void
+update_task_ravg(struct task_struct *p, struct rq *rq, int update_sum)
+{
+}
+#endif	/* CONFIG_SCHED_FREQ_INPUT */
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
```
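The idiom the patch relies on is worth seeing in isolation: feature-specific fields and helpers are compiled in only when the option is set, and empty `static inline` stubs keep every call site building and behaving sensibly when it is not, so callers never need their own `#ifdef`s. Below is a minimal standalone sketch of that pattern; it is not kernel code, and `CONFIG_DEMO_FREQ_INPUT`, `struct demo_rq`, `struct demo_task`, and the `demand` field are hypothetical stand-ins for `CONFIG_SCHED_FREQ_INPUT` and the real `rq`/`task_struct` members.

```c
/*
 * Standalone illustration of the compile-time gating idiom used in this
 * patch: real helpers when the feature is configured in, empty static
 * inline stubs when it is not.  Build with -DCONFIG_DEMO_FREQ_INPUT to
 * enable the feature path; all names here are hypothetical stand-ins.
 */
#include <assert.h>
#include <stdio.h>

struct demo_task {
	unsigned long demand;		/* load this task contributes */
};

struct demo_rq {
#ifdef CONFIG_DEMO_FREQ_INPUT
	/* Field exists only when the option is enabled. */
	unsigned long cumulative_runnable_avg;
#endif
	int nr_running;
};

#ifdef CONFIG_DEMO_FREQ_INPUT

static inline void
inc_cumulative_runnable_avg(struct demo_rq *rq, struct demo_task *p)
{
	rq->cumulative_runnable_avg += p->demand;
}

static inline void
dec_cumulative_runnable_avg(struct demo_rq *rq, struct demo_task *p)
{
	rq->cumulative_runnable_avg -= p->demand;
	assert((long)rq->cumulative_runnable_avg >= 0);
}

#else	/* !CONFIG_DEMO_FREQ_INPUT */

/* No-op stubs: call sites compile unchanged with the feature off. */
static inline void
inc_cumulative_runnable_avg(struct demo_rq *rq, struct demo_task *p) { }

static inline void
dec_cumulative_runnable_avg(struct demo_rq *rq, struct demo_task *p) { }

#endif	/* CONFIG_DEMO_FREQ_INPUT */

int main(void)
{
	struct demo_rq rq = { .nr_running = 0 };
	struct demo_task p = { .demand = 100 };

	/* Identical call sites whether or not the feature is built in. */
	inc_cumulative_runnable_avg(&rq, &p);
	rq.nr_running++;

	dec_cumulative_runnable_avg(&rq, &p);
	rq.nr_running--;

	printf("nr_running back to %d\n", rq.nr_running);
	return 0;
}
```

The same trade-off motivates the patch itself: platforms that do not want scheduler-guided frequency selection pay neither the per-rq storage nor the accounting cost, while platforms that enable the option get the window-based load statistics without touching any callers.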
