| author | Olav Haugan <ohaugan@codeaurora.org> | 2017-02-01 17:59:51 -0800 |
|---|---|---|
| committer | Olav Haugan <ohaugan@codeaurora.org> | 2017-02-02 10:23:08 -0800 |
| commit | 475820b5bcfa8e8cc266db079df6cb2ee4d4d600 | |
| tree | 146e6c7bd84cb8aaf455e34873bc7279323ca5eb /kernel/sched/sched.h | |
| parent | af883d4db0b398542bb561808a11019f0998d129 | |
sched: Remove sched_enable_hmp flag
Clean up the code and make it more maintainable by removing the
dependency on the sched_enable_hmp flag. The HMP scheduler is not
supported without recompiling, so a runtime flag is redundant; the
HMP scheduler is enabled through the CONFIG_SCHED_HMP config option.
Change-Id: I246c1b1889f8dcbc8f0a0805077c0ce5d4f083b0
Signed-off-by: Olav Haugan <ohaugan@codeaurora.org>
Diffstat (limited to 'kernel/sched/sched.h')
| -rw-r--r-- | kernel/sched/sched.h | 9 |
1 file changed, 3 insertions, 6 deletions
```diff
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d907eeb297a3..5defec3fb3a0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1083,7 +1083,6 @@ enum sched_boost_policy {
 extern struct mutex policy_mutex;
 extern unsigned int sched_ravg_window;
 extern unsigned int sched_disable_window_stats;
-extern unsigned int sched_enable_hmp;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
 extern unsigned int pct_task_load(struct task_struct *p);
@@ -1127,7 +1126,6 @@ extern void update_cluster_topology(void);
 extern void note_task_waking(struct task_struct *p, u64 wallclock);
 extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
 extern void init_clusters(void);
-extern int __init set_sched_enable_hmp(char *str);
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
 extern unsigned int max_task_load(void);
 extern void sched_account_irqtime(int cpu, struct task_struct *curr,
@@ -1257,7 +1255,7 @@ inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
 {
 	u32 task_load;
 
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
@@ -1272,7 +1270,7 @@ dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
 {
 	u32 task_load;
 
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
@@ -1290,7 +1288,7 @@ fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
 				 struct task_struct *p, s64 task_load_delta,
 				 s64 pred_demand_delta)
 {
-	if (!sched_enable_hmp || sched_disable_window_stats)
+	if (sched_disable_window_stats)
 		return;
 
 	stats->cumulative_runnable_avg += task_load_delta;
@@ -1667,7 +1665,6 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
 
 static inline void add_new_task_to_grp(struct task_struct *new) {}
 
-#define sched_enable_hmp 0
 #define PRED_DEMAND_DELTA (0)
 
 static inline void
```
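The net effect of the hunks above is that the runtime sched_enable_hmp test disappears from the hot-path accounting helpers, and the compile-time CONFIG_SCHED_HMP switch alone decides whether the real implementations or no-op stubs get built. The following self-contained sketch illustrates that pattern; the struct and function bodies here are simplified stand-ins for illustration, not the kernel's actual definitions:

```c
/* hmp_config_sketch.c — a minimal sketch of the pattern this patch
 * applies: replacing a runtime enable flag with a compile-time Kconfig
 * switch. struct hmp_stats, struct task, and the field names below are
 * illustrative, not the kernel's real types. */
#include <stdio.h>

struct hmp_stats { unsigned long cumulative_runnable_avg; };
struct task { unsigned long demand; };

/* Window statistics can still be disabled at runtime; only the
 * HMP on/off decision moved to build time. */
static unsigned int sched_disable_window_stats;

#ifdef CONFIG_SCHED_HMP
/* HMP build: the old `!sched_enable_hmp ||` clause is gone, because
 * reaching this code at all now implies the feature was compiled in. */
static void inc_cumulative_runnable_avg(struct hmp_stats *stats,
					struct task *p)
{
	if (sched_disable_window_stats)
		return;
	stats->cumulative_runnable_avg += p->demand;
}
#else
/* Non-HMP build: a no-op stub, which is why the header's #else branch
 * no longer needs a `#define sched_enable_hmp 0` fallback. */
static inline void inc_cumulative_runnable_avg(struct hmp_stats *stats,
					       struct task *p)
{
	(void)stats;
	(void)p;
}
#endif

int main(void)
{
	struct hmp_stats stats = { 0 };
	struct task t = { .demand = 42 };

	inc_cumulative_runnable_avg(&stats, &t);
	printf("cumulative_runnable_avg = %lu\n",
	       stats.cumulative_runnable_avg);
	return 0;
}
```

Built with `cc -DCONFIG_SCHED_HMP hmp_config_sketch.c` the counter is updated; built without the define, the call resolves to the stub and compiles away. That is the maintainability win the commit message describes: one build-time switch instead of a flag checked on every accounting call.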
