| field | value | date |
|---|---|---|
| author | Jeevan Shriram <jshriram@codeaurora.org> | 2016-03-15 12:16:55 -0700 |
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:24:22 -0700 |
| commit | 48d195bfd6602746bb70c622cda396f0179b92bc (patch) | |
| tree | 6b5ee460faef361b13e896e68c00757e831e6100 /kernel | |
| parent | 643a1372494fcc2a5160e83360e178ca0c41f7bf (diff) | |
sched: remove init_new_task_load from CONFIG_SMP
Move the init_new_task_load() function out of the CONFIG_SMP-guarded
section of fair.c to avoid a linking error for ARCH=um builds.
Signed-off-by: Jeevan Shriram <jshriram@codeaurora.org>
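
For context, the failure mode addressed here is the generic one where a function definition sits behind a Kconfig guard while a caller outside that guard still references the symbol; with the guard unset (as in an ARCH=um build without SMP) compilation succeeds but the link fails. A minimal stand-alone sketch of that pattern, using hypothetical names and types rather than the kernel's own:

```c
#include <stdio.h>

struct task { int load; };

void init_new_task_load(struct task *p);   /* unconditional declaration */

#ifdef CONFIG_SMP
/* The definition exists only when the guard is set. */
void init_new_task_load(struct task *p)
{
        p->load = 100;                     /* placeholder initialisation */
}
#else
/* Without a stub like this one, building with the guard unset compiles
 * but fails at link time:
 *   undefined reference to `init_new_task_load'                         */
void init_new_task_load(struct task *p) { p->load = 0; }
#endif

int main(void)
{
        struct task t;
        init_new_task_load(&t);            /* caller is always compiled */
        printf("load=%d\n", t.load);
        return 0;
}
```

The patch resolves the equivalent problem in fair.c by relocating the CONFIG_SCHED_HMP block (real function plus empty stub) to a region that is compiled even when CONFIG_SMP is not set.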
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/fair.c | 65 |
1 file changed, 33 insertions(+), 32 deletions(-)
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fbc5e647c59e..2e69f2fb4447 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3976,38 +3976,6 @@ dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p) { }
 
 #endif /* CONFIG_SCHED_HMP */
 
-#ifdef CONFIG_SCHED_HMP
-
-void init_new_task_load(struct task_struct *p)
-{
-	int i;
-	u32 init_load_windows = sched_init_task_load_windows;
-	u32 init_load_pelt = sched_init_task_load_pelt;
-	u32 init_load_pct = current->init_load_pct;
-
-	p->init_load_pct = 0;
-	memset(&p->ravg, 0, sizeof(struct ravg));
-
-	if (init_load_pct) {
-		init_load_pelt = div64_u64((u64)init_load_pct *
-				(u64)LOAD_AVG_MAX, 100);
-		init_load_windows = div64_u64((u64)init_load_pct *
-				(u64)sched_ravg_window, 100);
-	}
-
-	p->ravg.demand = init_load_windows;
-	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
-		p->ravg.sum_history[i] = init_load_windows;
-	p->se.avg.runnable_avg_sum_scaled = init_load_pelt;
-}
-
-#else /* CONFIG_SCHED_HMP */
-
-void init_new_task_load(struct task_struct *p)
-{
-}
-
-#endif /* CONFIG_SCHED_HMP */
 
 #if (SCHED_LOAD_SHIFT - SCHED_LOAD_RESOLUTION) != 10 || SCHED_CAPACITY_SHIFT != 10
@@ -4395,6 +4363,39 @@ dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
 
 #ifdef CONFIG_SCHED_HMP
 
+void init_new_task_load(struct task_struct *p)
+{
+	int i;
+	u32 init_load_windows = sched_init_task_load_windows;
+	u32 init_load_pelt = sched_init_task_load_pelt;
+	u32 init_load_pct = current->init_load_pct;
+
+	p->init_load_pct = 0;
+	memset(&p->ravg, 0, sizeof(struct ravg));
+
+	if (init_load_pct) {
+		init_load_pelt = div64_u64((u64)init_load_pct *
+				(u64)LOAD_AVG_MAX, 100);
+		init_load_windows = div64_u64((u64)init_load_pct *
+				(u64)sched_ravg_window, 100);
+	}
+
+	p->ravg.demand = init_load_windows;
+	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+		p->ravg.sum_history[i] = init_load_windows;
+	p->se.avg.runnable_avg_sum_scaled = init_load_pelt;
+}
+
+#else /* CONFIG_SCHED_HMP */
+
+void init_new_task_load(struct task_struct *p)
+{
+}
+
+#endif /* CONFIG_SCHED_HMP */
+
+#ifdef CONFIG_SCHED_HMP
+
 /* Return task demand in percentage scale */
 unsigned int pct_task_load(struct task_struct *p)
 {
```
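
The moved function seeds a new task's window-based demand and scaled PELT sum from the parent's init_load_pct, taking that percentage of sched_ravg_window and LOAD_AVG_MAX respectively. A rough stand-alone sketch of that arithmetic with illustrative values (the constants below are only examples standing in for the scheduler's own tunables):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the scheduler's values, not the real ones. */
#define LOAD_AVG_MAX 47742u                     /* PELT maximum in this era   */
static uint32_t sched_ravg_window = 10000000u; /* example: 10 ms window (ns) */

int main(void)
{
        uint32_t init_load_pct = 15;   /* inherited from the parent task */

        /* Same scaling as init_new_task_load(): a percentage of the
         * window size and of the PELT maximum. */
        uint64_t init_load_windows =
                (uint64_t)init_load_pct * sched_ravg_window / 100;
        uint64_t init_load_pelt =
                (uint64_t)init_load_pct * LOAD_AVG_MAX / 100;

        printf("demand=%llu pelt=%llu\n",
               (unsigned long long)init_load_windows,
               (unsigned long long)init_load_pelt);
        return 0;
}
```

With init_load_pct == 0 (the default), the task simply starts with the global sched_init_task_load_windows and sched_init_task_load_pelt values, exactly as in the relocated code.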
