diff options
| author | Joonwoo Park <joonwoop@codeaurora.org> | 2015-06-10 14:57:52 -0700 |
|---|---|---|
| committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 20:02:14 -0700 |
| commit | a509c84de711d7dd31e28a9cc04c76a83acf5b3c (patch) | |
| tree | a599cb77278a459e1fca1ed46fcf61e63ca02ac8 /kernel/sched/sched.h | |
| parent | 024505821e55bfa768bf1a8fe64a84ae689e1abe (diff) | |
sched: inline function scale_load_to_cpu()
Inline relatively small and frequently used function scale_load_to_cpu().
CRs-fixed: 849655
Change-Id: Id5f60595c394959d78e6da4cc4c18c338fec285b
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
Diffstat (limited to 'kernel/sched/sched.h')
| -rw-r--r-- | kernel/sched/sched.h | 16 |
1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 44233c6adca0..c595be354732 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1001,7 +1001,6 @@ extern unsigned int sched_upmigrate;
 extern unsigned int sched_downmigrate;
 extern unsigned int sched_init_task_load_pelt;
 extern unsigned int sched_init_task_load_windows;
-extern u64 scale_load_to_cpu(u64 load, int cpu);
 extern unsigned int sched_heavy_task;
 extern unsigned int up_down_migrate_scale_factor;
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
@@ -1013,6 +1012,21 @@ unsigned int cpu_temp(int cpu);
 extern unsigned int nr_eligible_big_tasks(int cpu);
 extern void update_up_down_migrate(void);
 
+/*
+ * 'load' is in reference to "best cpu" at its best frequency.
+ * Scale that in reference to a given cpu, accounting for how bad it is
+ * in reference to "best cpu".
+ */
+static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	task_load *= (u64)rq->load_scale_factor;
+	task_load /= 1024;
+
+	return task_load;
+}
+
 static inline int capacity(struct rq *rq)
 {
 	return rq->capacity;
