 kernel/sched/fair.c  | 15 ---------------
 kernel/sched/sched.h | 16 +++++++++++++++-
 2 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 564b715b1586..9d046d58050b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2942,21 +2942,6 @@ int sched_get_cpu_mostly_idle_nr_run(int cpu)
 	return rq->mostly_idle_nr_run;
 }
 
-/*
- * 'load' is in reference to "best cpu" at its best frequency.
- * Scale that in reference to a given cpu, accounting for how bad it is
- * in reference to "best cpu".
- */
-u64 scale_load_to_cpu(u64 task_load, int cpu)
-{
-	struct rq *rq = cpu_rq(cpu);
-
-	task_load *= (u64)rq->load_scale_factor;
-	task_load /= 1024;
-
-	return task_load;
-}
-
 #ifdef CONFIG_CGROUP_SCHED
 
 static inline int upmigrate_discouraged(struct task_struct *p)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 44233c6adca0..c595be354732 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1001,7 +1001,6 @@ extern unsigned int sched_upmigrate;
 extern unsigned int sched_downmigrate;
 extern unsigned int sched_init_task_load_pelt;
 extern unsigned int sched_init_task_load_windows;
-extern u64 scale_load_to_cpu(u64 load, int cpu);
 extern unsigned int sched_heavy_task;
 extern unsigned int up_down_migrate_scale_factor;
 extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
@@ -1013,6 +1012,21 @@ unsigned int cpu_temp(int cpu);
 extern unsigned int nr_eligible_big_tasks(int cpu);
 extern void update_up_down_migrate(void);
 
+/*
+ * 'load' is in reference to "best cpu" at its best frequency.
+ * Scale that in reference to a given cpu, accounting for how bad it is
+ * in reference to "best cpu".
+ */
+static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	task_load *= (u64)rq->load_scale_factor;
+	task_load /= 1024;
+
+	return task_load;
+}
+
 static inline int capacity(struct rq *rq)
 {
 	return rq->capacity;
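The patch itself is a pure code move: scale_load_to_cpu() leaves fair.c and becomes a static inline in sched.h, presumably so hot scheduler paths can inline the scaling instead of paying for a cross-file call. The arithmetic is simple fixed-point scaling: task load is tracked relative to the "best cpu" at its best frequency, and per-cpu rq->load_scale_factor (1024 means "as good as the best cpu"; larger means proportionally worse) converts that load into the given cpu's terms. Below is a minimal standalone sketch of that scaling, outside the kernel; the factor values are hypothetical, and in the real code the factor is read from cpu_rq(cpu)->load_scale_factor rather than passed as a parameter.

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone model of scale_load_to_cpu(): scale a task load that is
 * expressed relative to the "best cpu" onto some other cpu, using a
 * fixed-point factor where 1024 == parity with the best cpu and
 * larger values mean the cpu is proportionally worse.
 */
static inline uint64_t scale_load(uint64_t task_load,
				  uint64_t load_scale_factor)
{
	return task_load * load_scale_factor / 1024;
}

int main(void)
{
	/* Hypothetical factors: the big cpu is the reference (1024);
	 * the little cpu is modeled as half as capable (2048). */
	uint64_t big_lsf = 1024, little_lsf = 2048;
	uint64_t task_load = 300;

	printf("load on big cpu:    %llu\n",
	       (unsigned long long)scale_load(task_load, big_lsf));	/* 300 */
	printf("load on little cpu: %llu\n",
	       (unsigned long long)scale_load(task_load, little_lsf));	/* 600 */
	return 0;
}

The same task therefore appears "heavier" on a weaker cpu, which is what lets load-balancing comparisons across asymmetric cpus use a single reference scale. Note the multiply-before-divide ordering in the helper: dividing first would throw away the low bits of task_load before the factor is applied.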
