Diffstat (limited to 'kernel/sched/sched.h')
-rw-r--r--	kernel/sched/sched.h	82
1 file changed, 47 insertions(+), 35 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ce26255ed810..d514d36e4685 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -345,6 +345,15 @@ struct cfs_bandwidth {
 };
 #endif /* CONFIG_CGROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HMP
+
+struct hmp_sched_stats {
+	int nr_big_tasks, nr_small_tasks;
+	u64 cumulative_runnable_avg;
+};
+
+#endif
+
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
@@ -652,7 +661,8 @@ struct rq {
 	unsigned int cur_freq, max_freq, min_freq, max_possible_freq;
 	struct cpumask freq_domain_cpumask;
 
-	u64 cumulative_runnable_avg;
+	struct hmp_sched_stats hmp_stats;
+
 	int efficiency; /* Differentiate cpus with different IPC capability */
 	int load_scale_factor;
 	int capacity;
@@ -662,6 +672,7 @@ struct rq {
 	u32 mostly_idle_load;
 	int mostly_idle_nr_run;
 	int mostly_idle_freq;
+	unsigned long hmp_flags;
 
 	u64 cur_irqload;
 	u64 avg_irqload;
@@ -677,11 +688,6 @@ struct rq {
 	u64 prev_runnable_sum;
 #endif
 
-#ifdef CONFIG_SCHED_HMP
-	int nr_small_tasks, nr_big_tasks;
-	unsigned long hmp_flags;
-#endif
-
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	u64 prev_irq_time;
 #endif
@@ -967,6 +973,7 @@ extern struct mutex policy_mutex;
 extern unsigned int sched_ravg_window;
 extern unsigned int sched_use_pelt;
 extern unsigned int sched_disable_window_stats;
+extern unsigned int sched_enable_hmp;
 extern unsigned int max_possible_freq;
 extern unsigned int min_max_freq;
 extern unsigned int pct_task_load(struct task_struct *p);
@@ -1007,24 +1014,35 @@ static inline int capacity(struct rq *rq)
 }
 
 static inline void
-inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+			    struct task_struct *p)
 {
-	if (sched_use_pelt)
-		rq->cumulative_runnable_avg +=
-				p->se.avg.runnable_avg_sum_scaled;
-	else if (!sched_disable_window_stats)
-		rq->cumulative_runnable_avg += p->ravg.demand;
+	u32 task_load;
+
+	if (!sched_enable_hmp || sched_disable_window_stats)
+		return;
+
+	task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
+		    (sched_disable_window_stats ? 0 : p->ravg.demand);
+
+	stats->cumulative_runnable_avg += task_load;
 }
 
 static inline void
-dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+			    struct task_struct *p)
 {
-	if (sched_use_pelt)
-		rq->cumulative_runnable_avg -=
-				p->se.avg.runnable_avg_sum_scaled;
-	else if (!sched_disable_window_stats)
-		rq->cumulative_runnable_avg -= p->ravg.demand;
-	BUG_ON((s64)rq->cumulative_runnable_avg < 0);
+	u32 task_load;
+
+	if (!sched_enable_hmp || sched_disable_window_stats)
+		return;
+
+	task_load = sched_use_pelt ? p->se.avg.runnable_avg_sum_scaled :
+		    (sched_disable_window_stats ? 0 : p->ravg.demand);
+
+	stats->cumulative_runnable_avg -= task_load;
+
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
 }
 
 #define pct_to_real(tunable) \
@@ -1060,6 +1078,8 @@ static inline int sched_cpu_high_irqload(int cpu)
 
 #else /* CONFIG_SCHED_HMP */
 
+struct hmp_sched_stats;
+
 static inline int pct_task_load(struct task_struct *p) { return 0; }
 
 static inline int capacity(struct rq *rq)
@@ -1067,13 +1087,13 @@ static inline int capacity(struct rq *rq)
 	return SCHED_LOAD_SCALE;
 }
 
-static inline void
-inc_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+					       struct task_struct *p)
 {
 }
 
-static inline void
-dec_cumulative_runnable_avg(struct rq *rq, struct task_struct *p)
+static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+					       struct task_struct *p)
 {
 }
 
@@ -1150,14 +1170,10 @@ static inline void clear_reserved(int cpu)
 	clear_bit(CPU_RESERVED, &rq->hmp_flags);
 }
 
-extern unsigned int sched_enable_hmp;
-
 int mostly_idle_cpu(int cpu);
 extern void check_for_migration(struct rq *rq, struct task_struct *p);
 extern void pre_big_small_task_count_change(const struct cpumask *cpus);
 extern void post_big_small_task_count_change(const struct cpumask *cpus);
-extern void inc_nr_big_small_task(struct rq *rq, struct task_struct *p);
-extern void dec_nr_big_small_task(struct rq *rq, struct task_struct *p);
 extern void set_hmp_defaults(void);
 extern unsigned int power_cost_at_freq(int cpu, unsigned int freq);
 extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
@@ -1174,14 +1190,6 @@ static inline void pre_big_small_task_count_change(void) { }
 static inline void post_big_small_task_count_change(void) { }
 static inline void set_hmp_defaults(void) { }
 
-static inline void inc_nr_big_small_task(struct rq *rq, struct task_struct *p)
-{
-}
-
-static inline void dec_nr_big_small_task(struct rq *rq, struct task_struct *p)
-{
-}
-
 static inline void clear_reserved(int cpu) { }
 
 #define power_cost_at_freq(...) 0
@@ -1521,6 +1529,10 @@ struct sched_class {
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_move_group) (struct task_struct *p);
 #endif
+#ifdef CONFIG_SCHED_HMP
+	void (*inc_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+	void (*dec_hmp_sched_stats)(struct rq *rq, struct task_struct *p);
+#endif
 };
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
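
Note: the caller-side wiring that uses the new sched_class hooks lives outside this sched.h-only diffstat. The C sketch below is illustrative only and is not part of the patch; it shows one plausible way a scheduling class could implement the new inc_hmp_sched_stats/dec_hmp_sched_stats callbacks by delegating to the per-rq hmp_stats helpers declared above. The inc_nr_big_small_task()/dec_nr_big_small_task() helpers referenced here are assumed class-local replacements for the extern declarations this patch removes, and their signatures are guesses.

	/* Illustrative sketch, not part of this patch. */
	#ifdef CONFIG_SCHED_HMP
	static void inc_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
	{
		/* Assumed class-local helper for nr_big_tasks/nr_small_tasks. */
		inc_nr_big_small_task(&rq->hmp_stats, p);
		/* Window/PELT demand accounting now lives in rq->hmp_stats. */
		inc_cumulative_runnable_avg(&rq->hmp_stats, p);
	}

	static void dec_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p)
	{
		dec_nr_big_small_task(&rq->hmp_stats, p);
		dec_cumulative_runnable_avg(&rq->hmp_stats, p);
	}
	#endif /* CONFIG_SCHED_HMP */

	const struct sched_class fair_sched_class = {
		/* ... existing callbacks ... */
	#ifdef CONFIG_SCHED_HMP
		.inc_hmp_sched_stats	= inc_hmp_sched_stats_fair,
		.dec_hmp_sched_stats	= dec_hmp_sched_stats_fair,
	#endif
	};

The apparent intent of consolidating the counters and cumulative_runnable_avg into struct hmp_sched_stats, and of routing updates through per-class hooks, is to let each scheduling class keep its HMP accounting consistent without core code touching individual rq fields directly.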
