| | | |
|---|---|---|
| author | Syed Rameez Mustafa <rameezmustafa@codeaurora.org> | 2016-08-31 16:54:12 -0700 |
| committer | Syed Rameez Mustafa <rameezmustafa@codeaurora.org> | 2016-11-16 17:57:56 -0800 |
| commit | 30fc7742350f45a6b3667880de0a72c68509ccc5 (patch) | |
| tree | 840761c4bedb6168d261de7f30c4a64ac8740bb2 /kernel/sched/sched.h | |
| parent | fd5b530593795c9aa145c7f9aa3eb817d3841af3 (diff) | |
sched/hmp: Enhance co-location and scheduler boost features
The recent introduction of the schedtune cgroup controller has given
the scheduler added flexibility in some of its placement features. In
particular, each cgroup under the schedtune controller can now specify:
1) Whether it needs co-location along with other cgroups
2) Whether it is eligible for scheduler boost (sched_boost_enabled)
3) Whether the kernel can override the boost eligibility when necessary
(sched_boost_no_override)
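As a rough illustration only, the per-cgroup state behind these three
knobs could be modelled as a few booleans hanging off the schedtune
controller; the struct and field names below are assumptions for the
sketch, not definitions taken from this patch:

```c
#include <linux/types.h>

/*
 * Illustrative sketch: the real per-cgroup state lives in the schedtune
 * controller (kernel/sched/tune.c). These fields mirror the three knobs
 * described above but are assumed names, not copied from this patch.
 */
struct schedtune_flags {
	bool colocate;			/* co-locate with other colocated cgroups */
	bool colocate_update_disabled;	/* set after boot; later writes rejected */
	bool sched_boost_enabled;	/* eligible for scheduler boost */
	bool sched_boost_no_override;	/* kernel may not override eligibility */
};
```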
The scheduler now creates a reserved co-location group at boot. This
group is used to co-locate all tasks that form part of any one of the
cgroups that have co-location enabled. This reserved group can neither
be destroyed nor reused for other purposes. In addition, cgroups may
indicate their co-location preference only once, at boot; later updates
are disallowed.
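Attaching tasks to and detaching them from that reserved group might
look roughly like the sketch below. Only sync_cgroup_colocation() is
declared in the header change; the reserved group id and the
sched_set_group_id() helper are assumptions made for illustration:

```c
#include <linux/sched.h>

/* Assumed id reserved at boot for cgroup-driven co-location */
#define RESERVED_COLOC_GRP_ID	1

/*
 * Sketch, not the actual implementation: move a task into the reserved
 * co-location group when its cgroup has colocate enabled, and back out
 * of it otherwise.
 */
int sync_cgroup_colocation(struct task_struct *p, bool insert)
{
	unsigned int grp_id = insert ? RESERVED_COLOC_GRP_ID : 0;

	/* sched_set_group_id() is an assumed helper, not part of this diff */
	return sched_set_group_id(p, grp_id);
}
```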
Since we are now creating co-location groups for an extended period of
time, there are a few other factors to consider when determining the
preferred cluster for the group. First, we exclude any tasks in the
group that have not been observed running for a significant amount of
time. Second, we introduce group up-migrate and down-migrate tunables
so that groups can follow migration policies different from those of
individual tasks. Lastly, we break co-location when a single task in
the group exceeds the per-task up-migrate threshold but the total group
load does not exceed the group up-migrate threshold.
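A hedged sketch of that decision follows; the threshold names, list
fields, and helpers here are assumptions for illustration rather than
code from this patch:

```c
#include <linux/sched.h>

/*
 * Illustrative only: decide whether a co-location group as a whole
 * should prefer the big cluster.
 */
static bool group_prefers_big(struct related_thread_group *grp)
{
	u64 combined = 0, heaviest = 0;
	struct task_struct *p;

	list_for_each_entry(p, &grp->tasks, grp_list) {
		/* skip tasks not seen running for a significant time */
		if (!task_recently_ran(p))	/* assumed helper */
			continue;
		combined += task_load(p);
		heaviest = max(heaviest, task_load(p));
	}

	/*
	 * Break co-location: one task alone crosses the per-task up-migrate
	 * threshold while the group stays below group up-migrate, so that
	 * task migrates by itself instead of dragging the group up.
	 */
	if (heaviest > sched_upmigrate && combined <= sched_group_upmigrate)
		return false;

	return combined > sched_group_upmigrate;
}
```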
In terms of sched_boost, the scheduler now supports multiple types of
boost. These are:
1) FULL_THROTTLE : Force up-migrate tasks belonging to any cgroup that
has the sched_boost_enabled flag turned on. Little
CPUs will only be used when big CPUs can no longer
accommodate tasks. Also up-migrate all RT tasks.
2) CONSERVATIVE : Override the sched_boost_enabled flag for all cgroups
except those that have the sched_boost_no_override
flag set. Force up-migrate all tasks belonging to only
those cgroups that still remain eligible for boost.
RT tasks do not get force up-migrated.
3) RESTRAINED : Start frequency aggregation for co-located tasks. This
type of boost does not force up-migrate any task.
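The header changes below add both the boost types (NO_BOOST,
FULL_THROTTLE_BOOST, CONSERVATIVE_BOOST, RESTRAINED_BOOST) and the
placement policy enum (SCHED_BOOST_NONE/ON_BIG/ON_ALL). One plausible
mapping between the two is sketched here; the function name and body
are assumptions, as the real sched_boost_policy() implementation is not
part of this header diff:

```c
#include <linux/sched.h>
#include <linux/sched/rt.h>

/* Sketch of how a boost type might translate into a placement policy. */
static enum sched_boost_policy task_boost_policy(struct task_struct *p,
						 int boost)
{
	switch (boost) {
	case FULL_THROTTLE_BOOST:
		/* boosted cgroups and all RT tasks are forced onto big CPUs */
		if (task_sched_boost(p) || rt_task(p))
			return SCHED_BOOST_ON_BIG;
		return SCHED_BOOST_NONE;
	case CONSERVATIVE_BOOST:
		/*
		 * Boost eligibility has been overridden for every cgroup
		 * except the sched_boost_no_override ones; RT tasks are
		 * not force up-migrated.
		 */
		return task_sched_boost(p) ? SCHED_BOOST_ON_BIG
					   : SCHED_BOOST_NONE;
	case RESTRAINED_BOOST:	/* frequency aggregation only */
	case NO_BOOST:
	default:
		return SCHED_BOOST_NONE;
	}
}
```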
Finally, the boost API no longer uses ref-counting. This means that
there can only be a single entity using boost at any given time. If
multiple entities are managing boost, they are required to be well
behaved so that they don't interfere with one another. Even for a
single client, it is not possible to switch directly from one boost
type to another; boost must first be turned off before switching over
to a new type.
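A minimal sketch of that rule is shown below. The setter name and error
handling are assumptions; only the sched_boost() getter is visible in
this header change:

```c
#include <linux/errno.h>

/* Sketch of the non-refcounted boost API described above. */
int sched_set_boost(int type)
{
	if (type < NO_BOOST || type > RESTRAINED_BOOST)
		return -EINVAL;

	/* no direct switch between two different active boost types */
	if (sched_boost() != NO_BOOST && type != NO_BOOST &&
	    type != sched_boost())
		return -EINVAL;

	/* ... record and apply the new boost type ... */
	return 0;
}
```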
Change-Id: I8d224a70cbef162f27078b62b73acaa22670861d
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel/sched/sched.h')
| -rw-r--r-- | kernel/sched/sched.h | 47 |
1 file changed, 34 insertions(+), 13 deletions(-)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 4289bf6cd642..30838bb9b442 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1061,8 +1061,6 @@ extern unsigned int max_load_scale_factor;
 extern unsigned int max_possible_capacity;
 extern unsigned int min_max_possible_capacity;
 extern unsigned int max_power_cost;
-extern unsigned int sched_upmigrate;
-extern unsigned int sched_downmigrate;
 extern unsigned int sched_init_task_load_windows;
 extern unsigned int up_down_migrate_scale_factor;
 extern unsigned int sysctl_sched_restrict_cluster_spill;
@@ -1106,18 +1104,23 @@ extern void sched_account_irqstart(int cpu, struct task_struct *curr,
 				 u64 wallclock);
 extern unsigned int cpu_temp(int cpu);
 extern unsigned int nr_eligible_big_tasks(int cpu);
-extern void update_up_down_migrate(void);
 extern int update_preferred_cluster(struct related_thread_group *grp,
 			struct task_struct *p, u32 old_load);
 extern void set_preferred_cluster(struct related_thread_group *grp);
 extern void add_new_task_to_grp(struct task_struct *new);
+extern unsigned int update_freq_aggregate_threshold(unsigned int threshold);
 
-enum sched_boost_type {
+enum sched_boost_policy {
 	SCHED_BOOST_NONE,
 	SCHED_BOOST_ON_BIG,
 	SCHED_BOOST_ON_ALL,
 };
 
+#define NO_BOOST 0
+#define FULL_THROTTLE_BOOST 1
+#define CONSERVATIVE_BOOST 2
+#define RESTRAINED_BOOST 3
+
 static inline struct sched_cluster *cpu_cluster(int cpu)
 {
 	return cpu_rq(cpu)->cluster;
@@ -1387,14 +1390,11 @@ extern void set_hmp_defaults(void);
 extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
 extern unsigned int power_cost(int cpu, u64 demand);
 extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
-extern void boost_kick(int cpu);
 extern int sched_boost(void);
 extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
-				enum sched_boost_type boost_type);
-extern enum sched_boost_type sched_boost_type(void);
+				enum sched_boost_policy boost_policy);
+extern enum sched_boost_policy sched_boost_policy(void);
 extern int task_will_fit(struct task_struct *p, int cpu);
-extern int group_will_fit(struct sched_cluster *cluster,
-		struct related_thread_group *grp, u64 demand);
 extern u64 cpu_load(int cpu);
 extern u64 cpu_load_sync(int cpu, int sync);
 extern int preferred_cluster(struct sched_cluster *cluster,
@@ -1422,10 +1422,32 @@ extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
 			struct cftype *cft);
 extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
 			struct cftype *cft, u64 upmigrate_discourage);
-extern void sched_hmp_parse_dt(void);
-extern void init_sched_hmp_boost_policy(void);
+extern void sched_boost_parse_dt(void);
 extern void clear_top_tasks_bitmap(unsigned long *bitmap);
 
+#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
+extern bool task_sched_boost(struct task_struct *p);
+extern int sync_cgroup_colocation(struct task_struct *p, bool insert);
+extern bool same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2);
+extern void update_cgroup_boost_settings(void);
+extern void restore_cgroup_boost_settings(void);
+
+#else
+static inline bool
+same_schedtune(struct task_struct *tsk1, struct task_struct *tsk2)
+{
+	return true;
+}
+
+static inline bool task_sched_boost(struct task_struct *p)
+{
+	return true;
+}
+
+static inline void update_cgroup_boost_settings(void) { }
+static inline void restore_cgroup_boost_settings(void) { }
+#endif
+
 #else	/* CONFIG_SCHED_HMP */
 
 struct hmp_sched_stats;
@@ -1615,8 +1637,7 @@ static inline void post_big_task_count_change(void) { }
 static inline void set_hmp_defaults(void) { }
 
 static inline void clear_reserved(int cpu) { }
-static inline void sched_hmp_parse_dt(void) {}
-static inline void init_sched_hmp_boost_policy(void) {}
+static inline void sched_boost_parse_dt(void) {}
 
 #define trace_sched_cpu_load(...)
 #define trace_sched_cpu_load_lb(...)
