author     Linux Build Service Account <lnxbuild@localhost>    2016-12-01 16:39:41 -0800
committer  Gerrit - the friendly Code Review server <code-review@localhost>    2016-12-01 16:39:40 -0800
commit     b832093be4cb17857933d1acfb72f43ce0d5f93a (patch)
tree       d081f7559974ce2335b5e668960cb8953bdc6e9c
parent     a7adb0df6b408605dd6a5d42ff56b60904ac1cf8 (diff)
parent     7437cd7c4bb7a463ec2cac7c37283f9eec5c01c2 (diff)
Merge "sched: pre-allocate colocation groups"
-rw-r--r--   include/linux/sched.h      2
-rw-r--r--   kernel/sched/core.c        3
-rw-r--r--   kernel/sched/hmp.c       195
-rw-r--r--   kernel/sched/sched.h       3

4 files changed, 108 insertions, 95 deletions
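Summary for reviewers: this merge replaces on-demand allocation of colocation groups with a fixed table that is filled once during sched_init(). Valid group IDs run from 1 to MAX_NUM_CGROUP_COLOC_ID - 1; ID 0 is reserved to mean "remove the task from its group", and lookup_related_thread_group() becomes a plain array index instead of a list walk. The standalone C sketch below is only an illustrative userspace model of that structure (hypothetical names, no locking), not the kernel code itself:

    /*
     * Simplified model of the pre-allocated colocation-group table
     * introduced by this merge. Names mirror the kernel code, but this
     * is an illustrative sketch only.
     */
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_NUM_CGROUP_COLOC_ID 20    /* valid group IDs: 1 .. 19 */

    struct group {
        unsigned int id;
        int active;                       /* "on the active list" in the kernel */
    };

    /* All groups are allocated once at init; slot 0 stays unused. */
    static struct group *groups[MAX_NUM_CGROUP_COLOC_ID];

    static int alloc_groups(void)
    {
        for (int i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
            groups[i] = calloc(1, sizeof(struct group));
            if (!groups[i])
                return -1;                /* kernel version rolls back, see hmp.c */
            groups[i]->id = i;
        }
        return 0;
    }

    /* Lookup is now a plain array index (bound check added here for safety;
     * in the patch the range check lives in __sched_set_group_id()). */
    static struct group *lookup_group(unsigned int id)
    {
        return id < MAX_NUM_CGROUP_COLOC_ID ? groups[id] : NULL;
    }

    int main(void)
    {
        if (alloc_groups())
            return 1;
        printf("group 5 -> id %u\n", lookup_group(5)->id);     /* prints 5 */
        printf("group 25 -> %p\n", (void *)lookup_group(25));  /* out of range: NULL */
        return 0;
    }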
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4f6711f31939..9c3be2d56ac5 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2401,6 +2401,8 @@ struct cpu_cycle_counter_cb {
u64 (*get_cpu_cycle_counter)(int cpu);
};
+#define MAX_NUM_CGROUP_COLOC_ID 20
+
#ifdef CONFIG_SCHED_HMP
extern void free_task_load_ptrs(struct task_struct *p);
extern int sched_set_window(u64 window_start, unsigned int window_size);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f352d06d7673..d7846edd7a79 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8071,6 +8071,9 @@ void __init sched_init(void)
atomic_set(&rq->nr_iowait, 0);
}
+ i = alloc_related_thread_groups();
+ BUG_ON(i);
+
set_hmp_defaults();
set_load_weight(&init_task);
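The sched_init() hunk above calls alloc_related_thread_groups() once at boot and treats any failure as fatal (BUG_ON). The allocator, shown in the hmp.c diff below, either fills every slot or rolls back whatever it already allocated. A minimal userspace model of that all-or-nothing pattern might look like the following (hypothetical names, calloc() standing in for kzalloc() with GFP_NOWAIT):

    #include <stdio.h>
    #include <stdlib.h>

    #define NSLOTS 20                      /* mirrors MAX_NUM_CGROUP_COLOC_ID */

    static void *slots[NSLOTS];

    static int alloc_all(void)
    {
        int i;

        for (i = 1; i < NSLOTS; i++) {     /* slot 0 is never allocated */
            slots[i] = calloc(1, 64);
            if (!slots[i])
                goto err;
        }
        return 0;

    err:
        /* Undo the partial allocation so the caller sees a clean failure. */
        for (i = 1; i < NSLOTS && slots[i]; i++) {
            free(slots[i]);
            slots[i] = NULL;
        }
        return -1;                         /* kernel returns -ENOMEM here */
    }

    int main(void)
    {
        /* sched_init() does the equivalent of BUG_ON(alloc_all()). */
        if (alloc_all())
            return 1;
        printf("all colocation slots pre-allocated\n");
        return 0;
    }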
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 5ff7a11d043f..6304c5030137 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -788,11 +788,12 @@ __read_mostly unsigned int sched_major_task_runtime = 10000000;
static unsigned int sync_cpu;
-static LIST_HEAD(related_thread_groups);
+struct related_thread_group *related_thread_groups[MAX_NUM_CGROUP_COLOC_ID];
+static LIST_HEAD(active_related_thread_groups);
static DEFINE_RWLOCK(related_thread_group_lock);
#define for_each_related_thread_group(grp) \
- list_for_each_entry(grp, &related_thread_groups, list)
+ list_for_each_entry(grp, &active_related_thread_groups, list)
/*
* Task load is categorized into buckets for the purpose of top task tracking.
@@ -3056,7 +3057,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
read_unlock(&tasklist_lock);
- list_for_each_entry(grp, &related_thread_groups, list) {
+ list_for_each_entry(grp, &active_related_thread_groups, list) {
int j;
for_each_possible_cpu(j) {
@@ -3972,47 +3973,54 @@ _group_cpu_time(struct related_thread_group *grp, int cpu)
return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
}
-struct related_thread_group *alloc_related_thread_group(int group_id)
+static inline struct related_thread_group*
+lookup_related_thread_group(unsigned int group_id)
{
- struct related_thread_group *grp;
-
- grp = kzalloc(sizeof(*grp), GFP_ATOMIC);
- if (!grp)
- return ERR_PTR(-ENOMEM);
-
- if (alloc_group_cputime(grp)) {
- kfree(grp);
- return ERR_PTR(-ENOMEM);
- }
-
- grp->id = group_id;
- INIT_LIST_HEAD(&grp->tasks);
- INIT_LIST_HEAD(&grp->list);
- raw_spin_lock_init(&grp->lock);
-
- return grp;
+ return related_thread_groups[group_id];
}
-struct related_thread_group *lookup_related_thread_group(unsigned int group_id)
+int alloc_related_thread_groups(void)
{
+ int i, ret;
struct related_thread_group *grp;
- list_for_each_entry(grp, &related_thread_groups, list) {
- if (grp->id == group_id)
- return grp;
+ /* group_id = 0 is invalid as it's the special id used to remove a task from its group. */
+ for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+ grp = kzalloc(sizeof(*grp), GFP_NOWAIT);
+ if (!grp) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (alloc_group_cputime(grp)) {
+ kfree(grp);
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ grp->id = i;
+ INIT_LIST_HEAD(&grp->tasks);
+ INIT_LIST_HEAD(&grp->list);
+ raw_spin_lock_init(&grp->lock);
+
+ related_thread_groups[i] = grp;
}
- return NULL;
-}
+ return 0;
-/* See comments before preferred_cluster() */
-static void free_related_thread_group(struct rcu_head *rcu)
-{
- struct related_thread_group *grp = container_of(rcu, struct
- related_thread_group, rcu);
+err:
+ for (i = 1; i < MAX_NUM_CGROUP_COLOC_ID; i++) {
+ grp = lookup_related_thread_group(i);
+ if (grp) {
+ free_group_cputime(grp);
+ kfree(grp);
+ related_thread_groups[i] = NULL;
+ } else {
+ break;
+ }
+ }
- free_group_cputime(grp);
- kfree(grp);
+ return ret;
}
static void remove_task_from_group(struct task_struct *p)
@@ -4037,10 +4045,12 @@ static void remove_task_from_group(struct task_struct *p)
raw_spin_unlock(&grp->lock);
/* Reserved groups cannot be destroyed */
- if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID) {
- list_del(&grp->list);
- call_rcu(&grp->rcu, free_related_thread_group);
- }
+ if (empty_group && grp->id != DEFAULT_CGROUP_COLOC_ID)
+ /*
+ * We test whether grp->list is attached with list_empty()
+ * hence re-init the list after deletion.
+ */
+ list_del_init(&grp->list);
}
static int
@@ -4112,53 +4122,15 @@ void add_new_task_to_grp(struct task_struct *new)
write_unlock_irqrestore(&related_thread_group_lock, flags);
}
-#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
-/*
- * We create a default colocation group at boot. There is no need to
- * synchronize tasks between cgroups at creation time because the
- * correct cgroup hierarchy is not available at boot. Therefore cgroup
- * colocation is turned off by default even though the colocation group
- * itself has been allocated. Furthermore this colocation group cannot
- * be destroyted once it has been created. All of this has been as part
- * of runtime optimizations.
- *
- * The job of synchronizing tasks to the colocation group is done when
- * the colocation flag in the cgroup is turned on.
- */
-static int __init create_default_coloc_group(void)
-{
- struct related_thread_group *grp = NULL;
- unsigned long flags;
-
- grp = alloc_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
- if (IS_ERR(grp)) {
- WARN_ON(1);
- return -ENOMEM;
- }
-
- write_lock_irqsave(&related_thread_group_lock, flags);
- list_add(&grp->list, &related_thread_groups);
- write_unlock_irqrestore(&related_thread_group_lock, flags);
-
- update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
- return 0;
-}
-late_initcall(create_default_coloc_group);
-
-int sync_cgroup_colocation(struct task_struct *p, bool insert)
-{
- unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
-
- return sched_set_group_id(p, grp_id);
-}
-#endif
-
-int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+static int __sched_set_group_id(struct task_struct *p, unsigned int group_id)
{
int rc = 0;
unsigned long flags;
struct related_thread_group *grp = NULL;
+ if (group_id >= MAX_NUM_CGROUP_COLOC_ID)
+ return -EINVAL;
+
raw_spin_lock_irqsave(&p->pi_lock, flags);
write_lock(&related_thread_group_lock);
@@ -4174,29 +4146,26 @@ int sched_set_group_id(struct task_struct *p, unsigned int group_id)
}
grp = lookup_related_thread_group(group_id);
- if (!grp) {
- /* This is a reserved id */
- if (group_id == DEFAULT_CGROUP_COLOC_ID) {
- rc = -EINVAL;
- goto done;
- }
-
- grp = alloc_related_thread_group(group_id);
- if (IS_ERR(grp)) {
- rc = -ENOMEM;
- goto done;
- }
-
- list_add(&grp->list, &related_thread_groups);
- }
+ if (list_empty(&grp->list))
+ list_add(&grp->list, &active_related_thread_groups);
rc = add_task_to_group(p, grp);
done:
write_unlock(&related_thread_group_lock);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
return rc;
}
+int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+ /* DEFAULT_CGROUP_COLOC_ID is a reserved id */
+ if (group_id == DEFAULT_CGROUP_COLOC_ID)
+ return -EINVAL;
+
+ return __sched_set_group_id(p, group_id);
+}
+
unsigned int sched_get_group_id(struct task_struct *p)
{
unsigned int group_id;
@@ -4210,6 +4179,42 @@ unsigned int sched_get_group_id(struct task_struct *p)
return group_id;
}
+#if defined(CONFIG_SCHED_TUNE) && defined(CONFIG_CGROUP_SCHEDTUNE)
+/*
+ * We create a default colocation group at boot. There is no need to
+ * synchronize tasks between cgroups at creation time because the
+ * correct cgroup hierarchy is not available at boot. Therefore cgroup
+ * colocation is turned off by default even though the colocation group
+ * itself has been allocated. Furthermore this colocation group cannot
+ * be destroyed once it has been created. All of this is done as part
+ * of runtime optimizations.
+ *
+ * The job of synchronizing tasks to the colocation group is done when
+ * the colocation flag in the cgroup is turned on.
+ */
+static int __init create_default_coloc_group(void)
+{
+ struct related_thread_group *grp = NULL;
+ unsigned long flags;
+
+ grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+ write_lock_irqsave(&related_thread_group_lock, flags);
+ list_add(&grp->list, &active_related_thread_groups);
+ write_unlock_irqrestore(&related_thread_group_lock, flags);
+
+ update_freq_aggregate_threshold(MAX_FREQ_AGGR_THRESH);
+ return 0;
+}
+late_initcall(create_default_coloc_group);
+
+int sync_cgroup_colocation(struct task_struct *p, bool insert)
+{
+ unsigned int grp_id = insert ? DEFAULT_CGROUP_COLOC_ID : 0;
+
+ return __sched_set_group_id(p, grp_id);
+}
+#endif
+
static void update_cpu_cluster_capacity(const cpumask_t *cpus)
{
int i;
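Another behavioural change in the hmp.c diff above: an emptied, non-default group is no longer freed through RCU. It is only detached from the active list with list_del_init(), and __sched_set_group_id() re-attaches it later when list_empty(&grp->list) is true. The short userspace model below (a minimal re-implementation of the relevant <linux/list.h> helpers, illustrative only) shows why list_del_init() rather than list_del() is required for that emptiness test:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *new, struct list_head *head)
    {
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
    }

    static void list_del_init(struct list_head *entry)
    {
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);            /* node now reports itself as detached */
    }

    static int list_empty(const struct list_head *h) { return h->next == h; }

    int main(void)
    {
        struct list_head active, grp;

        INIT_LIST_HEAD(&active);
        INIT_LIST_HEAD(&grp);

        list_add(&grp, &active);          /* group gains its first task */
        printf("attached, empty=%d\n", list_empty(&grp));    /* 0 */

        list_del_init(&grp);              /* last task leaves the group */
        printf("detached, empty=%d\n", list_empty(&grp));    /* 1: safe to re-add */

        if (list_empty(&grp))             /* mirrors __sched_set_group_id() */
            list_add(&grp, &active);
        printf("re-attached, empty=%d\n", list_empty(&grp)); /* 0 */
        return 0;
    }

A plain list_del() would leave grp pointing at its old neighbours, so list_empty(&grp) could never be used as the "is this group currently active?" test that the patch relies on.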
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 30838bb9b442..f569c6fe3cbb 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1448,6 +1448,8 @@ static inline void update_cgroup_boost_settings(void) { }
static inline void restore_cgroup_boost_settings(void) { }
#endif
+extern int alloc_related_thread_groups(void);
+
#else /* CONFIG_SCHED_HMP */
struct hmp_sched_stats;
@@ -1638,6 +1640,7 @@ static inline void set_hmp_defaults(void) { }
static inline void clear_reserved(int cpu) { }
static inline void sched_boost_parse_dt(void) {}
+static inline int alloc_related_thread_groups(void) { return 0; }
#define trace_sched_cpu_load(...)
#define trace_sched_cpu_load_lb(...)