| author | Trilok Soni <tsoni@codeaurora.org> | 2016-08-25 19:05:37 -0700 |
|---|---|---|
| committer | Trilok Soni <tsoni@codeaurora.org> | 2016-08-26 14:34:05 -0700 |
| commit | 5ab1e18aa3913d454e1bd1498b20ee581aae2c6b | |
| tree | 42bd10ef0bf5cdb8deb05656bf802c77dc580ff7 /kernel/sched/core.c | |
| parent | e97b6a0e0217f7c072fdad6c50673cd7a64348e1 | |
Revert "Merge remote-tracking branch 'msm-4.4/tmp-510d0a3f' into msm-4.4"
This reverts commit 9d6fd2c3e9fcfb ("Merge remote-tracking branch
'msm-4.4/tmp-510d0a3f' into msm-4.4") because it breaks the dump
parsing tools: the kernel can now be loaded anywhere in memory instead
of being fixed at the linear mapping.
Change-Id: Id416f0a249d803442847d09ac47781147b0d0ee6
Signed-off-by: Trilok Soni <tsoni@codeaurora.org>
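
To illustrate the breakage being described, here is a minimal user-space sketch (not from the patch; the helper name and the fixed base address below are hypothetical) of a dump parser that translates kernel virtual addresses by a hard-coded linear-map offset. Such a translation silently stops working once the kernel image can be relocated anywhere in memory:

```c
#include <stdint.h>

/* Hypothetical fixed linear-mapping base assumed by the dump parser. */
#define ASSUMED_LINEAR_BASE 0xffffffc000000000ULL

/*
 * Translate a kernel virtual address into an offset within the dump,
 * assuming the kernel sits at the fixed linear mapping. Once the kernel
 * can be loaded anywhere, this offset must instead be discovered from
 * the dump itself, so the hard-coded base breaks the tooling.
 */
static uint64_t virt_to_dump_offset(uint64_t kvaddr, uint64_t dump_phys_base)
{
	return (kvaddr - ASSUMED_LINEAR_BASE) + dump_phys_base;
}
```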
Diffstat (limited to 'kernel/sched/core.c')
| -rw-r--r-- | kernel/sched/core.c | 35 |
1 file changed, 21 insertions(+), 14 deletions(-)
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 25afcb8a1402..db0472b37feb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11223,7 +11223,7 @@ void set_curr_task(int cpu, struct task_struct *p)
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
-static void sched_free_group(struct task_group *tg)
+static void free_sched_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
 	free_rt_sched_group(tg);
@@ -11249,7 +11249,7 @@ struct task_group *sched_create_group(struct task_group *parent)
 	return tg;
 
 err:
-	sched_free_group(tg);
+	free_sched_group(tg);
 	return ERR_PTR(-ENOMEM);
 }
 
@@ -11269,16 +11269,17 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
 }
 
 /* rcu callback to free various structures associated with a task group */
-static void sched_free_group_rcu(struct rcu_head *rhp)
+static void free_sched_group_rcu(struct rcu_head *rhp)
 {
 	/* now it should be safe to free those cfs_rqs */
-	sched_free_group(container_of(rhp, struct task_group, rcu));
+	free_sched_group(container_of(rhp, struct task_group, rcu));
 }
 
+/* Destroy runqueue etc associated with a task group */
 void sched_destroy_group(struct task_group *tg)
 {
 	/* wait for possible concurrent references to cfs_rqs complete */
-	call_rcu(&tg->rcu, sched_free_group_rcu);
+	call_rcu(&tg->rcu, free_sched_group_rcu);
 }
 
 void sched_offline_group(struct task_group *tg)
@@ -11739,26 +11740,31 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
 	if (IS_ERR(tg))
 		return ERR_PTR(-ENOMEM);
 
-	sched_online_group(tg, parent);
-
 	return &tg->css;
 }
 
-static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
 {
 	struct task_group *tg = css_tg(css);
+	struct task_group *parent = css_tg(css->parent);
 
-	sched_offline_group(tg);
+	if (parent)
+		sched_online_group(tg, parent);
+	return 0;
 }
 
 static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
 {
 	struct task_group *tg = css_tg(css);
 
-	/*
-	 * Relies on the RCU grace period between css_released() and this.
-	 */
-	sched_free_group(tg);
+	sched_destroy_group(tg);
+}
+
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
+{
+	struct task_group *tg = css_tg(css);
+
+	sched_offline_group(tg);
 }
 
 static void cpu_cgroup_fork(struct task_struct *task, void *private)
@@ -12187,8 +12193,9 @@ static struct cftype cpu_files[] = {
 
 struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_alloc	= cpu_cgroup_css_alloc,
-	.css_released	= cpu_cgroup_css_released,
 	.css_free	= cpu_cgroup_css_free,
+	.css_online	= cpu_cgroup_css_online,
+	.css_offline	= cpu_cgroup_css_offline,
 	.fork		= cpu_cgroup_fork,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
```
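
For context on the callback shuffle above, the cgroup core drives a controller's css callbacks in a fixed order: on creation css_alloc then css_online, and on removal css_offline, then css_released, then css_free after an RCU grace period. The revert moves the scheduler's attach/detach work back into the online/offline pair instead of css_alloc/css_released. The following is a minimal user-space sketch of that ordering only; it is not kernel code, and all names in it are hypothetical stand-ins:

```c
#include <stdio.h>

/* Hypothetical stand-ins for a controller's lifecycle callbacks. */
static void d_alloc(void)    { puts("css_alloc:    allocate group state"); }
static void d_online(void)   { puts("css_online:   group visible, attach to parent"); }
static void d_offline(void)  { puts("css_offline:  group going away, detach"); }
static void d_released(void) { puts("css_released: last reference dropped"); }
static void d_free(void)     { puts("css_free:     free after RCU grace period"); }

int main(void)
{
	/* Creation path. */
	d_alloc();
	d_online();

	/* Removal path. */
	d_offline();
	d_released();
	d_free();
	return 0;
}
```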
