summaryrefslogtreecommitdiff
path: root/kernel/sched
diff options
context:
space:
mode:
authorSrivatsa Vaddagiri <vatsa@codeaurora.org>2015-02-06 18:05:53 +0530
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-23 20:01:44 -0700
commit73b7708de7deb4e45af2063944f52241083c81d7 (patch)
tree74acf2033adb39e663bd3cb1bfd3816967219d5f /kernel/sched
parenta5cb71df223160dc3ddeff332b8da8b689645fff (diff)
sched: Add cgroup-based criteria for upmigration
It may be desirable to discourage upmigration of tasks belonging to some cgroups. Add a per-cgroup flag (upmigrate_discourage) that discourages upmigration of tasks of a cgroup. Tasks of the cgroup are allowed to upmigrate only under an overcommitted scenario.

Change-Id: I1780e420af1b6865c5332fb55ee1ee408b74d8ce
Signed-off-by: Srivatsa Vaddagiri <vatsa@codeaurora.org>
[rameezmustafa@codeaurora.org: Use new cgroup APIs]
Signed-off-by: Syed Rameez Mustafa <rameezmustafa@codeaurora.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c46
-rw-r--r--kernel/sched/fair.c26
-rw-r--r--kernel/sched/sched.h3
3 files changed, 69 insertions, 6 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index eb6480b51d93..b4bb96da4efb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10006,6 +10006,45 @@ static int cpu_notify_on_migrate_write_u64(struct cgroup_subsys_state *css,
return 0;
}
+#ifdef CONFIG_SCHED_HMP
+
+/*
+ * cgroup read handler for cpu.upmigrate_discourage: report the group's
+ * upmigrate_discouraged flag (0 or 1) to userspace as a u64.
+ */
+static u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ struct task_group *tg = css_tg(css);
+
+ return tg->upmigrate_discouraged;
+}
+
+/*
+ * cgroup write handler for cpu.upmigrate_discourage. Any nonzero value is
+ * normalized to 1; a no-op write (value unchanged) returns early without
+ * touching the per-cpu big/small task counts. Returns 0 on success.
+ */
+static int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 upmigrate_discourage)
+{
+ struct task_group *tg = css_tg(css);
+ int discourage = upmigrate_discourage > 0;
+
+ if (tg->upmigrate_discouraged == discourage)
+ return 0;
+
+ /*
+ * Revisit big-task classification for tasks of this cgroup. It would
+ * have been efficient to walk tasks of just this cgroup in running
+ * state, but we don't have easy means to do that. Walk all tasks in
+ * running state on all cpus instead and re-visit their big task
+ * classification.
+ */
+ /* Hold off CPU hotplug so the online mask is stable across the walk. */
+ get_online_cpus();
+ pre_big_small_task_count_change(cpu_online_mask);
+
+ tg->upmigrate_discouraged = discourage;
+
+ post_big_small_task_count_change(cpu_online_mask);
+ put_online_cpus();
+
+ return 0;
+}
+
+#endif /* CONFIG_SCHED_HMP */
+
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
struct cftype *cftype, u64 shareval)
@@ -10296,6 +10335,13 @@ static struct cftype cpu_files[] = {
.read_u64 = cpu_notify_on_migrate_read_u64,
.write_u64 = cpu_notify_on_migrate_write_u64,
},
+#ifdef CONFIG_SCHED_HMP
+ {
+ .name = "upmigrate_discourage",
+ .read_u64 = cpu_upmigrate_discourage_read_u64,
+ .write_u64 = cpu_upmigrate_discourage_write_u64,
+ },
+#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
{
.name = "shares",
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cc16d24352b2..da18be6d7bbe 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2808,14 +2808,29 @@ u64 scale_load_to_cpu(u64 task_load, int cpu)
return task_load;
}
+/*
+ * upmigrate_discouraged - is upmigration discouraged for @p's cgroup?
+ *
+ * Returns the task_group's upmigrate_discouraged flag when cgroup
+ * scheduling is enabled, 0 otherwise.
+ *
+ * NOTE(review): this is guarded by CONFIG_CGROUP_SCHED, but the
+ * upmigrate_discouraged field in struct task_group is added under
+ * CONFIG_SCHED_HMP (see sched.h hunk) — presumably this helper is only
+ * compiled when CONFIG_SCHED_HMP is also set; confirm the guard
+ * combination CONFIG_CGROUP_SCHED && !CONFIG_SCHED_HMP still builds.
+ */
+#ifdef CONFIG_CGROUP_SCHED
+
+static inline int upmigrate_discouraged(struct task_struct *p)
+{
+ return task_group(p)->upmigrate_discouraged;
+}
+
+#else
+
+static inline int upmigrate_discouraged(struct task_struct *p)
+{
+ return 0;
+}
+
+#endif
+
/* Is a task "big" on its current cpu */
static inline int is_big_task(struct task_struct *p)
{
u64 load = task_load(p);
int nice = task_nice(p);
- /* Todo: Provide cgroup-based control as well? */
- if (nice > sched_upmigrate_min_nice)
+ if (nice > sched_upmigrate_min_nice || upmigrate_discouraged(p))
return 0;
load = scale_load_to_cpu(load, task_cpu(p));
@@ -3002,8 +3017,7 @@ static int task_will_fit(struct task_struct *p, int cpu)
if (rq->capacity > prev_rq->capacity)
return 1;
} else {
- /* Todo: Provide cgroup-based control as well? */
- if (nice > sched_upmigrate_min_nice)
+ if (nice > sched_upmigrate_min_nice || upmigrate_discouraged(p))
return 1;
load = scale_load_to_cpu(task_load(p), cpu);
@@ -3981,8 +3995,8 @@ static inline int migration_needed(struct rq *rq, struct task_struct *p)
if (is_small_task(p))
return 0;
- /* Todo: cgroup-based control? */
- if (nice > sched_upmigrate_min_nice && rq->capacity > min_capacity)
+ if ((nice > sched_upmigrate_min_nice || upmigrate_discouraged(p)) &&
+ rq->capacity > min_capacity)
return MOVE_TO_LITTLE_CPU;
if (!task_will_fit(p, cpu_of(rq)))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index eae7973b37b9..d897c967bb87 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -241,6 +241,9 @@ struct task_group {
struct cgroup_subsys_state css;
bool notify_on_migrate;
+#ifdef CONFIG_SCHED_HMP
+ bool upmigrate_discouraged;
+#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
/* schedulable entities of this group on each cpu */