summaryrefslogtreecommitdiff
path: root/kernel/cpuset.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--  kernel/cpuset.c  |  38
1 files changed, 25 insertions, 13 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e2e294d997e0..e3c0f38acbe6 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -98,7 +98,7 @@ struct cpuset {
/* user-configured CPUs and Memory Nodes allow to tasks */
cpumask_var_t cpus_allowed;
- cpumask_var_t cpus_requested;
+ cpumask_var_t cpus_requested; /* CPUS requested, but not used because of hotplug */
nodemask_t mems_allowed;
/* effective CPUs and Memory Nodes allow to tasks */
@@ -325,8 +325,7 @@ static struct file_system_type cpuset_fs_type = {
/*
* Return in pmask the portion of a cpusets's cpus_allowed that
* are online. If none are online, walk up the cpuset hierarchy
- * until we find one that does have some online cpus. The top
- * cpuset always has some cpus online.
+ * until we find one that does have some online cpus.
*
* One way or another, we guarantee to return some non-empty subset
* of cpu_online_mask.
@@ -335,8 +334,20 @@ static struct file_system_type cpuset_fs_type = {
*/
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
- while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
+ while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) {
cs = parent_cs(cs);
+ if (unlikely(!cs)) {
+ /*
+ * The top cpuset doesn't have any online cpu as a
+ * consequence of a race between cpuset_hotplug_work
+ * and cpu hotplug notifier. But we know the top
+ * cpuset's effective_cpus is on its way to be
+ * identical to cpu_online_mask.
+ */
+ cpumask_copy(pmask, cpu_online_mask);
+ return;
+ }
+ }
cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}
@@ -1945,11 +1956,11 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
if (!cs)
return ERR_PTR(-ENOMEM);
if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
- goto free_cs;
- if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
- goto free_allowed;
+ goto error_allowed;
if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
- goto free_requested;
+ goto error_effective;
+ if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+ goto error_requested;
set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
cpumask_clear(cs->cpus_allowed);
@@ -1962,11 +1973,11 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
return &cs->css;
-free_requested:
- free_cpumask_var(cs->cpus_requested);
-free_allowed:
+error_requested:
+ free_cpumask_var(cs->effective_cpus);
+error_effective:
free_cpumask_var(cs->cpus_allowed);
-free_cs:
+error_allowed:
kfree(cs);
return ERR_PTR(-ENOMEM);
}
@@ -2268,7 +2279,8 @@ retry:
goto retry;
}
- cpumask_and(&new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
+ cpumask_and(&new_cpus, cs->cpus_requested,
+ parent_cs(cs)->effective_cpus);
nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);