summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2016-11-10 22:49:39 -0800
committerGerrit - the friendly Code Review server <code-review@localhost>2016-11-10 22:49:39 -0800
commit2401d64a48302bad3fc74e8ca02edd188d27ae61 (patch)
tree21d21cfc4a4401e4a2e0eb0388f7d20f9bfd25c4
parent896ddc1b329090ec02a777a5d59be8dedd3543a3 (diff)
parentc82e2f73d1028cd1ff2d6b767ea656909888e516 (diff)
Merge "core_ctl: Synchronize access to cluster cpu list"
-rw-r--r--kernel/sched/core_ctl.c38
1 file changed, 38 insertions, 0 deletions
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 0db85a4fa9c8..ecf6c568f0b5 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -719,8 +719,18 @@ static void move_cpu_lru(struct cpu_data *cpu_data)
static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
{
struct cpu_data *c, *tmp;
+ unsigned long flags;
+ unsigned int num_cpus = cluster->num_cpus;
+ /*
+ * Protect against entry being removed (and added at tail) by other
+ * thread (hotplug).
+ */
+ spin_lock_irqsave(&state_lock, flags);
list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+ if (!num_cpus--)
+ break;
+
if (!is_active(c))
continue;
if (cluster->active_cpus == need)
@@ -729,6 +739,8 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (c->is_busy)
continue;
+ spin_unlock_irqrestore(&state_lock, flags);
+
pr_debug("Trying to isolate CPU%u\n", c->cpu);
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
@@ -738,7 +750,9 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
+ spin_lock_irqsave(&state_lock, flags);
}
+ spin_unlock_irqrestore(&state_lock, flags);
/*
* If the number of active CPUs is within the limits, then
@@ -747,12 +761,19 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
if (cluster->active_cpus <= cluster->max_cpus)
return;
+ num_cpus = cluster->num_cpus;
+ spin_lock_irqsave(&state_lock, flags);
list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+ if (!num_cpus--)
+ break;
+
if (!is_active(c))
continue;
if (cluster->active_cpus <= cluster->max_cpus)
break;
+ spin_unlock_irqrestore(&state_lock, flags);
+
pr_debug("Trying to isolate CPU%u\n", c->cpu);
if (!sched_isolate_cpu(c->cpu)) {
c->isolated_by_us = true;
@@ -762,15 +783,28 @@ static void try_to_isolate(struct cluster_data *cluster, unsigned int need)
pr_debug("Unable to isolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
+ spin_lock_irqsave(&state_lock, flags);
}
+ spin_unlock_irqrestore(&state_lock, flags);
+
}
static void __try_to_unisolate(struct cluster_data *cluster,
unsigned int need, bool force)
{
struct cpu_data *c, *tmp;
+ unsigned long flags;
+ unsigned int num_cpus = cluster->num_cpus;
+ /*
+ * Protect against entry being removed (and added at tail) by other
+ * thread (hotplug).
+ */
+ spin_lock_irqsave(&state_lock, flags);
list_for_each_entry_safe(c, tmp, &cluster->lru, sib) {
+ if (!num_cpus--)
+ break;
+
if (!c->isolated_by_us)
continue;
if ((c->online && !cpu_isolated(c->cpu)) ||
@@ -779,6 +813,8 @@ static void __try_to_unisolate(struct cluster_data *cluster,
if (cluster->active_cpus == need)
break;
+ spin_unlock_irqrestore(&state_lock, flags);
+
pr_debug("Trying to unisolate CPU%u\n", c->cpu);
if (!sched_unisolate_cpu(c->cpu)) {
c->isolated_by_us = false;
@@ -787,7 +823,9 @@ static void __try_to_unisolate(struct cluster_data *cluster,
pr_debug("Unable to unisolate CPU%u\n", c->cpu);
}
cluster->active_cpus = get_active_cpu_count(cluster);
+ spin_lock_irqsave(&state_lock, flags);
}
+ spin_unlock_irqrestore(&state_lock, flags);
}
static void try_to_unisolate(struct cluster_data *cluster, unsigned int need)