diff options
| author | Linux Build Service Account <lnxbuild@localhost> | 2016-12-10 15:43:16 -0800 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2016-12-10 15:43:16 -0800 |
| commit | 496c2d7a0fe6beac03ccc293c56fa9ad563a49d8 (patch) | |
| tree | 2b8692a355b93cd0525124dbfa37917d0e312359 /kernel | |
| parent | a3ed87c16cb413b7019cc91f8e7d1047443e86fa (diff) | |
| parent | 8cf404403a00039b63859397e269b7fe26bd2bef (diff) | |
Merge "sched/core: Fix race condition in clearing hmp request"
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 41 |
1 file changed, 25 insertions, 16 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d7846edd7a79..13990fa6f9cf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1248,15 +1248,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		goto out;
 
 	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
+	cpumask_and(&allowed_mask, &allowed_mask, cpu_active_mask);
 
-	dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
+	dest_cpu = cpumask_any(&allowed_mask);
 	if (dest_cpu >= nr_cpu_ids) {
-		dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+		cpumask_and(&allowed_mask, cpu_active_mask, new_mask);
+		dest_cpu = cpumask_any(&allowed_mask);
 		if (dest_cpu >= nr_cpu_ids) {
 			ret = -EINVAL;
 			goto out;
 		}
-		cpumask_copy(&allowed_mask, new_mask);
 	}
 
 	do_set_cpus_allowed(p, new_mask);
@@ -4635,6 +4636,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	cpumask_var_t cpus_allowed, new_mask;
 	struct task_struct *p;
 	int retval;
+	int dest_cpu;
+	cpumask_t allowed_mask;
 
 	rcu_read_lock();
 
@@ -4696,20 +4699,26 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	}
 #endif
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, true);
-
-	if (!retval) {
-		cpuset_cpus_allowed(p, cpus_allowed);
-		if (!cpumask_subset(new_mask, cpus_allowed)) {
-			/*
-			 * We must have raced with a concurrent cpuset
-			 * update. Just reset the cpus_allowed to the
-			 * cpuset's cpus_allowed
-			 */
-			cpumask_copy(new_mask, cpus_allowed);
-			goto again;
+	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
+	dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
+	if (dest_cpu < nr_cpu_ids) {
+		retval = __set_cpus_allowed_ptr(p, new_mask, true);
+		if (!retval) {
+			cpuset_cpus_allowed(p, cpus_allowed);
+			if (!cpumask_subset(new_mask, cpus_allowed)) {
+				/*
+				 * We must have raced with a concurrent cpuset
+				 * update. Just reset the cpus_allowed to the
+				 * cpuset's cpus_allowed
+				 */
+				cpumask_copy(new_mask, cpus_allowed);
+				goto again;
+			}
 		}
+	} else {
+		retval = -EINVAL;
 	}
+
 out_free_new_mask:
 	free_cpumask_var(new_mask);
 out_free_cpus_allowed:
@@ -5600,6 +5609,7 @@ int do_isolation_work_cpu_stop(void *data)
 	 */
 	nohz_balance_clear_nohz_mask(cpu);
 
+	clear_hmp_request(cpu);
 	local_irq_enable();
 	return 0;
 }
@@ -5724,7 +5734,6 @@ int sched_isolate_cpu(int cpu)
 	migrate_sync_cpu(cpu, cpumask_first(&avail_cpus));
 
 	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
-	clear_hmp_request(cpu);
 
 	calc_load_migrate(rq);
 	update_max_interval();
