| author | Linux Build Service Account <lnxbuild@localhost> | 2016-12-11 18:37:37 -0700 |
|---|---|---|
| committer | Linux Build Service Account <lnxbuild@localhost> | 2016-12-11 18:37:37 -0700 |
| commit | 6998daf8c5fbfa58f8fc9e33407cafeb39de714e | |
| tree | 00749aac5bdf44a78b5c633e646f26b06f45da79 | /kernel |
| parent | 97e6e94b40eba98d8b0bc8db959dbdee40d592c6 | |
| parent | a80e267a8c0d61790c3d1d5f7181ebd1be39c438 | |
Promotion of kernel.lnx.4.4-161211.
CRs Change ID Subject
--------------------------------------------------------------------------------------------------------------
1092907 I6d73b8f246ae3754ab60984af198333fd284ae16 sched/core: Fix race condition in clearing hmp request
1078814 Ie28323cdabccdc64196fae25deb2f56dfbb5a0e7 ARM: dts: msm: enable the watchdog bark interrupt for sm
1094175 I34a3ad2da7f8e267f0d570c0329ac9b73a43309e ARM: dts: msm: add qdss node support for msmtriton
1092907 I078d01e63860d1fc60fc96eb0c739c0f680ae983 sched/core: Prevent (user) space tasks from affining to
1096988 I34e3f586b99b6ff1af1d2323d4f272ee3cca7fa2 ARM: dts: msm: Configure MPM for qusb2phy_dpse_hv for ms
1080662 I42e448ecacad4781b460c4c989026307169ba1b5 mm: remove gup_flags FOLL_WRITE games from __get_user_pa
1091972 I9d7cc82c50c1b09610f5a4eac86af4617389f23f ARM: dts: msm: Add BCL peripheral configs for pmfalcon
1092907 Id06151a8e34edab49ac76b4bffd50c132f0b792f sched: Ensure proper task migration when a CPU is isolat
1098004 I7d96555573cfd6cca1ca56c877d78fde943f8100 sound: usb: Handle implicit feedback data endpoint prope
1091972 I0062f3b39f00ff2f0e74affcffbcf1afd89d3b2f power: bcl: Add frequency mitigation as an optional prop
1099112 I014c823bcf2545f005205dde326a074eaa5d7a6a clk: msm: clock-osm: don't use version register to enabl
Change-Id: Ie97cd9c7d2048e9e540056d4f5f121435a147d05
CRs-Fixed: 1098004, 1094175, 1078814, 1080662, 1092907, 1099112, 1096988, 1091972
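
Of the CRs above, only the scheduler changes touch `kernel/` (see the diffstat below). The `sched_setaffinity()` change under CR 1092907 is the one with a directly user-visible effect: a requested affinity mask that contains no active, non-isolated CPU is now rejected with `-EINVAL` instead of being applied. A minimal user-space sketch of how a caller would observe this on a kernel carrying this patch; the choice of CPU 2 as an isolated core is purely hypothetical:

```c
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(2, &mask);	/* hypothetical: assume CPU 2 is isolated */

	/*
	 * With this patch, sched_setaffinity() first intersects the
	 * requested mask with the active, non-isolated CPUs. If that
	 * intersection is empty, the call fails with EINVAL rather
	 * than affining the task onto an isolated CPU.
	 */
	if (sched_setaffinity(0, sizeof(mask), &mask) == -1)
		fprintf(stderr, "sched_setaffinity: %s\n", strerror(errno));

	return 0;
}
```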
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/sched/core.c | 87 |
| -rw-r--r-- | kernel/sched/rt.c | 8 |
2 files changed, 69 insertions, 26 deletions
```diff
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d7846edd7a79..ee708909dc17 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1248,15 +1248,16 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 		goto out;
 
 	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
+	cpumask_and(&allowed_mask, &allowed_mask, cpu_active_mask);
 
-	dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
+	dest_cpu = cpumask_any(&allowed_mask);
 	if (dest_cpu >= nr_cpu_ids) {
-		dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+		cpumask_and(&allowed_mask, cpu_active_mask, new_mask);
+		dest_cpu = cpumask_any(&allowed_mask);
 		if (dest_cpu >= nr_cpu_ids) {
 			ret = -EINVAL;
 			goto out;
 		}
-		cpumask_copy(&allowed_mask, new_mask);
 	}
 
 	do_set_cpus_allowed(p, new_mask);
@@ -4635,6 +4636,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	cpumask_var_t cpus_allowed, new_mask;
 	struct task_struct *p;
 	int retval;
+	int dest_cpu;
+	cpumask_t allowed_mask;
 
 	rcu_read_lock();
 
@@ -4696,20 +4699,26 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
 	}
 #endif
 again:
-	retval = __set_cpus_allowed_ptr(p, new_mask, true);
-
-	if (!retval) {
-		cpuset_cpus_allowed(p, cpus_allowed);
-		if (!cpumask_subset(new_mask, cpus_allowed)) {
-			/*
-			 * We must have raced with a concurrent cpuset
-			 * update. Just reset the cpus_allowed to the
-			 * cpuset's cpus_allowed
-			 */
-			cpumask_copy(new_mask, cpus_allowed);
-			goto again;
+	cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
+	dest_cpu = cpumask_any_and(cpu_active_mask, &allowed_mask);
+	if (dest_cpu < nr_cpu_ids) {
+		retval = __set_cpus_allowed_ptr(p, new_mask, true);
+		if (!retval) {
+			cpuset_cpus_allowed(p, cpus_allowed);
+			if (!cpumask_subset(new_mask, cpus_allowed)) {
+				/*
+				 * We must have raced with a concurrent cpuset
+				 * update. Just reset the cpus_allowed to the
+				 * cpuset's cpus_allowed
+				 */
+				cpumask_copy(new_mask, cpus_allowed);
+				goto again;
+			}
 		}
+	} else {
+		retval = -EINVAL;
 	}
+
 out_free_new_mask:
 	free_cpumask_var(new_mask);
 out_free_cpus_allowed:
@@ -5455,6 +5464,37 @@ static struct task_struct fake_task = {
 };
 
 /*
+ * Remove a task from the runqueue and pretend that it's migrating. This
+ * should prevent migrations for the detached task and disallow further
+ * changes to tsk_cpus_allowed.
+ */
+static void
+detach_one_task(struct task_struct *p, struct rq *rq, struct list_head *tasks)
+{
+	lockdep_assert_held(&rq->lock);
+
+	p->on_rq = TASK_ON_RQ_MIGRATING;
+	deactivate_task(rq, p, 0);
+	list_add(&p->se.group_node, tasks);
+}
+
+static void attach_tasks(struct list_head *tasks, struct rq *rq)
+{
+	struct task_struct *p;
+
+	lockdep_assert_held(&rq->lock);
+
+	while (!list_empty(tasks)) {
+		p = list_first_entry(tasks, struct task_struct, se.group_node);
+		list_del_init(&p->se.group_node);
+
+		BUG_ON(task_rq(p) != rq);
+		activate_task(rq, p, 0);
+		p->on_rq = TASK_ON_RQ_QUEUED;
+	}
+}
+
+/*
  * Migrate all tasks (not pinned if pinned argument say so) from the rq,
  * sleeping tasks will be migrated by try_to_wake_up()->select_task_rq().
  *
@@ -5468,6 +5508,7 @@ static void migrate_tasks(struct rq *dead_rq, bool migrate_pinned_tasks)
 	struct task_struct *next, *stop = rq->stop;
 	int dest_cpu;
 	unsigned int num_pinned_kthreads = 1; /* this thread */
+	LIST_HEAD(tasks);
 	cpumask_t avail_cpus;
 
 	cpumask_andnot(&avail_cpus, cpu_online_mask, cpu_isolated_mask);
@@ -5492,12 +5533,10 @@ static void migrate_tasks(struct rq *dead_rq, bool migrate_pinned_tasks)
 
 	for (;;) {
 		/*
-		 * There's this thread running + pinned threads, bail when
-		 * that's the only remaining threads.
+		 * There's this thread running, bail when that's the only
+		 * remaining thread.
 		 */
-		if ((migrate_pinned_tasks && rq->nr_running == 1) ||
-			(!migrate_pinned_tasks &&
-			rq->nr_running <= num_pinned_kthreads))
+		if (rq->nr_running == 1)
 			break;
 
 		/*
@@ -5510,8 +5549,9 @@ static void migrate_tasks(struct rq *dead_rq, bool migrate_pinned_tasks)
 
 		if (!migrate_pinned_tasks && next->flags & PF_KTHREAD &&
 			!cpumask_intersects(&avail_cpus, &next->cpus_allowed)) {
-			lockdep_unpin_lock(&rq->lock);
+			detach_one_task(next, rq, &tasks);
 			num_pinned_kthreads += 1;
+			lockdep_unpin_lock(&rq->lock);
 			continue;
 		}
 
@@ -5559,6 +5599,9 @@ static void migrate_tasks(struct rq *dead_rq, bool migrate_pinned_tasks)
 	}
 
 	rq->stop = stop;
+
+	if (num_pinned_kthreads > 1)
+		attach_tasks(&tasks, rq);
 }
 
 static void set_rq_online(struct rq *rq);
@@ -5600,6 +5643,7 @@ int do_isolation_work_cpu_stop(void *data)
 	 */
 	nohz_balance_clear_nohz_mask(cpu);
 
+	clear_hmp_request(cpu);
 	local_irq_enable();
 	return 0;
 }
@@ -5724,7 +5768,6 @@ int sched_isolate_cpu(int cpu)
 	migrate_sync_cpu(cpu, cpumask_first(&avail_cpus));
 
 	stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
-	clear_hmp_request(cpu);
 	calc_load_migrate(rq);
 	update_max_interval();
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 12a04f30ef77..52edd6b158ed 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1970,11 +1970,11 @@ retry:
 		goto retry;
 	}
 
-	deactivate_task(rq, next_task, 0);
 	next_task->on_rq = TASK_ON_RQ_MIGRATING;
+	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
-	next_task->on_rq = TASK_ON_RQ_QUEUED;
 	activate_task(lowest_rq, next_task, 0);
+	next_task->on_rq = TASK_ON_RQ_QUEUED;
 	ret = 1;
 
 	resched_curr(lowest_rq);
@@ -2226,11 +2226,11 @@ static void pull_rt_task(struct rq *this_rq)
 
 			resched = true;
 
-			deactivate_task(src_rq, p, 0);
 			p->on_rq = TASK_ON_RQ_MIGRATING;
+			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
-			p->on_rq = TASK_ON_RQ_QUEUED;
 			activate_task(this_rq, p, 0);
+			p->on_rq = TASK_ON_RQ_QUEUED;
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
```
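
The `kernel/sched/rt.c` hunks reorder the migration bookkeeping so that `p->on_rq` is set to `TASK_ON_RQ_MIGRATING` before `deactivate_task()` and back to `TASK_ON_RQ_QUEUED` only after `activate_task()`, matching the ordering the fair-class balancer's `detach_task()`/`attach_task()` use. Dequeue-time hooks then already see the task as migrating, and no observer ever sees a task marked queued while it sits on neither runqueue. A condensed, self-contained sketch of that invariant with simplified stand-ins for the scheduler's types and helpers (not the kernel code itself):

```c
#include <stdio.h>

enum { TASK_ON_RQ_QUEUED = 1, TASK_ON_RQ_MIGRATING = 2 };

struct task { int on_rq; int cpu; };
struct rq   { int cpu; int nr_running; };

/*
 * Stand-ins for the scheduler's dequeue/enqueue paths. In the real
 * kernel these run accounting hooks that inspect p->on_rq, which is
 * why the ordering in migrate_one() matters.
 */
static void deactivate(struct rq *rq, struct task *p) { (void)p; rq->nr_running--; }
static void activate(struct rq *rq, struct task *p)   { (void)p; rq->nr_running++; }

static void migrate_one(struct task *p, struct rq *src, struct rq *dst)
{
	/* Mark the task as migrating *before* dequeueing it, so hooks
	 * inside deactivate() see a migration, not a plain dequeue. */
	p->on_rq = TASK_ON_RQ_MIGRATING;
	deactivate(src, p);

	p->cpu = dst->cpu;		/* stand-in for set_task_cpu() */

	activate(dst, p);
	/* Flip back to QUEUED only after the enqueue completed, so no
	 * observer sees a "queued" task that is on neither runqueue. */
	p->on_rq = TASK_ON_RQ_QUEUED;
}

int main(void)
{
	struct rq src = { .cpu = 0, .nr_running = 1 };
	struct rq dst = { .cpu = 1, .nr_running = 0 };
	struct task p = { .on_rq = TASK_ON_RQ_QUEUED, .cpu = 0 };

	migrate_one(&p, &src, &dst);
	printf("task on CPU %d, on_rq=%d, dst nr_running=%d\n",
	       p.cpu, p.on_rq, dst.nr_running);
	return 0;
}
```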
