author     Peter Zijlstra <peterz@infradead.org>     2017-07-25 18:58:21 +0200
committer  Georg Veichtlbauer <georg@vware.at>       2023-07-16 12:47:43 +0200
commit     0ffdb886996b66e1eb9bbda6e9605bf7dbca0cd1 (patch)
tree       cc01ef5dc9b9aec12674b3d519362bc4667cdc34
parent     c9999f04236e640bc82b2085cadca8487184bc76 (diff)
BACKPORT: sched/core: Fix rules for running on online && !active CPUs
As already enforced by the WARN() in __set_cpus_allowed_ptr(), the rules
for running on an online && !active CPU are stricter than just being a
kthread: you need to be a per-cpu kthread.

If you're not strictly per-CPU, you have better CPUs to run on and don't
need the partially booted one to get your work done.

The exception is to allow smpboot threads to bootstrap the CPU itself and
get kernel 'services' initialized before we allow userspace on it.

Change-Id: I515e873a6e5be0cde7771ecedf56101614300fe2
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: 955dbdf4ce87 ("sched: Allow migrating kthreads into online but inactive CPUs")
Link: http://lkml.kernel.org/r/20170725165821.cejhb7v2s3kecems@hirez.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Backported to 4.4.

Signed-off-by: joshuous <joshuous@gmail.com>
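In effect, the patch reduces CPU eligibility to one predicate: a strictly per-CPU kthread may use any online CPU, while everything else requires a fully active one. The standalone sketch below illustrates that rule outside the kernel; struct task, cpu_allowed(), and the online/active flags are hypothetical stand-ins for the kernel's task_struct and hotplug state, and the affinity-mask check that the real is_cpu_allowed() performs first is omitted here.

/*
 * Illustration only -- not part of the patch. struct task and the
 * online/active flags are hypothetical stand-ins for the kernel's
 * task_struct and CPU hotplug state.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
	bool kthread;         /* PF_KTHREAD set on the task?        */
	int  nr_cpus_allowed; /* weight of the task's affinity mask */
};

/* Mirrors is_cpu_allowed(), minus the cpumask_test_cpu() affinity check. */
static bool cpu_allowed(const struct task *p, bool online, bool active)
{
	/* A strictly per-CPU kthread may run once its CPU is online... */
	if (p->kthread && p->nr_cpus_allowed == 1)
		return online;

	/* ...everything else must wait until the CPU is fully active. */
	return active;
}

int main(void)
{
	struct task smpboot  = { .kthread = true,  .nr_cpus_allowed = 1 };
	struct task usertask = { .kthread = false, .nr_cpus_allowed = 4 };

	/* During bring-up (online && !active) only the per-CPU kthread runs. */
	printf("smpboot eligible:  %d\n", cpu_allowed(&smpboot,  true, false)); /* 1 */
	printf("usertask eligible: %d\n", cpu_allowed(&usertask, true, false)); /* 0 */
	return 0;
}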
 kernel/sched/core.c | 41 +++++++++++++++++++++++++++++------------
 1 file changed, 29 insertions(+), 12 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e83959592d6c..f73831486c2f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1085,6 +1085,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
+
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+	if (!(p->flags & PF_KTHREAD))
+		return false;
+
+	if (p->nr_cpus_allowed != 1)
+		return false;
+
+	return true;
+}
+
+/*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr() and select_fallback_rq().
+ */
+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+{
+	if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+		return false;
+
+	if (is_per_cpu_kthread(p))
+		return cpu_online(cpu);
+
+	return cpu_active(cpu);
+}
+
 /*
  * This is how migration works:
  *
@@ -1144,16 +1171,8 @@ static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int dest_cpu)
 {
 	int src_cpu;
 
-	if (p->flags & PF_KTHREAD) {
-		if (unlikely(!cpu_online(dest_cpu)))
-			return ret;
-	} else {
-		if (unlikely(!cpu_active(dest_cpu)))
-			return ret;
-	}
-
 	/* Affinity changed (again). */
-	if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
+	if (!is_cpu_allowed(p, dest_cpu))
 		return rq;
 
 	src_cpu = cpu_of(rq);
@@ -1654,9 +1673,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p, bool allow_iso)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
-			if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
-				continue;
-			if (!cpu_online(dest_cpu))
+			if (!is_cpu_allowed(p, dest_cpu))
 				continue;
 			if (cpu_isolated(dest_cpu)) {
 				if (allow_iso)