Diffstat (limited to 'kernel/smpboot.c')
 -rw-r--r--  kernel/smpboot.c | 38
 1 file changed, 38 insertions(+), 0 deletions(-)
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 3a0415803b09..552e154fc77e 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -122,7 +122,45 @@ static int smpboot_thread_fn(void *data)
 		}
 		if (kthread_should_park()) {
+			/*
+			 * Serialize against wakeup. If we take the lock first, the
+			 * wakeup is skipped entirely. If the wakeup runs first, we
+			 * observe its TASK_RUNNING update before moving forward.
+			 * This avoids the race where the wakeup path observes
+			 * TASK_INTERRUPTIBLE, then this thread sets TASK_PARKED in
+			 * kthread_parkme(), and only afterwards does the wakeup path
+			 * set the task state to TASK_RUNNING. The kthread then ends
+			 * up parked in TASK_RUNNING state, causing a panic later in
+			 * kthread_unpark(): it sees the KTHREAD_IS_PARKED flag set
+			 * but fails to rebind the kthread, because the kthread is
+			 * not in TASK_PARKED state.
+			 *
+			 * Control thread                Hotplug Thread
+			 *
+			 * kthread_park()
+			 *   set KTHREAD_SHOULD_PARK
+			 *                               smpboot_thread_fn()
+			 *                                 set_current_state(
+			 *                                   TASK_INTERRUPTIBLE);
+			 *                                 kthread_parkme()
+			 *
+			 * wake_up_process()
+			 *
+			 *   raw_spin_lock_irqsave(&p->pi_lock, flags);
+			 *   if (!(p->state & state))
+			 *     goto out;
+			 *
+			 *                                 __set_current_state(
+			 *                                   TASK_PARKED);
+			 *
+			 *   if (p->on_rq && ttwu_remote(p, wake_flags))
+			 *     ttwu_remote()
+			 *       p->state = TASK_RUNNING;
+			 *                                 schedule();
+			 */
+			raw_spin_lock(&current->pi_lock);
 			__set_current_state(TASK_RUNNING);
+			raw_spin_unlock(&current->pi_lock);
 			preempt_enable();
 			if (ht->park && td->status == HP_THREAD_ACTIVE) {
 				BUG_ON(td->cpu != smp_processor_id());
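
For illustration, below is a minimal user-space sketch of the interleaving the
comment above describes, with a pthread mutex standing in for p->pi_lock and an
atomic int standing in for p->state. Everything in it (task_state, waker(),
parker(), the pthread modelling) is an assumption of the sketch, not kernel
API; it only models the locked section of try_to_wake_up() and the patched
parking path.

/* park_race_sketch.c - illustrative user-space model of the race above.
 * task_state stands in for p->state, pi_lock for p->pi_lock; waker() models
 * the locked section of try_to_wake_up(), parker() the patched parking path.
 * Build: cc -pthread park_race_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_PARKED		2

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
/* The thread starts out sleeping, as after set_current_state(TASK_INTERRUPTIBLE). */
static _Atomic int task_state = TASK_INTERRUPTIBLE;

/* Models wake_up_process(): wake only a sleeping task, holding pi_lock
 * across both the state check and the TASK_RUNNING store. */
static void *waker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pi_lock);
	if (atomic_load(&task_state) & TASK_INTERRUPTIBLE)	/* if (!(p->state & state)) goto out; */
		atomic_store(&task_state, TASK_RUNNING);	/* ttwu_remote(): p->state = TASK_RUNNING; */
	pthread_mutex_unlock(&pi_lock);
	return NULL;
}

/* Models the patched smpboot_thread_fn() park path. */
static void *parker(void *arg)
{
	(void)arg;
	/* The fix: take pi_lock around the TASK_RUNNING update, so either the
	 * waker's check already sees TASK_RUNNING and bails out, or the waker
	 * has completed its store before we proceed. */
	pthread_mutex_lock(&pi_lock);
	atomic_store(&task_state, TASK_RUNNING);
	pthread_mutex_unlock(&pi_lock);

	/* kthread_parkme(): this store can no longer be clobbered, because
	 * the waker has either bailed out or already finished. */
	atomic_store(&task_state, TASK_PARKED);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, waker, NULL);
	pthread_create(&b, NULL, parker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Always prints 2 (TASK_PARKED). Remove parker()'s lock/unlock pair
	 * and the waker's TASK_RUNNING store may land last, parking the
	 * thread in TASK_RUNNING state - the bug the patch fixes. */
	printf("final task_state = %d\n", atomic_load(&task_state));
	return 0;
}

The point the sketch exercises: try_to_wake_up() holds p->pi_lock across both
its state check and the TASK_RUNNING store, so taking the same lock around the
hotplug thread's TASK_RUNNING update leaves only two possible orders, and in
both of them the later TASK_PARKED store survives.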