Diffstat (limited to 'kernel')
-rw-r--r--	kernel/locking/lockdep.c	12
-rw-r--r--	kernel/printk/printk.c	6
-rw-r--r--	kernel/smpboot.c	38
-rw-r--r--	kernel/trace/trace.c	5
4 files changed, 55 insertions, 6 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 0e2c4911ba61..6e171b547a80 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1264,11 +1264,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_forward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -1291,11 +1291,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
 	this.parent = NULL;
 	this.class = class;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	arch_spin_lock(&lockdep_lock);
 	ret = __lockdep_count_backward_deps(&this);
 	arch_spin_unlock(&lockdep_lock);
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 
 	return ret;
 }
@@ -4123,7 +4123,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	if (unlikely(!debug_locks))
 		return;
 
-	local_irq_save(flags);
+	raw_local_irq_save(flags);
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
@@ -4134,7 +4134,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
-	local_irq_restore(flags);
+	raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index dca87791e9c1..da573aeaeb12 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2119,6 +2119,8 @@ void resume_console(void)
 	console_unlock();
 }
 
+#ifdef CONFIG_CONSOLE_FLUSH_ON_HOTPLUG
+
 /**
  * console_cpu_notify - print deferred console messages after CPU hotplug
  * @self: notifier struct
@@ -2148,6 +2150,8 @@ static int console_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
+#endif
+
 /**
  * console_lock - lock the console system for exclusive use.
  *
@@ -2712,7 +2716,9 @@ static int __init printk_late_init(void)
 			unregister_console(con);
 		}
 	}
+#ifdef CONFIG_CONSOLE_FLUSH_ON_HOTPLUG
 	hotcpu_notifier(console_cpu_notify, 0);
+#endif
 	return 0;
 }
 late_initcall(printk_late_init);
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 3a0415803b09..552e154fc77e 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -122,7 +122,45 @@ static int smpboot_thread_fn(void *data)
 		}
 
 		if (kthread_should_park()) {
+			/*
+			 * Serialize against wakeup. If we take the lock first,
+			 * wakeup is skipped. If we run later, we observe,
+			 * TASK_RUNNING update from wakeup path, before moving
+			 * forward. This helps avoid the race, where wakeup
+			 * observes TASK_INTERRUPTIBLE, and also observes
+			 * the TASK_PARKED in kthread_parkme() before updating
+			 * task state to TASK_RUNNING. In this case, kthread
+			 * gets parked in TASK_RUNNING state. This results
+			 * in panic later on in kthread_unpark(), as it sees
+			 * KTHREAD_IS_PARKED flag set but fails to rebind the
+			 * kthread, due to it being not in TASK_PARKED state.
+			 *
+			 * Control thread                     Hotplug Thread
+			 *
+			 * kthread_park()
+			 *  set KTHREAD_SHOULD_PARK
+			 *                                    smpboot_thread_fn()
+			 *                                     set_current_state(
+			 *                                         TASK_INTERRUPTIBLE);
+			 *                                     kthread_parkme()
+			 *
+			 * wake_up_process()
+			 *
+			 *  raw_spin_lock_irqsave(&p->pi_lock, flags);
+			 *  if (!(p->state & state))
+			 *      goto out;
+			 *
+			 *                                     __set_current_state(
+			 *                                         TASK_PARKED);
+			 *
+			 *  if (p->on_rq && ttwu_remote(p, wake_flags))
+			 *   ttwu_remote()
+			 *    p->state = TASK_RUNNING;
+			 *                                     schedule();
+			 */
+			raw_spin_lock(&current->pi_lock);
 			__set_current_state(TASK_RUNNING);
+			raw_spin_unlock(&current->pi_lock);
 			preempt_enable();
 			if (ht->park && td->status == HP_THREAD_ACTIVE) {
 				BUG_ON(td->cpu != smp_processor_id());
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e62c44cb8b74..b30adaae739a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2237,6 +2237,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(trace_vbprintk);
 
+__printf(3, 0)
 static int
 __trace_array_vprintk(struct ring_buffer *buffer,
 		      unsigned long ip, const char *fmt, va_list args)
@@ -2288,12 +2289,14 @@ __trace_array_vprintk(struct ring_buffer *buffer,
 	return len;
 }
 
+__printf(3, 0)
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
 	return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
 }
 
+__printf(3, 0)
 int trace_array_printk(struct trace_array *tr,
 		       unsigned long ip, const char *fmt, ...)
 {
@@ -2309,6 +2312,7 @@ int trace_array_printk(struct trace_array *tr,
 	return ret;
 }
 
+__printf(3, 4)
 int trace_array_printk_buf(struct ring_buffer *buffer,
 			   unsigned long ip, const char *fmt, ...)
 {
@@ -2324,6 +2328,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
 	return ret;
 }
 
+__printf(2, 0)
 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 {
 	return trace_array_vprintk(&global_trace, ip, fmt, args);
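
A note on the lockdep hunks (not part of the diff): the likely reason for switching to the raw_ variants is that, with CONFIG_TRACE_IRQFLAGS, local_irq_save()/local_irq_restore() call back into lockdep's own hardirq tracing, which is undesirable while lockdep's internal state is being walked under the arch-level lockdep_lock. Roughly, the non-raw helpers are defined in include/linux/irqflags.h as:

/* Non-raw helpers wrap the raw ones and notify lockdep's irq tracing. */
#define local_irq_save(flags)				\
	do {						\
		raw_local_irq_save(flags);		\
		trace_hardirqs_off();			\
	} while (0)

#define local_irq_restore(flags)			\
	do {						\
		if (raw_irqs_disabled_flags(flags)) {	\
			raw_local_irq_restore(flags);	\
			trace_hardirqs_off();		\
		} else {				\
			trace_hardirqs_on();		\
			raw_local_irq_restore(flags);	\
		}					\
	} while (0)

so raw_local_irq_save()/raw_local_irq_restore() disable and restore interrupts without the trace_hardirqs_*() hooks re-entering lockdep.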
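
Why taking current->pi_lock in smpboot_thread_fn() closes the race described in the comment above: try_to_wake_up() tests and later rewrites the target task's ->state while holding p->pi_lock, so once the parking thread does its TASK_RUNNING update under the same lock the two paths can no longer interleave. A condensed sketch of the wakeup side (the real kernel/sched/core.c function does much more; only the steps quoted in the comment are kept):

/* Condensed sketch, not the full implementation. */
static int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
{
	unsigned long flags;
	int success = 0;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	if (!(p->state & state))
		goto out;	/* e.g. the parking thread already wrote TASK_RUNNING */

	/* ... on-rq / remote wakeup handling elided ... */
	p->state = TASK_RUNNING;
	success = 1;
out:
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
	return success;
}

With the hunk above, either the wakeup sees TASK_RUNNING here and bails out, or it finishes its own TASK_RUNNING write before the hotplug thread proceeds to kthread_parkme(), so a stale wakeup can no longer overwrite TASK_PARKED.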
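
The trace.c hunks only add compile-time format checking; __printf() is the kernel's wrapper around the compiler's printf-format attribute and does not change generated code. A prototype-level sketch of the two forms used above (the macro body is the usual definition from include/linux/compiler-gcc.h; the struct types are only forward-declared here):

#include <stdarg.h>

#define __printf(a, b)	__attribute__((format(printf, a, b)))

struct ring_buffer;
struct trace_array;

/* Variadic form: argument 3 is the format string, checking starts at argument 4. */
__printf(3, 4)
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...);

/*
 * va_list form: argument 3 is still the format string, and 0 means there
 * are no variadic arguments for the compiler to check (the caller already
 * packaged them into the va_list).
 */
__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
			unsigned long ip, const char *fmt, va_list args);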
