| field | value | date |
|---|---|---|
| author | H. Peter Anvin <hpa@linux.intel.com> | 2013-01-29 14:59:09 -0800 |
| committer | H. Peter Anvin <hpa@linux.intel.com> | 2013-01-29 15:10:15 -0800 |
| commit | de65d816aa44f9ddd79861ae21d75010cc1fd003 (patch) | |
| tree | 04a637a43b2e52a733d0dcb7595a47057571e7da /kernel/workqueue.c | |
| parent | 9710f581bb4c35589ac046b0cfc0deb7f369fc85 (diff) | |
| parent | 5dcd14ecd41ea2b3ae3295a9b30d98769d52165f (diff) | |
Merge remote-tracking branch 'origin/x86/boot' into x86/mm2
Coming patches to x86/mm2 require the changes and advanced baseline in
x86/boot.
Resolved Conflicts:

    arch/x86/kernel/setup.c
    mm/nobootmem.c
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'kernel/workqueue.c')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | kernel/workqueue.c | 28 |

1 file changed, 20 insertions, 8 deletions
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 042d221d33cc..fbc6576a83c3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -739,8 +739,10 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 {
 	struct worker *worker = kthread_data(task);
 
-	if (!(worker->flags & WORKER_NOT_RUNNING))
+	if (!(worker->flags & WORKER_NOT_RUNNING)) {
+		WARN_ON_ONCE(worker->pool->gcwq->cpu != cpu);
 		atomic_inc(get_pool_nr_running(worker->pool));
+	}
 }
 
 /**
@@ -1361,8 +1363,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
-	BUG_ON(timer_pending(timer));
-	BUG_ON(!list_empty(&work->entry));
+	WARN_ON_ONCE(timer_pending(timer));
+	WARN_ON_ONCE(!list_empty(&work->entry));
+
+	/*
+	 * If @delay is 0, queue @dwork->work immediately.  This is for
+	 * both optimization and correctness.  The earliest @timer can
+	 * expire is on the closest next tick and delayed_work users depend
+	 * on that there's no such delay when @delay is 0.
+	 */
+	if (!delay) {
+		__queue_work(cpu, wq, &dwork->work);
+		return;
+	}
 
 	timer_stats_timer_set_start_info(&dwork->timer);
@@ -1417,9 +1430,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	bool ret = false;
 	unsigned long flags;
 
-	if (!delay)
-		return queue_work_on(cpu, wq, &dwork->work);
-
 	/* read the comment in __queue_work() */
 	local_irq_save(flags);
@@ -2407,8 +2417,10 @@ static int rescuer_thread(void *__wq)
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 
-	if (kthread_should_stop())
+	if (kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
 		return 0;
+	}
 
 	/*
 	 * See whether any cpu is asking for help.  Unbounded
@@ -3475,7 +3487,7 @@ unsigned int work_busy(struct work_struct *work)
 	unsigned int ret = 0;
 
 	if (!gcwq)
-		return false;
+		return 0;
 
 	spin_lock_irqsave(&gcwq->lock, flags);
```
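The comment added to `__queue_delayed_work()` above carries the core reasoning: a timer can fire no earlier than the next tick, so arming a timer for a zero delay would still cost up to a full tick of latency, and `delayed_work` users depend on `@delay == 0` meaning "queue now". Below is a minimal userspace sketch of that fast-path pattern, not kernel code: `queue_delayed()`, `run_now()`, `arm_timer()`, and `TICK_MS` are all illustrative names, and the real path goes through `__queue_work()` and the kernel timer wheel.

```c
#include <stdio.h>

#define TICK_MS 10	/* pretend tick granularity of the timer wheel */

struct work {
	void (*func)(struct work *w);
};

/* Stand-in for __queue_work(): dispatch with no timer involved. */
static void run_now(struct work *w)
{
	w->func(w);
}

/* Stand-in for arming dwork->timer: expiry rounds up to a tick. */
static void arm_timer(struct work *w, unsigned long delay_ticks)
{
	unsigned long wait_ms = (delay_ticks ? delay_ticks : 1) * TICK_MS;

	printf("timer armed: work runs in >= %lu ms\n", wait_ms);
	/* a real implementation would invoke w->func when the timer fires */
}

static void queue_delayed(struct work *w, unsigned long delay_ticks)
{
	if (!delay_ticks) {
		run_now(w);	/* fast path: no tick-granularity latency */
		return;
	}
	arm_timer(w, delay_ticks);
}

static void hello(struct work *w)
{
	(void)w;
	puts("work ran immediately");
}

int main(void)
{
	struct work w = { .func = hello };

	queue_delayed(&w, 0);	/* immediate, never waits for a tick */
	queue_delayed(&w, 5);	/* timer path */
	return 0;
}
```

Moving the check into `__queue_delayed_work()` also explains the hunk that deletes the old `!delay` special case from `queue_delayed_work_on()`: every caller of the internal helper, not just that one entry point, now gets the immediate-queue behavior.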
