| author | Olof Johansson <olof@lixom.net> | 2013-04-02 18:33:58 -0700 |
|---|---|---|
| committer | Olof Johansson <olof@lixom.net> | 2013-04-02 18:33:58 -0700 |
| commit | 06b851e58b73a268f8092c2d990f697b2e7c53bd (patch) | |
| tree | a44e10462a39202b60636f8b102376a9c69169c1 /kernel/workqueue.c | |
| parent | 5f03dc2002f5dc85ce87e69caff7f28f17f5c9b2 (diff) | |
| parent | 17e7979f83c1de305fa81efb0aa7a3a56bd8121c (diff) | |
Merge branch 'lpc32xx/defconfig' of git://git.antcom.de/linux-2.6 into next/soc
* 'lpc32xx/defconfig' of git://git.antcom.de/linux-2.6: (604 commits)
ARM: LPC32xx: defconfig update: Cleanup (EXPERIMENTAL)
ARM: LPC32xx: defconfig update: Remove the museum NAND option
ARM: LPC32xx: defconfig update: Default drivers and cleanup
ARM: LPC32xx: defconfig update: gpio and keys
+ Linux 3.9-rc4
Signed-off-by: Olof Johansson <olof@lixom.net>
Conflicts:
arch/arm/Kconfig
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 51 |
1 file changed, 29 insertions(+), 22 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811eb..b48cd597145d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	int ret;
 
 	mutex_lock(&worker_pool_idr_mutex);
-	idr_pre_get(&worker_pool_idr, GFP_KERNEL);
-	ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	if (ret >= 0)
+		pool->id = ret;
 	mutex_unlock(&worker_pool_idr_mutex);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /*
@@ -3446,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
 
 		spin_unlock_irq(&pool->lock);
 		mutex_unlock(&pool->assoc_mutex);
-	}
 
-	/*
-	 * Call schedule() so that we cross rq->lock and thus can guarantee
-	 * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
-	 * as scheduler callbacks may be invoked from other cpus.
-	 */
-	schedule();
+		/*
+		 * Call schedule() so that we cross rq->lock and thus can
+		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+		 * This is necessary as scheduler callbacks may be invoked
+		 * from other cpus.
+		 */
+		schedule();
 
-	/*
-	 * Sched callbacks are disabled now. Zap nr_running. After this,
-	 * nr_running stays zero and need_more_worker() and keep_working()
-	 * are always true as long as the worklist is not empty. Pools on
-	 * @cpu now behave as unbound (in terms of concurrency management)
-	 * pools which are served by workers tied to the CPU.
-	 *
-	 * On return from this function, the current worker would trigger
-	 * unbound chain execution of pending work items if other workers
-	 * didn't already.
-	 */
-	for_each_std_worker_pool(pool, cpu)
+		/*
+		 * Sched callbacks are disabled now. Zap nr_running.
+		 * After this, nr_running stays zero and need_more_worker()
+		 * and keep_working() are always true as long as the
+		 * worklist is not empty. This pool now behaves as an
+		 * unbound (in terms of concurrency management) pool which
+		 * are served by workers tied to the pool.
+		 */
 		atomic_set(&pool->nr_running, 0);
+
+		/*
+		 * With concurrency management just turned off, a busy
+		 * worker blocking could lead to lengthy stalls. Kick off
+		 * unbound chain execution of currently pending work items.
+		 */
+		spin_lock_irq(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock_irq(&pool->lock);
+	}
 }
 
 /*
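For context, the first hunk above is part of the 3.9-era tree-wide conversion from the two-step idr_pre_get()/idr_get_new() interface to idr_alloc(), which allocates an ID and installs the pointer in one call and returns either the new ID (>= 0) or a negative errno. Below is a minimal before/after sketch of that conversion, not taken from the patch: the my_idr and obj names are illustrative, and the error handling beyond what the hunk shows is an assumption.

	#include <linux/idr.h>
	#include <linux/gfp.h>

	static DEFINE_IDR(my_idr);	/* illustrative IDR, not from the patch */

	/* Old style (pre-3.9): preload memory, then allocate into *id. */
	static int assign_id_old(void *obj, int *id)
	{
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;
		return idr_get_new(&my_idr, obj, id);	/* 0 on success */
	}

	/* New style: one call; returns the allocated ID (>= 0) or -errno.
	 * start = 0, end = 0 means "any non-negative ID". */
	static int assign_id_new(void *obj, int *id)
	{
		int ret = idr_alloc(&my_idr, obj, 0, 0, GFP_KERNEL);

		if (ret < 0)
			return ret;
		*id = ret;
		return 0;
	}

The patched worker_pool_assign_id() follows the same shape as assign_id_new(): it stores the returned ID in pool->id on success and normalizes the return value to 0 or a negative errno.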
