diff options
| author | Trilok Soni <tsoni@codeaurora.org> | 2016-08-25 19:05:37 -0700 |
|---|---|---|
| committer | Trilok Soni <tsoni@codeaurora.org> | 2016-08-26 14:34:05 -0700 |
| commit | 5ab1e18aa3913d454e1bd1498b20ee581aae2c6b (patch) | |
| tree | 42bd10ef0bf5cdb8deb05656bf802c77dc580ff7 /kernel/workqueue.c | |
| parent | e97b6a0e0217f7c072fdad6c50673cd7a64348e1 (diff) | |
Revert "Merge remote-tracking branch 'msm-4.4/tmp-510d0a3f' into msm-4.4"
This reverts commit 9d6fd2c3e9fcfb ("Merge remote-tracking branch
'msm-4.4/tmp-510d0a3f' into msm-4.4") because it breaks the dump
parsing tools: the kernel can now be loaded anywhere in memory
rather than at a fixed linear mapping.
Change-Id: Id416f0a249d803442847d09ac47781147b0d0ee6
Signed-off-by: Trilok Soni <tsoni@codeaurora.org>
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 40 |
1 files changed, 0 insertions, 40 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 316b316c7528..ef84d9874d03 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -652,35 +652,6 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
 	 */
 	smp_wmb();
 	set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
-	/*
-	 * The following mb guarantees that previous clear of a PENDING bit
-	 * will not be reordered with any speculative LOADS or STORES from
-	 * work->current_func, which is executed afterwards. This possible
-	 * reordering can lead to a missed execution on attempt to qeueue
-	 * the same @work. E.g. consider this case:
-	 *
-	 *   CPU#0                         CPU#1
-	 *   ----------------------------  --------------------------------
-	 *
-	 * 1  STORE event_indicated
-	 * 2  queue_work_on() {
-	 * 3    test_and_set_bit(PENDING)
-	 * 4 }                             set_..._and_clear_pending() {
-	 * 5                                 set_work_data() # clear bit
-	 * 6                                 smp_mb()
-	 * 7                               work->current_func() {
-	 * 8                                 LOAD event_indicated
-	 *                                 }
-	 *
-	 * Without an explicit full barrier speculative LOAD on line 8 can
-	 * be executed before CPU#0 does STORE on line 1. If that happens,
-	 * CPU#0 observes the PENDING bit is still set and new execution of
-	 * a @work is not queued in a hope, that CPU#1 will eventually
-	 * finish the queued @work. Meanwhile CPU#1 does not see
-	 * event_indicated is set, because speculative LOAD was executed
-	 * before actual STORE.
-	 */
-	smp_mb();
 }
 
 static void clear_work_data(struct work_struct *work)
@@ -4476,17 +4447,6 @@ static void rebind_workers(struct worker_pool *pool)
 						  pool->attrs->cpumask) < 0);
 
 	spin_lock_irq(&pool->lock);
-
-	/*
-	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-	 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
-	 * being reworked and this can go away in time.
-	 */
-	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		spin_unlock_irq(&pool->lock);
-		return;
-	}
-
-	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
