From 630d9c47274aa89bfa77fe6556d7818bdcb12992 Mon Sep 17 00:00:00 2001
From: Paul Gortmaker
Date: Wed, 16 Nov 2011 23:57:37 -0500
Subject: fs: reduce the use of module.h wherever possible

For files only using THIS_MODULE and/or EXPORT_SYMBOL, map them onto
including export.h -- or if the file isn't even using those, then just
delete the include.  Fix up any implicit include dependencies that were
being masked by module.h along the way.

Signed-off-by: Paul Gortmaker
---
 fs/aio.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index 969beb0e2231..4b5e06390db0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -13,7 +13,7 @@
 #include <linux/errno.h>
 #include <linux/time.h>
 #include <linux/aio_abi.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/syscalls.h>
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
--
cgit v1.2.3

From 880641bb9da2473e9ecf6c708d993b29928c1b3c Mon Sep 17 00:00:00 2001
From: Jeff Moyer
Date: Mon, 5 Mar 2012 14:59:12 -0800
Subject: aio: wake up waiters when freeing unused kiocbs

Bart Van Assche reported a hung fio process when either hot-removing
storage or when interrupting the fio process itself.  The (pruned) call
trace for the latter looks like so:

  fio             D 0000000000000001     0  6849   6848 0x00000004
   ffff880092541b88 0000000000000046 ffff880000000000 ffff88012fa11dc0
   ffff88012404be70 ffff880092541fd8 ffff880092541fd8 ffff880092541fd8
   ffff880128b894d0 ffff88012404be70 ffff880092541b88 000000018106f24d

  Call Trace:
   schedule+0x3f/0x60
   io_schedule+0x8f/0xd0
   wait_for_all_aios+0xc0/0x100
   exit_aio+0x55/0xc0
   mmput+0x2d/0x110
   exit_mm+0x10d/0x130
   do_exit+0x671/0x860
   do_group_exit+0x44/0xb0
   get_signal_to_deliver+0x218/0x5a0
   do_signal+0x65/0x700
   do_notify_resume+0x65/0x80
   int_signal+0x12/0x17

The problem lies with the allocation batching code.  It will
opportunistically allocate kiocbs, and then trim back the list of iocbs
when there is not enough room in the completion ring to hold all of the
events.

In the case above, what happens is that the pruning back of events ends
up freeing up the last active request and the context is marked as
dead, so it is thus responsible for waking up waiters.  Unfortunately,
the code does not check for this condition, so we end up with a hung
task.

Signed-off-by: Jeff Moyer
Reported-by: Bart Van Assche
Tested-by: Bart Van Assche
Cc: stable@vger.kernel.org  [3.2.x only]
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 fs/aio.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index 969beb0e2231..67e4b9047cc9 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -490,6 +490,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
                kmem_cache_free(kiocb_cachep, req);
                ctx->reqs_active--;
        }
+       if (unlikely(!ctx->reqs_active && ctx->dead))
+               wake_up_all(&ctx->wait);
        spin_unlock_irq(&ctx->ctx_lock);
 }
--
cgit v1.2.3
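[Editor's note] The invariant the patch above restores is worth stating
explicitly: wait_for_all_aios() sleeps until ctx->reqs_active drops to
zero, so whichever path frees the last request on a context already
marked dead must issue the wakeup.  A minimal sketch of that rule,
using the fs/aio.c names of this era (an illustration, not a verbatim
quote of the file):

        /*
         * Whoever drops the last request on a dead context must wake
         * the waiters, or exit_aio()/io_destroy() sleep forever.
         */
        spin_lock_irq(&ctx->ctx_lock);
        kmem_cache_free(kiocb_cachep, req);     /* free one batched kiocb */
        ctx->reqs_active--;
        if (unlikely(!ctx->reqs_active && ctx->dead))
                wake_up_all(&ctx->wait);        /* last request gone */
        spin_unlock_irq(&ctx->ctx_lock);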
From 86b62a2cb4fc09037bbce2959d2992962396fd7f Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Wed, 7 Mar 2012 05:16:35 +0000
Subject: aio: fix io_setup/io_destroy race

Have ioctx_alloc() return an extra reference, so that caller would drop
it on success and not bother with re-grabbing it on failure exit.  The
current code is obviously broken - io_destroy() from another thread
that managed to guess the address io_setup() would've returned would
free ioctx right under us; gets especially interesting if aio_context_t *
we pass to io_setup() points to PROT_READ mapping, so put_user() fails
and we end up doing io_destroy() on kioctx another thread has just got
freed...

Signed-off-by: Al Viro
Acked-by: Benjamin LaHaise
Reviewed-by: Jeff Moyer
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds
---
 fs/aio.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index 67e4b9047cc9..f6578cb22d00 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -273,7 +273,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        mm = ctx->mm = current->mm;
        atomic_inc(&mm->mm_count);
 
-       atomic_set(&ctx->users, 1);
+       atomic_set(&ctx->users, 2);
        spin_lock_init(&ctx->ctx_lock);
        spin_lock_init(&ctx->ring_info.ring_lock);
        init_waitqueue_head(&ctx->wait);
@@ -1338,10 +1338,10 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        ret = PTR_ERR(ioctx);
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
-               if (!ret)
+               if (!ret) {
+                       put_ioctx(ioctx);
                        return 0;
-
-               get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+               }
                io_destroy(ioctx);
        }
--
cgit v1.2.3
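[Editor's note] The resulting reference-counting rule, sketched for
clarity (a simplified fragment of the patched io_setup() path, not
self-contained code):

        /*
         * ioctx_alloc() now returns with ctx->users == 2: one reference
         * owned by the mm's ioctx list, one owned by the caller.  The
         * caller always drops its own reference instead of re-taking one
         * on failure, so a concurrent io_destroy() on a guessed context
         * id can no longer free the kioctx under us.
         */
        struct kioctx *ioctx = ioctx_alloc(nr_events);  /* users == 2 */
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
                if (!ret) {
                        put_ioctx(ioctx);       /* drop the caller's ref */
                        return 0;
                }
                io_destroy(ioctx);      /* failure: tear the context down */
        }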
From c7b285550544c22bc005ec20978472c9ac7138c6 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Thu, 8 Mar 2012 17:51:19 +0000
Subject: aio: fix the "too late munmap()" race

Current code has put_ioctx() called asynchronously from
aio_fput_routine(); that's done *after* we have killed the request that
used to pin ioctx, so there's nothing to stop io_destroy() waiting in
wait_for_all_aios() from progressing.  As the result, we can end up
with async call of put_ioctx() being the last one and possibly
happening during exit_mmap() or elf_core_dump(), neither of which
expects stray munmap() being done to them...

We do need to prevent _freeing_ ioctx until aio_fput_routine() is done
with that, but that's all we care about - neither io_destroy() nor
exit_aio() will progress past wait_for_all_aios() until
aio_fput_routine() does really_put_req(), so the ioctx teardown won't
be done until then and we don't care about the contents of ioctx past
that point.

Since actual freeing of these suckers is RCU-delayed, we don't need to
bump ioctx refcount when request goes into list for async removal.
All we need is rcu_read_lock held just over the ->ctx_lock-protected
area in aio_fput_routine().

Signed-off-by: Al Viro
Reviewed-by: Jeff Moyer
Acked-by: Benjamin LaHaise
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds
---
 fs/aio.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index f6578cb22d00..b9d64d89a043 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -228,12 +228,6 @@ static void __put_ioctx(struct kioctx *ctx)
        call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
 
-static inline void get_ioctx(struct kioctx *kioctx)
-{
-       BUG_ON(atomic_read(&kioctx->users) <= 0);
-       atomic_inc(&kioctx->users);
-}
-
 static inline int try_get_ioctx(struct kioctx *kioctx)
 {
        return atomic_inc_not_zero(&kioctx->users);
@@ -609,11 +603,16 @@ static void aio_fput_routine(struct work_struct *data)
                        fput(req->ki_filp);
 
                /* Link the iocb into the context's free list */
+               rcu_read_lock();
                spin_lock_irq(&ctx->ctx_lock);
                really_put_req(ctx, req);
+               /*
+                * at that point ctx might've been killed, but actual
+                * freeing is RCU'd
+                */
                spin_unlock_irq(&ctx->ctx_lock);
+               rcu_read_unlock();
 
-               put_ioctx(ctx);
                spin_lock_irq(&fput_lock);
        }
        spin_unlock_irq(&fput_lock);
@@ -644,7 +643,6 @@ static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
         * this function will be executed w/out any aio kthread wakeup.
         */
        if (unlikely(!fput_atomic(req->ki_filp))) {
-               get_ioctx(ctx);
                spin_lock(&fput_lock);
                list_add(&req->ki_list, &fput_head);
                spin_unlock(&fput_lock);
--
cgit v1.2.3
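[Editor's note] The underlying pattern recurs wherever an object is
freed via call_rcu(): a read-side critical section is enough to keep
the memory valid, so no reference count is needed for a short
asynchronous access.  A sketch under that assumption:

        /*
         * Because __put_ioctx() frees the kioctx through call_rcu(),
         * any CPU inside rcu_read_lock() is guaranteed the structure
         * stays allocated, even if the context is killed concurrently.
         */
        rcu_read_lock();
        spin_lock_irq(&ctx->ctx_lock);
        really_put_req(ctx, req);       /* may drop the last pinning request */
        spin_unlock_irq(&ctx->ctx_lock);
        rcu_read_unlock();              /* freeing cannot happen before this */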
From e8e3c3d66fd9d1ee2250f68d778cc48c1346d228 Mon Sep 17 00:00:00 2001
From: Cong Wang
Date: Fri, 25 Nov 2011 23:14:27 +0800
Subject: fs: remove the second argument of k[un]map_atomic()

Acked-by: Benjamin LaHaise
Signed-off-by: Cong Wang
---
 fs/aio.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index b9d64d89a043..5b600cb8779e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 
        info->nr = nr_events;           /* trusted copy */
 
-       ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+       ring = kmap_atomic(info->ring_pages[0]);
        ring->nr = nr_events;   /* user copy */
        ring->id = ctx->user_id;
        ring->head = ring->tail = 0;
@@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx)
        ring->compat_features = AIO_RING_COMPAT_FEATURES;
        ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
        ring->header_length = sizeof(struct aio_ring);
-       kunmap_atomic(ring, KM_USER0);
+       kunmap_atomic(ring);
 
        return 0;
 }
 
 /* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
+ * kmap_atomic(). Release the pointer with put_aio_ring_event();
 */
 #define AIO_EVENTS_PER_PAGE    (PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE  ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET      (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-#define aio_ring_event(info, nr, km) ({ \
+#define aio_ring_event(info, nr) ({ \
        unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
        struct io_event *__event; \
        __event = kmap_atomic( \
-                       (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
+                       (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
        __event += pos % AIO_EVENTS_PER_PAGE; \
        __event; \
 })
 
-#define put_aio_ring_event(event, km) do { \
+#define put_aio_ring_event(event) do { \
        struct io_event *__event = (event); \
        (void)__event; \
-       kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
+       kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
 } while(0)
 
 static void ctx_rcu_free(struct rcu_head *head)
@@ -1019,10 +1019,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
        if (kiocbIsCancelled(iocb))
                goto put_rq;
 
-       ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
+       ring = kmap_atomic(info->ring_pages[0]);
 
        tail = info->tail;
-       event = aio_ring_event(info, tail, KM_IRQ0);
+       event = aio_ring_event(info, tail);
        if (++tail >= info->nr)
                tail = 0;
@@ -1043,8 +1043,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
        info->tail = tail;
        ring->tail = tail;
 
-       put_aio_ring_event(event, KM_IRQ0);
-       kunmap_atomic(ring, KM_IRQ1);
+       put_aio_ring_event(event);
+       kunmap_atomic(ring);
 
        pr_debug("added to ring %p at [%lu]\n", iocb, tail);
 
@@ -1089,7 +1089,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
        unsigned long head;
        int ret = 0;
 
-       ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+       ring = kmap_atomic(info->ring_pages[0]);
        dprintk("in aio_read_evt h%lu t%lu m%lu\n",
                 (unsigned long)ring->head, (unsigned long)ring->tail,
                 (unsigned long)ring->nr);
@@ -1101,18 +1101,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 
        head = ring->head % info->nr;
        if (head != ring->tail) {
-               struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+               struct io_event *evp = aio_ring_event(info, head);
                *ent = *evp;
                head = (head + 1) % info->nr;
                smp_mb(); /* finish reading the event before updatng the head */
                ring->head = head;
                ret = 1;
-               put_aio_ring_event(evp, KM_USER1);
+               put_aio_ring_event(evp);
        }
        spin_unlock(&info->ring_lock);
 
 out:
-       kunmap_atomic(ring, KM_USER0);
+       kunmap_atomic(ring);
        dprintk("leaving aio_read_evt: %d  h%lu t%lu\n", ret,
                 (unsigned long)ring->head, (unsigned long)ring->tail);
        return ret;
--
cgit v1.2.3
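[Editor's note] With the KM_* slot argument gone, atomic kmaps behave
as a small per-CPU stack: callers no longer name a slot, and unmaps
must happen in reverse order of the maps.  A sketch of the resulting
discipline in aio_complete() (simplified from the patched code; error
handling and ring-full checks omitted):

        struct aio_ring *ring = kmap_atomic(info->ring_pages[0]);
        struct io_event *event = aio_ring_event(info, tail); /* nested map */

        event->obj = (u64)(unsigned long)iocb->ki_obj.user; /* fill slot */

        put_aio_ring_event(event);      /* unmap LIFO: the event page first */
        kunmap_atomic(ring);            /* ...then the ring header */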
From e23754f880f10124f0a2848f9d17e361a295378e Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 6 Mar 2012 14:33:22 -0500
Subject: aio: don't bother with async freeing on failure in ioctx_alloc()

Signed-off-by: Al Viro
---
 fs/aio.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index b9d64d89a043..d09b56090aa5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -248,6 +248,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        struct mm_struct *mm;
        struct kioctx *ctx;
        int did_sync = 0;
+       int err = -ENOMEM;
 
        /* Prevent overflows */
        if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
@@ -310,16 +311,13 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        return ctx;
 
 out_cleanup:
-       __put_ioctx(ctx);
-       return ERR_PTR(-EAGAIN);
-
+       err = -EAGAIN;
+       aio_free_ring(ctx);
 out_freectx:
        mmdrop(mm);
        kmem_cache_free(kioctx_cachep, ctx);
-       ctx = ERR_PTR(-ENOMEM);
-
-       dprintk("aio: error allocating ioctx %p\n", ctx);
-       return ctx;
+       dprintk("aio: error allocating ioctx %d\n", err);
+       return ERR_PTR(err);
 }
--
cgit v1.2.3

From 2dd542b7aeb1c222273cf0593a718d9b44998d9f Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sat, 10 Mar 2012 23:10:35 -0500
Subject: aio: aio_nr decrements don't need to be delayed

we can do that right in __put_ioctx(); as the result, the loop in
ioctx_alloc() can be killed.

Signed-off-by: Al Viro
---
 fs/aio.c | 42 ++++++++++++++----------------------------
 1 file changed, 14 insertions(+), 28 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index d09b56090aa5..216eb37b2c76 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -199,16 +199,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 static void ctx_rcu_free(struct rcu_head *head)
 {
        struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
-       unsigned nr_events = ctx->max_reqs;
-
        kmem_cache_free(kioctx_cachep, ctx);
-
-       if (nr_events) {
-               spin_lock(&aio_nr_lock);
-               BUG_ON(aio_nr - nr_events > aio_nr);
-               aio_nr -= nr_events;
-               spin_unlock(&aio_nr_lock);
-       }
 }
 
 /* __put_ioctx
@@ -217,6 +208,7 @@ static void ctx_rcu_free(struct rcu_head *head)
  */
 static void __put_ioctx(struct kioctx *ctx)
 {
+       unsigned nr_events = ctx->max_reqs;
        BUG_ON(ctx->reqs_active);
 
        cancel_delayed_work(&ctx->wq);
@@ -224,6 +216,12 @@ static void __put_ioctx(struct kioctx *ctx)
        aio_free_ring(ctx);
        mmdrop(ctx->mm);
        ctx->mm = NULL;
+       if (nr_events) {
+               spin_lock(&aio_nr_lock);
+               BUG_ON(aio_nr - nr_events > aio_nr);
+               aio_nr -= nr_events;
+               spin_unlock(&aio_nr_lock);
+       }
        pr_debug("__put_ioctx: freeing %p\n", ctx);
        call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
@@ -247,7 +245,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 {
        struct mm_struct *mm;
        struct kioctx *ctx;
-       int did_sync = 0;
        int err = -ENOMEM;
 
        /* Prevent overflows */
@@ -257,7 +254,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
                return ERR_PTR(-EINVAL);
        }
 
-       if ((unsigned long)nr_events > aio_max_nr)
+       if (!nr_events || (unsigned long)nr_events > aio_max_nr)
                return ERR_PTR(-EAGAIN);
 
        ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
@@ -281,25 +278,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
                goto out_freectx;
 
        /* limit the number of system wide aios */
-       do {
-               spin_lock_bh(&aio_nr_lock);
-               if (aio_nr + nr_events > aio_max_nr ||
-                   aio_nr + nr_events < aio_nr)
-                       ctx->max_reqs = 0;
-               else
-                       aio_nr += ctx->max_reqs;
+       spin_lock_bh(&aio_nr_lock);
+       if (aio_nr + nr_events > aio_max_nr ||
+           aio_nr + nr_events < aio_nr) {
                spin_unlock_bh(&aio_nr_lock);
-               if (ctx->max_reqs || did_sync)
-                       break;
-
-               /* wait for rcu callbacks to have completed before giving up */
-               synchronize_rcu();
-               did_sync = 1;
-               ctx->max_reqs = nr_events;
-       } while (1);
-
-       if (ctx->max_reqs == 0)
                goto out_cleanup;
+       }
+       aio_nr += ctx->max_reqs;
+       spin_unlock_bh(&aio_nr_lock);
 
        /* now link into global list. */
        spin_lock(&mm->ioctx_lock);
--
cgit v1.2.3

From 9fa1cb397fa052fc9acfaf5a9f2faff31e10f6b7 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sat, 10 Mar 2012 23:14:05 -0500
Subject: aio: aio_nr_lock is taken only synchronously now

Signed-off-by: Al Viro
---
 fs/aio.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index 216eb37b2c76..9c3de88e2ead 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -278,14 +278,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
                goto out_freectx;
 
        /* limit the number of system wide aios */
-       spin_lock_bh(&aio_nr_lock);
+       spin_lock(&aio_nr_lock);
        if (aio_nr + nr_events > aio_max_nr ||
            aio_nr + nr_events < aio_nr) {
-               spin_unlock_bh(&aio_nr_lock);
+               spin_unlock(&aio_nr_lock);
                goto out_cleanup;
        }
        aio_nr += ctx->max_reqs;
-       spin_unlock_bh(&aio_nr_lock);
+       spin_unlock(&aio_nr_lock);
 
        /* now link into global list. */
        spin_lock(&mm->ioctx_lock);
--
cgit v1.2.3
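[Editor's note] One detail of the hunk above that is easy to misread:
the second comparison is the standard unsigned-overflow test.  If
aio_nr + nr_events wraps around, the sum comes out smaller than aio_nr,
so the over-limit case and the wraparound case take the same bail-out
path under the same lock:

        spin_lock(&aio_nr_lock);
        if (aio_nr + nr_events > aio_max_nr ||
            aio_nr + nr_events < aio_nr) {      /* wrapped: reject too */
                spin_unlock(&aio_nr_lock);
                goto out_cleanup;
        }
        aio_nr += ctx->max_reqs;        /* safe: checked under the lock */
        spin_unlock(&aio_nr_lock);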
From bf50722a3c4a83aae651dc20b708308a4f119eb9 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sun, 11 Mar 2012 00:58:40 -0500
Subject: aio: use cancel_delayed_work_sync()

Signed-off-by: Al Viro
---
 fs/aio.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index 9c3de88e2ead..a92d7547b6f6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -211,8 +211,7 @@ static void __put_ioctx(struct kioctx *ctx)
        unsigned nr_events = ctx->max_reqs;
        BUG_ON(ctx->reqs_active);
 
-       cancel_delayed_work(&ctx->wq);
-       cancel_work_sync(&ctx->wq.work);
+       cancel_delayed_work_sync(&ctx->wq);
        aio_free_ring(ctx);
        mmdrop(ctx->mm);
        ctx->mm = NULL;
--
cgit v1.2.3
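[Editor's note] For readers unfamiliar with the workqueue API: the
two-call sequence being replaced cancels the timer and flushes the
handler as separate steps, while the combined helper covers both in one
call.  A sketch, assuming the standard semantics of these helpers:

        /* Before: two steps. */
        cancel_delayed_work(&ctx->wq);          /* stop a pending timer */
        cancel_work_sync(&ctx->wq.work);        /* wait out a running handler */

        /* After: one call that both cancels and waits. */
        cancel_delayed_work_sync(&ctx->wq);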
From cd1ea261ac128479833b9f518bf788ee47ada2de Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Sun, 11 Mar 2012 00:59:07 -0500
Subject: aio: don't bother with cancel_delayed_work() in exit_aio()

__put_ioctx() will cover it anyway.

Signed-off-by: Al Viro
---
 fs/aio.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index a92d7547b6f6..3174cd969b01 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -390,10 +390,6 @@ void exit_aio(struct mm_struct *mm)
                aio_cancel_all(ctx);
                wait_for_all_aios(ctx);
 
-               /*
-                * Ensure we don't leave the ctx on the aio_wq
-                */
-               cancel_work_sync(&ctx->wq.work);
 
                if (1 != atomic_read(&ctx->users))
                        printk(KERN_DEBUG
--
cgit v1.2.3

From 9fcf03d0d6e845ed495fc8b1ec328b473ff298b3 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 13 Mar 2012 22:06:28 -0400
Subject: aio: fix the comment in aio_kick_handler()

It should've been changed when queue_work() became
queue_delayed_work(..., 0) in there.  It's always had been about not
needing a delay, not about not using specific function...

Signed-off-by: Al Viro
---
 fs/aio.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index 3174cd969b01..59b431a2ae0c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -899,7 +899,7 @@ static void aio_kick_handler(struct work_struct *work)
        unuse_mm(mm);
        set_fs(oldfs);
        /*
-        * we're in a worker thread already, don't use queue_delayed_work,
+        * we're in a worker thread already; no point using non-zero delay
         */
        if (requeue)
                queue_delayed_work(aio_wq, &ctx->wq, 0);
--
cgit v1.2.3

From 06af121eab543e5554f7a29538f171a382aaf855 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 20 Mar 2012 16:26:24 -0400
Subject: aio: merge aio_cancel_all() with wait_for_all_aios()

Signed-off-by: Al Viro
---
 fs/aio.c | 22 +++++++---------------
 1 file changed, 7 insertions(+), 15 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index 4f71627264fd..fe37a94127e7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -305,15 +305,18 @@ out_freectx:
        return ERR_PTR(err);
 }
 
-/* aio_cancel_all
+/* kill_ctx
  *     Cancels all outstanding aio requests on an aio context.  Used
  *     when the processes owning a context have all exited to encourage
  *     the rapid destruction of the kioctx.
  */
-static void aio_cancel_all(struct kioctx *ctx)
+static void kill_ctx(struct kioctx *ctx)
 {
        int (*cancel)(struct kiocb *, struct io_event *);
+       struct task_struct *tsk = current;
+       DECLARE_WAITQUEUE(wait, tsk);
        struct io_event res;
+
        spin_lock_irq(&ctx->ctx_lock);
        ctx->dead = 1;
        while (!list_empty(&ctx->active_reqs)) {
@@ -329,15 +332,7 @@ static void aio_cancel_all(struct kioctx *ctx)
                        spin_lock_irq(&ctx->ctx_lock);
                }
        }
-       spin_unlock_irq(&ctx->ctx_lock);
-}
 
-static void wait_for_all_aios(struct kioctx *ctx)
-{
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
-
-       spin_lock_irq(&ctx->ctx_lock);
        if (!ctx->reqs_active)
                goto out;
@@ -387,9 +382,7 @@ void exit_aio(struct mm_struct *mm)
                ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
                hlist_del_rcu(&ctx->list);
 
-               aio_cancel_all(ctx);
-
-               wait_for_all_aios(ctx);
+               kill_ctx(ctx);
 
                if (1 != atomic_read(&ctx->users))
                        printk(KERN_DEBUG
@@ -1269,8 +1262,7 @@ static void io_destroy(struct kioctx *ioctx)
        if (likely(!was_dead))
                put_ioctx(ioctx);       /* twice for the list */
 
-       aio_cancel_all(ioctx);
-       wait_for_all_aios(ioctx);
+       kill_ctx(ioctx);
 
        /*
         * Wake up any waiters. The setting of ctx->dead must be seen
--
cgit v1.2.3

From a2e1859adb3b05fa99f87a67df9ef2a4b7b04a13 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 20 Mar 2012 16:27:57 -0400
Subject: aio: take final put_ioctx() into callers of io_destroy()

Signed-off-by: Al Viro
---
 fs/aio.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)

(limited to 'fs/aio.c')

diff --git a/fs/aio.c b/fs/aio.c
index fe37a94127e7..da887604dfc5 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1270,7 +1270,6 @@ static void io_destroy(struct kioctx *ioctx)
         * locking done by the above calls to ensure this consistency.
         */
        wake_up_all(&ioctx->wait);
-       put_ioctx(ioctx);       /* once for the lookup */
 }
 
 /* sys_io_setup:
@@ -1307,11 +1306,9 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        ret = PTR_ERR(ioctx);
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
-               if (!ret) {
-                       put_ioctx(ioctx);
-                       return 0;
-               }
-               io_destroy(ioctx);
+               if (ret)
+                       io_destroy(ioctx);
+               put_ioctx(ioctx);
        }
 
 out:
@@ -1329,6 +1326,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
                io_destroy(ioctx);
+               put_ioctx(ioctx);
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
--
cgit v1.2.3
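[Editor's note] The final patch leaves a uniform ownership rule:
io_destroy() only unlinks and kills the context, and every caller pairs
the reference it obtained (from lookup_ioctx() or ioctx_alloc()) with
its own put_ioctx().  Sketched from the patched sys_io_destroy() path
(fragment, not self-contained):

        struct kioctx *ioctx = lookup_ioctx(ctx);      /* takes a reference */
        if (likely(NULL != ioctx)) {
                io_destroy(ioctx);      /* unlink, cancel, wait, wake waiters */
                put_ioctx(ioctx);       /* drop the lookup reference */
                return 0;
        }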