diff options
author | Vinayak Menon <vinmenon@codeaurora.org> | 2016-01-07 12:29:29 +0530 |
---|---|---|
committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:16:45 -0700 |
commit | a360ae6982fbf45bc3a64c6ff748a9101408daa1 (patch) | |
tree | b8ca1481cc06ffe8c3f73619252d28428954e3f8 /mm/zcache.c | |
parent | 862365f7936e326f66d0e19be5e50adb48f1b7b7 (diff) |
mm: zcache: fix race between store and evict
The following race is possible:
CPU 1                                CPU 2
zcache_store_page
  zbud_alloc
                                     zcache_evict_zpage
                                       zpool = zhandle->zpool;
                                       CRASH (zpool not yet set)
  zhandle->zpool = zpool
Fix this by deferring initialization of the zhandle fields until the
compressed page data has actually been stored, and by validating the
zpool pointer in zcache_evict_zpage instead of hitting a BUG_ON.
Change-Id: I02328220b30f415fa1f171236eab3a2e40072fd9
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Diffstat (limited to 'mm/zcache.c')
-rw-r--r-- | mm/zcache.c | 26 |
1 file changed, 17 insertions, 9 deletions
diff --git a/mm/zcache.c b/mm/zcache.c index 0151a2c54f66..61ef23abc844 100644 --- a/mm/zcache.c +++ b/mm/zcache.c @@ -77,7 +77,7 @@ static atomic_t zcache_stored_zero_pages = ATOMIC_INIT(0); #define GFP_ZCACHE \ (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | \ - __GFP_NOMEMALLOC | __GFP_NO_KSWAPD) + __GFP_NOMEMALLOC | __GFP_NO_KSWAPD | __GFP_ZERO) /* * Make sure this is different from radix tree @@ -669,10 +669,6 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key, } zhandle = (struct zcache_ra_handle *)zbud_map(zpool->pool, zaddr); - zhandle->ra_index = index; - zhandle->rb_index = key.u.ino; - zhandle->zlen = zlen; - zhandle->zpool = zpool; /* Compressed page data stored at the end of zcache_ra_handle */ zpage = (u8 *)(zhandle + 1); @@ -696,6 +692,10 @@ zero: if (zero) { atomic_inc(&zcache_stored_zero_pages); } else { + zhandle->ra_index = index; + zhandle->rb_index = key.u.ino; + zhandle->zlen = zlen; + zhandle->zpool = zpool; atomic_inc(&zcache_stored_pages); zpool->size = zbud_get_pool_size(zpool->pool); } @@ -776,6 +776,7 @@ static void zcache_flush_ratree(struct zcache_pool *zpool, unsigned long index = 0; int count, i; struct zcache_ra_handle *zhandle; + void *zaddr = NULL; do { void *zaddrs[FREE_BATCH]; @@ -787,14 +788,18 @@ static void zcache_flush_ratree(struct zcache_pool *zpool, for (i = 0; i < count; i++) { if (zaddrs[i] == ZERO_HANDLE) { - radix_tree_delete(&rbnode->ratree, indices[i]); - atomic_dec(&zcache_stored_zero_pages); + zaddr = radix_tree_delete(&rbnode->ratree, + indices[i]); + if (zaddr) + atomic_dec(&zcache_stored_zero_pages); continue; } zhandle = (struct zcache_ra_handle *)zbud_map( zpool->pool, (unsigned long)zaddrs[i]); index = zhandle->ra_index; - radix_tree_delete(&rbnode->ratree, index); + zaddr = radix_tree_delete(&rbnode->ratree, index); + if (!zaddr) + continue; zbud_unmap(zpool->pool, (unsigned long)zaddrs[i]); zbud_free(zpool->pool, (unsigned long)zaddrs[i]); atomic_dec(&zcache_stored_pages); @@ -889,7 
+894,10 @@ static int zcache_evict_zpage(struct zbud_pool *pool, unsigned long zaddr) zhandle = (struct zcache_ra_handle *)zbud_map(pool, zaddr); zpool = zhandle->zpool; - BUG_ON(!zpool); + /* There can be a race with zcache store */ + if (!zpool) + return -EINVAL; + BUG_ON(pool != zpool->pool); zaddr_intree = zcache_load_delete_zaddr(zpool, zhandle->rb_index, |