| author | Sahitya Tummala <stummala@codeaurora.org> | 2017-04-19 11:50:03 +0530 |
|---|---|---|
| committer | Sahitya Tummala <stummala@codeaurora.org> | 2017-05-04 09:41:48 +0530 |
| commit | d07d314e7d1d451afcc428ca074b9e03d38bf302 | |
| tree | 16ff023c3d11e7f6326f0c8073a828ba3d709408 | |
| parent | 50a43711dc69c2d102504145a1a55acd47504aea | |
fs/mbcache: fix use after free issue in mb_cache_shrink_scan()
Fix the potential race below between two contexts,
mb_cache_entry_get() and mb_cache_shrink_scan(), which results
in a use-after-free issue.
task a:
mb_cache_shrink_scan()
|--if(!list_empty(&mb_cache_lru_list))
|--get the ce entry
|--list_del_init(&ce->e_lru_list);
|--check ce->e_used, ce->e_queued,
ce->e_refcnt and continue
-> gets preempted here
task b:
ext4_xattr_release_block()
|--mb_cache_entry_get()
|--get ce from hlist_bl_for_each_entry()
|--increment ce->e_used and
list_del_init(&ce->e_lru_list)
|--mb_cache_entry_free()
|--hlist_bl_lock(ce->e_block_hash_p);
-> results in use after free
Also, fix a similar potential race between mb_cache_entry_alloc() and
mb_cache_entry_get() when cache->c_max_entries is reached.
Change-Id: I01049bae5d914cfb8494ab299ec2e068745d1110
Signed-off-by: Sahitya Tummala <stummala@codeaurora.org>
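
To make the fixed lock ordering easier to see in isolation, here is a minimal user-space sketch using pthreads. It is an analogue under stated assumptions, not the kernel code: `lru_lock` stands in for mb_cache_spinlock, `hash_lock` for hlist_bl_lock(), and boolean flags for LRU and hash-chain membership. It demonstrates the invariant the patch below enforces: the scanner holds the global lock across the unhash, and the getter takes that same lock before the hash lock, so exactly one side wins.

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	pthread_mutex_t hash_lock; /* stands in for hlist_bl_lock() */
	bool on_lru;               /* stands in for e_lru_list      */
	bool hashed;               /* entry still on its hash chain */
	int used;                  /* stands in for e_used          */
};

/* Global LRU lock, standing in for mb_cache_spinlock. */
static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry ce = {
	.hash_lock = PTHREAD_MUTEX_INITIALIZER,
	.on_lru = true,
	.hashed = true,
};

/* Shrinker side (mb_cache_shrink_scan() analogue): with the fix,
 * lru_lock stays held across the unhash, so the entry leaves the LRU
 * and the hash chain atomically as far as the getter is concerned. */
static void *scanner(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lru_lock);
	if (ce.on_lru && ce.used == 0) {
		ce.on_lru = false;
		pthread_mutex_lock(&ce.hash_lock);
		ce.hashed = false;   /* __mb_cache_entry_unhash_unlock() */
		pthread_mutex_unlock(&ce.hash_lock);
		/* The pre-fix code dropped lru_lock before this point,
		 * opening a window where the getter could still find
		 * and use an entry that was about to be freed. */
	}
	pthread_mutex_unlock(&lru_lock);
	return NULL;
}

/* Getter side (mb_cache_entry_get() analogue): takes lru_lock before
 * the hash lock, mirroring the patched lock order, and only uses the
 * entry if it is still hashed. */
static void *getter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lru_lock);
	pthread_mutex_lock(&ce.hash_lock);
	if (ce.hashed) {
		ce.used++;          /* pin the entry */
		ce.on_lru = false;  /* and pull it off the LRU */
	}
	pthread_mutex_unlock(&ce.hash_lock);
	pthread_mutex_unlock(&lru_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, scanner, NULL);
	pthread_create(&b, NULL, getter, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Exactly one side wins: either the scanner unhashed the entry
	 * first, or the getter pinned it and the scanner skipped it. */
	printf("hashed=%d used=%d on_lru=%d\n", ce.hashed, ce.used, ce.on_lru);
	return 0;
}
```

Built with `cc -pthread`, this always prints either `hashed=0 used=0 on_lru=0` (scanner won) or `hashed=1 used=1 on_lru=0` (getter won), never a mix.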
| -rw-r--r-- | fs/mbcache.c | 18 |
1 file changed, 8 insertions(+), 10 deletions(-)
```diff
diff --git a/fs/mbcache.c b/fs/mbcache.c
index 187477ded6b3..ab1da987d1ae 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -262,7 +262,6 @@ mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		list_del_init(&ce->e_lru_list);
 		if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
 			continue;
-		spin_unlock(&mb_cache_spinlock);
 		/* Prevent any find or get operation on the entry */
 		hlist_bl_lock(ce->e_block_hash_p);
 		hlist_bl_lock(ce->e_index_hash_p);
@@ -271,10 +270,10 @@ mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		    !list_empty(&ce->e_lru_list)) {
 			hlist_bl_unlock(ce->e_index_hash_p);
 			hlist_bl_unlock(ce->e_block_hash_p);
-			spin_lock(&mb_cache_spinlock);
 			continue;
 		}
 		__mb_cache_entry_unhash_unlock(ce);
+		spin_unlock(&mb_cache_spinlock);
 		list_add_tail(&ce->e_lru_list, &free_list);
 		spin_lock(&mb_cache_spinlock);
 	}
@@ -516,7 +515,6 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 				if (ce->e_used || ce->e_queued ||
 				    atomic_read(&ce->e_refcnt))
 					continue;
-				spin_unlock(&mb_cache_spinlock);
 				/*
 				 * Prevent any find or get operation on the
 				 * entry.
@@ -530,13 +528,13 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 					hlist_bl_unlock(ce->e_index_hash_p);
 					hlist_bl_unlock(ce->e_block_hash_p);
 					l = &mb_cache_lru_list;
-					spin_lock(&mb_cache_spinlock);
 					continue;
 				}
 				mb_assert(list_empty(&ce->e_lru_list));
 				mb_assert(!(ce->e_used || ce->e_queued ||
 					    atomic_read(&ce->e_refcnt)));
 				__mb_cache_entry_unhash_unlock(ce);
+				spin_unlock(&mb_cache_spinlock);
 				goto found;
 			}
 		}
@@ -670,6 +668,7 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 			   cache->c_bucket_bits);
 	block_hash_p = &cache->c_block_hash[bucket];
 	/* First serialize access to the block corresponding hash chain. */
+	spin_lock(&mb_cache_spinlock);
 	hlist_bl_lock(block_hash_p);
 	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
 		mb_assert(ce->e_block_hash_p == block_hash_p);
@@ -678,9 +677,11 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 			 * Prevent a free from removing the entry.
 			 */
 			atomic_inc(&ce->e_refcnt);
+			if (!list_empty(&ce->e_lru_list))
+				list_del_init(&ce->e_lru_list);
 			hlist_bl_unlock(block_hash_p);
+			spin_unlock(&mb_cache_spinlock);
 			__spin_lock_mb_cache_entry(ce);
-			atomic_dec(&ce->e_refcnt);
 			if (ce->e_used > 0) {
 				DEFINE_WAIT(wait);
 				while (ce->e_used > 0) {
@@ -695,13 +696,9 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 				finish_wait(&mb_cache_queue, &wait);
 			}
 			ce->e_used += 1 + MB_CACHE_WRITER;
+			atomic_dec(&ce->e_refcnt);
 			__spin_unlock_mb_cache_entry(ce);
 
-			if (!list_empty(&ce->e_lru_list)) {
-				spin_lock(&mb_cache_spinlock);
-				list_del_init(&ce->e_lru_list);
-				spin_unlock(&mb_cache_spinlock);
-			}
 			if (!__mb_cache_entry_is_block_hashed(ce)) {
 				__mb_cache_entry_release(ce);
 				return NULL;
@@ -710,6 +707,7 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 		}
 	}
 	hlist_bl_unlock(block_hash_p);
+	spin_unlock(&mb_cache_spinlock);
 	return NULL;
 }
 
```
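In short, the patch makes mb_cache_spinlock cover the whole unhash in both reclaim paths (mb_cache_shrink_scan() and mb_cache_entry_alloc()), and makes mb_cache_entry_get() take that same lock before the block hash lock, pulling the entry off the LRU while both are held. The atomic_dec() of e_refcnt also moves after the e_used update, so the reference taken under the hash lock keeps the entry alive until it is pinned.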
