diff options
author | Vinayak Menon <vinmenon@codeaurora.org> | 2015-09-01 15:09:35 +0530 |
---|---|---|
committer | David Keitel <dkeitel@codeaurora.org> | 2016-03-23 21:15:20 -0700 |
commit | 4d09f895a3dd400f90ae44690d9d9af5ad5a58bc (patch) | |
tree | 3c31ff127ed38ecc88ed01710ec42f77d5df69fd /mm/zcache.c | |
parent | 90a22ec53a7607ca80afbcc580453d99e73d0d57 (diff) |
mm: zcache: clear zcache when low on file pages
When file pages are very low, it is better to clear off
zcache pages, since the freed memory can be used to sustain
an application in foreground. Moreover when file pages
are too low, we don't gain much by holding a few zcache
pages.
Change-Id: I88dd295d24b7de18fb3bc0788e0baeb6bfdb2f6d
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Diffstat (limited to 'mm/zcache.c')
-rw-r--r-- | mm/zcache.c | 33 |
1 files changed, 28 insertions, 5 deletions
diff --git a/mm/zcache.c b/mm/zcache.c index 47200cc6de9b..0151a2c54f66 100644 --- a/mm/zcache.c +++ b/mm/zcache.c @@ -58,6 +58,8 @@ module_param_named(compressor, zcache_compressor, charp, 0); static unsigned int zcache_max_pool_percent = 10; module_param_named(max_pool_percent, zcache_max_pool_percent, uint, 0644); +static unsigned int zcache_clear_percent = 4; +module_param_named(clear_percent, zcache_clear_percent, uint, 0644); /* * zcache statistics */ @@ -187,36 +189,53 @@ static unsigned long zcache_count(struct shrinker *s, static unsigned long zcache_scan(struct shrinker *s, struct shrink_control *sc) { unsigned long active_file; + unsigned long file; long file_gap; unsigned long freed = 0; + unsigned long pool; static bool running; int i = 0; + int retries; if (running) goto end; running = true; active_file = global_page_state(NR_ACTIVE_FILE); + file = global_page_state(NR_FILE_PAGES); + pool = zcache_pages(); + + file_gap = pool - file; + + if ((file_gap >= 0) && + (totalram_pages * zcache_clear_percent / 100 > file)) { + file_gap = pool; + zcache_pool_shrink++; + goto reclaim; + } /* * file_gap == 0 means that the number of pages * stored by zcache is around twice as many as the * number of active file pages. 
*/ - file_gap = zcache_pages() - active_file; + file_gap = pool - active_file; if (file_gap < 0) file_gap = 0; else zcache_pool_shrink++; - while (file_gap > 0) { +reclaim: + retries = file_gap; + while ((file_gap > 0) && retries) { struct zcache_pool *zpool = zcache.pools[i++ % MAX_ZCACHE_POOLS]; if (!zpool || !zpool->size) continue; if (zbud_reclaim_page(zpool->pool, 8)) { zcache_pool_shrink_fail++; - break; + retries--; + continue; } freed++; file_gap--; @@ -378,8 +397,12 @@ cleanup: */ static bool zcache_is_full(void) { - return totalram_pages * zcache_max_pool_percent / 100 < - zcache_pages(); + long file = global_page_state(NR_FILE_PAGES); + + return ((totalram_pages * zcache_max_pool_percent / 100 < + zcache_pages()) || + (totalram_pages * zcache_clear_percent / 100 > + file)); } /* |