author	Bob Liu <bob.liu@oracle.com>	2013-08-06 19:36:16 +0800
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-23 21:15:14 -0700
commit	37b371fd53223d1fbdcc97e4ee2e3f02f21354ea (patch)
tree	4bc0e6510220d639d8ecf1725f9308dadf952f74
parent	c3ca4b603e062b56119d3602cb99cc779e7d65f9 (diff)
mm: zcache: add support for evicting zpages
Implement zbud_ops->evict so that compressed zpages can be evicted from the
zbud memory pool when the compressed pool is full. zbud already manages the
compressed pool on an LRU basis, so eviction simply drops the compressed
file-page data directly; if that data is needed again, the page must be
re-read from disk and no disk read is saved.

Signed-off-by: Bob Liu <bob.liu@oracle.com>
Patch-mainline: linux-mm @ 2013-08-06 11:36:16
[vinmenon@codeaurora.org: trivial merge conflict fixes]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Change-Id: Ia66652475e490f0233547511e80abf7587054e65
-rw-r--r--	mm/zcache.c	52
1 file changed, 46 insertions(+), 6 deletions(-)
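For context on how the new callback is driven: zbud keeps the buddies it stores on an LRU list, and zbud_reclaim_page() releases the least-recently-used entry by calling back into the pool owner through the zbud_ops->evict hook registered at pool creation; this patch supplies that hook for zcache. The sketch below is a minimal, userspace-only model of that contract, not the kernel zbud API; all names (toy_pool, toy_store, toy_reclaim_page, toy_evict) are illustrative.

/*
 * Toy model of the zbud_ops->evict pattern: a pool owner registers an
 * evict callback, and the pool's reclaim path invokes it for the LRU entry.
 */
#include <stdio.h>
#include <stdlib.h>

struct toy_pool;

struct toy_ops {
	/* Called for the least-recently-used entry; return 0 on success. */
	int (*evict)(struct toy_pool *pool, unsigned long handle);
};

struct toy_entry {
	unsigned long handle;
	struct toy_entry *next;
};

struct toy_pool {
	struct toy_entry *lru;	/* head = most recent, tail = LRU victim */
	int nr_entries;
	int limit;
	const struct toy_ops *ops;
};

/* Drop the LRU entry by invoking the owner-supplied evict callback. */
static int toy_reclaim_page(struct toy_pool *pool)
{
	struct toy_entry **pp = &pool->lru, *victim;

	if (!*pp || !pool->ops || !pool->ops->evict)
		return -1;
	while ((*pp)->next)		/* walk to the LRU tail */
		pp = &(*pp)->next;
	victim = *pp;
	if (pool->ops->evict(pool, victim->handle))
		return -1;
	*pp = NULL;
	free(victim);
	pool->nr_entries--;
	return 0;
}

/* Mirror of the patched zcache_store_page() flow: reclaim first when full. */
static int toy_store(struct toy_pool *pool, unsigned long handle)
{
	struct toy_entry *e;

	if (pool->nr_entries >= pool->limit && toy_reclaim_page(pool))
		return -1;		/* reclaim failed: drop the new store */

	e = malloc(sizeof(*e));
	if (!e)
		return -1;
	e->handle = handle;
	e->next = pool->lru;		/* newly stored = most recently used */
	pool->lru = e;
	pool->nr_entries++;
	return 0;
}

/* zcache's callback just drops the compressed copy of a clean file page. */
static int toy_evict(struct toy_pool *pool, unsigned long handle)
{
	printf("evicting handle %lu (data can be re-read from disk)\n", handle);
	return 0;
}

int main(void)
{
	struct toy_ops ops = { .evict = toy_evict };
	struct toy_pool pool = { .limit = 2, .ops = &ops };

	toy_store(&pool, 1);
	toy_store(&pool, 2);
	toy_store(&pool, 3);	/* pool full: handle 1 is evicted first */
	return 0;
}

In the diff below, zcache_store_page() follows the same shape as toy_store(): when the pool is full it calls zbud_reclaim_page(zpool->pool, 8) (the second argument being the number of reclaim retries) and only drops the new store if reclaim fails, which is what the new zcache_reclaim_fail counter records.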
diff --git a/mm/zcache.c b/mm/zcache.c
index ac90f2853fb8..8667fcfb971e 100644
--- a/mm/zcache.c
+++ b/mm/zcache.c
@@ -65,6 +65,9 @@ static u64 zcache_pool_limit_hit;
static u64 zcache_dup_entry;
static u64 zcache_zbud_alloc_fail;
static u64 zcache_pool_pages;
+static u64 zcache_evict_zpages;
+static u64 zcache_evict_filepages;
+static u64 zcache_reclaim_fail;
static atomic_t zcache_stored_pages = ATOMIC_INIT(0);
/*
@@ -129,6 +132,7 @@ struct zcache_ra_handle {
int rb_index; /* Red-black tree index */
int ra_index; /* Radix tree index */
int zlen; /* Compressed page size */
+ struct zcache_pool *zpool; /* Finding zcache_pool during evict */
};
static struct kmem_cache *zcache_rbnode_cache;
@@ -494,7 +498,15 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key,
if (zcache_is_full()) {
zcache_pool_limit_hit++;
- return;
+ if (zbud_reclaim_page(zpool->pool, 8)) {
+ zcache_reclaim_fail++;
+ return;
+ }
+	/*
+	 * Continue if a page frame was reclaimed successfully.
+	 */
+ zcache_evict_filepages++;
+ zcache_pool_pages = zbud_get_pool_size(zpool->pool);
}
/* compress */
@@ -522,6 +534,8 @@ static void zcache_store_page(int pool_id, struct cleancache_filekey key,
zhandle->ra_index = index;
zhandle->rb_index = key.u.ino;
zhandle->zlen = zlen;
+ zhandle->zpool = zpool;
+
/* Compressed page data stored at the end of zcache_ra_handle */
zpage = (u8 *)(zhandle + 1);
memcpy(zpage, dst, zlen);
@@ -692,16 +706,36 @@ static void zcache_flush_fs(int pool_id)
}
/*
- * Evict pages from zcache pool on an LRU basis after the compressed pool is
- * full.
+ * Evict compressed pages from zcache pool on an LRU basis after the compressed
+ * pool is full.
*/
-static int zcache_evict_entry(struct zbud_pool *pool, unsigned long zaddr)
+static int zcache_evict_zpage(struct zbud_pool *pool, unsigned long zaddr)
{
- return -EINVAL;
+ struct zcache_pool *zpool;
+ struct zcache_ra_handle *zhandle;
+ void *zaddr_intree;
+
+ zhandle = (struct zcache_ra_handle *)zbud_map(pool, zaddr);
+
+ zpool = zhandle->zpool;
+ BUG_ON(!zpool);
+ BUG_ON(pool != zpool->pool);
+
+ zaddr_intree = zcache_load_delete_zaddr(zpool, zhandle->rb_index,
+ zhandle->ra_index);
+ if (zaddr_intree) {
+ BUG_ON((unsigned long)zaddr_intree != zaddr);
+ zbud_unmap(pool, zaddr);
+ zbud_free(pool, zaddr);
+ atomic_dec(&zcache_stored_pages);
+ zcache_pool_pages = zbud_get_pool_size(pool);
+ zcache_evict_zpages++;
+ }
+ return 0;
}
static struct zbud_ops zcache_zbud_ops = {
- .evict = zcache_evict_entry
+ .evict = zcache_evict_zpage
};
/* Return pool id */
@@ -832,6 +866,12 @@ static int __init zcache_debugfs_init(void)
&zcache_pool_pages);
debugfs_create_atomic_t("stored_pages", S_IRUGO, zcache_debugfs_root,
&zcache_stored_pages);
+ debugfs_create_u64("evicted_zpages", S_IRUGO, zcache_debugfs_root,
+ &zcache_evict_zpages);
+ debugfs_create_u64("evicted_filepages", S_IRUGO, zcache_debugfs_root,
+ &zcache_evict_filepages);
+ debugfs_create_u64("reclaim_fail", S_IRUGO, zcache_debugfs_root,
+ &zcache_reclaim_fail);
return 0;
}