author	Liam Mark <lmark@codeaurora.org>	2014-11-25 14:43:42 -0800
committer	David Keitel <dkeitel@codeaurora.org>	2016-03-22 11:03:33 -0700
commit	8a94faffd0a430d83c2d9d62c2d4c9b6070f6323 (patch)
tree	ca5b1afefe47903b5481b96e41da286eac00c1f0 /mm/vmscan.c
parent	747b0dceaea4cecff7a3adad6c657a656c9aeb43 (diff)
mm: vmscan: support complete shrinker reclaim
Ensure that shrinkers are given the option to completely drop their
caches even when their caches are smaller than the batch size. This
change helps improve memory headroom by ensuring that under significant
memory pressure shrinkers can drop all of their caches.

This change only calls the shrinkers more aggressively during background
memory reclaim, in order to avoid hurting the performance of direct
memory reclaim.

Change-Id: I8dbc29c054add639e4810e36fd2c8a063e5c52f3
Signed-off-by: Liam Mark <lmark@codeaurora.org>
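For illustration, below is a minimal user-space model of the loop-condition change, not kernel code: with the patch, kswapd runs with min_cache_size = 0, so a scan budget smaller than one batch (SHRINK_BATCH, 128 in the kernel) still drains a small cache, while direct reclaim keeps the batch-size floor. The simulate_scan() helper, and its assumption that every scanned object is freed, are inventions for this sketch.

	/* Hypothetical stand-alone model of the patched scan loop. */
	#include <stdbool.h>
	#include <stdio.h>

	#define SHRINK_BATCH 128	/* kernel default batch size */

	/* Return how many objects would be reclaimed from a cache of
	 * `freeable` objects given a scan budget of `total_scan`. */
	static long simulate_scan(long total_scan, long freeable, bool is_kswapd)
	{
		long batch_size = SHRINK_BATCH;
		/* The patch: kswapd may drain caches smaller than one batch. */
		long min_cache_size = is_kswapd ? 0 : batch_size;
		long reclaimed = 0;

		while (total_scan > min_cache_size || total_scan >= freeable) {
			long nr_to_scan = total_scan < batch_size ?
						total_scan : batch_size;
			if (nr_to_scan == 0)
				break;	/* budget exhausted */
			reclaimed += nr_to_scan; /* assume each scan frees */
			total_scan -= nr_to_scan;
		}
		return reclaimed;
	}

	int main(void)
	{
		/* Small cache, scan budget well under one batch. */
		long freeable = 50, total_scan = 10;

		printf("direct reclaim frees %ld of %ld\n",
		       simulate_scan(total_scan, freeable, false), freeable);
		printf("kswapd frees %ld of %ld\n",
		       simulate_scan(total_scan, freeable, true), freeable);
		return 0;
	}

With freeable = 50 and a granted scan budget of 10, the pre-patch condition (and direct reclaim after the patch) scans nothing and defers the work, while kswapd now scans all 10 objects.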
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9f844e8b92df..65c6a720d8ec 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -320,6 +320,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	int nid = shrinkctl->nid;
 	long batch_size = shrinker->batch ? shrinker->batch
 					  : SHRINK_BATCH;
+	long min_cache_size = batch_size;
+
+	if (current_is_kswapd())
+		min_cache_size = 0;
 
 	freeable = shrinker->count_objects(shrinker, shrinkctl);
 	if (freeable == 0)
@@ -385,7 +389,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	 * scanning at high prio and therefore should try to reclaim as much as
 	 * possible.
 	 */
-	while (total_scan >= batch_size ||
+	while (total_scan > min_cache_size ||
 	       total_scan >= freeable) {
 		unsigned long ret;
 		unsigned long nr_to_scan = min(batch_size, total_scan);