| field | value | date |
|---|---|---|
| author | Liam Mark &lt;lmark@codeaurora.org&gt; | 2015-02-27 12:59:00 -0800 |
| committer | Jeevan Shriram &lt;jshriram@codeaurora.org&gt; | 2016-04-13 11:11:01 -0700 |
| commit | 1426d1f8d93d9a4eea97c91eb5ebd1357afe7b16 (patch) | |
| tree | 02520a1a45ef13bd3a5414f0fb15ec5b8c320114 | |
| parent | 2f5e2c732d2b8a2b0ddffe8dfe133c4cfe6850f2 (diff) | |
lowmemorykiller: Don't count swap cache pages twice
The lowmem_shrink function discounts all swap cache pages from
the file cache count. The zone-aware code in tune_lmk_zone_param
also discounts all file cache pages belonging to ineligible zones,
and since swap cache pages are accounted as file cache
(NR_FILE_PAGES), some swap cache pages end up being discounted
twice. The resulting file cache estimate is too low, which can make
the low memory killer unnecessarily aggressive.

Fix the low memory killer to discount the swap cache pages only
once: track the per-zone swap cache size in a new NR_SWAPCACHE
zone counter and exclude it from the per-zone file page subtraction.
Change-Id: I650bbfbf0fbbabd01d82bdb3502b57ff59c3e14f
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
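
To make the double-count concrete, here is a standalone userspace sketch (my illustration, not kernel code: the counter values are hypothetical, and the variables only mirror the zone_page_state() counters the driver reads):

```c
#include <stdio.h>

int main(void)
{
	/* Hypothetical per-zone counters, in pages. NR_FILE_PAGES
	 * includes both the shmem and the swap cache pages. */
	long nr_file_pages = 1000;
	long nr_shmem      = 200;
	long nr_swapcache  = 300;

	/* lowmem_shrink() starts from a file cache estimate that has
	 * already discounted the swap cache once. */
	long other_file = nr_file_pages - nr_shmem - nr_swapcache;   /* 500 */

	/* Old tune_lmk_zone_param(): subtracts the zone's file pages
	 * minus shmem. That term still contains the swap cache, so
	 * those pages get discounted a second time. */
	long before = other_file - (nr_file_pages - nr_shmem);       /* -300 */

	/* Patched: the zone's NR_SWAPCACHE count is excluded from the
	 * subtracted term, so the swap cache is discounted only once. */
	long after = other_file
		   - (nr_file_pages - nr_shmem - nr_swapcache);      /* 0 */

	printf("before the fix: %ld pages\n", before);
	printf("after the fix:  %ld pages\n", after);
	return 0;
}
```

With the whole file cache sitting in one ineligible zone, the old code drives the estimate 300 pages too low (exactly the zone's swap cache), so the killer fires earlier than it should.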
| mode | file | changed |
|---|---|---|
| -rw-r--r-- | drivers/staging/android/lowmemorykiller.c | 3 |
| -rw-r--r-- | include/linux/mmzone.h | 1 |
| -rw-r--r-- | mm/swap_state.c | 2 |
| -rw-r--r-- | mm/vmstat.c | 1 |

4 files changed, 6 insertions(+), 1 deletion(-)
```diff
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index e60421299164..1aab1f67e571 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -251,7 +251,8 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
 			if (other_file != NULL)
 				*other_file -= zone_page_state(zone,
 							       NR_FILE_PAGES)
-					      - zone_page_state(zone, NR_SHMEM);
+					      - zone_page_state(zone, NR_SHMEM)
+					      - zone_page_state(zone, NR_SWAPCACHE);
 		} else if (zone_idx < classzone_idx) {
 			if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0) &&
 			    other_free) {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8a5894308eb2..04030f756e7c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -167,6 +167,7 @@ enum zone_stat_item {
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_TRANSPARENT_HUGEPAGES,
 	NR_FREE_CMA_PAGES,
+	NR_SWAPCACHE,
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 4e166f1c692c..61039e39e25f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -96,6 +96,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry)
 	if (likely(!error)) {
 		address_space->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
+		__inc_zone_page_state(page, NR_SWAPCACHE);
 		INC_CACHE_INFO(add_total);
 	}
 	spin_unlock_irq(&address_space->tree_lock);
@@ -148,6 +149,7 @@ void __delete_from_swap_cache(struct page *page)
 	ClearPageSwapCache(page);
 	address_space->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
+	__dec_zone_page_state(page, NR_SWAPCACHE);
 	INC_CACHE_INFO(del_total);
 }
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4923dfe89983..c0e67319d3e8 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -763,6 +763,7 @@ const char * const vmstat_text[] = {
 	"workingset_nodereclaim",
 	"nr_anon_transparent_hugepages",
 	"nr_free_cma",
+	"nr_swapcache",
 
 	/* enum writeback_stat_item counters */
 	"nr_dirty_threshold",
```
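
Because NR_SWAPCACHE is an ordinary zone stat item and the patch adds the matching "nr_swapcache" string to vmstat_text, the new counter should be visible in /proc/vmstat (and per-zone in /proc/zoneinfo) on a kernel carrying this change. A quick userspace check, as a sketch:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("fopen /proc/vmstat");
		return 1;
	}
	/* Print the counter line, e.g. "nr_swapcache 1234". */
	while (fgets(line, sizeof(line), f))
		if (strncmp(line, "nr_swapcache ", 13) == 0)
			fputs(line, stdout);
	fclose(f);
	return 0;
}
```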
