| author | Minchan Kim <minchan@kernel.org> | 2013-05-09 16:21:27 +0900 |
|---|---|---|
| committer | Kyle Yan <kyan@codeaurora.org> | 2016-06-22 14:43:57 -0700 |
| commit | 06de050ac6a250930f5b95dc153744d64dbc13c4 | |
| tree | 7d78780c17783725e3699e0a414160ce52dcd500 /mm/vmscan.c | |
| parent | a4e92011d44d60ad33dca31785682ddb82c44e40 | |
mm: Enhance per process reclaim to consider shared pages
Some pages can be shared by several processes (for example, libc).
In that case it is wasteful to reclaim them right away. This patch
makes the VM keep such pages in memory until the last task tries to
reclaim them, so a shared page is reclaimed only after every task
that maps it has gone through swap-out.

This feature does not handle non-linear mappings on ramfs because
handling them is very time-consuming, does not guarantee reclaim,
and is not a common case.
Change-Id: I7e5f34f2e947f5db6d405867fe2ad34863ca40f7
Signed-off-by: Sangseok Lee <sangseok.lee@lge.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Patch-mainline: linux-mm @ 9 May 2013 16:21:27
[vinmenon@codeaurora.org: trivial merge conflict fixes + changes
to make the patch work with 3.18 kernel]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
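The policy described above boils down to a mapcount check: a page still
mapped by another task is not a swap-out candidate for per-process reclaim.
A minimal sketch of that idea follows; page_mapcount() is an existing kernel
helper, but the wrapper function is hypothetical and not part of this patch.

```c
#include <linux/mm.h>

/*
 * Hypothetical helper (not part of this patch): a page is a swap-out
 * candidate for per-process reclaim only when no other task still maps
 * it.  A shared page (e.g. libc text) keeps a mapcount above one until
 * the last mapper runs reclaim, so it stays in memory until then.
 */
static bool process_reclaim_may_swap_out(struct page *page)
{
	return page_mapcount(page) <= 1;
}
```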
Diffstat (limited to 'mm/vmscan.c')
| -rw-r--r-- | mm/vmscan.c | 14 |
1 file changed, 12 insertions, 2 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index fe9c39e6b900..73f5cec91063 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -105,6 +105,13 @@ struct scan_control {
 
 	/* Number of pages freed so far during a call to shrink_zones() */
 	unsigned long nr_reclaimed;
+
+	/*
+	 * Reclaim pages from a vma. If the page is shared by other tasks
+	 * it is zapped from a vma without reclaim so it ends up remaining
+	 * on memory until last task zap it.
+	 */
+	struct vm_area_struct *target_vma;
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -1115,7 +1122,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		if (page_mapped(page) && mapping) {
 			switch (try_to_unmap(page,
-					ttu_flags|TTU_BATCH_FLUSH)) {
+					ttu_flags|TTU_BATCH_FLUSH,
+					sc->target_vma)) {
 			case SWAP_FAIL:
 				goto activate_locked;
 			case SWAP_AGAIN:
@@ -1324,7 +1332,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 }
 
 #ifdef CONFIG_PROCESS_RECLAIM
-unsigned long reclaim_pages_from_list(struct list_head *page_list)
+unsigned long reclaim_pages_from_list(struct list_head *page_list,
+					struct vm_area_struct *vma)
 {
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -1332,6 +1341,7 @@ unsigned long reclaim_pages_from_list(struct list_head *page_list)
 		.may_writepage = 1,
 		.may_unmap = 1,
 		.may_swap = 1,
+		.target_vma = vma,
 	};
 	unsigned long nr_reclaimed;
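For reference, a caller-side sketch of the new reclaim_pages_from_list()
signature introduced above. Only the (list, vma) calling convention comes
from this diff; the function name below is hypothetical, and the page-table
walk that isolates one VMA's pages onto a private list is assumed rather
than shown.

```c
/*
 * Hypothetical caller, assuming the pages of one VMA have already been
 * isolated onto a private list by a page-table walk (not shown here).
 * Passing the VMA sets sc.target_vma, so a page shared with other
 * tasks is only zapped from this VMA by try_to_unmap() and remains in
 * memory instead of being swapped out.
 */
static unsigned long process_reclaim_one_vma(struct vm_area_struct *vma,
					     struct list_head *isolated_pages)
{
	return reclaim_pages_from_list(isolated_pages, vma);
}
```

Threading the VMA through scan_control rather than changing every
shrink_page_list() caller keeps global reclaim untouched: paths that do not
set target_vma leave it NULL and behave exactly as before.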
