author	Kirill Tkhai <ktkhai@virtuozzo.com>	2019-03-05 15:43:06 -0800
committer	Georg Veichtlbauer <georg@vware.at>	2023-07-16 12:47:43 +0200
commit	90dccbae4c0410e209049e22eaae9cd2718e1aa5 (patch)
tree	4de61c330c6097164610b5cbb6687ca3743adfa7	/mm/memory.c
parent	ebf270d24640abda4ddc8061e615facdb9b074d0 (diff)
UPSTREAM: mm: reuse only-pte-mapped KSM page in do_wp_page()
Add an optimization for KSM pages, almost the same as the one we already have for ordinary anonymous pages: if a write fault hits a page that is mapped by only one PTE and is not in the swap cache, the page may be reused without copying its contents.

[ Note that we do not consider PageSwapCache() pages, at least for now, since we don't want to complicate __get_ksm_page(), which has a nice optimization based on this (for the migration case). Currently it spins on PageSwapCache() pages, waiting until their counters are unfrozen (i.e., until migration finishes). We don't want to make it also spin on swap cache pages that we are trying to reuse, since the probability of reusing them is not very high. So, for now, we do not consider PageSwapCache() pages at all. ]

So, in reuse_ksm_page() we check 1) PageSwapCache() and 2) page_stable_node(), to skip a page that KSM is currently trying to link into the stable tree. Then we do page_ref_freeze() to prohibit KSM from merging one more page into the page we are reusing. After that, nobody can take a new reference to the page being reused: KSM skips !PageSwapCache() pages with a zero refcount, and the protection against all other participants is the same as for reused ordinary anonymous pages: the PTE lock, the page lock and mmap_sem.

[akpm@linux-foundation.org: replace BUG_ON()s with WARN_ON()s]
Link: http://lkml.kernel.org/r/154471491016.31352.1168978849911555609.stgit@localhost.localdomain
Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Reviewed-by: Yang Shi <yang.shi@linux.alibaba.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Christian Koenig <christian.koenig@amd.com>
Cc: Claudio Imbrenda <imbrenda@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@surriel.com>
Cc: Huang Ying <ying.huang@intel.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Change-Id: If32387b1f7c36f0e12fcbb0926bf1b67886ec594
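Note: the mm/ksm.c half of this patch, which adds reuse_ksm_page() itself, is not shown below (the diffstat is limited to mm/memory.c). For context, here is a sketch of the helper as the commit message describes it, based on the upstream version; the backport may differ in detail (e.g. it may use the older page_cache_get()/page_cache_release() refcount primitives instead of page_ref_freeze()/page_ref_unfreeze()):

bool reuse_ksm_page(struct page *page,
		    struct vm_area_struct *vma,
		    unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/* The caller must hand us a mapped, locked, non-zero page. */
	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
			WARN_ON(!page_mapped(page)) ||
			WARN_ON(!PageLocked(page))) {
		dump_page(page, "reuse_ksm_page");
		return false;
	}
#endif

	/* Skip swap cache pages, and pages KSM is still trying to link
	 * into the stable tree. */
	if (PageSwapCache(page) || !page_stable_node(page))
		return false;
	/* Freeze the refcount at 1 so KSM cannot merge one more page
	 * into this one; fail if anybody else holds a reference. */
	if (!page_ref_freeze(page, 1))
		return false;

	/* The page is exclusively ours: move it to our anon_vma. */
	page_move_anon_rmap(page, vma);
	page->index = linear_page_index(vma, address);
	page_ref_unfreeze(page, 1);

	return true;
}

do_wp_page() below calls this only after locking an anonymous KSM page that is still mapped by the faulting PTE, so a successful freeze means the faulting task is the sole user and the PTE can simply be made writable via wp_page_reuse().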
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 09a57fe6ae01..ccb04d3f9bab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2399,7 +2399,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(old_page) && !PageKsm(old_page)) {
+	if (PageAnon(old_page)) {
+		if (PageKsm(old_page) && (PageSwapCache(old_page) ||
+					  page_count(old_page) != 1))
+			goto copy;
 		if (!trylock_page(old_page)) {
 			page_cache_get(old_page);
 			pte_unmap_unlock(page_table, ptl);
@@ -2414,6 +2417,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			}
 			page_cache_release(old_page);
 		}
+		if (PageKsm(old_page)) {
+			bool reused = reuse_ksm_page(old_page, vma,
+						     address);
+			unlock_page(old_page);
+			if (!reused)
+				goto copy;
+			wp_page_reuse(mm, vma, address, page_table, ptl,
+				      orig_pte, old_page, 0, 0);
+			return VM_FAULT_WRITE;
+		}
 		if (reuse_swap_page(old_page)) {
 			/*
 			 * The page is all ours. Move it to our anon_vma so
@@ -2431,7 +2444,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return wp_page_shared(mm, vma, address, page_table, pmd,
 				      ptl, orig_pte, old_page);
 	}
-
+copy:
 	/*
 	 * Ok, we need to copy. Oh, well..
 	 */