-rw-r--r--	include/linux/ksm.h |  7
-rw-r--r--	mm/ksm.c            | 30
-rw-r--r--	mm/memory.c         | 17
3 files changed, 50 insertions(+), 4 deletions(-)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 481c8c4627ca..febba394f93c 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -62,6 +62,8 @@ struct page *ksm_might_need_to_copy(struct page *page,
 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+bool reuse_ksm_page(struct page *page,
+			struct vm_area_struct *vma, unsigned long address);
 
 #else /* !CONFIG_KSM */
@@ -102,6 +104,11 @@ static inline int rmap_walk_ksm(struct page *page,
 static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 {
 }
+static inline bool reuse_ksm_page(struct page *page,
+		struct vm_area_struct *vma, unsigned long address)
+{
+	return false;
+}
 #endif /* CONFIG_MMU */
 #endif /* !CONFIG_KSM */
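
The !CONFIG_KSM stub above follows the kernel's usual configuration pattern: when a feature is compiled out, a static inline returning a safe default keeps every call site free of #ifdefs. A minimal userspace sketch of the same idiom; the names CONFIG_FEATURE and feature_reuse are hypothetical, not part of the patch:

#include <stdbool.h>
#include <stdio.h>

#ifdef CONFIG_FEATURE
bool feature_reuse(int page_id);	/* real implementation elsewhere */
#else
/* Feature compiled out: a no-op fallback with the same signature. */
static inline bool feature_reuse(int page_id)
{
	(void)page_id;
	return false;
}
#endif

int main(void)
{
	/* The call site is identical whether the feature exists or not. */
	printf("reuse? %d\n", feature_reuse(42));
	return 0;
}

Returning false from the stub means do_wp_page() below simply falls through to the copy path on !CONFIG_KSM kernels, which matches the pre-patch behaviour.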
diff --git a/mm/ksm.c b/mm/ksm.c
index 2a4ef426b331..bfee36c149e6 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -580,8 +580,9 @@ again:
 	 * case this node is no longer referenced, and should be freed;
 	 * however, it might mean that the page is under page_freeze_refs().
 	 * The __remove_mapping() case is easy, again the node is now stale;
-	 * but if page is swapcache in migrate_page_move_mapping(), it might
-	 * still be our page, in which case it's essential to keep the node.
+	 * the same applies in the reuse_ksm_page() case; but if page is
+	 * swapcache in migrate_page_move_mapping(), it might still be our
+	 * page, in which case it's essential to keep the node.
 	 */
 	while (!get_page_unless_zero(page)) {
 		/*
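
The comment above hinges on get_page_unless_zero(): a reference can only be taken while the count is non-zero, so a count frozen at zero (as reuse_ksm_page() below arranges) excludes all newcomers. A hedged userspace model of that primitive in C11 atomics; the kernel's real implementation differs in detail:

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Take a reference only if the count is currently non-zero. A frozen
 * (zero) count cannot be raised, so lookups either retry later or
 * treat the object as gone.
 */
static bool get_ref_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		/* On failure the CAS reloads 'old' and the loop retries. */
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
	}
	return false;
}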
@@ -2061,6 +2062,31 @@ out:
 	return ret;
 }
 
+bool reuse_ksm_page(struct page *page,
+		    struct vm_area_struct *vma,
+		    unsigned long address)
+{
+#ifdef CONFIG_DEBUG_VM
+	if (WARN_ON(is_zero_pfn(page_to_pfn(page))) ||
+			WARN_ON(!page_mapped(page)) ||
+			WARN_ON(!PageLocked(page))) {
+		dump_page(page, "reuse_ksm_page");
+		return false;
+	}
+#endif
+
+	if (PageSwapCache(page) || !page_stable_node(page))
+		return false;
+	/* Prohibit parallel get_ksm_page() */
+	if (!page_ref_freeze(page, 1))
+		return false;
+
+	page_move_anon_rmap(page, vma);
+	page->index = linear_page_index(vma, address);
+	page_ref_unfreeze(page, 1);
+
+	return true;
+}
#ifdef CONFIG_MIGRATION
void ksm_migrate_page(struct page *newpage, struct page *oldpage)
{
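
reuse_ksm_page() above depends on page_ref_freeze(page, 1) succeeding only when the faulting task holds the sole reference, and on the frozen (zero) count making a concurrent get_ksm_page(), which uses get_page_unless_zero(), back off while page->index and the anon rmap are rewritten. A sketch of that freeze/unfreeze pairing in C11 atomics, offered as a model rather than the kernel implementation:

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Freeze succeeds only if we are the sole holder (count == expected)
 * and parks the count at zero, so a get-unless-zero style lookup
 * fails until the owner unfreezes.
 */
static bool ref_freeze(atomic_int *refcount, int expected)
{
	int old = expected;

	return atomic_compare_exchange_strong(refcount, &old, 0);
}

static void ref_unfreeze(atomic_int *refcount, int count)
{
	/* Release ordering publishes the updates made while frozen. */
	atomic_store_explicit(refcount, count, memory_order_release);
}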
diff --git a/mm/memory.c b/mm/memory.c
index 09a57fe6ae01..ccb04d3f9bab 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2399,7 +2399,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * Take out anonymous pages first, anonymous shared vmas are
 	 * not dirty accountable.
 	 */
-	if (PageAnon(old_page) && !PageKsm(old_page)) {
+	if (PageAnon(old_page)) {
+		if (PageKsm(old_page) && (PageSwapCache(old_page) ||
+					page_count(old_page) != 1))
+			goto copy;
 		if (!trylock_page(old_page)) {
 			page_cache_get(old_page);
 			pte_unmap_unlock(page_table, ptl);
@@ -2414,6 +2417,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			}
 			page_cache_release(old_page);
 		}
+		if (PageKsm(old_page)) {
+			bool reused = reuse_ksm_page(old_page, vma,
+					address);
+			unlock_page(old_page);
+			if (!reused)
+				goto copy;
+			wp_page_reuse(mm, vma, address, page_table, ptl,
+					orig_pte, old_page, 0, 0);
+			return VM_FAULT_WRITE;
+		}
 		if (reuse_swap_page(old_page)) {
 			/*
 			 * The page is all ours. Move it to our anon_vma so
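
The branch above orders its checks deliberately: a KSM page that sits in swapcache or has extra references goes straight to the copy path before any page lock is taken; only a singly referenced, non-swapcache KSM page is offered to reuse_ksm_page(), and even that may still fail and fall back to copying. A condensed, illustrative decision function; the enum and helper are hypothetical, and the plain-anonymous case is simplified relative to reuse_swap_page():

#include <stdbool.h>

enum wp_action { WP_REUSE, WP_COPY };

static enum wp_action wp_decide_anon(bool is_ksm, bool in_swapcache,
				     int refcount)
{
	/*
	 * A KSM page in swapcache, or with extra references, is never
	 * reused in place: the swapcache itself pins a reference.
	 */
	if (is_ksm && (in_swapcache || refcount != 1))
		return WP_COPY;
	if (is_ksm)
		return WP_REUSE;	/* candidate for reuse_ksm_page() */
	/* Plain anonymous page: simplified stand-in for reuse_swap_page(). */
	return refcount == 1 ? WP_REUSE : WP_COPY;
}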
@@ -2431,7 +2444,7 @@
 		return wp_page_shared(mm, vma, address, page_table, pmd,
 				      ptl, orig_pte, old_page);
 	}
-
+copy:
 	/*
 	 * Ok, we need to copy. Oh, well..
 	 */
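
To exercise the patched path from userspace, merge two identical anonymous pages with MADV_MERGEABLE and then write to one of them: the store write-faults on the shared read-only KSM page and enters the do_wp_page() logic above. A rough demo that assumes CONFIG_KSM and ksmd enabled via /sys/kernel/mm/ksm/run; the sleep is a crude stand-in for polling /sys/kernel/mm/ksm/pages_shared:

#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	size_t len = 2 * page;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0x5a, len);	/* two identical pages: merge candidates */

	if (madvise(buf, len, MADV_MERGEABLE))
		return 1;	/* e.g. kernel built without CONFIG_KSM */

	sleep(5);		/* crude: give ksmd time to scan and merge */

	/*
	 * If the pages were merged, this store faults on the shared KSM
	 * page: with this patch it is reused in place when only-PTE-mapped,
	 * otherwise broken out by copy-on-write as before.
	 */
	buf[0] = 0;

	return 0;
}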