From 4e047f897771222215ee572e1c0b25e9417376eb Mon Sep 17 00:00:00 2001 From: Shachar Raindel Date: Tue, 14 Apr 2015 15:46:25 -0700 Subject: mm: refactor do_wp_page, extract the reuse case Currently do_wp_page contains 265 code lines. It also contains 9 goto statements, of which 5 are targeting labels which are not cleanup related. This makes the function extremely difficult to understand. The following patches are an attempt at breaking the function into its basic components, making it easier to understand. The patches are straightforward function extractions from do_wp_page. As we extract functions, we remove unneeded parameters and simplify the code as much as possible. However, the functionality is supposed to remain completely unchanged. The patches also attempt to document the functionality of each extracted function. In patch 2, we split the unlock logic so that each use case contains only the logic relevant to its specific needs, instead of having a huge number of conditional decisions in a single unlock flow. This patch (of 4): When do_wp_page is ending, in several cases it needs to reuse the existing page. This is achieved by making the page table writable, and possibly updating the page-cache state. Currently, this logic is "called" by using a goto jump. This makes following the control flow of the function harder. It also goes against the coding style guidelines on the use of goto. As the code can easily be refactored into a specialized function, refactor it out and simplify the code flow in do_wp_page. Acked-by: Linus Torvalds Acked-by: Kirill A. Shutemov Acked-by: Rik van Riel Acked-by: Andi Kleen Acked-by: Haggai Eran Acked-by: Johannes Weiner Cc: Mel Gorman Cc: Matthew Wilcox Cc: Dave Hansen Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Peter Feiner Cc: Michel Lespinasse Reviewed-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 117 +++++++++++++++++++++++++++++++++++------------------------- 1 file changed, 68 insertions(+), 49 deletions(-) (limited to 'mm/memory.c') diff --git a/mm/memory.c b/mm/memory.c index 97839f5c8c30..e70685f3e836 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1982,6 +1982,65 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, return ret; } +/* + * Handle write page faults for pages that can be reused in the current vma + * + * This can happen either due to the mapping being with the VM_SHARED flag, + * or due to us being the last reference standing to the page. In either + * case, all we need to do here is to mark the page as writable and update + * any related book-keeping. + */ +static inline int wp_page_reuse(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, + pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, + struct page *page, int page_mkwrite, + int dirty_shared) + __releases(ptl) +{ + pte_t entry; + /* + * Clear the pages cpupid information as the existing + * information potentially belongs to a now completely + * unrelated process.
+ */ + if (page) + page_cpupid_xchg_last(page, (1 << LAST_CPUPID_SHIFT) - 1); + + flush_cache_page(vma, address, pte_pfn(orig_pte)); + entry = pte_mkyoung(orig_pte); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + if (ptep_set_access_flags(vma, address, page_table, entry, 1)) + update_mmu_cache(vma, address, page_table); + pte_unmap_unlock(page_table, ptl); + + if (dirty_shared) { + struct address_space *mapping; + int dirtied; + + if (!page_mkwrite) + lock_page(page); + + dirtied = set_page_dirty(page); + VM_BUG_ON_PAGE(PageAnon(page), page); + mapping = page->mapping; + unlock_page(page); + page_cache_release(page); + + if ((dirtied || page_mkwrite) && mapping) { + /* + * Some device drivers do not set page.mapping + * but still dirty their pages + */ + balance_dirty_pages_ratelimited(mapping); + } + + if (!page_mkwrite) + file_update_time(vma->vm_file); + } + + return VM_FAULT_WRITE; +} + /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address @@ -2008,8 +2067,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page *old_page, *new_page = NULL; pte_t entry; int ret = 0; - int page_mkwrite = 0; - bool dirty_shared = false; unsigned long mmun_start = 0; /* For mmu_notifiers */ unsigned long mmun_end = 0; /* For mmu_notifiers */ struct mem_cgroup *memcg; @@ -2026,7 +2083,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, */ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) - goto reuse; + return wp_page_reuse(mm, vma, address, page_table, ptl, + orig_pte, old_page, 0, 0); goto gotten; } @@ -2055,12 +2113,16 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, */ page_move_anon_rmap(old_page, vma, address); unlock_page(old_page); - goto reuse; + return wp_page_reuse(mm, vma, address, page_table, ptl, + orig_pte, old_page, 0, 0); } unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { + int page_mkwrite = 0; + page_cache_get(old_page); + /* * Only catch write-faults on shared writable pages, * read-only shared pages can get COWed by @@ -2091,51 +2153,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, page_mkwrite = 1; } - dirty_shared = true; - -reuse: - /* - * Clear the pages cpupid information as the existing - * information potentially belongs to a now completely - * unrelated process. 
- */ - if (old_page) - page_cpupid_xchg_last(old_page, (1 << LAST_CPUPID_SHIFT) - 1); - - flush_cache_page(vma, address, pte_pfn(orig_pte)); - entry = pte_mkyoung(orig_pte); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); - if (ptep_set_access_flags(vma, address, page_table, entry,1)) - update_mmu_cache(vma, address, page_table); - pte_unmap_unlock(page_table, ptl); - ret |= VM_FAULT_WRITE; - - if (dirty_shared) { - struct address_space *mapping; - int dirtied; - - if (!page_mkwrite) - lock_page(old_page); - - dirtied = set_page_dirty(old_page); - VM_BUG_ON_PAGE(PageAnon(old_page), old_page); - mapping = old_page->mapping; - unlock_page(old_page); - page_cache_release(old_page); - - if ((dirtied || page_mkwrite) && mapping) { - /* - * Some device drivers do not set page.mapping - * but still dirty their pages - */ - balance_dirty_pages_ratelimited(mapping); - } - - if (!page_mkwrite) - file_update_time(vma->vm_file); - } - - return ret; + return wp_page_reuse(mm, vma, address, page_table, ptl, + orig_pte, old_page, page_mkwrite, 1); } /* -- cgit v1.2.3 From 28766805275c12c2298883cece3f98505ac764b4 Mon Sep 17 00:00:00 2001 From: Shachar Raindel Date: Tue, 14 Apr 2015 15:46:29 -0700 Subject: mm: refactor do_wp_page - rewrite the unlock flow When do_wp_page is ending, in several cases it needs to unlock the pages and ptls it was accessing. Currently, this logic is "called" by using a goto jump. This makes following the control flow of the function harder. Readability was further hampered by the unlock case containing a large amount of logic needed in only one of the three cases. Using goto for cleanup is generally allowed. However, moving the trivial unlocking flows to the relevant call sites allows deeper refactoring in the next patch. Signed-off-by: Shachar Raindel Acked-by: Linus Torvalds Acked-by: Kirill A. Shutemov Acked-by: Rik van Riel Acked-by: Andi Kleen Acked-by: Haggai Eran Acked-by: Johannes Weiner Cc: Mel Gorman Cc: Matthew Wilcox Cc: Dave Hansen Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Peter Feiner Cc: Michel Lespinasse Reviewed-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'mm/memory.c') diff --git a/mm/memory.c b/mm/memory.c index e70685f3e836..0e28fddafdaf 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2066,7 +2066,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, { struct page *old_page, *new_page = NULL; pte_t entry; - int ret = 0; + int page_copied = 0; unsigned long mmun_start = 0; /* For mmu_notifiers */ unsigned long mmun_end = 0; /* For mmu_notifiers */ struct mem_cgroup *memcg; @@ -2101,7 +2101,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, &ptl); if (!pte_same(*page_table, orig_pte)) { unlock_page(old_page); - goto unlock; + pte_unmap_unlock(page_table, ptl); + page_cache_release(old_page); + return 0; } page_cache_release(old_page); } @@ -2148,7 +2150,9 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, &ptl); if (!pte_same(*page_table, orig_pte)) { unlock_page(old_page); - goto unlock; + pte_unmap_unlock(page_table, ptl); + page_cache_release(old_page); + return 0; } page_mkwrite = 1; } @@ -2246,29 +2250,28 @@ gotten: /* Free the old page..
*/ new_page = old_page; - ret |= VM_FAULT_WRITE; + page_copied = 1; } else mem_cgroup_cancel_charge(new_page, memcg); if (new_page) page_cache_release(new_page); -unlock: + pte_unmap_unlock(page_table, ptl); - if (mmun_end > mmun_start) - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); if (old_page) { /* * Don't let another task, with possibly unlocked vma, * keep the mlocked page. */ - if ((ret & VM_FAULT_WRITE) && (vma->vm_flags & VM_LOCKED)) { + if (page_copied && (vma->vm_flags & VM_LOCKED)) { lock_page(old_page); /* LRU manipulation */ munlock_vma_page(old_page); unlock_page(old_page); } page_cache_release(old_page); } - return ret; + return page_copied ? VM_FAULT_WRITE : 0; oom_free_new: page_cache_release(new_page); oom: -- cgit v1.2.3 From 2f38ab2c3c7fef04dca0313fd89d91f142ca9281 Mon Sep 17 00:00:00 2001 From: Shachar Raindel Date: Tue, 14 Apr 2015 15:46:32 -0700 Subject: mm: refactor do_wp_page, extract the page copy flow In some cases, do_wp_page had to copy the page suffering a write fault to a new location. When the function logic decided to do this, it was done by jumping with a "goto" operation to the relevant code block. This made the code really hard to understand. It is also against the kernel coding style guidelines. This patch extracts the page copy and page table update logic into a separate function. It also cleans up the naming, from "gotten" to "wp_page_copy", and adds a few comments. Signed-off-by: Shachar Raindel Acked-by: Linus Torvalds Acked-by: Kirill A. Shutemov Acked-by: Rik van Riel Acked-by: Andi Kleen Acked-by: Haggai Eran Acked-by: Johannes Weiner Cc: Mel Gorman Cc: Matthew Wilcox Cc: Dave Hansen Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Peter Feiner Cc: Michel Lespinasse Reviewed-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 265 +++++++++++++++++++++++++++++++++--------------------- 1 file changed, 147 insertions(+), 118 deletions(-) (limited to 'mm/memory.c') diff --git a/mm/memory.c b/mm/memory.c index 0e28fddafdaf..cfd3c78f00fe 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2041,6 +2041,146 @@ static inline int wp_page_reuse(struct mm_struct *mm, return VM_FAULT_WRITE; } +/* + * Handle the case of a page which we actually need to copy to a new page. + * + * Called with mmap_sem locked and the old page referenced, but + * without the ptl held. + * + * High level logic flow: + * + * - Allocate a page, copy the content of the old page to the new one. + * - Handle book keeping and accounting - cgroups, mmu-notifiers, etc. + * - Take the PTL. If the pte changed, bail out and release the allocated page + * - If the pte is still the way we remember it, update the page table and all + * relevant references. This includes dropping the reference the page-table + * held to the old page, as well as updating the rmap. + * - In any case, unlock the PTL and drop the reference we took to the old page.
+ */ +static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, pmd_t *pmd, + pte_t orig_pte, struct page *old_page) +{ + struct page *new_page = NULL; + spinlock_t *ptl = NULL; + pte_t entry; + int page_copied = 0; + const unsigned long mmun_start = address & PAGE_MASK; /* For mmu_notifiers */ + const unsigned long mmun_end = mmun_start + PAGE_SIZE; /* For mmu_notifiers */ + struct mem_cgroup *memcg; + + if (unlikely(anon_vma_prepare(vma))) + goto oom; + + if (is_zero_pfn(pte_pfn(orig_pte))) { + new_page = alloc_zeroed_user_highpage_movable(vma, address); + if (!new_page) + goto oom; + } else { + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); + if (!new_page) + goto oom; + cow_user_page(new_page, old_page, address, vma); + } + __SetPageUptodate(new_page); + + if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) + goto oom_free_new; + + mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); + + /* + * Re-check the pte - we dropped the lock + */ + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (likely(pte_same(*page_table, orig_pte))) { + if (old_page) { + if (!PageAnon(old_page)) { + dec_mm_counter_fast(mm, MM_FILEPAGES); + inc_mm_counter_fast(mm, MM_ANONPAGES); + } + } else { + inc_mm_counter_fast(mm, MM_ANONPAGES); + } + flush_cache_page(vma, address, pte_pfn(orig_pte)); + entry = mk_pte(new_page, vma->vm_page_prot); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + /* + * Clear the pte entry and flush it first, before updating the + * pte with the new entry. This will avoid a race condition + * seen in the presence of one thread doing SMC and another + * thread doing COW. + */ + ptep_clear_flush_notify(vma, address, page_table); + page_add_new_anon_rmap(new_page, vma, address); + mem_cgroup_commit_charge(new_page, memcg, false); + lru_cache_add_active_or_unevictable(new_page, vma); + /* + * We call the notify macro here because, when using secondary + * mmu page tables (such as kvm shadow page tables), we want the + * new page to be mapped directly into the secondary page table. + */ + set_pte_at_notify(mm, address, page_table, entry); + update_mmu_cache(vma, address, page_table); + if (old_page) { + /* + * Only after switching the pte to the new page may + * we remove the mapcount here. Otherwise another + * process may come and find the rmap count decremented + * before the pte is switched to the new page, and + * "reuse" the old page writing into it while our pte + * here still points into it and can be read by other + * threads. + * + * The critical issue is to order this + * page_remove_rmap with the ptp_clear_flush above. + * Those stores are ordered by (if nothing else,) + * the barrier present in the atomic_add_negative + * in page_remove_rmap. + * + * Then the TLB flush in ptep_clear_flush ensures that + * no process can access the old page before the + * decremented mapcount is visible. And the old page + * cannot be reused until after the decremented + * mapcount is visible. So transitively, TLBs to + * old page will be flushed before it can be reused. + */ + page_remove_rmap(old_page); + } + + /* Free the old page.. 
*/ + new_page = old_page; + page_copied = 1; + } else { + mem_cgroup_cancel_charge(new_page, memcg); + } + + if (new_page) + page_cache_release(new_page); + + pte_unmap_unlock(page_table, ptl); + mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); + if (old_page) { + /* + * Don't let another task, with possibly unlocked vma, + * keep the mlocked page. + */ + if (page_copied && (vma->vm_flags & VM_LOCKED)) { + lock_page(old_page); /* LRU manipulation */ + munlock_vma_page(old_page); + unlock_page(old_page); + } + page_cache_release(old_page); + } + return page_copied ? VM_FAULT_WRITE : 0; +oom_free_new: + page_cache_release(new_page); +oom: + if (old_page) + page_cache_release(old_page); + return VM_FAULT_OOM; +} + /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address @@ -2064,12 +2204,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, spinlock_t *ptl, pte_t orig_pte) __releases(ptl) { - struct page *old_page, *new_page = NULL; - pte_t entry; - int page_copied = 0; - unsigned long mmun_start = 0; /* For mmu_notifiers */ - unsigned long mmun_end = 0; /* For mmu_notifiers */ - struct mem_cgroup *memcg; + struct page *old_page; old_page = vm_normal_page(vma, address, orig_pte); if (!old_page) { @@ -2085,7 +2220,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, (VM_WRITE|VM_SHARED)) return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, old_page, 0, 0); - goto gotten; + + pte_unmap_unlock(page_table, ptl); + return wp_page_copy(mm, vma, address, page_table, pmd, + orig_pte, old_page); } /* @@ -2165,119 +2303,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, * Ok, we need to copy. Oh, well.. */ page_cache_get(old_page); -gotten: - pte_unmap_unlock(page_table, ptl); - - if (unlikely(anon_vma_prepare(vma))) - goto oom; - - if (is_zero_pfn(pte_pfn(orig_pte))) { - new_page = alloc_zeroed_user_highpage_movable(vma, address); - if (!new_page) - goto oom; - } else { - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); - if (!new_page) - goto oom; - cow_user_page(new_page, old_page, address, vma); - } - __SetPageUptodate(new_page); - - if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg)) - goto oom_free_new; - - mmun_start = address & PAGE_MASK; - mmun_end = mmun_start + PAGE_SIZE; - mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); - - /* - * Re-check the pte - we dropped the lock - */ - page_table = pte_offset_map_lock(mm, pmd, address, &ptl); - if (likely(pte_same(*page_table, orig_pte))) { - if (old_page) { - if (!PageAnon(old_page)) { - dec_mm_counter_fast(mm, MM_FILEPAGES); - inc_mm_counter_fast(mm, MM_ANONPAGES); - } - } else - inc_mm_counter_fast(mm, MM_ANONPAGES); - flush_cache_page(vma, address, pte_pfn(orig_pte)); - entry = mk_pte(new_page, vma->vm_page_prot); - entry = maybe_mkwrite(pte_mkdirty(entry), vma); - /* - * Clear the pte entry and flush it first, before updating the - * pte with the new entry. This will avoid a race condition - * seen in the presence of one thread doing SMC and another - * thread doing COW. 
- */ - ptep_clear_flush_notify(vma, address, page_table); - page_add_new_anon_rmap(new_page, vma, address); - mem_cgroup_commit_charge(new_page, memcg, false); - lru_cache_add_active_or_unevictable(new_page, vma); - /* - * We call the notify macro here because, when using secondary - * mmu page tables (such as kvm shadow page tables), we want the - * new page to be mapped directly into the secondary page table. - */ - set_pte_at_notify(mm, address, page_table, entry); - update_mmu_cache(vma, address, page_table); - if (old_page) { - /* - * Only after switching the pte to the new page may - * we remove the mapcount here. Otherwise another - * process may come and find the rmap count decremented - * before the pte is switched to the new page, and - * "reuse" the old page writing into it while our pte - * here still points into it and can be read by other - * threads. - * - * The critical issue is to order this - * page_remove_rmap with the ptp_clear_flush above. - * Those stores are ordered by (if nothing else,) - * the barrier present in the atomic_add_negative - * in page_remove_rmap. - * - * Then the TLB flush in ptep_clear_flush ensures that - * no process can access the old page before the - * decremented mapcount is visible. And the old page - * cannot be reused until after the decremented - * mapcount is visible. So transitively, TLBs to - * old page will be flushed before it can be reused. - */ - page_remove_rmap(old_page); - } - - /* Free the old page.. */ - new_page = old_page; - page_copied = 1; - } else - mem_cgroup_cancel_charge(new_page, memcg); - - if (new_page) - page_cache_release(new_page); pte_unmap_unlock(page_table, ptl); - mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); - if (old_page) { - /* - * Don't let another task, with possibly unlocked vma, - * keep the mlocked page. - */ - if (page_copied && (vma->vm_flags & VM_LOCKED)) { - lock_page(old_page); /* LRU manipulation */ - munlock_vma_page(old_page); - unlock_page(old_page); - } - page_cache_release(old_page); - } - return page_copied ? VM_FAULT_WRITE : 0; -oom_free_new: - page_cache_release(new_page); -oom: - if (old_page) - page_cache_release(old_page); - return VM_FAULT_OOM; + return wp_page_copy(mm, vma, address, page_table, pmd, + orig_pte, old_page); } static void unmap_mapping_range_vma(struct vm_area_struct *vma, -- cgit v1.2.3 From 93e478d4c36ecaf15b942988b8272102d661d44e Mon Sep 17 00:00:00 2001 From: Shachar Raindel Date: Tue, 14 Apr 2015 15:46:35 -0700 Subject: mm: refactor do_wp_page handling of shared vma into a function The do_wp_page function is extremely long. Extract the logic for handling a page belonging to a shared vma into a function of its own. This helps the readability of the code, without doing any functional change in it. Signed-off-by: Shachar Raindel Acked-by: Linus Torvalds Acked-by: Kirill A. 
Shutemov Acked-by: Rik van Riel Acked-by: Andi Kleen Acked-by: Haggai Eran Acked-by: Johannes Weiner Cc: Mel Gorman Cc: Matthew Wilcox Cc: Dave Hansen Cc: Naoya Horiguchi Cc: Andrea Arcangeli Cc: Peter Feiner Cc: Michel Lespinasse Reviewed-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 86 ++++++++++++++++++++++++++++++++++--------------------------- 1 file changed, 48 insertions(+), 38 deletions(-) (limited to 'mm/memory.c') diff --git a/mm/memory.c b/mm/memory.c index cfd3c78f00fe..ac20b2a6a0c3 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2181,6 +2181,52 @@ oom: return VM_FAULT_OOM; } +static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *page_table, + pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte, + struct page *old_page) + __releases(ptl) +{ + int page_mkwrite = 0; + + page_cache_get(old_page); + + /* + * Only catch write-faults on shared writable pages, + * read-only shared pages can get COWed by + * get_user_pages(.write=1, .force=1). + */ + if (vma->vm_ops && vma->vm_ops->page_mkwrite) { + int tmp; + + pte_unmap_unlock(page_table, ptl); + tmp = do_page_mkwrite(vma, old_page, address); + if (unlikely(!tmp || (tmp & + (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { + page_cache_release(old_page); + return tmp; + } + /* + * Since we dropped the lock we need to revalidate + * the PTE as someone else may have changed it. If + * they did, we just return, as we can count on the + * MMU to tell us if they didn't also make it writable. + */ + page_table = pte_offset_map_lock(mm, pmd, address, + &ptl); + if (!pte_same(*page_table, orig_pte)) { + unlock_page(old_page); + pte_unmap_unlock(page_table, ptl); + page_cache_release(old_page); + return 0; + } + page_mkwrite = 1; + } + + return wp_page_reuse(mm, vma, address, page_table, ptl, + orig_pte, old_page, page_mkwrite, 1); +} + /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address @@ -2259,44 +2305,8 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unlock_page(old_page); } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { - int page_mkwrite = 0; - - page_cache_get(old_page); - - /* - * Only catch write-faults on shared writable pages, - * read-only shared pages can get COWed by - * get_user_pages(.write=1, .force=1). - */ - if (vma->vm_ops && vma->vm_ops->page_mkwrite) { - int tmp; - - pte_unmap_unlock(page_table, ptl); - tmp = do_page_mkwrite(vma, old_page, address); - if (unlikely(!tmp || (tmp & - (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { - page_cache_release(old_page); - return tmp; - } - /* - * Since we dropped the lock we need to revalidate - * the PTE as someone else may have changed it. If - * they did, we just return, as we can count on the - * MMU to tell us if they didn't also make it writable. 
- */ - page_table = pte_offset_map_lock(mm, pmd, address, - &ptl); - if (!pte_same(*page_table, orig_pte)) { - unlock_page(old_page); - pte_unmap_unlock(page_table, ptl); - page_cache_release(old_page); - return 0; - } - page_mkwrite = 1; - } - - return wp_page_reuse(mm, vma, address, page_table, ptl, - orig_pte, old_page, page_mkwrite, 1); + return wp_page_shared(mm, vma, address, page_table, pmd, + ptl, orig_pte, old_page); } /* -- cgit v1.2.3 From 4db0c3c2983cc6b7a08a33542af5e14de8a9258c Mon Sep 17 00:00:00 2001 From: Jason Low Date: Wed, 15 Apr 2015 16:14:08 -0700 Subject: mm: remove rest of ACCESS_ONCE() usages We converted some of the usages of ACCESS_ONCE to READ_ONCE in the mm/ tree since ACCESS_ONCE doesn't work reliably on non-scalar types. This patch removes the rest of the ACCESS_ONCE usages and uses the new READ_ONCE API for the read accesses. This makes things cleaner, instead of using separate/multiple sets of APIs. Signed-off-by: Jason Low Acked-by: Michal Hocko Acked-by: Davidlohr Bueso Acked-by: Rik van Riel Reviewed-by: Christian Borntraeger Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm/memory.c') diff --git a/mm/memory.c b/mm/memory.c index ac20b2a6a0c3..656593f73c8e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2845,7 +2845,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address, struct vm_fault vmf; int off; - nr_pages = ACCESS_ONCE(fault_around_bytes) >> PAGE_SHIFT; + nr_pages = READ_ONCE(fault_around_bytes) >> PAGE_SHIFT; mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK; start_addr = max(address & mask, vma->vm_start); -- cgit v1.2.3 From 2682582a6ea118d974c33da64923ae8c687fdd0b Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Wed, 15 Apr 2015 16:15:08 -0700 Subject: mm/memory: also print a_ops->readpage in print_bad_pte() A lot of filesystems use generic_file_mmap() and filemap_fault(), so f_op->mmap and vm_ops->fault aren't enough to identify the filesystem. This prints the file name, vm_ops->fault, f_op->mmap and a_ops->readpage (which is almost always implemented and filesystem-specific). Example: [ 23.676410] BUG: Bad page map in process sh pte:1b7e6025 pmd:19bbd067 [ 23.676887] page:ffffea00006df980 count:4 mapcount:1 mapping:ffff8800196426c0 index:0x97 [ 23.677481] flags: 0x10000000000000c(referenced|uptodate) [ 23.677896] page dumped because: bad pte [ 23.678205] addr:00007f52fcb17000 vm_flags:00000075 anon_vma: (null) mapping:ffff8800196426c0 index:97 [ 23.678922] file:libc-2.19.so fault:filemap_fault mmap:generic_file_readonly_mmap readpage:v9fs_vfs_readpage [akpm@linux-foundation.org: use pr_alert, per Kirill] Signed-off-by: Konstantin Khlebnikov Cc: Sasha Levin Acked-by: Kirill A. Shutemov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'mm/memory.c') diff --git a/mm/memory.c b/mm/memory.c index 656593f73c8e..f9628e568c58 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -690,12 +690,11 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, /* * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y */ - if (vma->vm_ops) - printk(KERN_ALERT "vma->vm_ops->fault: %pSR\n", - vma->vm_ops->fault); - if (vma->vm_file) - printk(KERN_ALERT "vma->vm_file->f_op->mmap: %pSR\n", - vma->vm_file->f_op->mmap); + pr_alert("file:%pD fault:%pf mmap:%pf readpage:%pf\n", + vma->vm_file, + vma->vm_ops ?
vma->vm_ops->fault : NULL, + vma->vm_file ? vma->vm_file->f_op->mmap : NULL, + mapping ? mapping->a_ops->readpage : NULL); dump_stack(); add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); } -- cgit v1.2.3 From dd9061846a3ba01b0fa45423aaa087e4a69187fa Mon Sep 17 00:00:00 2001 From: Boaz Harrosh Date: Wed, 15 Apr 2015 16:15:11 -0700 Subject: mm: new pfn_mkwrite same as page_mkwrite for VM_PFNMAP This will allow a filesystem that uses VM_PFNMAP | VM_MIXEDMAP (no page structs) to get notified when an access is a write to a read-only PFN. This can happen if we mmap() a file, then first mmap-read from it to page-in a read-only PFN, and then mmap-write to the same page. We need this functionality to fix a DAX bug, where in the scenario above we fail to set ctime/mtime even though we modified the file. An xfstest is attached to this patchset that shows the failure and the fix. (A DAX patch will follow) This functionality is extra important for us, because upon dirtying of a pmem page we also want to RDMA the page to a remote cluster node. We define a new pfn_mkwrite and do not reuse page_mkwrite because: 1 - the name ;-) 2 - but mainly because it would take a very long and tedious audit of all page_mkwrite functions of VM_MIXEDMAP/VM_PFNMAP users to make sure they do not crash now that no page is passed. For example, the current DAX code (which this is for) would crash. If we wanted to reuse page_mkwrite, we would need to first patch all users so they do not crash on a missing page, and only then enable this patch. But even if I did that I would not sleep so well at night. Adding a new vector is the safest thing to do, and is not that expensive: an extra pointer in a static function vector per driver. Also, the new vector is better for performance, because otherwise we would call all current kernel vectors only to check that there is no page, do nothing, and return. There is no need to call it from do_shared_fault because do_wp_page is called to change the pte permissions anyway. Signed-off-by: Yigal Korman Signed-off-by: Boaz Harrosh Acked-by: Kirill A. Shutemov Cc: Matthew Wilcox Cc: Jan Kara Cc: Hugh Dickins Cc: Mel Gorman Cc: Dave Chinner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory.c | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) (limited to 'mm/memory.c') diff --git a/mm/memory.c b/mm/memory.c index f9628e568c58..22e037e3364e 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2180,6 +2180,42 @@ oom: return VM_FAULT_OOM; } +/* + * Handle write page faults for VM_MIXEDMAP or VM_PFNMAP for a VM_SHARED + * mapping + */ +static int wp_pfn_shared(struct mm_struct *mm, + struct vm_area_struct *vma, unsigned long address, + pte_t *page_table, spinlock_t *ptl, pte_t orig_pte, + pmd_t *pmd) +{ + if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { + struct vm_fault vmf = { + .page = NULL, + .pgoff = linear_page_index(vma, address), + .virtual_address = (void __user *)(address & PAGE_MASK), + .flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE, + }; + int ret; + + pte_unmap_unlock(page_table, ptl); + ret = vma->vm_ops->pfn_mkwrite(vma, &vmf); + if (ret & VM_FAULT_ERROR) + return ret; + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + /* + * We might have raced with another page fault while we + * released the pte_offset_map_lock.
+ */ + if (!pte_same(*page_table, orig_pte)) { + pte_unmap_unlock(page_table, ptl); + return 0; + } + } + return wp_page_reuse(mm, vma, address, page_table, ptl, orig_pte, + NULL, 0, 0); +} + static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, pte_t *page_table, pmd_t *pmd, spinlock_t *ptl, pte_t orig_pte, @@ -2258,13 +2294,12 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, * VM_PFNMAP VMA. * * We should not cow pages in a shared writeable mapping. - * Just mark the pages writable as we can't do any dirty - * accounting on raw pfn maps. + * Just mark the pages writable and/or call ops->pfn_mkwrite. */ if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED)) - return wp_page_reuse(mm, vma, address, page_table, ptl, - orig_pte, old_page, 0, 0); + return wp_pfn_shared(mm, vma, address, page_table, ptl, + orig_pte, pmd); pte_unmap_unlock(page_table, ptl); return wp_page_copy(mm, vma, address, page_table, pmd, -- cgit v1.2.3
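
For reference, a minimal sketch of how a driver or filesystem might wire up the ->pfn_mkwrite hook introduced by the last patch above. The names example_pfn_mkwrite and example_vm_ops are hypothetical and not part of the series; only the hook signature, the NULL vmf->page, and the VM_FAULT_* return convention come from the patch itself:

#include <linux/fs.h>
#include <linux/mm.h>

static int example_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct inode *inode = file_inode(vma->vm_file);

        /*
         * vmf->page is NULL for a PFN mapping; vmf->pgoff identifies the file
         * page being written through the read-only PFN. Refuse writes past EOF.
         */
        if (vmf->pgoff >= (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT)
                return VM_FAULT_SIGBUS;

        /*
         * Record the write so ctime/mtime are updated (the DAX motivation
         * above). A real implementation may also need freeze protection and
         * its own locking.
         */
        file_update_time(vma->vm_file);

        /*
         * Returning 0 (no VM_FAULT_ERROR bits set) lets wp_pfn_shared()
         * revalidate the pte and fall through to wp_page_reuse(), which
         * marks the pte writable.
         */
        return 0;
}

static const struct vm_operations_struct example_vm_ops = {
        /* the real driver's .fault, .page_mkwrite, etc. are omitted here */
        .pfn_mkwrite    = example_pfn_mkwrite,
};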