Diffstat (limited to 'mm/rmap.c')
 -rw-r--r--  mm/rmap.c  |  70
 1 file changed, 44 insertions(+), 26 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index c8454e06b6c8..5b5ad584ffb7 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -120,6 +120,21 @@ static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
 	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
 }
 
+static void anon_vma_chain_link(struct vm_area_struct *vma,
+				struct anon_vma_chain *avc,
+				struct anon_vma *anon_vma)
+{
+	avc->vma = vma;
+	avc->anon_vma = anon_vma;
+	list_add(&avc->same_vma, &vma->anon_vma_chain);
+
+	/*
+	 * It's critical to add new vmas to the tail of the anon_vma,
+	 * see comment in huge_memory.c:__split_huge_page().
+	 */
+	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
+}
+
 /**
  * anon_vma_prepare - attach an anon_vma to a memory region
  * @vma: the memory region in question
@@ -175,10 +190,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
 		spin_lock(&mm->page_table_lock);
 		if (likely(!vma->anon_vma)) {
 			vma->anon_vma = anon_vma;
-			avc->anon_vma = anon_vma;
-			avc->vma = vma;
-			list_add(&avc->same_vma, &vma->anon_vma_chain);
-			list_add_tail(&avc->same_anon_vma, &anon_vma->head);
+			anon_vma_chain_link(vma, avc, anon_vma);
 			allocated = NULL;
 			avc = NULL;
 		}
@@ -224,21 +236,6 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
 		mutex_unlock(&root->mutex);
 }
 
-static void anon_vma_chain_link(struct vm_area_struct *vma,
-				struct anon_vma_chain *avc,
-				struct anon_vma *anon_vma)
-{
-	avc->vma = vma;
-	avc->anon_vma = anon_vma;
-	list_add(&avc->same_vma, &vma->anon_vma_chain);
-
-	/*
-	 * It's critical to add new vmas to the tail of the anon_vma,
-	 * see comment in huge_memory.c:__split_huge_page().
-	 */
-	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
-}
-
 /*
  * Attach the anon_vmas from src to dst.
  * Returns 0 on success, -ENOMEM on failure.
@@ -1151,10 +1148,15 @@ void page_add_new_anon_rmap(struct page *page,
  */
 void page_add_file_rmap(struct page *page)
 {
+	bool locked;
+	unsigned long flags;
+
+	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
 		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
+	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /**
@@ -1165,9 +1167,21 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+	bool anon = PageAnon(page);
+	bool locked;
+	unsigned long flags;
+
+	/*
+	 * The anon case has no mem_cgroup page_stat to update; but may
+	 * uncharge_page() below, where the lock ordering can deadlock if
+	 * we hold the lock against page_stat move: so avoid it on anon.
+	 */
+	if (!anon)
+		mem_cgroup_begin_update_page_stat(page, &locked, &flags);
+
 	/* page still mapped by someone else? */
 	if (!atomic_add_negative(-1, &page->_mapcount))
-		return;
+		goto out;
 
 	/*
 	 * Now that the last pte has gone, s390 must transfer dirty
@@ -1176,7 +1190,7 @@ void page_remove_rmap(struct page *page)
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
 	 */
-	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	if ((!anon || PageSwapCache(page)) &&
 	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
 	/*
@@ -1184,8 +1198,8 @@ void page_remove_rmap(struct page *page)
 	 * and not charged by memcg for now.
 	 */
 	if (unlikely(PageHuge(page)))
-		return;
-	if (PageAnon(page)) {
+		goto out;
+	if (anon) {
 		mem_cgroup_uncharge_page(page);
 		if (!PageTransHuge(page))
 			__dec_zone_page_state(page, NR_ANON_PAGES);
@@ -1205,6 +1219,9 @@ void page_remove_rmap(struct page *page)
 	 * Leaving it set also helps swapoff to reinstate ptes
 	 * faster for those pages still in swapcache.
 	 */
+out:
+	if (!anon)
+		mem_cgroup_end_update_page_stat(page, &locked, &flags);
 }
 
 /*
@@ -1282,7 +1299,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			}
 			dec_mm_counter(mm, MM_ANONPAGES);
 			inc_mm_counter(mm, MM_SWAPENTS);
-		} else if (PAGE_MIGRATION) {
+		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
 			/*
 			 * Store the pfn of the page in a special migration
 			 * pte. do_swap_page() will wait until the migration
@@ -1293,7 +1310,8 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		}
 		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 		BUG_ON(pte_file(*pte));
-	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
+	} else if (IS_ENABLED(CONFIG_MIGRATION) &&
+		   (TTU_ACTION(flags) == TTU_MIGRATION)) {
 		/* Establish migration entry for a file page */
 		swp_entry_t entry;
 		entry = make_migration_entry(page, pte_write(pteval));
@@ -1499,7 +1517,7 @@ static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 		 * locking requirements of exec(), migration skips
 		 * temporary VMAs until after exec() completes.
 		 */
-		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
+		if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
 				is_vma_temporary_stack(vma))
 			continue;
 
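
Note on the PAGE_MIGRATION conversion above: the old PAGE_MIGRATION macro was a 0/1 constant derived from CONFIG_MIGRATION in <linux/migrate.h>, and the diff replaces it with the generic IS_ENABLED() helper from <linux/kconfig.h>, which likewise evaluates to a compile-time 0 or 1. A minimal illustrative sketch of the pattern follows; the migration_supported() helper and its comments are ours for illustration only, not part of this diff.

	#include <linux/kconfig.h>	/* IS_ENABLED() */
	#include <linux/types.h>	/* bool */

	/*
	 * Illustrative sketch only (not part of the diff above).
	 * IS_ENABLED(CONFIG_FOO) expands to 1 when CONFIG_FOO is set to y
	 * or m and to 0 otherwise, so the migration branches stay visible
	 * to the compiler (unlike code hidden behind #ifdef) and are
	 * discarded as dead code when CONFIG_MIGRATION is off.
	 */
	static inline bool migration_supported(void)
	{
		return IS_ENABLED(CONFIG_MIGRATION);
	}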
