Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig       |  22
-rw-r--r--  mm/filemap.c     |  14
-rw-r--r--  mm/filemap_xip.c |  25
-rw-r--r--  mm/hugetlb.c     |  13
-rw-r--r--  mm/madvise.c     |  22
-rw-r--r--  mm/memory.c      |  91
-rw-r--r--  mm/mempolicy.c   |  16
-rw-r--r--  mm/mmap.c        |   6
-rw-r--r--  mm/mremap.c      |   6
-rw-r--r--  mm/nommu.c       |   6
-rw-r--r--  mm/page_alloc.c  |  54
-rw-r--r--  mm/rmap.c        |  29
-rw-r--r--  mm/shmem.c       | 108
-rw-r--r--  mm/slab.c        |  33
-rw-r--r--  mm/sparse.c      |  75
-rw-r--r--  mm/swap_state.c  |   6
-rw-r--r--  mm/swapfile.c    | 412
-rw-r--r--  mm/vmalloc.c     |   2
-rw-r--r--  mm/vmscan.c      |  12
19 files changed, 521 insertions, 431 deletions
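Two of the hunks below (mm/Kconfig and mm/sparse.c) introduce SPARSEMEM_EXTREME, which turns the statically sized mem_section[] table into a two-level structure whose second-level blocks are allocated only for sections that memory_present() actually reports. As a rough, userspace-only sketch of that lookup (the constants and helper names here are assumptions for illustration, not the kernel's definitions):

```c
/*
 * Hedged sketch of the two-level mem_section lookup added by
 * SPARSEMEM_EXTREME in the mm/sparse.c hunks below.  SECTIONS_PER_ROOT,
 * NR_SECTION_ROOTS and nr_to_section() are stand-ins; the real values
 * and names live in include/linux/mmzone.h.
 */
#include <stdio.h>
#include <stdlib.h>

#define SECTIONS_PER_ROOT 64    /* hypothetical; really PAGE_SIZE / sizeof(struct mem_section) */
#define NR_SECTION_ROOTS  1024  /* hypothetical */

struct mem_section { unsigned long section_mem_map; };

static struct mem_section *mem_section_root[NR_SECTION_ROOTS];

/* Allocate a root's block lazily, mirroring sparse_index_init() in the patch. */
static struct mem_section *nr_to_section(unsigned long nr)
{
	unsigned long root = nr / SECTIONS_PER_ROOT;

	if (!mem_section_root[root]) {
		mem_section_root[root] =
			calloc(SECTIONS_PER_ROOT, sizeof(struct mem_section));
		if (!mem_section_root[root])
			return NULL;
	}
	return &mem_section_root[root][nr % SECTIONS_PER_ROOT];
}

int main(void)
{
	unsigned long nr = 4464;	/* arbitrary section number */
	struct mem_section *ms = nr_to_section(nr);

	if (ms)
		ms->section_mem_map = 1;	/* "present", like SECTION_MARKED_PRESENT */
	printf("section %lu marked: %lu\n", nr, ms ? ms->section_mem_map : 0UL);
	return 0;
}
```

The point of the indirection is the one the new Kconfig help text makes: a flat mem_section[] can consume a large amount of .bss on very sparse physical address spaces, whereas the two-level form only pays for roots that contain present memory.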
diff --git a/mm/Kconfig b/mm/Kconfig index cd379936cac6..4e9937ac3529 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -89,3 +89,25 @@ config NEED_MULTIPLE_NODES config HAVE_MEMORY_PRESENT def_bool y depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM + +# +# SPARSEMEM_EXTREME (which is the default) does some bootmem +# allocations when memory_present() is called. If this can not +# be done on your architecture, select this option. However, +# statically allocating the mem_section[] array can potentially +# consume vast quantities of .bss, so be careful. +# +# This option will also potentially produce smaller runtime code +# with gcc 3.4 and later. +# +config SPARSEMEM_STATIC + def_bool n + +# +# Architectecture platforms which require a two level mem_section in SPARSEMEM +# must select this option. This is usually for architecture platforms with +# an extremely sparse physical address space. +# +config SPARSEMEM_EXTREME + def_bool y + depends on SPARSEMEM && !SPARSEMEM_STATIC diff --git a/mm/filemap.c b/mm/filemap.c index c11418dd94e8..88611928e71f 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -54,9 +54,8 @@ * * ->i_mmap_lock (vmtruncate) * ->private_lock (__free_pte->__set_page_dirty_buffers) - * ->swap_list_lock - * ->swap_device_lock (exclusive_swap_page, others) - * ->mapping->tree_lock + * ->swap_lock (exclusive_swap_page, others) + * ->mapping->tree_lock * * ->i_sem * ->i_mmap_lock (truncate->unmap_mapping_range) @@ -86,7 +85,7 @@ * ->page_table_lock (anon_vma_prepare and various) * * ->page_table_lock - * ->swap_device_lock (try_to_unmap_one) + * ->swap_lock (try_to_unmap_one) * ->private_lock (try_to_unmap_one) * ->tree_lock (try_to_unmap_one) * ->zone.lru_lock (follow_page->mark_page_accessed) @@ -1505,8 +1504,12 @@ repeat: return -EINVAL; page = filemap_getpage(file, pgoff, nonblock); + + /* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as + * done in shmem_populate calling shmem_getpage */ if (!page && !nonblock) return -ENOMEM; + if (page) { err = install_page(mm, vma, addr, page, prot); if (err) { @@ -1514,6 +1517,9 @@ repeat: return err; } } else { + /* No page was found just because we can't read it in now (being + * here implies nonblock != 0), but the page may exist, so set + * the PTE to fault it in later. */ err = install_file_pte(mm, vma, addr, pgoff, prot); if (err) return err; diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c index 3b6e384b98a6..8c199f537732 100644 --- a/mm/filemap_xip.c +++ b/mm/filemap_xip.c @@ -68,13 +68,12 @@ do_xip_mapping_read(struct address_space *mapping, if (unlikely(IS_ERR(page))) { if (PTR_ERR(page) == -ENODATA) { /* sparse */ - page = virt_to_page(empty_zero_page); + page = ZERO_PAGE(0); } else { desc->error = PTR_ERR(page); goto out; } - } else - BUG_ON(!PageUptodate(page)); + } /* If users can be writing to this page using arbitrary * virtual addresses, take care about potential aliasing @@ -84,8 +83,7 @@ do_xip_mapping_read(struct address_space *mapping, flush_dcache_page(page); /* - * Ok, we have the page, and it's up-to-date, so - * now we can copy it to user space... + * Ok, we have the page, so now we can copy it to user space... * * The actor routine returns how many bytes were actually used.. * NOTE! This may not be the same as how much of a user buffer @@ -164,7 +162,7 @@ EXPORT_SYMBOL_GPL(xip_file_sendfile); * xip_write * * This function walks all vmas of the address_space and unmaps the - * empty_zero_page when found at pgoff. Should it go in rmap.c? + * ZERO_PAGE when found at pgoff. Should it go in rmap.c? 
*/ static void __xip_unmap (struct address_space * mapping, @@ -187,11 +185,11 @@ __xip_unmap (struct address_space * mapping, * We need the page_table_lock to protect us from page faults, * munmap, fork, etc... */ - pte = page_check_address(virt_to_page(empty_zero_page), mm, + pte = page_check_address(ZERO_PAGE(address), mm, address); if (!IS_ERR(pte)) { /* Nuke the page table entry. */ - flush_cache_page(vma, address, pte_pfn(pte)); + flush_cache_page(vma, address, pte_pfn(*pte)); pteval = ptep_clear_flush(vma, address, pte); BUG_ON(pte_dirty(pteval)); pte_unmap(pte); @@ -230,7 +228,6 @@ xip_file_nopage(struct vm_area_struct * area, page = mapping->a_ops->get_xip_page(mapping, pgoff*(PAGE_SIZE/512), 0); if (!IS_ERR(page)) { - BUG_ON(!PageUptodate(page)); return page; } if (PTR_ERR(page) != -ENODATA) @@ -245,12 +242,11 @@ xip_file_nopage(struct vm_area_struct * area, pgoff*(PAGE_SIZE/512), 1); if (IS_ERR(page)) return NULL; - BUG_ON(!PageUptodate(page)); /* unmap page at pgoff from all other vmas */ __xip_unmap(mapping, pgoff); } else { - /* not shared and writable, use empty_zero_page */ - page = virt_to_page(empty_zero_page); + /* not shared and writable, use ZERO_PAGE() */ + page = ZERO_PAGE(address); } return page; @@ -319,8 +315,6 @@ __xip_file_write(struct file *filp, const char __user *buf, break; } - BUG_ON(!PageUptodate(page)); - copied = filemap_copy_from_user(page, offset, buf, bytes); flush_dcache_page(page); if (likely(copied > 0)) { @@ -435,8 +429,7 @@ xip_truncate_page(struct address_space *mapping, loff_t from) return 0; else return PTR_ERR(page); - } else - BUG_ON(!PageUptodate(page)); + } kaddr = kmap_atomic(page, KM_USER0); memset(kaddr + offset, 0, length); kunmap_atomic(kaddr, KM_USER0); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index fbd1111ea119..901ac523a1c3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -301,6 +301,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, { struct mm_struct *mm = vma->vm_mm; unsigned long address; + pte_t *ptep; pte_t pte; struct page *page; @@ -309,9 +310,17 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, BUG_ON(end & ~HPAGE_MASK); for (address = start; address < end; address += HPAGE_SIZE) { - pte = huge_ptep_get_and_clear(mm, address, huge_pte_offset(mm, address)); + ptep = huge_pte_offset(mm, address); + if (! ptep) + /* This can happen on truncate, or if an + * mmap() is aborted due to an error before + * the prefault */ + continue; + + pte = huge_ptep_get_and_clear(mm, address, ptep); if (pte_none(pte)) continue; + page = pte_page(pte); put_page(page); } @@ -351,8 +360,6 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) ret = -ENOMEM; goto out; } - if (! pte_none(*pte)) - hugetlb_clean_stale_pgtable(pte); idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); diff --git a/mm/madvise.c b/mm/madvise.c index 73180a22877e..4454936f87d1 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -37,7 +37,7 @@ static long madvise_behavior(struct vm_area_struct * vma, if (new_flags == vma->vm_flags) { *prev = vma; - goto success; + goto out; } pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); @@ -62,6 +62,7 @@ static long madvise_behavior(struct vm_area_struct * vma, goto out; } +success: /* * vm_flags is protected by the mmap_sem held in write mode. 
*/ @@ -70,7 +71,6 @@ static long madvise_behavior(struct vm_area_struct * vma, out: if (error == -ENOMEM) error = -EAGAIN; -success: return error; } @@ -83,9 +83,6 @@ static long madvise_willneed(struct vm_area_struct * vma, { struct file *file = vma->vm_file; - if (!file) - return -EBADF; - if (file->f_mapping->a_ops->get_xip_page) { /* no bad return value, but ignore advice */ return 0; @@ -140,11 +137,16 @@ static long madvise_dontneed(struct vm_area_struct * vma, return 0; } -static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, - unsigned long start, unsigned long end, int behavior) +static long +madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev, + unsigned long start, unsigned long end, int behavior) { + struct file *filp = vma->vm_file; long error = -EBADF; + if (!filp) + goto out; + switch (behavior) { case MADV_NORMAL: case MADV_SEQUENTIAL: @@ -165,6 +167,7 @@ static long madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev break; } +out: return error; } @@ -234,8 +237,9 @@ asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior) * - different from the way of handling in mlock etc. */ vma = find_vma_prev(current->mm, start, &prev); - if (!vma && prev) - vma = prev->vm_next; + if (vma && start > vma->vm_start) + prev = vma; + for (;;) { /* Still start < end. */ error = -ENOMEM; diff --git a/mm/memory.c b/mm/memory.c index beabdefa6254..788a62810340 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -498,6 +498,17 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, unsigned long addr = vma->vm_start; unsigned long end = vma->vm_end; + /* + * Don't copy ptes where a page fault will fill them correctly. + * Fork becomes much lighter when there are big shared or private + * readonly mappings. The tradeoff is that copy_page_range is more + * efficient than faulting. + */ + if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) { + if (!vma->anon_vma) + return 0; + } + if (is_vm_hugetlb_page(vma)) return copy_hugetlb_page_range(dst_mm, src_mm, vma); @@ -551,7 +562,8 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd, page->index > details->last_index)) continue; } - ptent = ptep_get_and_clear(tlb->mm, addr, pte); + ptent = ptep_get_and_clear_full(tlb->mm, addr, pte, + tlb->fullmm); tlb_remove_tlb_entry(tlb, pte, addr); if (unlikely(!page)) continue; @@ -579,7 +591,7 @@ static void zap_pte_range(struct mmu_gather *tlb, pmd_t *pmd, continue; if (!pte_file(ptent)) free_swap_and_cache(pte_to_swp_entry(ptent)); - pte_clear(tlb->mm, addr, pte); + pte_clear_full(tlb->mm, addr, pte, tlb->fullmm); } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap(pte - 1); } @@ -776,8 +788,8 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, * Do a quick page-table lookup for a single page. * mm->page_table_lock must be held. 
*/ -static struct page * -__follow_page(struct mm_struct *mm, unsigned long address, int read, int write) +static struct page *__follow_page(struct mm_struct *mm, unsigned long address, + int read, int write, int accessed) { pgd_t *pgd; pud_t *pud; @@ -818,9 +830,11 @@ __follow_page(struct mm_struct *mm, unsigned long address, int read, int write) pfn = pte_pfn(pte); if (pfn_valid(pfn)) { page = pfn_to_page(pfn); - if (write && !pte_dirty(pte) && !PageDirty(page)) - set_page_dirty(page); - mark_page_accessed(page); + if (accessed) { + if (write && !pte_dirty(pte) &&!PageDirty(page)) + set_page_dirty(page); + mark_page_accessed(page); + } return page; } } @@ -829,16 +843,19 @@ out: return NULL; } -struct page * +inline struct page * follow_page(struct mm_struct *mm, unsigned long address, int write) { - return __follow_page(mm, address, /*read*/0, write); + return __follow_page(mm, address, 0, write, 1); } -int -check_user_page_readable(struct mm_struct *mm, unsigned long address) +/* + * check_user_page_readable() can be called frm niterrupt context by oprofile, + * so we need to avoid taking any non-irq-safe locks + */ +int check_user_page_readable(struct mm_struct *mm, unsigned long address) { - return __follow_page(mm, address, /*read*/1, /*write*/0) != NULL; + return __follow_page(mm, address, 1, 0, 0) != NULL; } EXPORT_SYMBOL(check_user_page_readable); @@ -908,9 +925,13 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, pud = pud_offset(pgd, pg); BUG_ON(pud_none(*pud)); pmd = pmd_offset(pud, pg); - BUG_ON(pmd_none(*pmd)); + if (pmd_none(*pmd)) + return i ? : -EFAULT; pte = pte_offset_map(pmd, pg); - BUG_ON(pte_none(*pte)); + if (pte_none(*pte)) { + pte_unmap(pte); + return i ? : -EFAULT; + } if (pages) { pages[i] = pte_page(*pte); get_page(pages[i]); @@ -935,11 +956,13 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, } spin_lock(&mm->page_table_lock); do { + int write_access = write; struct page *page; - int lookup_write = write; cond_resched_lock(&mm->page_table_lock); - while (!(page = follow_page(mm, start, lookup_write))) { + while (!(page = follow_page(mm, start, write_access))) { + int ret; + /* * Shortcut for anonymous pages. We don't want * to force the creation of pages tables for @@ -947,13 +970,23 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, * nobody touched so far. This is important * for doing a core dump for these mappings. */ - if (!lookup_write && - untouched_anonymous_page(mm,vma,start)) { + if (!write && untouched_anonymous_page(mm,vma,start)) { page = ZERO_PAGE(start); break; } spin_unlock(&mm->page_table_lock); - switch (handle_mm_fault(mm,vma,start,write)) { + ret = __handle_mm_fault(mm, vma, start, write_access); + + /* + * The VM_FAULT_WRITE bit tells us that do_wp_page has + * broken COW when necessary, even if maybe_mkwrite + * decided not to set pte_write. We can thus safely do + * subsequent page lookups as if they were reads. + */ + if (ret & VM_FAULT_WRITE) + write_access = 0; + + switch (ret & ~VM_FAULT_WRITE) { case VM_FAULT_MINOR: tsk->min_flt++; break; @@ -967,14 +1000,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, default: BUG(); } - /* - * Now that we have performed a write fault - * and surely no longer have a shared page we - * shouldn't write, we shouldn't ignore an - * unwritable page in the page table if - * we are forcing write access. 
- */ - lookup_write = write && !force; spin_lock(&mm->page_table_lock); } if (pages) { @@ -1224,6 +1249,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, struct page *old_page, *new_page; unsigned long pfn = pte_pfn(pte); pte_t entry; + int ret; if (unlikely(!pfn_valid(pfn))) { /* @@ -1251,7 +1277,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, lazy_mmu_prot_update(entry); pte_unmap(page_table); spin_unlock(&mm->page_table_lock); - return VM_FAULT_MINOR; + return VM_FAULT_MINOR|VM_FAULT_WRITE; } } pte_unmap(page_table); @@ -1278,6 +1304,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, /* * Re-check the pte - we dropped the lock */ + ret = VM_FAULT_MINOR; spin_lock(&mm->page_table_lock); page_table = pte_offset_map(pmd, address); if (likely(pte_same(*page_table, pte))) { @@ -1294,12 +1321,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct * vma, /* Free the old page.. */ new_page = old_page; + ret |= VM_FAULT_WRITE; } pte_unmap(page_table); page_cache_release(new_page); page_cache_release(old_page); spin_unlock(&mm->page_table_lock); - return VM_FAULT_MINOR; + return ret; no_new_page: page_cache_release(old_page); @@ -1928,7 +1956,7 @@ static int do_file_page(struct mm_struct * mm, struct vm_area_struct * vma, * Fall back to the linear mapping if the fs does not support * ->populate: */ - if (!vma->vm_ops || !vma->vm_ops->populate || + if (!vma->vm_ops->populate || (write_access && !(vma->vm_flags & VM_SHARED))) { pte_clear(mm, address, pte); return do_no_page(mm, vma, address, write_access, pte, pmd); @@ -1991,7 +2019,6 @@ static inline int handle_pte_fault(struct mm_struct *mm, if (write_access) { if (!pte_write(entry)) return do_wp_page(mm, vma, address, pte, pmd, entry); - entry = pte_mkdirty(entry); } entry = pte_mkyoung(entry); @@ -2006,7 +2033,7 @@ static inline int handle_pte_fault(struct mm_struct *mm, /* * By the time we get here, we already hold the mm semaphore */ -int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma, +int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct * vma, unsigned long address, int write_access) { pgd_t *pgd; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index cb41c31e7c87..13492d66b7c8 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -443,7 +443,7 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask, struct mempolicy *new; DECLARE_BITMAP(nodes, MAX_NUMNODES); - if (mode > MPOL_MAX) + if (mode < 0 || mode > MPOL_MAX) return -EINVAL; err = get_nodes(nodes, nmask, maxnode, mode); if (err) @@ -664,10 +664,10 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, #endif /* Return effective policy for a VMA */ -static struct mempolicy * -get_vma_policy(struct vm_area_struct *vma, unsigned long addr) +struct mempolicy * +get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr) { - struct mempolicy *pol = current->mempolicy; + struct mempolicy *pol = task->mempolicy; if (vma) { if (vma->vm_ops && vma->vm_ops->get_policy) @@ -786,7 +786,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned or struct page * alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr) { - struct mempolicy *pol = get_vma_policy(vma, addr); + struct mempolicy *pol = get_vma_policy(current, vma, addr); cpuset_update_current_mems_allowed(); @@ -908,7 +908,7 @@ void __mpol_free(struct mempolicy *p) /* Find first node 
suitable for an allocation */ int mpol_first_node(struct vm_area_struct *vma, unsigned long addr) { - struct mempolicy *pol = get_vma_policy(vma, addr); + struct mempolicy *pol = get_vma_policy(current, vma, addr); switch (pol->policy) { case MPOL_DEFAULT: @@ -928,7 +928,7 @@ int mpol_first_node(struct vm_area_struct *vma, unsigned long addr) /* Find secondary valid nodes for an allocation */ int mpol_node_valid(int nid, struct vm_area_struct *vma, unsigned long addr) { - struct mempolicy *pol = get_vma_policy(vma, addr); + struct mempolicy *pol = get_vma_policy(current, vma, addr); switch (pol->policy) { case MPOL_PREFERRED: @@ -1138,11 +1138,11 @@ void mpol_free_shared_policy(struct shared_policy *p) while (next) { n = rb_entry(next, struct sp_node, nd); next = rb_next(&n->nd); + rb_erase(&n->nd, &p->root); mpol_free(n->policy); kmem_cache_free(sn_cache, n); } spin_unlock(&p->lock); - p->root = RB_ROOT; } /* assumes fs == KERNEL_DS */ diff --git a/mm/mmap.c b/mm/mmap.c index da3fa90a0aae..404319477e71 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -143,7 +143,11 @@ int __vm_enough_memory(long pages, int cap_sys_admin) leave 3% of the size of this process for other processes */ allowed -= current->mm->total_vm / 32; - if (atomic_read(&vm_committed_space) < allowed) + /* + * cast `allowed' as a signed long because vm_committed_space + * sometimes has a negative value + */ + if (atomic_read(&vm_committed_space) < (long)allowed) return 0; vm_unacct_memory(pages); diff --git a/mm/mremap.c b/mm/mremap.c index ec7238a78f36..a32fed454bd7 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -141,6 +141,10 @@ move_one_page(struct vm_area_struct *vma, unsigned long old_addr, if (dst) { pte_t pte; pte = ptep_clear_flush(vma, old_addr, src); + /* ZERO_PAGE can be dependant on virtual addr */ + if (pfn_valid(pte_pfn(pte)) && + pte_page(pte) == ZERO_PAGE(old_addr)) + pte = pte_wrprotect(mk_pte(ZERO_PAGE(new_addr), new_vma->vm_page_prot)); set_pte_at(mm, new_addr, dst, pte); } else error = -ENOMEM; @@ -229,6 +233,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, * since do_munmap() will decrement it by old_len == new_len */ mm->total_vm += new_len >> PAGE_SHIFT; + __vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); if (do_munmap(mm, old_addr, old_len) < 0) { /* OOM: unable to split vma, just get accounts right */ @@ -243,7 +248,6 @@ static unsigned long move_vma(struct vm_area_struct *vma, vma->vm_next->vm_flags |= VM_ACCOUNT; } - __vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT); if (vm_flags & VM_LOCKED) { mm->locked_vm += new_len >> PAGE_SHIFT; if (new_len > old_len) diff --git a/mm/nommu.c b/mm/nommu.c index ce74452c02d9..fd4e8df0f02d 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1167,7 +1167,11 @@ int __vm_enough_memory(long pages, int cap_sys_admin) leave 3% of the size of this process for other processes */ allowed -= current->mm->total_vm / 32; - if (atomic_read(&vm_committed_space) < allowed) + /* + * cast `allowed' as a signed long because vm_committed_space + * sometimes has a negative value + */ + if (atomic_read(&vm_committed_space) < (long)allowed) return 0; vm_unacct_memory(pages); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1d6ba6a4b594..b06a9636d971 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -329,7 +329,7 @@ static inline void free_pages_check(const char *function, struct page *page) 1 << PG_writeback ))) bad_page(function, page); if (PageDirty(page)) - ClearPageDirty(page); + __ClearPageDirty(page); } /* @@ -1061,20 
+1061,19 @@ unsigned int nr_free_pages_pgdat(pg_data_t *pgdat) static unsigned int nr_free_zone_pages(int offset) { - pg_data_t *pgdat; + /* Just pick one node, since fallback list is circular */ + pg_data_t *pgdat = NODE_DATA(numa_node_id()); unsigned int sum = 0; - for_each_pgdat(pgdat) { - struct zonelist *zonelist = pgdat->node_zonelists + offset; - struct zone **zonep = zonelist->zones; - struct zone *zone; + struct zonelist *zonelist = pgdat->node_zonelists + offset; + struct zone **zonep = zonelist->zones; + struct zone *zone; - for (zone = *zonep++; zone; zone = *zonep++) { - unsigned long size = zone->present_pages; - unsigned long high = zone->pages_high; - if (size > high) - sum += size - high; - } + for (zone = *zonep++; zone; zone = *zonep++) { + unsigned long size = zone->present_pages; + unsigned long high = zone->pages_high; + if (size > high) + sum += size - high; } return sum; @@ -1131,19 +1130,20 @@ EXPORT_SYMBOL(nr_pagecache); DEFINE_PER_CPU(long, nr_pagecache_local) = 0; #endif -void __get_page_state(struct page_state *ret, int nr) +void __get_page_state(struct page_state *ret, int nr, cpumask_t *cpumask) { int cpu = 0; memset(ret, 0, sizeof(*ret)); + cpus_and(*cpumask, *cpumask, cpu_online_map); - cpu = first_cpu(cpu_online_map); + cpu = first_cpu(*cpumask); while (cpu < NR_CPUS) { unsigned long *in, *out, off; in = (unsigned long *)&per_cpu(page_states, cpu); - cpu = next_cpu(cpu, cpu_online_map); + cpu = next_cpu(cpu, *cpumask); if (cpu < NR_CPUS) prefetch(&per_cpu(page_states, cpu)); @@ -1154,19 +1154,33 @@ void __get_page_state(struct page_state *ret, int nr) } } +void get_page_state_node(struct page_state *ret, int node) +{ + int nr; + cpumask_t mask = node_to_cpumask(node); + + nr = offsetof(struct page_state, GET_PAGE_STATE_LAST); + nr /= sizeof(unsigned long); + + __get_page_state(ret, nr+1, &mask); +} + void get_page_state(struct page_state *ret) { int nr; + cpumask_t mask = CPU_MASK_ALL; nr = offsetof(struct page_state, GET_PAGE_STATE_LAST); nr /= sizeof(unsigned long); - __get_page_state(ret, nr + 1); + __get_page_state(ret, nr + 1, &mask); } void get_full_page_state(struct page_state *ret) { - __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long)); + cpumask_t mask = CPU_MASK_ALL; + + __get_page_state(ret, sizeof(*ret) / sizeof(unsigned long), &mask); } unsigned long __read_page_state(unsigned long offset) @@ -1861,7 +1875,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat, unsigned long *zones_size, unsigned long *zholes_size) { unsigned long i, j; - const unsigned long zone_required_alignment = 1UL << (MAX_ORDER-1); int cpu, nid = pgdat->node_id; unsigned long zone_start_pfn = pgdat->node_start_pfn; @@ -1911,7 +1924,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat, zone->nr_scan_inactive = 0; zone->nr_active = 0; zone->nr_inactive = 0; - atomic_set(&zone->reclaim_in_progress, -1); + atomic_set(&zone->reclaim_in_progress, 0); if (!size) continue; @@ -1934,9 +1947,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat, zone->zone_mem_map = pfn_to_page(zone_start_pfn); zone->zone_start_pfn = zone_start_pfn; - if ((zone_start_pfn) & (zone_required_alignment-1)) - printk(KERN_CRIT "BUG: wrong zone alignment, it will crash\n"); - memmap_init(size, nid, j, zone_start_pfn); zonetable_add(zone, nid, j, zone_start_pfn, size); diff --git a/mm/rmap.c b/mm/rmap.c index 08ac5c7fa91f..450f5241b5a5 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -34,9 +34,8 @@ * anon_vma->lock * mm->page_table_lock * zone->lru_lock 
(in mark_page_accessed) - * swap_list_lock (in swap_free etc's swap_info_get) + * swap_lock (in swap_duplicate, swap_info_get) * mmlist_lock (in mmput, drain_mmlist and others) - * swap_device_lock (in swap_duplicate, swap_info_get) * mapping->private_lock (in __set_page_dirty_buffers) * inode_lock (in set_page_dirty's __mark_inode_dirty) * sb_lock (within inode_lock in fs/fs-writeback.c) @@ -290,8 +289,6 @@ static int page_referenced_one(struct page *page, pte_t *pte; int referenced = 0; - if (!get_mm_counter(mm, rss)) - goto out; address = vma_address(page, vma); if (address == -EFAULT) goto out; @@ -442,22 +439,19 @@ int page_referenced(struct page *page, int is_locked, int ignore_token) void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { - struct anon_vma *anon_vma = vma->anon_vma; - pgoff_t index; - BUG_ON(PageReserved(page)); - BUG_ON(!anon_vma); inc_mm_counter(vma->vm_mm, anon_rss); - anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; - index = (address - vma->vm_start) >> PAGE_SHIFT; - index += vma->vm_pgoff; - index >>= PAGE_CACHE_SHIFT - PAGE_SHIFT; - if (atomic_inc_and_test(&page->_mapcount)) { - page->index = index; + struct anon_vma *anon_vma = vma->anon_vma; + + BUG_ON(!anon_vma); + anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; page->mapping = (struct address_space *) anon_vma; + + page->index = linear_page_index(vma, address); + inc_page_state(nr_mapped); } /* else checking page index and mapping is racy */ @@ -518,8 +512,6 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) pte_t pteval; int ret = SWAP_AGAIN; - if (!get_mm_counter(mm, rss)) - goto out; address = vma_address(page, vma); if (address == -EFAULT) goto out; @@ -532,6 +524,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma) * If the page is mlock()d, we cannot swap it out. * If it's recently referenced (perhaps page_referenced * skipped over this mm) then we should reactivate it. + * + * Pages belonging to VM_RESERVED regions should not happen here. */ if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) || ptep_clear_flush_young(vma, address, pte)) { @@ -767,8 +761,7 @@ static int try_to_unmap_file(struct page *page) if (vma->vm_flags & (VM_LOCKED|VM_RESERVED)) continue; cursor = (unsigned long) vma->vm_private_data; - while (get_mm_counter(vma->vm_mm, rss) && - cursor < max_nl_cursor && + while ( cursor < max_nl_cursor && cursor < vma->vm_end - vma->vm_start) { try_to_unmap_cluster(cursor, &mapcount, vma); cursor += CLUSTER_SIZE; diff --git a/mm/shmem.c b/mm/shmem.c index e64fa726a790..bdc4bbb6ddbb 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -45,7 +45,6 @@ #include <linux/swapops.h> #include <linux/mempolicy.h> #include <linux/namei.h> -#include <linux/xattr.h> #include <asm/uaccess.h> #include <asm/div64.h> #include <asm/pgtable.h> @@ -179,7 +178,6 @@ static struct address_space_operations shmem_aops; static struct file_operations shmem_file_operations; static struct inode_operations shmem_inode_operations; static struct inode_operations shmem_dir_inode_operations; -static struct inode_operations shmem_special_inode_operations; static struct vm_operations_struct shmem_vm_ops; static struct backing_dev_info shmem_backing_dev_info = { @@ -1195,6 +1193,7 @@ static int shmem_populate(struct vm_area_struct *vma, err = shmem_getpage(inode, pgoff, &page, sgp, NULL); if (err) return err; + /* Page may still be null, but only if nonblock was set. 
*/ if (page) { mark_page_accessed(page); err = install_page(mm, vma, addr, page, prot); @@ -1202,7 +1201,10 @@ static int shmem_populate(struct vm_area_struct *vma, page_cache_release(page); return err; } - } else if (nonblock) { + } else { + /* No page was found just because we can't read it in + * now (being here implies nonblock != 0), but the page + * may exist, so set the PTE to fault it in later. */ err = install_file_pte(mm, vma, addr, pgoff, prot); if (err) return err; @@ -1296,7 +1298,6 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev) switch (mode & S_IFMT) { default: - inode->i_op = &shmem_special_inode_operations; init_special_inode(inode, mode, dev); break; case S_IFREG: @@ -1773,44 +1774,33 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s return 0; } -static int shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) +static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd) { nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode)); - return 0; + return NULL; } -static int shmem_follow_link(struct dentry *dentry, struct nameidata *nd) +static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd) { struct page *page = NULL; int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL); nd_set_link(nd, res ? ERR_PTR(res) : kmap(page)); - return 0; + return page; } -static void shmem_put_link(struct dentry *dentry, struct nameidata *nd) +static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) { if (!IS_ERR(nd_get_link(nd))) { - struct page *page; - - page = find_get_page(dentry->d_inode->i_mapping, 0); - if (!page) - BUG(); + struct page *page = cookie; kunmap(page); mark_page_accessed(page); page_cache_release(page); - page_cache_release(page); } } static struct inode_operations shmem_symlink_inline_operations = { .readlink = generic_readlink, .follow_link = shmem_follow_link_inline, -#ifdef CONFIG_TMPFS_XATTR - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .listxattr = generic_listxattr, - .removexattr = generic_removexattr, -#endif }; static struct inode_operations shmem_symlink_inode_operations = { @@ -1818,12 +1808,6 @@ static struct inode_operations shmem_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = shmem_follow_link, .put_link = shmem_put_link, -#ifdef CONFIG_TMPFS_XATTR - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .listxattr = generic_listxattr, - .removexattr = generic_removexattr, -#endif }; static int shmem_parse_options(char *options, int *mode, uid_t *uid, gid_t *gid, unsigned long *blocks, unsigned long *inodes) @@ -1943,12 +1927,6 @@ static void shmem_put_super(struct super_block *sb) sb->s_fs_info = NULL; } -#ifdef CONFIG_TMPFS_XATTR -static struct xattr_handler *shmem_xattr_handlers[]; -#else -#define shmem_xattr_handlers NULL -#endif - static int shmem_fill_super(struct super_block *sb, void *data, int silent) { @@ -1999,7 +1977,6 @@ static int shmem_fill_super(struct super_block *sb, sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = TMPFS_MAGIC; sb->s_op = &shmem_ops; - sb->s_xattr = shmem_xattr_handlers; inode = shmem_get_inode(sb, S_IFDIR | mode, 0); if (!inode) @@ -2088,12 +2065,6 @@ static struct file_operations shmem_file_operations = { static struct inode_operations shmem_inode_operations = { .truncate = shmem_truncate, .setattr = shmem_notify_change, -#ifdef CONFIG_TMPFS_XATTR - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .listxattr 
= generic_listxattr, - .removexattr = generic_removexattr, -#endif }; static struct inode_operations shmem_dir_inode_operations = { @@ -2107,21 +2078,6 @@ static struct inode_operations shmem_dir_inode_operations = { .rmdir = shmem_rmdir, .mknod = shmem_mknod, .rename = shmem_rename, -#ifdef CONFIG_TMPFS_XATTR - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .listxattr = generic_listxattr, - .removexattr = generic_removexattr, -#endif -#endif -}; - -static struct inode_operations shmem_special_inode_operations = { -#ifdef CONFIG_TMPFS_XATTR - .setxattr = generic_setxattr, - .getxattr = generic_getxattr, - .listxattr = generic_listxattr, - .removexattr = generic_removexattr, #endif }; @@ -2147,48 +2103,6 @@ static struct vm_operations_struct shmem_vm_ops = { }; -#ifdef CONFIG_TMPFS_SECURITY - -static size_t shmem_xattr_security_list(struct inode *inode, char *list, size_t list_len, - const char *name, size_t name_len) -{ - return security_inode_listsecurity(inode, list, list_len); -} - -static int shmem_xattr_security_get(struct inode *inode, const char *name, void *buffer, size_t size) -{ - if (strcmp(name, "") == 0) - return -EINVAL; - return security_inode_getsecurity(inode, name, buffer, size); -} - -static int shmem_xattr_security_set(struct inode *inode, const char *name, const void *value, size_t size, int flags) -{ - if (strcmp(name, "") == 0) - return -EINVAL; - return security_inode_setsecurity(inode, name, value, size, flags); -} - -static struct xattr_handler shmem_xattr_security_handler = { - .prefix = XATTR_SECURITY_PREFIX, - .list = shmem_xattr_security_list, - .get = shmem_xattr_security_get, - .set = shmem_xattr_security_set, -}; - -#endif /* CONFIG_TMPFS_SECURITY */ - -#ifdef CONFIG_TMPFS_XATTR - -static struct xattr_handler *shmem_xattr_handlers[] = { -#ifdef CONFIG_TMPFS_SECURITY - &shmem_xattr_security_handler, -#endif - NULL -}; - -#endif /* CONFIG_TMPFS_XATTR */ - static struct super_block *shmem_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { diff --git a/mm/slab.c b/mm/slab.c index c9e706db4634..a9ff4f7f9860 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -189,6 +189,7 @@ * is less than 512 (PAGE_SIZE<<3), but greater than 256. */ +typedef unsigned int kmem_bufctl_t; #define BUFCTL_END (((kmem_bufctl_t)(~0U))-0) #define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1) #define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-2) @@ -600,7 +601,7 @@ static inline kmem_cache_t *__find_general_cachep(size_t size, csizep++; /* - * Really subtile: The last entry with cs->cs_size==ULONG_MAX + * Really subtle: The last entry with cs->cs_size==ULONG_MAX * has cs_{dma,}cachep==NULL. Thus no special case * for large kmalloc calls required. */ @@ -2165,7 +2166,9 @@ static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast fl objp = cache_alloc_refill(cachep, flags); } local_irq_restore(save_flags); - objp = cache_alloc_debugcheck_after(cachep, flags, objp, __builtin_return_address(0)); + objp = cache_alloc_debugcheck_after(cachep, flags, objp, + __builtin_return_address(0)); + prefetchw(objp); return objp; } @@ -3073,20 +3076,24 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer, } #endif +/** + * ksize - get the actual amount of memory allocated for a given object + * @objp: Pointer to the object + * + * kmalloc may internally round up allocations and return more memory + * than requested. ksize() can be used to determine the actual amount of + * memory allocated. 
The caller may use this additional memory, even though + * a smaller amount of memory was initially specified with the kmalloc call. + * The caller must guarantee that objp points to a valid object previously + * allocated with either kmalloc() or kmem_cache_alloc(). The object + * must not be freed during the duration of the call. + */ unsigned int ksize(const void *objp) { - kmem_cache_t *c; - unsigned long flags; - unsigned int size = 0; - - if (likely(objp != NULL)) { - local_irq_save(flags); - c = GET_PAGE_CACHE(virt_to_page(objp)); - size = kmem_cache_size(c); - local_irq_restore(flags); - } + if (unlikely(objp == NULL)) + return 0; - return size; + return obj_reallen(GET_PAGE_CACHE(virt_to_page(objp))); } diff --git a/mm/sparse.c b/mm/sparse.c index b54e304df4a7..347249a4917a 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -6,6 +6,7 @@ #include <linux/mmzone.h> #include <linux/bootmem.h> #include <linux/module.h> +#include <linux/spinlock.h> #include <asm/dma.h> /* @@ -13,9 +14,64 @@ * * 1) mem_section - memory sections, mem_map's for valid memory */ -struct mem_section mem_section[NR_MEM_SECTIONS]; +#ifdef CONFIG_SPARSEMEM_EXTREME +struct mem_section *mem_section[NR_SECTION_ROOTS] + ____cacheline_maxaligned_in_smp; +#else +struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT] + ____cacheline_maxaligned_in_smp; +#endif EXPORT_SYMBOL(mem_section); +#ifdef CONFIG_SPARSEMEM_EXTREME +static struct mem_section *sparse_index_alloc(int nid) +{ + struct mem_section *section = NULL; + unsigned long array_size = SECTIONS_PER_ROOT * + sizeof(struct mem_section); + + section = alloc_bootmem_node(NODE_DATA(nid), array_size); + + if (section) + memset(section, 0, array_size); + + return section; +} + +static int sparse_index_init(unsigned long section_nr, int nid) +{ + static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED; + unsigned long root = SECTION_NR_TO_ROOT(section_nr); + struct mem_section *section; + int ret = 0; + + if (mem_section[root]) + return -EEXIST; + + section = sparse_index_alloc(nid); + /* + * This lock keeps two different sections from + * reallocating for the same index + */ + spin_lock(&index_init_lock); + + if (mem_section[root]) { + ret = -EEXIST; + goto out; + } + + mem_section[root] = section; +out: + spin_unlock(&index_init_lock); + return ret; +} +#else /* !SPARSEMEM_EXTREME */ +static inline int sparse_index_init(unsigned long section_nr, int nid) +{ + return 0; +} +#endif + /* Record a memory area against a node. 
*/ void memory_present(int nid, unsigned long start, unsigned long end) { @@ -24,8 +80,13 @@ void memory_present(int nid, unsigned long start, unsigned long end) start &= PAGE_SECTION_MASK; for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) { unsigned long section = pfn_to_section_nr(pfn); - if (!mem_section[section].section_mem_map) - mem_section[section].section_mem_map = SECTION_MARKED_PRESENT; + struct mem_section *ms; + + sparse_index_init(section, nid); + + ms = __nr_to_section(section); + if (!ms->section_mem_map) + ms->section_mem_map = SECTION_MARKED_PRESENT; } } @@ -85,6 +146,7 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum) { struct page *map; int nid = early_pfn_to_nid(section_nr_to_pfn(pnum)); + struct mem_section *ms = __nr_to_section(pnum); map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION); if (map) @@ -96,7 +158,7 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum) return map; printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__); - mem_section[pnum].section_mem_map = 0; + ms->section_mem_map = 0; return NULL; } @@ -114,8 +176,9 @@ void sparse_init(void) continue; map = sparse_early_mem_map_alloc(pnum); - if (map) - sparse_init_one_section(&mem_section[pnum], pnum, map); + if (!map) + continue; + sparse_init_one_section(__nr_to_section(pnum), pnum, map); } } diff --git a/mm/swap_state.c b/mm/swap_state.c index 4f251775ef90..029e56eb5e77 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -124,6 +124,7 @@ void __delete_from_swap_cache(struct page *page) BUG_ON(!PageLocked(page)); BUG_ON(!PageSwapCache(page)); BUG_ON(PageWriteback(page)); + BUG_ON(PagePrivate(page)); radix_tree_delete(&swapper_space.page_tree, page->private); page->private = 0; @@ -196,11 +197,6 @@ void delete_from_swap_cache(struct page *page) { swp_entry_t entry; - BUG_ON(!PageSwapCache(page)); - BUG_ON(!PageLocked(page)); - BUG_ON(PageWriteback(page)); - BUG_ON(PagePrivate(page)); - entry.val = page->private; write_lock_irq(&swapper_space.tree_lock); diff --git a/mm/swapfile.c b/mm/swapfile.c index 60cd24a55204..4b6e8bf986bc 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -31,7 +31,7 @@ #include <asm/tlbflush.h> #include <linux/swapops.h> -DEFINE_SPINLOCK(swaplock); +DEFINE_SPINLOCK(swap_lock); unsigned int nr_swapfiles; long total_swap_pages; static int swap_overflow; @@ -51,13 +51,11 @@ static DECLARE_MUTEX(swapon_sem); /* * We need this because the bdev->unplug_fn can sleep and we cannot - * hold swap_list_lock while calling the unplug_fn. And swap_list_lock + * hold swap_lock while calling the unplug_fn. And swap_lock * cannot be turned into a semaphore. */ static DECLARE_RWSEM(swap_unplug_sem); -#define SWAPFILE_CLUSTER 256 - void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) { swp_entry_t entry; @@ -84,116 +82,135 @@ void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) up_read(&swap_unplug_sem); } -static inline int scan_swap_map(struct swap_info_struct *si) +#define SWAPFILE_CLUSTER 256 +#define LATENCY_LIMIT 256 + +static inline unsigned long scan_swap_map(struct swap_info_struct *si) { - unsigned long offset; + unsigned long offset, last_in_cluster; + int latency_ration = LATENCY_LIMIT; + /* - * We try to cluster swap pages by allocating them - * sequentially in swap. Once we've allocated - * SWAPFILE_CLUSTER pages this way, however, we resort to - * first-free allocation, starting a new cluster. 
This - * prevents us from scattering swap pages all over the entire - * swap partition, so that we reduce overall disk seek times - * between swap pages. -- sct */ - if (si->cluster_nr) { - while (si->cluster_next <= si->highest_bit) { - offset = si->cluster_next++; + * We try to cluster swap pages by allocating them sequentially + * in swap. Once we've allocated SWAPFILE_CLUSTER pages this + * way, however, we resort to first-free allocation, starting + * a new cluster. This prevents us from scattering swap pages + * all over the entire swap partition, so that we reduce + * overall disk seek times between swap pages. -- sct + * But we do now try to find an empty cluster. -Andrea + */ + + si->flags += SWP_SCANNING; + if (unlikely(!si->cluster_nr)) { + si->cluster_nr = SWAPFILE_CLUSTER - 1; + if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) + goto lowest; + spin_unlock(&swap_lock); + + offset = si->lowest_bit; + last_in_cluster = offset + SWAPFILE_CLUSTER - 1; + + /* Locate the first empty (unaligned) cluster */ + for (; last_in_cluster <= si->highest_bit; offset++) { if (si->swap_map[offset]) - continue; - si->cluster_nr--; - goto got_page; - } - } - si->cluster_nr = SWAPFILE_CLUSTER; - - /* try to find an empty (even not aligned) cluster. */ - offset = si->lowest_bit; - check_next_cluster: - if (offset+SWAPFILE_CLUSTER-1 <= si->highest_bit) - { - unsigned long nr; - for (nr = offset; nr < offset+SWAPFILE_CLUSTER; nr++) - if (si->swap_map[nr]) - { - offset = nr+1; - goto check_next_cluster; + last_in_cluster = offset + SWAPFILE_CLUSTER; + else if (offset == last_in_cluster) { + spin_lock(&swap_lock); + si->cluster_next = offset-SWAPFILE_CLUSTER-1; + goto cluster; } - /* We found a completly empty cluster, so start - * using it. - */ - goto got_page; + if (unlikely(--latency_ration < 0)) { + cond_resched(); + latency_ration = LATENCY_LIMIT; + } + } + spin_lock(&swap_lock); + goto lowest; } - /* No luck, so now go finegrined as usual. 
-Andrea */ - for (offset = si->lowest_bit; offset <= si->highest_bit ; offset++) { - if (si->swap_map[offset]) - continue; - si->lowest_bit = offset+1; - got_page: + + si->cluster_nr--; +cluster: + offset = si->cluster_next; + if (offset > si->highest_bit) +lowest: offset = si->lowest_bit; +checks: if (!(si->flags & SWP_WRITEOK)) + goto no_page; + if (!si->highest_bit) + goto no_page; + if (!si->swap_map[offset]) { if (offset == si->lowest_bit) si->lowest_bit++; if (offset == si->highest_bit) si->highest_bit--; - if (si->lowest_bit > si->highest_bit) { + si->inuse_pages++; + if (si->inuse_pages == si->pages) { si->lowest_bit = si->max; si->highest_bit = 0; } si->swap_map[offset] = 1; - si->inuse_pages++; - nr_swap_pages--; - si->cluster_next = offset+1; + si->cluster_next = offset + 1; + si->flags -= SWP_SCANNING; return offset; } - si->lowest_bit = si->max; - si->highest_bit = 0; + + spin_unlock(&swap_lock); + while (++offset <= si->highest_bit) { + if (!si->swap_map[offset]) { + spin_lock(&swap_lock); + goto checks; + } + if (unlikely(--latency_ration < 0)) { + cond_resched(); + latency_ration = LATENCY_LIMIT; + } + } + spin_lock(&swap_lock); + goto lowest; + +no_page: + si->flags -= SWP_SCANNING; return 0; } swp_entry_t get_swap_page(void) { - struct swap_info_struct * p; - unsigned long offset; - swp_entry_t entry; - int type, wrapped = 0; + struct swap_info_struct *si; + pgoff_t offset; + int type, next; + int wrapped = 0; - entry.val = 0; /* Out of memory */ - swap_list_lock(); - type = swap_list.next; - if (type < 0) - goto out; + spin_lock(&swap_lock); if (nr_swap_pages <= 0) - goto out; + goto noswap; + nr_swap_pages--; + + for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { + si = swap_info + type; + next = si->next; + if (next < 0 || + (!wrapped && si->prio != swap_info[next].prio)) { + next = swap_list.head; + wrapped++; + } - while (1) { - p = &swap_info[type]; - if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) { - swap_device_lock(p); - offset = scan_swap_map(p); - swap_device_unlock(p); - if (offset) { - entry = swp_entry(type,offset); - type = swap_info[type].next; - if (type < 0 || - p->prio != swap_info[type].prio) { - swap_list.next = swap_list.head; - } else { - swap_list.next = type; - } - goto out; - } + if (!si->highest_bit) + continue; + if (!(si->flags & SWP_WRITEOK)) + continue; + + swap_list.next = next; + offset = scan_swap_map(si); + if (offset) { + spin_unlock(&swap_lock); + return swp_entry(type, offset); } - type = p->next; - if (!wrapped) { - if (type < 0 || p->prio != swap_info[type].prio) { - type = swap_list.head; - wrapped = 1; - } - } else - if (type < 0) - goto out; /* out of swap space */ + next = swap_list.next; } -out: - swap_list_unlock(); - return entry; + + nr_swap_pages++; +noswap: + spin_unlock(&swap_lock); + return (swp_entry_t) {0}; } static struct swap_info_struct * swap_info_get(swp_entry_t entry) @@ -214,10 +231,7 @@ static struct swap_info_struct * swap_info_get(swp_entry_t entry) goto bad_offset; if (!p->swap_map[offset]) goto bad_free; - swap_list_lock(); - if (p->prio > swap_info[swap_list.next].prio) - swap_list.next = type; - swap_device_lock(p); + spin_lock(&swap_lock); return p; bad_free: @@ -235,12 +249,6 @@ out: return NULL; } -static void swap_info_put(struct swap_info_struct * p) -{ - swap_device_unlock(p); - swap_list_unlock(); -} - static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) { int count = p->swap_map[offset]; @@ -253,6 +261,8 @@ static int swap_entry_free(struct swap_info_struct 
*p, unsigned long offset) p->lowest_bit = offset; if (offset > p->highest_bit) p->highest_bit = offset; + if (p->prio > swap_info[swap_list.next].prio) + swap_list.next = p - swap_info; nr_swap_pages++; p->inuse_pages--; } @@ -271,7 +281,7 @@ void swap_free(swp_entry_t entry) p = swap_info_get(entry); if (p) { swap_entry_free(p, swp_offset(entry)); - swap_info_put(p); + spin_unlock(&swap_lock); } } @@ -289,7 +299,7 @@ static inline int page_swapcount(struct page *page) if (p) { /* Subtract the 1 for the swap cache itself */ count = p->swap_map[swp_offset(entry)] - 1; - swap_info_put(p); + spin_unlock(&swap_lock); } return count; } @@ -346,7 +356,7 @@ int remove_exclusive_swap_page(struct page *page) } write_unlock_irq(&swapper_space.tree_lock); } - swap_info_put(p); + spin_unlock(&swap_lock); if (retval) { swap_free(entry); @@ -369,7 +379,7 @@ void free_swap_and_cache(swp_entry_t entry) if (p) { if (swap_entry_free(p, swp_offset(entry)) == 1) page = find_trylock_page(&swapper_space, entry.val); - swap_info_put(p); + spin_unlock(&swap_lock); } if (page) { int one_user; @@ -531,17 +541,18 @@ static int unuse_mm(struct mm_struct *mm, * Scan swap_map from current position to next entry still in use. * Recycle to start on reaching the end, returning 0 when empty. */ -static int find_next_to_unuse(struct swap_info_struct *si, int prev) +static unsigned int find_next_to_unuse(struct swap_info_struct *si, + unsigned int prev) { - int max = si->max; - int i = prev; + unsigned int max = si->max; + unsigned int i = prev; int count; /* - * No need for swap_device_lock(si) here: we're just looking + * No need for swap_lock here: we're just looking * for whether an entry is in use, not modifying it; false * hits are okay, and sys_swapoff() has already prevented new - * allocations from this area (while holding swap_list_lock()). + * allocations from this area (while holding swap_lock). */ for (;;) { if (++i >= max) { @@ -577,7 +588,7 @@ static int try_to_unuse(unsigned int type) unsigned short swcount; struct page *page; swp_entry_t entry; - int i = 0; + unsigned int i = 0; int retval = 0; int reset_overflow = 0; int shmem; @@ -731,9 +742,9 @@ static int try_to_unuse(unsigned int type) * report them; but do report if we reset SWAP_MAP_MAX. */ if (*swap_map == SWAP_MAP_MAX) { - swap_device_lock(si); + spin_lock(&swap_lock); *swap_map = 1; - swap_device_unlock(si); + spin_unlock(&swap_lock); reset_overflow = 1; } @@ -797,9 +808,9 @@ static int try_to_unuse(unsigned int type) } /* - * After a successful try_to_unuse, if no swap is now in use, we know we - * can empty the mmlist. swap_list_lock must be held on entry and exit. - * Note that mmlist_lock nests inside swap_list_lock, and an mm must be + * After a successful try_to_unuse, if no swap is now in use, we know + * we can empty the mmlist. swap_lock must be held on entry and exit. + * Note that mmlist_lock nests inside swap_lock, and an mm must be * added to the mmlist just after page_duplicate - before would be racy. 
*/ static void drain_mmlist(void) @@ -832,9 +843,9 @@ sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) offset < (se->start_page + se->nr_pages)) { return se->start_block + (offset - se->start_page); } - lh = se->list.prev; + lh = se->list.next; if (lh == &sis->extent_list) - lh = lh->prev; + lh = lh->next; se = list_entry(lh, struct swap_extent, list); sis->curr_swap_extent = se; BUG_ON(se == start_se); /* It *must* be present */ @@ -854,15 +865,13 @@ static void destroy_swap_extents(struct swap_info_struct *sis) list_del(&se->list); kfree(se); } - sis->nr_extents = 0; } /* * Add a block range (and the corresponding page range) into this swapdev's - * extent list. The extent list is kept sorted in block order. + * extent list. The extent list is kept sorted in page order. * - * This function rather assumes that it is called in ascending sector_t order. - * It doesn't look for extent coalescing opportunities. + * This function rather assumes that it is called in ascending page order. */ static int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, @@ -872,16 +881,15 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, struct swap_extent *new_se; struct list_head *lh; - lh = sis->extent_list.next; /* The highest-addressed block */ - while (lh != &sis->extent_list) { + lh = sis->extent_list.prev; /* The highest page extent */ + if (lh != &sis->extent_list) { se = list_entry(lh, struct swap_extent, list); - if (se->start_block + se->nr_pages == start_block && - se->start_page + se->nr_pages == start_page) { + BUG_ON(se->start_page + se->nr_pages != start_page); + if (se->start_block + se->nr_pages == start_block) { /* Merge it */ se->nr_pages += nr_pages; return 0; } - lh = lh->next; } /* @@ -894,16 +902,8 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, new_se->nr_pages = nr_pages; new_se->start_block = start_block; - lh = sis->extent_list.prev; /* The lowest block */ - while (lh != &sis->extent_list) { - se = list_entry(lh, struct swap_extent, list); - if (se->start_block > start_block) - break; - lh = lh->prev; - } - list_add_tail(&new_se->list, lh); - sis->nr_extents++; - return 0; + list_add_tail(&new_se->list, &sis->extent_list); + return 1; } /* @@ -926,7 +926,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, * requirements, they are simply tossed out - we will never use those blocks * for swapping. * - * For S_ISREG swapfiles we hold i_sem across the life of the swapon. This + * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This * prevents root from shooting her foot off by ftruncating an in-use swapfile, * which will scribble on the fs. * @@ -937,7 +937,7 @@ add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, * This is extremely effective. The average number of iterations in * map_swap_page() has been measured at about 0.3 per page. - akpm. 
 */
-static int setup_swap_extents(struct swap_info_struct *sis)
+static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
 {
 	struct inode *inode;
 	unsigned blocks_per_page;
@@ -945,11 +945,15 @@ static int setup_swap_extents(struct swap_info_struct *sis)
 	unsigned blkbits;
 	sector_t probe_block;
 	sector_t last_block;
+	sector_t lowest_block = -1;
+	sector_t highest_block = 0;
+	int nr_extents = 0;
 	int ret;
 
 	inode = sis->swap_file->f_mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
 		ret = add_swap_extent(sis, 0, sis->max, 0);
+		*span = sis->pages;
 		goto done;
 	}
 
@@ -994,22 +998,32 @@ static int setup_swap_extents(struct swap_info_struct *sis)
 			}
 		}
 
+		first_block >>= (PAGE_SHIFT - blkbits);
+		if (page_no) {	/* exclude the header page */
+			if (first_block < lowest_block)
+				lowest_block = first_block;
+			if (first_block > highest_block)
+				highest_block = first_block;
+		}
+
 		/*
 		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
 		 */
-		ret = add_swap_extent(sis, page_no, 1,
-				first_block >> (PAGE_SHIFT - blkbits));
-		if (ret)
+		ret = add_swap_extent(sis, page_no, 1, first_block);
+		if (ret < 0)
 			goto out;
+		nr_extents += ret;
 		page_no++;
 		probe_block += blocks_per_page;
 reprobe:
 		continue;
 	}
-	ret = 0;
+	ret = nr_extents;
+	*span = 1 + highest_block - lowest_block;
 	if (page_no == 0)
-		ret = -EINVAL;
+		page_no = 1;	/* force Empty message */
 	sis->max = page_no;
+	sis->pages = page_no - 1;
 	sis->highest_bit = page_no - 1;
done:
 	sis->curr_swap_extent = list_entry(sis->extent_list.prev,
@@ -1069,7 +1083,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 
 	mapping = victim->f_mapping;
 	prev = -1;
-	swap_list_lock();
+	spin_lock(&swap_lock);
 	for (type = swap_list.head; type >= 0; type = swap_info[type].next) {
 		p = swap_info + type;
 		if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) {
@@ -1080,14 +1094,14 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	}
 	if (type < 0) {
 		err = -EINVAL;
-		swap_list_unlock();
+		spin_unlock(&swap_lock);
 		goto out_dput;
 	}
 	if (!security_vm_enough_memory(p->pages))
 		vm_unacct_memory(p->pages);
 	else {
 		err = -ENOMEM;
-		swap_list_unlock();
+		spin_unlock(&swap_lock);
 		goto out_dput;
 	}
 	if (prev < 0) {
@@ -1102,18 +1116,15 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	nr_swap_pages -= p->pages;
 	total_swap_pages -= p->pages;
 	p->flags &= ~SWP_WRITEOK;
-	swap_list_unlock();
+	spin_unlock(&swap_lock);
+
 	current->flags |= PF_SWAPOFF;
 	err = try_to_unuse(type);
 	current->flags &= ~PF_SWAPOFF;
 
-	/* wait for any unplug function to finish */
-	down_write(&swap_unplug_sem);
-	up_write(&swap_unplug_sem);
-
 	if (err) {
 		/* re-insert swap space back into swap_list */
-		swap_list_lock();
+		spin_lock(&swap_lock);
 		for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next)
 			if (p->prio >= swap_info[i].prio)
 				break;
@@ -1125,22 +1136,35 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 		nr_swap_pages += p->pages;
 		total_swap_pages += p->pages;
 		p->flags |= SWP_WRITEOK;
-		swap_list_unlock();
+		spin_unlock(&swap_lock);
 		goto out_dput;
 	}
+
+	/* wait for any unplug function to finish */
+	down_write(&swap_unplug_sem);
+	up_write(&swap_unplug_sem);
+
+	destroy_swap_extents(p);
 	down(&swapon_sem);
-	swap_list_lock();
+	spin_lock(&swap_lock);
 	drain_mmlist();
-	swap_device_lock(p);
+
+	/* wait for anyone still in scan_swap_map */
+	p->highest_bit = 0;		/* cuts scans short */
+	while (p->flags >= SWP_SCANNING) {
+		spin_unlock(&swap_lock);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+		spin_lock(&swap_lock);
+	}
+
 	swap_file = p->swap_file;
 	p->swap_file = NULL;
 	p->max = 0;
 	swap_map = p->swap_map;
 	p->swap_map = NULL;
 	p->flags = 0;
-	destroy_swap_extents(p);
-	swap_device_unlock(p);
-	swap_list_unlock();
+	spin_unlock(&swap_lock);
 	up(&swapon_sem);
 	vfree(swap_map);
 	inode = mapping->host;
@@ -1213,7 +1237,7 @@ static int swap_show(struct seq_file *swap, void *v)
 
 	file = ptr->swap_file;
 	len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\");
-	seq_printf(swap, "%*s%s\t%d\t%ld\t%d\n",
+	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
 		       len < 40 ? 40 - len : 1, " ",
 		       S_ISBLK(file->f_dentry->d_inode->i_mode) ?
 				"partition" : "file\t",
@@ -1272,7 +1296,9 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 	static int least_priority;
 	union swap_header *swap_header = NULL;
 	int swap_header_version;
-	int nr_good_pages = 0;
+	unsigned int nr_good_pages = 0;
+	int nr_extents = 0;
+	sector_t span;
 	unsigned long maxpages = 1;
 	int swapfilesize;
 	unsigned short *swap_map;
@@ -1282,7 +1308,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
-	swap_list_lock();
+	spin_lock(&swap_lock);
 	p = swap_info;
 	for (type = 0 ; type < nr_swapfiles ; type++,p++)
 		if (!(p->flags & SWP_USED))
@@ -1301,14 +1327,13 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 	 * swp_entry_t or the architecture definition of a swap pte.
 	 */
 	if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) {
-		swap_list_unlock();
+		spin_unlock(&swap_lock);
 		goto out;
 	}
 	if (type >= nr_swapfiles)
 		nr_swapfiles = type+1;
 	INIT_LIST_HEAD(&p->extent_list);
 	p->flags = SWP_USED;
-	p->nr_extents = 0;
 	p->swap_file = NULL;
 	p->old_block_size = 0;
 	p->swap_map = NULL;
@@ -1316,7 +1341,6 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 	p->highest_bit = 0;
 	p->cluster_nr = 0;
 	p->inuse_pages = 0;
-	spin_lock_init(&p->sdev_lock);
 	p->next = -1;
 	if (swap_flags & SWAP_FLAG_PREFER) {
 		p->prio =
@@ -1324,7 +1348,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 	} else {
 		p->prio = --least_priority;
 	}
-	swap_list_unlock();
+	spin_unlock(&swap_lock);
 	name = getname(specialfile);
 	error = PTR_ERR(name);
 	if (IS_ERR(name)) {
@@ -1426,6 +1450,8 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		}
 
 		p->lowest_bit = 1;
+		p->cluster_next = 1;
+
 		/*
 		 * Find out how many pages are allowed for a single swap
 		 * device. There are two limiting factors: 1) the number of
@@ -1446,6 +1472,10 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		p->highest_bit = maxpages - 1;
 
 		error = -EINVAL;
+		if (!maxpages)
+			goto bad_swap;
+		if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
+			goto bad_swap;
 		if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
 			goto bad_swap;
 
@@ -1470,35 +1500,40 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		if (error)
 			goto bad_swap;
 	}
-	
+
 	if (swapfilesize && maxpages > swapfilesize) {
 		printk(KERN_WARNING
 		       "Swap area shorter than signature indicates\n");
 		error = -EINVAL;
 		goto bad_swap;
 	}
+	if (nr_good_pages) {
+		p->swap_map[0] = SWAP_MAP_BAD;
+		p->max = maxpages;
+		p->pages = nr_good_pages;
+		nr_extents = setup_swap_extents(p, &span);
+		if (nr_extents < 0) {
+			error = nr_extents;
+			goto bad_swap;
+		}
+		nr_good_pages = p->pages;
+	}
 	if (!nr_good_pages) {
 		printk(KERN_WARNING "Empty swap-file\n");
 		error = -EINVAL;
 		goto bad_swap;
 	}
-	p->swap_map[0] = SWAP_MAP_BAD;
-	p->max = maxpages;
-	p->pages = nr_good_pages;
-
-	error = setup_swap_extents(p);
-	if (error)
-		goto bad_swap;
 
 	down(&swapon_sem);
-	swap_list_lock();
-	swap_device_lock(p);
+	spin_lock(&swap_lock);
 	p->flags = SWP_ACTIVE;
 	nr_swap_pages += nr_good_pages;
 	total_swap_pages += nr_good_pages;
-	printk(KERN_INFO "Adding %dk swap on %s. Priority:%d extents:%d\n",
-		nr_good_pages<<(PAGE_SHIFT-10), name,
-		p->prio, p->nr_extents);
+
+	printk(KERN_INFO "Adding %uk swap on %s. "
			"Priority:%d extents:%d across:%lluk\n",
+		nr_good_pages<<(PAGE_SHIFT-10), name, p->prio,
+		nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10));
 
 	/* insert swap space into swap_list: */
 	prev = -1;
@@ -1514,8 +1549,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 	} else {
 		swap_info[prev].next = p - swap_info;
 	}
-	swap_device_unlock(p);
-	swap_list_unlock();
+	spin_unlock(&swap_lock);
 	up(&swapon_sem);
 	error = 0;
 	goto out;
@@ -1524,16 +1558,16 @@ bad_swap:
 		set_blocksize(bdev, p->old_block_size);
 		bd_release(bdev);
 	}
+	destroy_swap_extents(p);
bad_swap_2:
-	swap_list_lock();
+	spin_lock(&swap_lock);
 	swap_map = p->swap_map;
 	p->swap_file = NULL;
 	p->swap_map = NULL;
 	p->flags = 0;
 	if (!(swap_flags & SWAP_FLAG_PREFER))
 		++least_priority;
-	swap_list_unlock();
-	destroy_swap_extents(p);
+	spin_unlock(&swap_lock);
 	vfree(swap_map);
 	if (swap_file)
 		filp_close(swap_file, NULL);
@@ -1557,7 +1591,7 @@ void si_swapinfo(struct sysinfo *val)
 	unsigned int i;
 	unsigned long nr_to_be_unused = 0;
 
-	swap_list_lock();
+	spin_lock(&swap_lock);
 	for (i = 0; i < nr_swapfiles; i++) {
 		if (!(swap_info[i].flags & SWP_USED) ||
 		     (swap_info[i].flags & SWP_WRITEOK))
@@ -1566,7 +1600,7 @@ void si_swapinfo(struct sysinfo *val)
 	}
 	val->freeswap = nr_swap_pages + nr_to_be_unused;
 	val->totalswap = total_swap_pages + nr_to_be_unused;
-	swap_list_unlock();
+	spin_unlock(&swap_lock);
 }
 
 /*
@@ -1587,7 +1621,7 @@ int swap_duplicate(swp_entry_t entry)
 
 	p = type + swap_info;
 	offset = swp_offset(entry);
-	swap_device_lock(p);
+	spin_lock(&swap_lock);
 	if (offset < p->max && p->swap_map[offset]) {
 		if (p->swap_map[offset] < SWAP_MAP_MAX - 1) {
 			p->swap_map[offset]++;
@@ -1599,7 +1633,7 @@ int swap_duplicate(swp_entry_t entry)
 			result = 1;
 		}
 	}
-	swap_device_unlock(p);
+	spin_unlock(&swap_lock);
out:
 	return result;
 
@@ -1615,7 +1649,7 @@ get_swap_info_struct(unsigned type)
 }
 
 /*
- * swap_device_lock prevents swap_map being freed. Don't grab an extra
+ * swap_lock prevents swap_map being freed. Don't grab an extra
  * reference on the swaphandle, it doesn't matter if it becomes unused.
  */
 int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
@@ -1631,7 +1665,7 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
 		toff++, i--;
 	*offset = toff;
 
-	swap_device_lock(swapdev);
+	spin_lock(&swap_lock);
 	do {
 		/* Don't read-ahead past the end of the swap area */
 		if (toff >= swapdev->max)
@@ -1644,6 +1678,6 @@ int valid_swaphandles(swp_entry_t entry, unsigned long *offset)
 		toff++;
 		ret++;
 	} while (--i);
-	swap_device_unlock(swapdev);
+	spin_unlock(&swap_lock);
 	return ret;
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8ff16a1eee6a..67b358e57ef6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -158,8 +158,6 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	return err;
 }
 
-#define IOREMAP_MAX_ORDER (7 + PAGE_SHIFT) /* 128 pages */
-
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cfffe5098d53..0095533cdde9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -822,6 +822,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	unsigned long nr_active;
 	unsigned long nr_inactive;
 
+	atomic_inc(&zone->reclaim_in_progress);
+
 	/*
 	 * Add one to `nr_to_scan' just to make sure that the kernel will
 	 * slowly sift through the active list.
@@ -861,6 +863,8 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	}
 
 	throttle_vm_writeout();
+
+	atomic_dec(&zone->reclaim_in_progress);
 }
 
 /*
@@ -900,9 +904,7 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
 		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
-		atomic_inc(&zone->reclaim_in_progress);
 		shrink_zone(zone, sc);
-		atomic_dec(&zone->reclaim_in_progress);
 	}
 }
 
@@ -1358,14 +1360,13 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
 	sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
 	/* Don't reclaim the zone if there are other reclaimers active */
-	if (!atomic_inc_and_test(&zone->reclaim_in_progress))
+	if (atomic_read(&zone->reclaim_in_progress) > 0)
 		goto out;
 
 	shrink_zone(zone, &sc);
 	total_reclaimed = sc.nr_reclaimed;
 
out:
-	atomic_dec(&zone->reclaim_in_progress);
 	return total_reclaimed;
 }
 
@@ -1375,6 +1376,9 @@ asmlinkage long sys_set_zone_reclaim(unsigned int node, unsigned int zone,
 	struct zone *z;
 	int i;
 
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	if (node >= MAX_NUMNODES || !node_online(node))
 		return -EINVAL;
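
The sys_swapon() hunks above parse a priority that userspace passes in the swap_flags argument of swapon(2) (the SWAP_FLAG_PREFER branch). A minimal userspace sketch of that call path follows; it is illustrative only and not part of the patch. The device path is a placeholder, SWAP_FLAG_* and swapon() are the standard <sys/swap.h> definitions, and the call needs CAP_SYS_ADMIN (the capable() check appears as context in the sys_swapon hunk). With this series applied, a successful swapon logs the reworked "Adding %uk swap on %s. Priority:%d extents:%d across:%lluk" message shown in the printk hunk.

/*
 * Illustrative userspace sketch -- not part of the patch above.
 * Packs a swap priority into swap_flags the way sys_swapon() expects
 * (SWAP_FLAG_PREFER plus the shifted, masked priority).
 * "/dev/sdb2" is a placeholder device; run as root (CAP_SYS_ADMIN).
 */
#include <stdio.h>
#include <sys/swap.h>

int main(void)
{
	int prio = 5;
	int flags = SWAP_FLAG_PREFER |
		    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);

	if (swapon("/dev/sdb2", flags) != 0) {
		perror("swapon");
		return 1;
	}
	/* On success the kernel prints the "Adding ...k swap on ..." line
	 * whose format the printk hunk above changes to %u/%llu. */
	return 0;
}

The chosen priority is then visible in /proc/swaps through the swap_show() hunk, which this series also switches to unsigned formats.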
