Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c        | 14
-rw-r--r--  mm/memory_hotplug.c |  6
-rw-r--r--  mm/mempolicy.c      |  6
-rw-r--r--  mm/migrate.c        | 11
-rw-r--r--  mm/mmap.c           |  7
-rw-r--r--  mm/shmem.c          | 12
-rw-r--r--  mm/vmalloc.c        |  2
7 files changed, 43 insertions, 15 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f1a45f5077fe..324b2953e57e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3472,7 +3472,6 @@ retry_avoidcopy:
         copy_user_huge_page(new_page, old_page, address, vma,
                             pages_per_huge_page(h));
         __SetPageUptodate(new_page);
-        set_page_huge_active(new_page);
 
         mmun_start = address & huge_page_mask(h);
         mmun_end = mmun_start + huge_page_size(h);
@@ -3494,6 +3493,7 @@ retry_avoidcopy:
                                 make_huge_pte(vma, new_page, 1));
                 page_remove_rmap(old_page);
                 hugepage_add_new_anon_rmap(new_page, vma, address);
+                set_page_huge_active(new_page);
                 /* Make the old page be freed below */
                 new_page = old_page;
         }
@@ -3575,6 +3575,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
         struct page *page;
         pte_t new_pte;
         spinlock_t *ptl;
+        bool new_page = false;
 
         /*
          * Currently, we are forced to kill the process in the event the
@@ -3608,7 +3609,7 @@ retry:
                 }
                 clear_huge_page(page, address, pages_per_huge_page(h));
                 __SetPageUptodate(page);
-                set_page_huge_active(page);
+                new_page = true;
 
                 if (vma->vm_flags & VM_MAYSHARE) {
                         int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3680,6 +3681,15 @@ retry:
         }
 
         spin_unlock(ptl);
+
+        /*
+         * Only make newly allocated pages active. Existing pages found
+         * in the pagecache could be !page_huge_active() if they have been
+         * isolated for migration.
+         */
+        if (new_page)
+                set_page_huge_active(page);
+
         unlock_page(page);
 out:
         return ret;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 77712cdcd23a..f95503b39768 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1364,7 +1364,8 @@ static struct page *next_active_pageblock(struct page *page)
 int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
         struct page *page = pfn_to_page(start_pfn);
-        struct page *end_page = page + nr_pages;
+        unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
+        struct page *end_page = pfn_to_page(end_pfn);
 
         /* Check the starting page of each pageblock within the range */
         for (; page < end_page; page = next_active_pageblock(page)) {
@@ -1404,6 +1405,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
                         i++;
                 if (i == MAX_ORDER_NR_PAGES)
                         continue;
+                /* Check if we got outside of the zone */
+                if (zone && !zone_spans_pfn(zone, pfn + i))
+                        return 0;
                 page = pfn_to_page(pfn + i);
                 if (zone && page_zone(page) != zone)
                         return 0;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 63c19c24dfe1..83d5dfc3bf89 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1296,7 +1296,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
                               nodemask_t *nodes)
 {
         unsigned long copy = ALIGN(maxnode-1, 64) / 8;
-        const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+        unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
 
         if (copy > nbytes) {
                 if (copy > PAGE_SIZE)
@@ -1457,7 +1457,7 @@ SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
         int uninitialized_var(pval);
         nodemask_t nodes;
 
-        if (nmask != NULL && maxnode < MAX_NUMNODES)
+        if (nmask != NULL && maxnode < nr_node_ids)
                 return -EINVAL;
 
         err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1486,7 +1486,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
         unsigned long nr_bits, alloc_size;
         DECLARE_BITMAP(bm, MAX_NUMNODES);
 
-        nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+        nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
         alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
         if (nmask)
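For context, a minimal userspace sketch (not part of the patches above) of how get_mempolicy(2) is invoked; the maxnode argument sizes the nodemask the kernel validates, which the mempolicy hunks now check against nr_node_ids rather than MAX_NUMNODES. The buffer size and use of the raw syscall here are illustrative assumptions, not requirements.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        int mode = -1;
        unsigned long mask[16];                   /* room for 1024 node bits */
        unsigned long maxnode = sizeof(mask) * 8; /* maxnode is given in bits */

        memset(mask, 0, sizeof(mask));
        /* addr == NULL, flags == 0: query the calling thread's default policy. */
        if (syscall(SYS_get_mempolicy, &mode, mask, maxnode, NULL, 0UL) != 0) {
                perror("get_mempolicy");
                return 1;
        }
        printf("policy mode %d, first nodemask word %#lx\n", mode, mask[0]);
        return 0;
}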
diff --git a/mm/migrate.c b/mm/migrate.c
index 879aabed46bb..d8d8c2a3f530 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1216,6 +1216,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
                 lock_page(hpage);
         }
 
+        /*
+         * Check for pages which are in the process of being freed. Without
+         * page_mapping() set, hugetlbfs specific move page routine will not
+         * be called and we could leak usage counts for subpools.
+         */
+        if (page_private(hpage) && !page_mapping(hpage)) {
+                rc = -EBUSY;
+                goto out_unlock;
+        }
+
         if (PageAnon(hpage))
                 anon_vma = page_get_anon_vma(hpage);
 
@@ -1247,6 +1257,7 @@ put_anon:
                 set_page_owner_migrate_reason(new_hpage, reason);
         }
 
+out_unlock:
         unlock_page(hpage);
 out:
         if (rc != -EAGAIN)
diff --git a/mm/mmap.c b/mm/mmap.c
index 0617c12aebe4..ae4f0fc2f01b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2331,12 +2331,11 @@ int expand_downwards(struct vm_area_struct *vma,
         struct mm_struct *mm = vma->vm_mm;
         struct vm_area_struct *prev;
         unsigned long gap_addr;
-        int error;
+        int error = 0;
 
         address &= PAGE_MASK;
-        error = security_mmap_addr(address);
-        if (error)
-                return error;
+        if (address < mmap_min_addr)
+                return -EPERM;
 
         /* Enforce stack_guard_gap */
         gap_addr = address - stack_guard_gap;
diff --git a/mm/shmem.c b/mm/shmem.c
index 6397dc2e15bc..f64a55543f3f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2293,16 +2293,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
 {
         struct inode *inode = d_inode(old_dentry);
-        int ret;
+        int ret = 0;
 
         /*
          * No ordinary (disk based) filesystem counts links as inodes;
          * but each new link needs a new dentry, pinning lowmem, and
          * tmpfs dentries cannot be pruned until they are unlinked.
+         * But if an O_TMPFILE file is linked into the tmpfs, the
+         * first link must skip that, to get the accounting right.
          */
-        ret = shmem_reserve_inode(inode->i_sb);
-        if (ret)
-                goto out;
+        if (inode->i_nlink) {
+                ret = shmem_reserve_inode(inode->i_sb);
+                if (ret)
+                        goto out;
+        }
 
         dir->i_size += BOGO_DIRENT_SIZE;
         inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0db25c1bf4ee..6147b4a2ba5c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2268,7 +2268,7 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
         if (!(area->flags & VM_USERMAP))
                 return -EINVAL;
 
-        if (kaddr + size > area->addr + area->size)
+        if (kaddr + size > area->addr + get_vm_area_size(area))
                 return -EINVAL;
 
         do {
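For context, a minimal userspace sketch (not part of the patches above) of the O_TMPFILE-then-linkat() sequence that reaches shmem_link() with inode->i_nlink == 0, which is the case the tmpfs link-accounting change handles. The /dev/shm mount point and the demo file name are assumptions for illustration.

#define _GNU_SOURCE
#include <fcntl.h>
#include <limits.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char procpath[PATH_MAX];

        /* Unnamed tmpfs file: its inode starts with i_nlink == 0. */
        int fd = open("/dev/shm", O_TMPFILE | O_RDWR, 0600);
        if (fd < 0) {
                perror("open(O_TMPFILE)");
                return 1;
        }

        /* Give the anonymous file its first name; this link must not
         * reserve a second tmpfs inode. */
        snprintf(procpath, sizeof(procpath), "/proc/self/fd/%d", fd);
        if (linkat(AT_FDCWD, procpath, AT_FDCWD, "/dev/shm/tmpfile-demo",
                   AT_SYMLINK_FOLLOW) != 0) {
                perror("linkat");
                close(fd);
                return 1;
        }

        puts("linked anonymous tmpfs file as /dev/shm/tmpfile-demo");
        unlink("/dev/shm/tmpfile-demo");
        close(fd);
        return 0;
}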
