Diffstat (limited to 'mm')
| -rw-r--r-- | mm/Kconfig | 1 |
| -rw-r--r-- | mm/backing-dev.c | 4 |
| -rw-r--r-- | mm/compaction.c | 2 |
| -rw-r--r-- | mm/early_ioremap.c | 2 |
| -rw-r--r-- | mm/filemap.c | 106 |
| -rw-r--r-- | mm/memory.c | 19 |
| -rw-r--r-- | mm/memory_hotplug.c | 30 |
| -rw-r--r-- | mm/migrate.c | 6 |
| -rw-r--r-- | mm/mmap.c | 7 |
| -rw-r--r-- | mm/nommu.c | 7 |
| -rw-r--r-- | mm/page-writeback.c | 18 |
| -rw-r--r-- | mm/page_alloc.c | 8 |
| -rw-r--r-- | mm/percpu.c | 1 |
| -rw-r--r-- | mm/slab.c | 3 |
| -rw-r--r-- | mm/util.c | 40 |
| -rw-r--r-- | mm/vmscan.c | 15 |
| -rw-r--r-- | mm/vmstat.c | 1 |
17 files changed, 191 insertions, 79 deletions
diff --git a/mm/Kconfig b/mm/Kconfig index 7077376523ed..dcca76e498df 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -666,6 +666,7 @@ config DEFERRED_STRUCT_PAGE_INIT default n depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT depends on MEMORY_HOTPLUG + depends on !NEED_PER_CPU_KM help Ordinarily all struct pages are initialised during early boot in a single thread. On very large machines this can take a considerable diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 4c480a20d76c..3081f1234d4e 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -954,7 +954,7 @@ static atomic_t nr_wb_congested[2]; void clear_wb_congested(struct bdi_writeback_congested *congested, int sync) { wait_queue_head_t *wqh = &congestion_wqh[sync]; - enum wb_state bit; + enum wb_congested_state bit; bit = sync ? WB_sync_congested : WB_async_congested; if (test_and_clear_bit(bit, &congested->state)) @@ -967,7 +967,7 @@ EXPORT_SYMBOL(clear_wb_congested); void set_wb_congested(struct bdi_writeback_congested *congested, int sync) { - enum wb_state bit; + enum wb_congested_state bit; bit = sync ? WB_sync_congested : WB_async_congested; if (!test_and_set_bit(bit, &congested->state)) diff --git a/mm/compaction.c b/mm/compaction.c index faee4807b1d2..86687ec1d034 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -853,7 +853,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, locked = false; } - if (isolate_movable_page(page, isolate_mode)) + if (!isolate_movable_page(page, isolate_mode)) goto isolate_success; } diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index 6d5717bd7197..57540de2b44c 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -103,7 +103,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) enum fixed_addresses idx; int i, slot; - WARN_ON(system_state != SYSTEM_BOOTING); + WARN_ON(system_state >= SYSTEM_RUNNING); slot = -1; for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { diff --git a/mm/filemap.c b/mm/filemap.c index 8b2cf0f6a529..f3d6d89cfd61 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -573,7 +573,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) VM_BUG_ON_PAGE(!PageLocked(new), new); VM_BUG_ON_PAGE(new->mapping, new); - error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); + error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK); if (!error) { struct address_space *mapping = old->mapping; void (*freepage)(struct page *); @@ -632,7 +632,7 @@ static int __add_to_page_cache_locked(struct page *page, return error; } - error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); + error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); if (error) { if (!huge) mem_cgroup_cancel_charge(page, memcg); @@ -1194,8 +1194,7 @@ no_page: if (fgp_flags & FGP_ACCESSED) __SetPageReferenced(page); - err = add_to_page_cache_lru(page, mapping, offset, - gfp_mask & GFP_RECLAIM_MASK); + err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); if (unlikely(err)) { page_cache_release(page); page = NULL; @@ -1584,6 +1583,15 @@ find_page: index, last_index - index); } if (!PageUptodate(page)) { + /* + * See comment in do_read_cache_page on why + * wait_on_page_locked is used to avoid unnecessarily + * serialisations and why it's safe. 
+ */ + wait_on_page_locked_killable(page); + if (PageUptodate(page)) + goto page_ok; + if (inode->i_blkbits == PAGE_CACHE_SHIFT || !mapping->a_ops->is_partially_uptodate) goto page_not_up_to_date; @@ -1829,19 +1837,18 @@ EXPORT_SYMBOL(generic_file_read_iter); * This adds the requested page to the page cache if it isn't already there, * and schedules an I/O to read in its contents from disk. */ -static int page_cache_read(struct file *file, pgoff_t offset) +static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) { struct address_space *mapping = file->f_mapping; struct page *page; int ret; do { - page = page_cache_alloc_cold(mapping); + page = __page_cache_alloc(gfp_mask|__GFP_COLD); if (!page) return -ENOMEM; - ret = add_to_page_cache_lru(page, mapping, offset, - mapping_gfp_constraint(mapping, GFP_KERNEL)); + ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); if (ret == 0) ret = mapping->a_ops->readpage(file, page); else if (ret == -EEXIST) @@ -2022,7 +2029,7 @@ no_cached_page: * We're only likely to ever get here if MADV_RANDOM is in * effect. */ - error = page_cache_read(file, offset); + error = page_cache_read(file, offset, vmf->gfp_mask); /* * The page we want has now been added to the page cache. @@ -2219,7 +2226,7 @@ static struct page *wait_on_page_read(struct page *page) return page; } -static struct page *__read_cache_page(struct address_space *mapping, +static struct page *do_read_cache_page(struct address_space *mapping, pgoff_t index, int (*filler)(void *, struct page *), void *data, @@ -2241,53 +2248,74 @@ repeat: /* Presumably ENOMEM for radix tree node */ return ERR_PTR(err); } + +filler: err = filler(data, page); if (err < 0) { page_cache_release(page); - page = ERR_PTR(err); - } else { - page = wait_on_page_read(page); + return ERR_PTR(err); } - } - return page; -} -static struct page *do_read_cache_page(struct address_space *mapping, - pgoff_t index, - int (*filler)(void *, struct page *), - void *data, - gfp_t gfp) - -{ - struct page *page; - int err; + page = wait_on_page_read(page); + if (IS_ERR(page)) + return page; + goto out; + } + if (PageUptodate(page)) + goto out; -retry: - page = __read_cache_page(mapping, index, filler, data, gfp); - if (IS_ERR(page)) - return page; + /* + * Page is not up to date and may be locked due one of the following + * case a: Page is being filled and the page lock is held + * case b: Read/write error clearing the page uptodate status + * case c: Truncation in progress (page locked) + * case d: Reclaim in progress + * + * Case a, the page will be up to date when the page is unlocked. + * There is no need to serialise on the page lock here as the page + * is pinned so the lock gives no additional protection. Even if the + * the page is truncated, the data is still valid if PageUptodate as + * it's a race vs truncate race. + * Case b, the page will not be up to date + * Case c, the page may be truncated but in itself, the data may still + * be valid after IO completes as it's a read vs truncate race. The + * operation must restart if the page is not uptodate on unlock but + * otherwise serialising on page lock to stabilise the mapping gives + * no additional guarantees to the caller as the page lock is + * released before return. + * Case d, similar to truncation. If reclaim holds the page lock, it + * will be a race with remove_mapping that determines if the mapping + * is valid on unlock but otherwise the data is valid and there is + * no need to serialise with page lock. 
+ * + * As the page lock gives no additional guarantee, we optimistically + * wait on the page to be unlocked and check if it's up to date and + * use the page if it is. Otherwise, the page lock is required to + * distinguish between the different cases. The motivation is that we + * avoid spurious serialisations and wakeups when multiple processes + * wait on the same page for IO to complete. + */ + wait_on_page_locked(page); if (PageUptodate(page)) goto out; + /* Distinguish between all the cases under the safety of the lock */ lock_page(page); + + /* Case c or d, restart the operation */ if (!page->mapping) { unlock_page(page); page_cache_release(page); - goto retry; + goto repeat; } + + /* Someone else locked and filled the page in a very small window */ if (PageUptodate(page)) { unlock_page(page); goto out; } - err = filler(data, page); - if (err < 0) { - page_cache_release(page); - return ERR_PTR(err); - } else { - page = wait_on_page_read(page); - if (IS_ERR(page)) - return page; - } + goto filler; + out: mark_page_accessed(page); return page; diff --git a/mm/memory.c b/mm/memory.c index d6e10c888541..78ab57141731 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -72,7 +72,7 @@ #include "internal.h" -#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS +#if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST) #warning Unfortunate NUMA and NUMA Balancing config, growing page-frame for last_cpupid. #endif @@ -1990,6 +1990,20 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo copy_user_highpage(dst, src, va, vma); } +static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) +{ + struct file *vm_file = vma->vm_file; + + if (vm_file) + return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; + + /* + * Special mappings (e.g. VDSO) do not have any file so fake + * a default GFP_KERNEL for them. + */ + return GFP_KERNEL; +} + /* * Notify the address space that the page is about to become writable so that * it can prohibit this or wait for the page to get into an appropriate state. @@ -2005,6 +2019,7 @@ static int do_page_mkwrite(struct vm_area_struct *vma, struct page *page, vmf.virtual_address = (void __user *)(address & PAGE_MASK); vmf.pgoff = page->index; vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE; + vmf.gfp_mask = __get_fault_gfp_mask(vma); vmf.page = page; vmf.cow_page = NULL; @@ -2771,6 +2786,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address, vmf.pgoff = pgoff; vmf.flags = flags; vmf.page = NULL; + vmf.gfp_mask = __get_fault_gfp_mask(vma); vmf.cow_page = cow_page; ret = vma->vm_ops->fault(vma, &vmf); @@ -2937,6 +2953,7 @@ static void do_fault_around(struct vm_area_struct *vma, unsigned long address, vmf.pgoff = pgoff; vmf.max_pgoff = max_pgoff; vmf.flags = flags; + vmf.gfp_mask = __get_fault_gfp_mask(vma); vma->vm_ops->map_pages(vma, &vmf); } diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2e80a7bedef3..191b9eff7fc3 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1423,10 +1423,10 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, } /* - * Scan pfn range [start,end) to find movable/migratable pages (LRU pages - * and hugepages). We scan pfn because it's much easier than scanning over - * linked list. This function returns the pfn of the first found movable - * page if it's found, otherwise 0. + * Scan pfn range [start,end) to find movable/migratable pages (LRU pages, + * non-lru movable pages and hugepages). 
We scan pfn because it's much + * easier than scanning over linked list. This function returns the pfn + * of the first found movable page if it's found, otherwise 0. */ static unsigned long scan_movable_pages(unsigned long start, unsigned long end) { @@ -1437,6 +1437,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end) page = pfn_to_page(pfn); if (PageLRU(page)) return pfn; + if (__PageMovable(page)) + return pfn; if (PageHuge(page)) { if (page_huge_active(page)) return pfn; @@ -1480,22 +1482,24 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) if (!get_page_unless_zero(page)) continue; /* - * We can skip free pages. And we can only deal with pages on - * LRU. + * We can skip free pages. And we can deal with pages on + * LRU and non-lru movable pages. */ - ret = isolate_lru_page(page); + if (PageLRU(page)) + ret = isolate_lru_page(page); + else + ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); if (!ret) { /* Success */ put_page(page); list_add_tail(&page->lru, &source); move_pages--; - inc_zone_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); - + if (!__PageMovable(page)) + inc_zone_page_state(page, NR_ISOLATED_ANON + + page_is_file_cache(page)); } else { #ifdef CONFIG_DEBUG_VM - printk(KERN_ALERT "removing pfn %lx from LRU failed\n", - pfn); - dump_page(page, "failed to remove from LRU"); + pr_alert("failed to isolate pfn %lx\n", pfn); + dump_page(page, "isolation failed"); #endif put_page(page); /* Because we don't have big zone->lock. we should diff --git a/mm/migrate.c b/mm/migrate.c index a021071eceaf..921cf12b03ce 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -75,7 +75,7 @@ int migrate_prep_local(void) return 0; } -bool isolate_movable_page(struct page *page, isolate_mode_t mode) +int isolate_movable_page(struct page *page, isolate_mode_t mode) { struct address_space *mapping; @@ -126,14 +126,14 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode) __SetPageIsolated(page); unlock_page(page); - return true; + return 0; out_no_isolated: unlock_page(page); out_putpage: put_page(page); out: - return false; + return -EBUSY; } /* It should be called on page which is PG_movable */ diff --git a/mm/mmap.c b/mm/mmap.c index 2339b533f4b2..5457c5f4935b 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -206,6 +206,13 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) free += global_page_state(NR_SLAB_RECLAIMABLE); /* + * Part of the kernel memory, which can be released + * under memory pressure. + */ + free += global_page_state( + NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT; + + /* * Leave reserved pages. The pages are not for anonymous pages. */ if (free <= totalreserve_pages) diff --git a/mm/nommu.c b/mm/nommu.c index 92be862c859b..8d75e425c21c 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1880,6 +1880,13 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin) free += global_page_state(NR_SLAB_RECLAIMABLE); /* + * Part of the kernel memory, which can be released + * under memory pressure. + */ + free += global_page_state( + NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT; + + /* * Leave reserved pages. The pages are not for anonymous pages. 
*/ if (free <= totalreserve_pages) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index c5bf17598afa..a98dae1bdcff 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2526,13 +2526,13 @@ void account_page_redirty(struct page *page) if (mapping && mapping_cap_account_dirty(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; - bool locked; + struct wb_lock_cookie cookie = {}; - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); current->nr_dirtied--; dec_zone_page_state(page, NR_DIRTIED); dec_wb_stat(wb, WB_DIRTIED); - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); } } EXPORT_SYMBOL(account_page_redirty); @@ -2638,15 +2638,15 @@ void cancel_dirty_page(struct page *page) struct inode *inode = mapping->host; struct bdi_writeback *wb; struct mem_cgroup *memcg; - bool locked; + struct wb_lock_cookie cookie = {}; memcg = mem_cgroup_begin_page_stat(page); - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); if (TestClearPageDirty(page)) account_page_cleaned(page, mapping, memcg, wb); - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); mem_cgroup_end_page_stat(memcg); } else { ClearPageDirty(page); @@ -2679,7 +2679,7 @@ int clear_page_dirty_for_io(struct page *page) struct inode *inode = mapping->host; struct bdi_writeback *wb; struct mem_cgroup *memcg; - bool locked; + struct wb_lock_cookie cookie = {}; /* * Yes, Virginia, this is indeed insane. @@ -2717,14 +2717,14 @@ int clear_page_dirty_for_io(struct page *page) * exclusion. */ memcg = mem_cgroup_begin_page_stat(page); - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); if (TestClearPageDirty(page)) { mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY); dec_zone_page_state(page, NR_FILE_DIRTY); dec_wb_stat(wb, WB_RECLAIMABLE); ret = 1; } - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); mem_cgroup_end_page_stat(memcg); return ret; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3c3d8139f594..bad5f32a9765 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6652,8 +6652,9 @@ void set_pfnblock_flags_mask(struct page *page, unsigned long flags, * If @count is not zero, it is okay to include less @count unmovable pages * * PageLRU check without isolation or lru_lock could race so that - * MIGRATE_MOVABLE block might include unmovable pages. It means you can't - * expect this function should be exact. + * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable + * check without lock_page also may miss some movable non-lru pages at + * race condition. So you can't expect this function should be exact. 
*/ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, bool skip_hwpoisoned_pages) @@ -6709,6 +6710,9 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count, if (skip_hwpoisoned_pages && PageHWPoison(page)) continue; + if (__PageMovable(page)) + continue; + if (!PageLRU(page)) found++; /* diff --git a/mm/percpu.c b/mm/percpu.c index d9f91253953e..c5f2a724101a 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -68,6 +68,7 @@ #include <linux/vmalloc.h> #include <linux/workqueue.h> #include <linux/kmemleak.h> +#include <linux/sched.h> #include <asm/cacheflush.h> #include <asm/sections.h> diff --git a/mm/slab.c b/mm/slab.c index 8fc762c178bd..80ca19a122f3 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3942,7 +3942,8 @@ next: next_reap_node(); out: /* Set up the next iteration */ - schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC)); + schedule_delayed_work_on(smp_processor_id(), work, + round_jiffies_relative(REAPTIMEOUT_AC)); } #ifdef CONFIG_SLABINFO diff --git a/mm/util.c b/mm/util.c index d41da54e8d83..f5480eb305c7 100644 --- a/mm/util.c +++ b/mm/util.c @@ -80,6 +80,8 @@ EXPORT_SYMBOL(kstrdup_const); * @s: the string to duplicate * @max: read at most @max chars from @s * @gfp: the GFP mask used in the kmalloc() call when allocating memory + * + * Note: Use kmemdup_nul() instead if the size is known exactly. */ char *kstrndup(const char *s, size_t max, gfp_t gfp) { @@ -118,6 +120,28 @@ void *kmemdup(const void *src, size_t len, gfp_t gfp) EXPORT_SYMBOL(kmemdup); /** + * kmemdup_nul - Create a NUL-terminated string from unterminated data + * @s: The data to stringify + * @len: The size of the data + * @gfp: the GFP mask used in the kmalloc() call when allocating memory + */ +char *kmemdup_nul(const char *s, size_t len, gfp_t gfp) +{ + char *buf; + + if (!s) + return NULL; + + buf = kmalloc_track_caller(len + 1, gfp); + if (buf) { + memcpy(buf, s, len); + buf[len] = '\0'; + } + return buf; +} +EXPORT_SYMBOL(kmemdup_nul); + +/** * memdup_user - duplicate memory region from user space * * @src: source address in user space @@ -406,17 +430,25 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen) int res = 0; unsigned int len; struct mm_struct *mm = get_task_mm(task); + unsigned long arg_start, arg_end, env_start, env_end; if (!mm) goto out; if (!mm->arg_end) goto out_mm; /* Shh! 
No looking before we're done */ - len = mm->arg_end - mm->arg_start; + down_read(&mm->mmap_sem); + arg_start = mm->arg_start; + arg_end = mm->arg_end; + env_start = mm->env_start; + env_end = mm->env_end; + up_read(&mm->mmap_sem); + + len = arg_end - arg_start; if (len > buflen) len = buflen; - res = access_process_vm(task, mm->arg_start, buffer, len, 0); + res = access_process_vm(task, arg_start, buffer, len, 0); /* * If the nul at the end of args has been overwritten, then @@ -427,10 +459,10 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen) if (len < res) { res = len; } else { - len = mm->env_end - mm->env_start; + len = env_end - env_start; if (len > buflen - res) len = buflen - res; - res += access_process_vm(task, mm->env_start, + res += access_process_vm(task, env_start, buffer+res, len, 0); res = strnlen(buffer, res); } diff --git a/mm/vmscan.c b/mm/vmscan.c index 5f6e29f25af9..c7fe805048da 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -267,10 +267,13 @@ EXPORT_SYMBOL(register_shrinker); */ void unregister_shrinker(struct shrinker *shrinker) { + if (!shrinker->nr_deferred) + return; down_write(&shrinker_rwsem); list_del(&shrinker->list); up_write(&shrinker_rwsem); kfree(shrinker->nr_deferred); + shrinker->nr_deferred = NULL; } EXPORT_SYMBOL(unregister_shrinker); @@ -2191,11 +2194,17 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness, } /* - * There is enough inactive page cache, do not reclaim - * anything from the anonymous working set right now. + * If there is enough inactive page cache, i.e. if the size of the + * inactive list is greater than that of the active list *and* the + * inactive list actually has some pages to scan on this priority, we + * do not reclaim anything from the anonymous working set right now. + * Without the second condition we could end up never scanning an + * lruvec even if it has plenty of old anonymous pages unless the + * system is under heavy pressure. */ if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) && - !inactive_file_is_low(lruvec)) { + !inactive_file_is_low(lruvec) && + get_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) { scan_balance = SCAN_FILE; goto out; } diff --git a/mm/vmstat.c b/mm/vmstat.c index 6c841595b963..9ab13e3be5df 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -765,6 +765,7 @@ const char * const vmstat_text[] = { "nr_anon_transparent_hugepages", "nr_free_cma", "nr_swapcache", + "nr_indirectly_reclaimable", /* enum writeback_stat_item counters */ "nr_dirty_threshold", |
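A note on the mm/compaction.c, mm/memory_hotplug.c and mm/migrate.c hunks: isolate_movable_page() switches from returning bool (true on success) to returning int (0 on success, -EBUSY on failure), matching isolate_lru_page(). That is why the compaction call site gains a `!` and why do_migrate_range() can now assign either helper's result to the same `ret` variable. A minimal userspace sketch of the convention change; the function names below are stand-ins, not the kernel symbols:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Old convention: boolean, true == isolated. */
static bool isolate_movable_old(int busy)
{
	return !busy;
}

/* New convention: 0 == isolated, negative errno on failure,
 * matching the isolate_lru_page() style. */
static int isolate_movable_new(int busy)
{
	return busy ? -EBUSY : 0;
}

int main(void)
{
	int busy = 0;

	/* Old call site: success when the return value is true. */
	if (isolate_movable_old(busy))
		puts("old: isolated");

	/* New call site: success when the return value is zero,
	 * hence the '!' added in isolate_migratepages_block(). */
	if (!isolate_movable_new(busy))
		puts("new: isolated");

	return 0;
}
```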
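On the mm/filemap.c rework of do_read_cache_page(): the long comment added there (cases a to d) boils down to waiting for the page to be unlocked without taking the lock, using the page if it became uptodate, and only taking the page lock to distinguish truncation or reclaim (restart) from a read error or a concurrent fill. A self-contained toy sketch of that control flow; the struct and helpers below are simplified stand-ins for the page flag and locking primitives, not the kernel API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy page model; the real code tests struct page flags. */
struct page {
	bool uptodate;
	bool locked;
	bool mapped;
};

static bool page_uptodate(struct page *p)      { return p->uptodate; }
static void wait_page_unlocked(struct page *p) { while (p->locked) { /* kernel would sleep here */ } }
static void lock_page_toy(struct page *p)      { p->locked = true; }
static void unlock_page_toy(struct page *p)    { p->locked = false; }
static int  fill_page(struct page *p)          { p->uptodate = true; return 0; }

/*
 * Shape of the reworked do_read_cache_page(): wait for IO without the
 * page lock, use the page if it became uptodate, and only take the lock
 * to tell truncation/reclaim apart from a read error or a concurrent
 * fill by another thread.
 */
static struct page *read_cache_page_sketch(struct page *page)
{
	for (;;) {
		if (page_uptodate(page))
			return page;            /* fast path, no locking */

		wait_page_unlocked(page);       /* optimistic wait for IO */
		if (page_uptodate(page))
			return page;            /* no serialisation needed */

		lock_page_toy(page);            /* disambiguate the remaining cases */
		if (!page->mapped) {            /* truncated or reclaimed; the real
						 * code releases the page and
						 * restarts the lookup */
			unlock_page_toy(page);
			return NULL;
		}
		if (page_uptodate(page)) {      /* filled in a small race window */
			unlock_page_toy(page);
			return page;
		}
		if (fill_page(page) < 0) {      /* issue the read ourselves */
			unlock_page_toy(page);
			return NULL;
		}
		unlock_page_toy(page);
		/* loop: wait for the read we just issued to complete */
	}
}

int main(void)
{
	struct page p = { .uptodate = false, .locked = false, .mapped = true };

	if (read_cache_page_sketch(&p))
		puts("page read and uptodate");
	return 0;
}
```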
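The mm/memory.c hunks add gfp_mask to the fault context via __get_fault_gfp_mask(), and the mm/filemap.c hunks make page_cache_read() and the fault path use that mask instead of a hard-coded one, so page cache allocations during a fault respect the mapping's constraints while still allowing FS/IO recursion. A sketch of the mask derivation; the flag values here are illustrative bit constants, not the kernel's gfp definitions:

```c
#include <stdio.h>

/* Illustrative bit values only; the kernel's gfp flags differ. */
#define GFP_IO      0x1u
#define GFP_FS      0x2u
#define GFP_HIGHMEM 0x4u
#define GFP_KERNEL  (GFP_IO | GFP_FS)

struct mapping { unsigned int gfp_mask; };
struct file    { struct mapping *f_mapping; };
struct vma     { struct file *vm_file; };

/* Mirrors the shape of __get_fault_gfp_mask(): file-backed VMAs inherit
 * the mapping's mask and always allow IO/FS; special mappings with no
 * file (e.g. the VDSO) fall back to GFP_KERNEL. */
static unsigned int fault_gfp_mask(const struct vma *vma)
{
	if (vma->vm_file)
		return vma->vm_file->f_mapping->gfp_mask | GFP_FS | GFP_IO;
	return GFP_KERNEL;
}

int main(void)
{
	struct mapping m = { .gfp_mask = GFP_HIGHMEM };  /* stand-in mapping mask */
	struct file f = { .f_mapping = &m };
	struct vma file_backed = { .vm_file = &f };
	struct vma special = { .vm_file = NULL };

	printf("file-backed fault mask: %#x\n", fault_gfp_mask(&file_backed));
	printf("special mapping mask:   %#x\n", fault_gfp_mask(&special));
	return 0;
}
```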
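For the __vm_enough_memory() hunks in mm/mmap.c and mm/nommu.c: NR_INDIRECTLY_RECLAIMABLE_BYTES is accounted in bytes, so it has to be shifted down by PAGE_SHIFT before it can be added to the page-based free estimate. A quick illustration of that conversion, assuming 4 KiB pages:

```c
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages assumed for this example */

int main(void)
{
	unsigned long free_pages = 1000;                       /* page cache + reclaimable slab, in pages */
	unsigned long indirectly_reclaimable_bytes = 8 << 20;  /* 8 MiB tracked in bytes */

	/* Same conversion as the patch: bytes >> PAGE_SHIFT gives pages. */
	free_pages += indirectly_reclaimable_bytes >> PAGE_SHIFT;

	printf("estimated reclaimable: %lu pages\n", free_pages);  /* 1000 + 2048 */
	return 0;
}
```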
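On the mm/util.c addition of kmemdup_nul(): unlike kstrndup(), which measures the source first and stops at an embedded NUL, kmemdup_nul() copies exactly len bytes and appends the terminator, which is cheaper and safer when the length is already known, as the new note on kstrndup() points out. A userspace analogue of the helper:

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup_nul(): duplicate exactly len bytes of
 * possibly unterminated data and NUL-terminate the copy. */
static char *memdup_nul(const char *s, size_t len)
{
	char *buf;

	if (!s)
		return NULL;

	buf = malloc(len + 1);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}

int main(void)
{
	const char raw[4] = { 'a', 'b', 'c', 'd' };   /* no terminator */
	char *str = memdup_nul(raw, sizeof(raw));

	if (str) {
		printf("%s\n", str);                   /* prints "abcd" */
		free(str);
	}
	return 0;
}
```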
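The get_cmdline() change snapshots arg_start/arg_end/env_start/env_end into locals under mmap_sem so that the length computation and the access_process_vm() calls see one consistent view even if the fields are updated concurrently (e.g. via prctl(PR_SET_MM)). The same read-once pattern in userspace terms, with a pthread rwlock standing in for mmap_sem:

```c
#include <pthread.h>
#include <stdio.h>

/* Toy stand-in for the mm fields protected by mmap_sem. */
struct mm_layout {
	pthread_rwlock_t lock;
	unsigned long arg_start, arg_end;
};

/* Snapshot the range under the lock, then work on the local copies so
 * the computed length and the start address used later cannot disagree. */
static unsigned long cmdline_len(struct mm_layout *mm, unsigned long *start)
{
	unsigned long arg_start, arg_end;

	pthread_rwlock_rdlock(&mm->lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	pthread_rwlock_unlock(&mm->lock);

	*start = arg_start;
	return arg_end - arg_start;
}

int main(void)
{
	struct mm_layout mm = {
		.lock = PTHREAD_RWLOCK_INITIALIZER,
		.arg_start = 0x1000,
		.arg_end = 0x1080,
	};
	unsigned long start;
	unsigned long len = cmdline_len(&mm, &start);

	printf("args at %#lx, %lu bytes\n", start, len);
	return 0;
}
```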
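The mm/vmscan.c unregister_shrinker() change makes the call safe for a shrinker that was never registered (nr_deferred is NULL) and, by clearing the pointer afterwards, safe to call twice. The same teardown pattern reduced to a userspace helper; the names are illustrative only:

```c
#include <stdio.h>
#include <stdlib.h>

struct shrinker {
	long *nr_deferred;   /* allocated at register time, NULL otherwise */
};

/* Teardown that tolerates "never registered" and "already unregistered". */
static void unregister(struct shrinker *s)
{
	if (!s->nr_deferred)
		return;            /* nothing to undo */
	free(s->nr_deferred);
	s->nr_deferred = NULL;     /* makes a second call harmless */
}

int main(void)
{
	struct shrinker s = { .nr_deferred = calloc(4, sizeof(long)) };

	unregister(&s);
	unregister(&s);   /* safe: caught by the NULL check */
	puts("double unregister handled");
	return 0;
}
```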
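Also in mm/vmscan.c, get_scan_count() now skips the anonymous LRUs only if the inactive file list can actually yield pages at the current priority, i.e. only when its size shifted right by sc->priority is non-zero, which requires at least 2^priority pages. A quick illustration of that threshold:

```c
#include <stdio.h>

int main(void)
{
	unsigned long inactive_file = 3000;   /* pages on the inactive file LRU */

	for (int priority = 12; priority >= 10; priority--) {
		/* Non-zero result means "enough inactive file pages to scan
		 * at this priority", so anon can be skipped. */
		unsigned long scannable = inactive_file >> priority;

		printf("priority %2d: %s (threshold %lu pages)\n",
		       priority,
		       scannable ? "file-only reclaim" : "also scan anon",
		       1UL << priority);
	}
	return 0;
}
```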
