Diffstat (limited to 'mm')
 -rw-r--r--   mm/compaction.c     |  44
 -rw-r--r--   mm/debug.c          |  13
 -rw-r--r--   mm/huge_memory.c    |   6
 -rw-r--r--   mm/internal.h       |   2
 -rw-r--r--   mm/list_lru.c       |  14
 -rw-r--r--   mm/migrate.c        |  12
 -rw-r--r--   mm/page_alloc.c     |  83
 -rw-r--r--   mm/page_isolation.c |   9
 -rw-r--r--   mm/page_owner.c     | 263
 -rw-r--r--   mm/swap_cgroup.c    |   2
 -rw-r--r--   mm/vmstat.c         |  17
11 files changed, 344 insertions(+), 121 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 87ed50835881..f96a58e1843a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -19,6 +19,7 @@
 #include <linux/kasan.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 #ifdef CONFIG_COMPACTION
@@ -59,13 +60,27 @@ static unsigned long release_freepages(struct list_head *freelist)
 
 static void map_pages(struct list_head *list)
 {
-	struct page *page;
+	unsigned int i, order, nr_pages;
+	struct page *page, *next;
+	LIST_HEAD(tmp_list);
+
+	list_for_each_entry_safe(page, next, list, lru) {
+		list_del(&page->lru);
 
-	list_for_each_entry(page, list, lru) {
-		kasan_alloc_pages(page, 0);
-		arch_alloc_page(page, 0);
-		kernel_map_pages(page, 1, 1);
+		order = page_private(page);
+		nr_pages = 1 << order;
+
+		post_alloc_hook(page, order, __GFP_MOVABLE);
+		if (order)
+			split_page(page, order);
+
+		for (i = 0; i < nr_pages; i++) {
+			list_add(&page->lru, &tmp_list);
+			page++;
+		}
 	}
+
+	list_splice(&tmp_list, list);
 }
 
 static inline bool migrate_async_suitable(int migratetype)
@@ -442,12 +457,13 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	unsigned long flags = 0;
 	bool locked = false;
 	unsigned long blockpfn = *start_pfn;
+	unsigned int order;
 
 	cursor = pfn_to_page(blockpfn);
 
 	/* Isolate free pages. */
 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
-		int isolated, i;
+		int isolated;
 		struct page *page = cursor;
 
 		/*
@@ -513,17 +529,17 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			goto isolate_fail;
 		}
 
-		/* Found a free page, break it into order-0 pages */
-		isolated = split_free_page(page);
+		/* Found a free page, will break it into order-0 pages */
+		order = page_order(page);
+		isolated = __isolate_free_page(page, order);
 		if (!isolated)
 			break;
+		set_page_private(page, order);
 
 		total_isolated += isolated;
 		cc->nr_freepages += isolated;
-		for (i = 0; i < isolated; i++) {
-			list_add(&page->lru, freelist);
-			page++;
-		}
+		list_add_tail(&page->lru, freelist);
+
 		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
 			blockpfn += isolated;
 			break;
@@ -636,7 +652,7 @@ isolate_freepages_range(struct compact_control *cc,
 		 */
 	}
 
-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(&freelist);
 
 	if (pfn < end_pfn) {
@@ -1085,7 +1101,7 @@ static void isolate_freepages(struct compact_control *cc)
 		}
 	}
 
-	/* split_free_page does not map the pages */
+	/* __isolate_free_page() does not map the pages */
 	map_pages(freelist);
 
 	/*
diff --git a/mm/debug.c b/mm/debug.c
index 5cf26f8c69a3..3621385c09ac 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -9,6 +9,18 @@
 #include <linux/mm.h>
 #include <linux/trace_events.h>
 #include <linux/memcontrol.h>
+#include <linux/migrate.h>
+#include <linux/page_owner.h>
+
+char *migrate_reason_names[MR_TYPES] = {
+	"compaction",
+	"memory_failure",
+	"memory_hotplug",
+	"syscall_or_cpuset",
+	"mempolicy_mbind",
+	"numa_misplaced",
+	"cma",
+};
 
 static const struct trace_print_flags pageflag_names[] = {
 	{1UL << PG_locked, "locked" },
@@ -106,6 +118,7 @@ void dump_page_badflags(struct page *page, const char *reason,
 void dump_page(struct page *page, const char *reason)
 {
 	dump_page_badflags(page, reason, 0);
+	dump_page_owner(page);
 }
 EXPORT_SYMBOL(dump_page);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 47b469663822..6c6f5ccfcda1 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1363,8 +1363,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 */
 	if (unlikely(pmd_trans_migrating(*pmdp))) {
 		page = pmd_page(*pmdp);
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(ptl);
 		wait_on_page_locked(page);
+		put_page(page);
 		goto out;
 	}
 
@@ -1396,8 +1399,11 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	/* Migration could have started since the pmd_trans_migrating check */
 	if (!page_locked) {
+		if (!get_page_unless_zero(page))
+			goto out_unlock;
 		spin_unlock(ptl);
 		wait_on_page_locked(page);
+		put_page(page);
 		page_nid = -1;
 		goto out;
 	}
diff --git a/mm/internal.h b/mm/internal.h
index 7b9e313d9dea..46d27f378885 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -182,6 +182,8 @@ extern void prep_compound_page(struct page *page, unsigned int order);
 #ifdef CONFIG_MEMORY_FAILURE
 extern bool is_free_buddy_page(struct page *page);
 #endif
+extern void post_alloc_hook(struct page *page, unsigned int order,
+					gfp_t gfp_flags);
 extern int user_min_free_kbytes;
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 5d8dffd5b57c..786176b1a0ee 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
 		l = list_lru_from_kmem(nlru, item);
 		list_add_tail(item, &l->list);
 		l->nr_items++;
+		nlru->nr_items++;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -136,6 +137,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
 		l = list_lru_from_kmem(nlru, item);
 		list_del_init(item);
 		l->nr_items--;
+		nlru->nr_items--;
 		spin_unlock(&nlru->lock);
 		return true;
 	}
@@ -183,15 +185,10 @@ EXPORT_SYMBOL_GPL(list_lru_count_one);
 
 unsigned long list_lru_count_node(struct list_lru *lru, int nid)
 {
-	long count = 0;
-	int memcg_idx;
+	struct list_lru_node *nlru;
 
-	count += __list_lru_count_one(lru, nid, -1);
-	if (list_lru_memcg_aware(lru)) {
-		for_each_memcg_cache_index(memcg_idx)
-			count += __list_lru_count_one(lru, nid, memcg_idx);
-	}
-	return count;
+	nlru = &lru->node[nid];
+	return nlru->nr_items;
 }
 EXPORT_SYMBOL_GPL(list_lru_count_node);
 
@@ -226,6 +223,7 @@ restart:
 		assert_spin_locked(&nlru->lock);
 		case LRU_REMOVED:
 			isolated++;
+			nlru->nr_items--;
 			/*
 			 * If the lru lock has been dropped, our list
 			 * traversal is now invalid and so we have to
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a5ccfc71afc..85af2816b6d2 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -39,6 +39,7 @@
 #include <linux/balloon_compaction.h>
 #include <linux/mmu_notifier.h>
 #include <linux/page_idle.h>
+#include <linux/page_owner.h>
 
 #include <asm/tlbflush.h>
 
@@ -668,6 +669,8 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 	 */
 	if (PageWriteback(newpage))
 		end_page_writeback(newpage);
+
+	copy_page_owner(page, newpage);
 }
 EXPORT_SYMBOL(migrate_page_copy);
 
@@ -1097,6 +1100,9 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
 		goto out;
 
 	rc = __unmap_and_move(page, newpage, force, mode);
+	if (rc == MIGRATEPAGE_SUCCESS) {
+		set_page_owner_migrate_reason(newpage, reason);
+	}
 
 out:
 	if (rc != -EAGAIN) {
@@ -1179,7 +1185,7 @@ put_new:
 static int unmap_and_move_huge_page(new_page_t get_new_page,
 				free_page_t put_new_page, unsigned long private,
 				struct page *hpage, int force,
-				enum migrate_mode mode)
+				enum migrate_mode mode, int reason)
 {
 	int rc = -EAGAIN;
 	int *result = NULL;
@@ -1237,6 +1243,7 @@ put_anon:
 	if (rc == MIGRATEPAGE_SUCCESS) {
 		hugetlb_cgroup_migrate(hpage, new_hpage);
 		put_new_page = NULL;
+		set_page_owner_migrate_reason(new_hpage, reason);
 	}
 
 	unlock_page(hpage);
@@ -1311,7 +1318,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 		if (PageHuge(page))
 			rc = unmap_and_move_huge_page(get_new_page,
 					put_new_page, private, page,
-					pass > 2, mode);
+					pass > 2, mode, reason);
 		else
 			rc = unmap_and_move(get_new_page, put_new_page,
 					private, page, pass > 2, mode,
@@ -2001,6 +2008,7 @@ fail_putback:
 	set_page_memcg(new_page, page_memcg(page));
 	set_page_memcg(page, NULL);
 	page_remove_rmap(page);
+	set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
 
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4fbb23c1cba7..6759192e69de 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -223,6 +223,20 @@ static char * const zone_names[MAX_NR_ZONES] = {
 };
 
 static void free_compound_page(struct page *page);
+
+char * const migratetype_names[MIGRATE_TYPES] = {
+	"Unmovable",
+	"Movable",
+	"Reclaimable",
+#ifdef CONFIG_CMA
+	"CMA",
+#endif
+	"HighAtomic",
+#ifdef CONFIG_MEMORY_ISOLATION
+	"Isolate",
+#endif
+};
+
 compound_page_dtor * const compound_page_dtors[] = {
 	NULL,
 	free_compound_page,
@@ -459,6 +473,7 @@ static void bad_page(struct page *page, const char *reason,
 	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
 		current->comm, page_to_pfn(page));
 	dump_page_badflags(page, reason, bad_flags);
+	dump_page_owner(page);
 
 	print_modules();
 	dump_stack();
@@ -569,6 +584,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	INIT_LIST_HEAD(&page->lru);
@@ -586,6 +604,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 		return;
 
 	page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
 	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
 	set_page_private(page, 0);
@@ -1422,8 +1443,21 @@ static inline bool free_pages_prezeroed(void)
 		page_poisoning_enabled();
 }
 
+inline void post_alloc_hook(struct page *page, unsigned int order,
+				gfp_t gfp_flags)
+{
+	set_page_private(page, 0);
+	set_page_refcounted(page);
+
+	kasan_alloc_pages(page, order);
+	arch_alloc_page(page, order);
+	kernel_map_pages(page, 1 << order, 1);
+	kernel_poison_pages(page, 1 << order, 1);
+	set_page_owner(page, order, gfp_flags);
+}
+
 static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
-							int alloc_flags)
+								int alloc_flags)
 {
 	int i;
 
@@ -1433,13 +1467,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 			return 1;
 	}
 
-	set_page_private(page, 0);
-	set_page_refcounted(page);
-
-	kasan_alloc_pages(page, order);
-	arch_alloc_page(page, order);
-	kernel_map_pages(page, 1 << order, 1);
-	kernel_poison_pages(page, 1 << order, 1);
+	post_alloc_hook(page, order, gfp_flags);
 
 	if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
 		for (i = 0; i < (1 << order); i++)
@@ -1448,8 +1476,6 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
 	if (order && (gfp_flags & __GFP_COMP))
 		prep_compound_page(page, order);
 
-	set_page_owner(page, order, gfp_flags);
-
 	/*
 	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
 	 * allocate the page. The expectation is that the caller is taking
@@ -2217,7 +2243,6 @@ void free_hot_cold_page_list(struct list_head *list, bool cold)
 void split_page(struct page *page, unsigned int order)
 {
 	int i;
-	gfp_t gfp_mask;
 
 	VM_BUG_ON_PAGE(PageCompound(page), page);
 	VM_BUG_ON_PAGE(!page_count(page), page);
@@ -2231,12 +2256,9 @@ void split_page(struct page *page, unsigned int order)
 		split_page(virt_to_page(page[0].shadow), order);
 #endif
 
-	gfp_mask = get_page_owner_gfp(page);
-	set_page_owner(page, 0, gfp_mask);
-	for (i = 1; i < (1 << order); i++) {
+	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
-		set_page_owner(page + i, 0, gfp_mask);
-	}
+	split_page_owner(page, order);
 }
 EXPORT_SYMBOL_GPL(split_page);
 
@@ -2266,8 +2288,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
 	zone->free_area[order].nr_free--;
 	rmv_page_order(page);
 
-	set_page_owner(page, order, __GFP_MOVABLE);
-
 	/* Set the pageblock if the isolated page is at least a pageblock */
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
@@ -2285,33 +2305,6 @@ int __isolate_free_page(struct page *page, unsigned int order)
 }
 
 /*
- * Similar to split_page except the page is already free. As this is only
- * being used for migration, the migratetype of the block also changes.
- * As this is called with interrupts disabled, the caller is responsible
- * for calling arch_alloc_page() and kernel_map_page() after interrupts
- * are enabled.
- *
- * Note: this is probably too low level an operation for use in drivers.
- * Please consult with lkml before using this in your driver.
- */
-int split_free_page(struct page *page)
-{
-	unsigned int order;
-	int nr_pages;
-
-	order = page_order(page);
-
-	nr_pages = __isolate_free_page(page, order);
-	if (!nr_pages)
-		return 0;
-
-	/* Split into individual pages */
-	set_page_refcounted(page);
-	split_page(page, order);
-	return nr_pages;
-}
-
-/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
 static inline
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 3ecd3807c2c2..efb6c3c38c01 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -8,6 +8,7 @@
 #include <linux/memory.h>
 #include <linux/hugetlb.h>
 #include <linux/kasan.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 static int set_migratetype_isolate(struct page *page,
@@ -106,10 +107,6 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		if (pfn_valid_within(page_to_pfn(buddy)) &&
 		    !is_migrate_isolate_page(buddy)) {
 			__isolate_free_page(page, order);
-			kasan_alloc_pages(page, order);
-			arch_alloc_page(page, order);
-			kernel_map_pages(page, (1 << order), 1);
-			set_page_refcounted(page);
 			isolated_page = page;
 		}
 	}
@@ -128,8 +125,10 @@ static void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		zone->nr_isolate_pageblock--;
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
-	if (isolated_page)
+	if (isolated_page) {
+		post_alloc_hook(page, order, __GFP_MOVABLE);
 		__free_pages(isolated_page, order);
+	}
 }
 
 static inline struct page *
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 45cccaa1ce32..3a9a358e7c63 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -5,11 +5,24 @@
 #include <linux/bootmem.h>
 #include <linux/stacktrace.h>
 #include <linux/page_owner.h>
+#include <linux/jump_label.h>
+#include <linux/migrate.h>
+#include <linux/stackdepot.h>
+
 #include "internal.h"
 
+/*
+ * TODO: teach PAGE_OWNER_STACK_DEPTH (__dump_page_owner and save_stack)
+ * to use off stack temporal storage
+ */
+#define PAGE_OWNER_STACK_DEPTH (16)
+
 static bool page_owner_disabled =
 	!IS_ENABLED(CONFIG_PAGE_OWNER_ENABLE_DEFAULT);
-bool page_owner_inited __read_mostly;
+DEFINE_STATIC_KEY_FALSE(page_owner_inited);
+
+static depot_stack_handle_t dummy_handle;
+static depot_stack_handle_t failure_handle;
 
 static void init_early_allocated_pages(void);
 
@@ -36,12 +49,42 @@ static bool need_page_owner(void)
 	return true;
 }
 
+static noinline void register_dummy_stack(void)
+{
+	unsigned long entries[4];
+	struct stack_trace dummy;
+
+	dummy.nr_entries = 0;
+	dummy.max_entries = ARRAY_SIZE(entries);
+	dummy.entries = &entries[0];
+	dummy.skip = 0;
+
+	save_stack_trace(&dummy);
+	dummy_handle = depot_save_stack(&dummy, GFP_KERNEL);
+}
+
+static noinline void register_failure_stack(void)
+{
+	unsigned long entries[4];
+	struct stack_trace failure;
+
+	failure.nr_entries = 0;
+	failure.max_entries = ARRAY_SIZE(entries);
+	failure.entries = &entries[0];
+	failure.skip = 0;
+
+	save_stack_trace(&failure);
+	failure_handle = depot_save_stack(&failure, GFP_KERNEL);
+}
+
 static void init_page_owner(void)
 {
 	if (page_owner_disabled)
 		return;
 
-	page_owner_inited = true;
+	register_dummy_stack();
+	register_failure_stack();
+	static_branch_enable(&page_owner_inited);
 	init_early_allocated_pages();
 }
 
@@ -57,46 +100,141 @@ void __reset_page_owner(struct page *page, unsigned int order)
 
 	for (i = 0; i < (1 << order); i++) {
 		page_ext = lookup_page_ext(page + i);
+		if (unlikely(!page_ext))
+			continue;
 		__clear_bit(PAGE_EXT_OWNER, &page_ext->flags);
 	}
 }
 
-void __set_page_owner(struct page *page, unsigned int order, gfp_t gfp_mask)
+static inline bool check_recursive_alloc(struct stack_trace *trace,
+					unsigned long ip)
 {
-	struct page_ext *page_ext = lookup_page_ext(page);
+	int i, count;
+
+	if (!trace->nr_entries)
+		return false;
+
+	for (i = 0, count = 0; i < trace->nr_entries; i++) {
+		if (trace->entries[i] == ip && ++count == 2)
+			return true;
+	}
+
+	return false;
+}
+
+static noinline depot_stack_handle_t save_stack(gfp_t flags)
+{
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
 	struct stack_trace trace = {
 		.nr_entries = 0,
-		.max_entries = ARRAY_SIZE(page_ext->trace_entries),
-		.entries = &page_ext->trace_entries[0],
-		.skip = 3,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 0
 	};
+	depot_stack_handle_t handle;
 
 	save_stack_trace(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	/*
+	 * We need to check recursion here because our request to stackdepot
+	 * could trigger memory allocation to save new entry. New memory
+	 * allocation would reach here and call depot_save_stack() again
+	 * if we don't catch it. There is still not enough memory in stackdepot
+	 * so it would try to allocate memory again and loop forever.
+	 */
+	if (check_recursive_alloc(&trace, _RET_IP_))
+		return dummy_handle;
+
+	handle = depot_save_stack(&trace, flags);
+	if (!handle)
+		handle = failure_handle;
+
+	return handle;
+}
+
+noinline void __set_page_owner(struct page *page, unsigned int order,
+					gfp_t gfp_mask)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
+
+	if (unlikely(!page_ext))
+		return;
 
+	page_ext->handle = save_stack(gfp_mask);
 	page_ext->order = order;
 	page_ext->gfp_mask = gfp_mask;
-	page_ext->nr_entries = trace.nr_entries;
+	page_ext->last_migrate_reason = -1;
 
 	__set_bit(PAGE_EXT_OWNER, &page_ext->flags);
 }
 
-gfp_t __get_page_owner_gfp(struct page *page)
+void __set_page_owner_migrate_reason(struct page *page, int reason)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		return;
+
+	page_ext->last_migrate_reason = reason;
+}
+
+void __split_page_owner(struct page *page, unsigned int order)
 {
+	int i;
 	struct page_ext *page_ext = lookup_page_ext(page);
+	if (unlikely(!page_ext))
+		/*
+		 * The caller just returns if no valid gfp
+		 * So return here too.
+		 */
+		return;
 
-	return page_ext->gfp_mask;
+	page_ext->order = 0;
+	for (i = 1; i < (1 << order); i++)
+		__copy_page_owner(page, page + i);
+}
+
+void __copy_page_owner(struct page *oldpage, struct page *newpage)
+{
+	struct page_ext *old_ext = lookup_page_ext(oldpage);
+	struct page_ext *new_ext = lookup_page_ext(newpage);
+
+	if (unlikely(!old_ext || !new_ext))
+		return;
+
+	new_ext->order = old_ext->order;
+	new_ext->gfp_mask = old_ext->gfp_mask;
+	new_ext->last_migrate_reason = old_ext->last_migrate_reason;
+	new_ext->handle = old_ext->handle;
+
+	/*
+	 * We don't clear the bit on the oldpage as it's going to be freed
+	 * after migration. Until then, the info can be useful in case of
+	 * a bug, and the overal stats will be off a bit only temporarily.
+	 * Also, migrate_misplaced_transhuge_page() can still fail the
+	 * migration and then we want the oldpage to retain the info. But
+	 * in that case we also don't need to explicitly clear the info from
+	 * the new page, which will be freed.
+	 */
+	__set_bit(PAGE_EXT_OWNER, &new_ext->flags);
 }
 
 static ssize_t
 print_page_owner(char __user *buf, size_t count, unsigned long pfn,
-		struct page *page, struct page_ext *page_ext)
+		struct page *page, struct page_ext *page_ext,
+		depot_stack_handle_t handle)
 {
 	int ret;
 	int pageblock_mt, page_mt;
 	char *kbuf;
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
 	struct stack_trace trace = {
-		.nr_entries = page_ext->nr_entries,
-		.entries = &page_ext->trace_entries[0],
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 0
 	};
 
 	kbuf = kmalloc(count, GFP_KERNEL);
@@ -104,8 +242,9 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 		return -ENOMEM;
 
 	ret = snprintf(kbuf, count,
-			"Page allocated via order %u, mask 0x%x\n",
-			page_ext->order, page_ext->gfp_mask);
+			"Page allocated via order %u, mask %#x(%pGg)\n",
+			page_ext->order, page_ext->gfp_mask,
+			&page_ext->gfp_mask);
 	if (ret >= count)
 		goto err;
 
@@ -114,31 +253,29 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
 	pageblock_mt = get_pfnblock_migratetype(page, pfn);
 	page_mt = gfpflags_to_migratetype(page_ext->gfp_mask);
 	ret += snprintf(kbuf + ret, count - ret,
-			"PFN %lu Block %lu type %d %s Flags %s%s%s%s%s%s%s%s%s%s%s%s\n",
+			"PFN %lu type %s Block %lu type %s Flags %#lx(%pGp)\n",
 			pfn,
+			migratetype_names[page_mt],
 			pfn >> pageblock_order,
-			pageblock_mt,
-			pageblock_mt != page_mt ? "Fallback" : " ",
-			PageLocked(page) ? "K" : " ",
-			PageError(page) ? "E" : " ",
-			PageReferenced(page) ? "R" : " ",
-			PageUptodate(page) ? "U" : " ",
-			PageDirty(page) ? "D" : " ",
-			PageLRU(page) ? "L" : " ",
-			PageActive(page) ? "A" : " ",
-			PageSlab(page) ? "S" : " ",
-			PageWriteback(page) ? "W" : " ",
-			PageCompound(page) ? "C" : " ",
-			PageSwapCache(page) ? "B" : " ",
-			PageMappedToDisk(page) ? "M" : " ");
+			migratetype_names[pageblock_mt],
+			page->flags, &page->flags);
 
 	if (ret >= count)
 		goto err;
 
+	depot_fetch_stack(handle, &trace);
 	ret += snprint_stack_trace(kbuf + ret, count - ret, &trace, 0);
 	if (ret >= count)
 		goto err;
 
+	if (page_ext->last_migrate_reason != -1) {
+		ret += snprintf(kbuf + ret, count - ret,
+			"Page has been migrated, last migrate reason: %s\n",
+			migrate_reason_names[page_ext->last_migrate_reason]);
+		if (ret >= count)
+			goto err;
+	}
+
 	ret += snprintf(kbuf + ret, count - ret, "\n");
 	if (ret >= count)
 		goto err;
@@ -154,14 +291,58 @@ err:
 	return -ENOMEM;
 }
 
+void __dump_page_owner(struct page *page)
+{
+	struct page_ext *page_ext = lookup_page_ext(page);
+	unsigned long entries[PAGE_OWNER_STACK_DEPTH];
+	struct stack_trace trace = {
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = PAGE_OWNER_STACK_DEPTH,
+		.skip = 0
+	};
+	depot_stack_handle_t handle;
+	gfp_t gfp_mask;
+	int mt;
+
+	if (unlikely(!page_ext)) {
+		pr_alert("There is not page extension available.\n");
+		return;
+	}
+	gfp_mask = page_ext->gfp_mask;
+	mt = gfpflags_to_migratetype(gfp_mask);
+
+	if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
+		pr_alert("page_owner info is not active (free page?)\n");
+		return;
+	}
+
+	handle = READ_ONCE(page_ext->handle);
+	if (!handle) {
+		pr_alert("page_owner info is not active (free page?)\n");
+		return;
+	}
+
+	depot_fetch_stack(handle, &trace);
+	pr_alert("page allocated via order %u, migratetype %s, "
+		"gfp_mask %#x(%pGg)\n", page_ext->order,
+		migratetype_names[mt], gfp_mask, &gfp_mask);
+	print_stack_trace(&trace, 0);
+
+	if (page_ext->last_migrate_reason != -1)
+		pr_alert("page has been migrated, last migrate reason: %s\n",
+			migrate_reason_names[page_ext->last_migrate_reason]);
+}
+
 static ssize_t
 read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
 	unsigned long pfn;
 	struct page *page;
 	struct page_ext *page_ext;
+	depot_stack_handle_t handle;
 
-	if (!page_owner_inited)
+	if (!static_branch_unlikely(&page_owner_inited))
 		return -EINVAL;
 
 	page = NULL;
@@ -198,6 +379,8 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		}
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/*
 		 * Some pages could be missed by concurrent allocation or free,
@@ -206,10 +389,19 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 			continue;
 
+		/*
+		 * Access to page_ext->handle isn't synchronous so we should
+		 * be careful to access it.
+		 */
+		handle = READ_ONCE(page_ext->handle);
+		if (!handle)
+			continue;
+
 		/* Record the next PFN to read in the file offset */
 		*ppos = (pfn - min_low_pfn) + 1;
 
-		return print_page_owner(buf, count, pfn, page, page_ext);
+		return print_page_owner(buf, count, pfn, page,
+				page_ext, handle);
 	}
 
 	return 0;
@@ -248,6 +440,9 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 
 		page = pfn_to_page(pfn);
 
+		if (page_zone(page) != zone)
+			continue;
+
 		/*
 		 * We are safe to check buddy flag and order, because
 		 * this is init stage and only single thread runs.
@@ -261,6 +456,8 @@ static void init_pages_in_zone(pg_data_t *pgdat, struct zone *zone)
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		/* Maybe overraping zone */
 		if (test_bit(PAGE_EXT_OWNER, &page_ext->flags))
@@ -309,7 +506,7 @@ static int __init pageowner_init(void)
 {
 	struct dentry *dentry;
 
-	if (!page_owner_inited) {
+	if (!static_branch_unlikely(&page_owner_inited)) {
 		pr_info("page_owner is disabled\n");
 		return 0;
 	}
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index 40dd0f9b00d6..09f733b0424a 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -205,6 +205,8 @@ void swap_cgroup_swapoff(int type)
 			struct page *page = map[i];
 			if (page)
 				__free_page(page);
+			if (!(i % SWAP_CLUSTER_MAX))
+				cond_resched();
 		}
 		vfree(map);
 	}
diff --git a/mm/vmstat.c b/mm/vmstat.c
index d6b4817c4416..3c0796cd3f80 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -927,19 +927,6 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat,
 #endif
 
 #ifdef CONFIG_PROC_FS
-static char * const migratetype_names[MIGRATE_TYPES] = {
-	"Unmovable",
-	"Movable",
-	"Reclaimable",
-#ifdef CONFIG_CMA
-	"CMA",
-#endif
-	"HighAtomic",
-#ifdef CONFIG_MEMORY_ISOLATION
-	"Isolate",
-#endif
-};
-
 static void frag_show_print(struct seq_file *m, pg_data_t *pgdat,
 						struct zone *zone)
 {
@@ -1103,6 +1090,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
 			continue;
 
 		page_ext = lookup_page_ext(page);
+		if (unlikely(!page_ext))
+			continue;
 
 		if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
 			continue;
@@ -1140,7 +1129,7 @@ static void pagetypeinfo_showmixedcount(struct seq_file *m, pg_data_t *pgdat)
 #ifdef CONFIG_PAGE_OWNER
 	int mtype;
 
-	if (!page_owner_inited)
+	if (!static_branch_unlikely(&page_owner_inited))
 		return;
 
 	drain_all_pages(NULL);
