Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--	mm/memory_hotplug.c	58
1 file changed, 35 insertions, 23 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 50bdb897ab00..aa03a24aee9f 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -32,6 +32,7 @@
 #include <linux/hugetlb.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
+#include <linux/compaction.h>
 #include <linux/rmap.h>
 
 #include <asm/tlbflush.h>
@@ -45,7 +46,7 @@
  * and restore_online_page_callback() for generic callback restore.
  */
 
-static void generic_online_page(struct page *page);
+static int generic_online_page(struct page *page);
 
 static online_page_callback_t online_page_callback = generic_online_page;
 static DEFINE_MUTEX(online_page_callback_lock);
@@ -857,11 +858,12 @@ void __online_page_free(struct page *page)
 }
 EXPORT_SYMBOL_GPL(__online_page_free);
 
-static void generic_online_page(struct page *page)
+static int generic_online_page(struct page *page)
 {
 	__online_page_set_limits(page);
 	__online_page_increment_counters(page);
 	__online_page_free(page);
+	return 0;
 }
 
 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
@@ -870,11 +872,13 @@ static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 	unsigned long i;
 	unsigned long onlined_pages = *(unsigned long *)arg;
 	struct page *page;
+	int ret;
 	if (PageReserved(pfn_to_page(start_pfn)))
 		for (i = 0; i < nr_pages; i++) {
 			page = pfn_to_page(start_pfn + i);
-			(*online_page_callback)(page);
-			onlined_pages++;
+			ret = (*online_page_callback)(page);
+			if (!ret)
+				onlined_pages++;
 		}
 	*(unsigned long *)arg = onlined_pages;
 	return 0;
@@ -1013,7 +1017,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 	arg.nr_pages = nr_pages;
 	node_states_check_changes_online(nr_pages, zone, &arg);
 
-	nid = pfn_to_nid(pfn);
+	nid = zone_to_nid(zone);
 
 	ret = memory_notify(MEM_GOING_ONLINE, &arg);
 	ret = notifier_to_errno(ret);
@@ -1053,7 +1057,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 
 	if (onlined_pages) {
-		node_states_set_node(zone_to_nid(zone), &arg);
+		node_states_set_node(nid, &arg);
 		if (need_zonelists_rebuild)
 			build_all_zonelists(NULL, NULL);
 		else
@@ -1064,8 +1068,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
 
 	init_per_zone_wmark_min();
 
-	if (onlined_pages)
-		kswapd_run(zone_to_nid(zone));
+	if (onlined_pages) {
+		kswapd_run(nid);
+		kcompactd_run(nid);
+	}
 
 	vm_total_pages = nr_free_pagecache_pages();
 
@@ -1422,10 +1428,10 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 }
 
 /*
- * Scan pfn range [start,end) to find movable/migratable pages (LRU pages
- * and hugepages). We scan pfn because it's much easier than scanning over
- * linked list. This function returns the pfn of the first found movable
- * page if it's found, otherwise 0.
+ * Scan pfn range [start,end) to find movable/migratable pages (LRU pages,
+ * non-lru movable pages and hugepages). We scan pfn because it's much
+ * easier than scanning over linked list. This function returns the pfn
+ * of the first found movable page if it's found, otherwise 0.
  */
 static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 {
@@ -1436,6 +1442,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 		page = pfn_to_page(pfn);
 		if (PageLRU(page))
 			return pfn;
+		if (__PageMovable(page))
+			return pfn;
 		if (PageHuge(page)) {
 			if (page_huge_active(page))
 				return pfn;
@@ -1487,29 +1495,31 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			if (WARN_ON(PageLRU(page)))
 				isolate_lru_page(page);
 			if (page_mapped(page))
-				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
+				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS, NULL);
 			continue;
 		}
 
 		if (!get_page_unless_zero(page))
 			continue;
 		/*
-		 * We can skip free pages. And we can only deal with pages on
-		 * LRU.
+		 * We can skip free pages. And we can deal with pages on
+		 * LRU and non-lru movable pages.
 		 */
-		ret = isolate_lru_page(page);
+		if (PageLRU(page))
+			ret = isolate_lru_page(page);
+		else
+			ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
 		if (!ret) { /* Success */
 			put_page(page);
 			list_add_tail(&page->lru, &source);
 			move_pages--;
-			inc_zone_page_state(page, NR_ISOLATED_ANON +
-					    page_is_file_cache(page));
-
+			if (!__PageMovable(page))
+				inc_zone_page_state(page, NR_ISOLATED_ANON +
+					page_is_file_cache(page));
 		} else {
 #ifdef CONFIG_DEBUG_VM
-			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
-			       pfn);
-			dump_page(page, "failed to remove from LRU");
+			pr_alert("failed to isolate pfn %lx\n", pfn);
+			dump_page(page, "isolation failed");
 #endif
 			put_page(page);
 			/* Because we don't have big zone->lock. we should
@@ -1862,8 +1872,10 @@ repeat:
 	zone_pcp_update(zone);
 
 	node_states_clear_node(node, &arg);
-	if (arg.status_change_nid >= 0)
+	if (arg.status_change_nid >= 0) {
 		kswapd_stop(node);
+		kcompactd_stop(node);
+	}
 
 	vm_total_pages = nr_free_pagecache_pages();
 	writeback_set_ratelimit();
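
Note on the callback signature change: with generic_online_page() now returning int, a registered online_page callback can refuse to online an individual page, and online_pages_range() only counts pages for which the callback returned 0. The sketch below is illustrative and not part of this patch; it assumes the set_online_page_callback()/restore_online_page_callback() registration helpers mentioned in the file's header comment and the int-returning online_page_callback_t introduced above, and my_defer_page() is a hypothetical driver policy hook.

/*
 * Illustrative sketch only -- not from this patch.  A driver-side
 * online_page callback built on the int-returning online_page_callback_t.
 * my_defer_page() is a hypothetical policy helper; the __online_page_*()
 * calls are the same helpers generic_online_page() uses above.
 */
#include <linux/module.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>

/* Hypothetical policy: decide whether onlining of this page is deferred. */
static bool my_defer_page(struct page *page)
{
	return false;	/* placeholder: online every page immediately */
}

static int my_online_page(struct page *page)
{
	/* A non-zero return tells online_pages_range() not to count the page. */
	if (my_defer_page(page))
		return -EBUSY;

	__online_page_set_limits(page);
	__online_page_increment_counters(page);
	__online_page_free(page);
	return 0;
}

static int __init my_hotplug_init(void)
{
	return set_online_page_callback(&my_online_page);
}

static void __exit my_hotplug_exit(void)
{
	restore_online_page_callback(&my_online_page);
}

module_init(my_hotplug_init);
module_exit(my_hotplug_exit);
MODULE_LICENSE("GPL");

Whichever callback is registered, the kswapd_run()/kcompactd_run() pairing added to online_pages() and the matching kswapd_stop()/kcompactd_stop() in the offline path behave the same: the per-node daemons are started only when pages were actually onlined and stopped when the node's state change says they are no longer needed.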
