Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 73
 1 file changed, 65 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b5368a3e6120..f39656e80469 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1655,7 +1655,8 @@ static void change_pageblock_range(struct page *pageblock_page,
  * is worse than movable allocations stealing from unmovable and reclaimable
  * pageblocks.
  */
-static bool can_steal_fallback(unsigned int order, int start_mt)
+static bool can_steal_fallback(unsigned int current_order, unsigned int start_order,
+			       int start_mt, int fallback_mt)
 {
 	/*
 	 * Leaving this order check is intended, although there is
@@ -1664,12 +1665,17 @@ static bool can_steal_fallback(unsigned int order, int start_mt)
 	 * but, below check doesn't guarantee it and that is just heuristic
 	 * so could be changed anytime.
 	 */
-	if (order >= pageblock_order)
+	if (current_order >= pageblock_order)
 		return true;
 
-	if (order >= pageblock_order / 2 ||
+	/* don't let unmovable allocations cause migrations simply because of free pages */
+	if ((start_mt != MIGRATE_UNMOVABLE && current_order >= pageblock_order / 2) ||
+	    /* only steal reclaimable page blocks for unmovable allocations */
+	    (start_mt == MIGRATE_UNMOVABLE && fallback_mt != MIGRATE_MOVABLE && current_order >= pageblock_order / 2) ||
+	    /* reclaimable can steal aggressively */
 	    start_mt == MIGRATE_RECLAIMABLE ||
-	    start_mt == MIGRATE_UNMOVABLE ||
+	    /* allow unmovable allocs up to 64K without migrating blocks */
+	    (start_mt == MIGRATE_UNMOVABLE && start_order >= 5) ||
 	    page_group_by_mobility_disabled)
 		return true;
 
@@ -1709,8 +1715,9 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
  * we can steal other freepages all together. This would help to reduce
  * fragmentation due to mixed migratetype pages in one pageblock.
  */
-int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool only_stealable, bool *can_steal)
+int find_suitable_fallback(struct free_area *area, unsigned int current_order,
+			int migratetype, bool only_stealable,
+			int start_order, bool *can_steal)
 {
 	int i;
 	int fallback_mt;
@@ -1727,7 +1734,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (list_empty(&area->free_list[fallback_mt]))
 			continue;
 
-		if (can_steal_fallback(order, migratetype))
+		if (can_steal_fallback(current_order, start_order, migratetype, fallback_mt))
 			*can_steal = true;
 
 		if (!only_stealable)
@@ -1863,7 +1870,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 				--current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, false, &can_steal);
+				start_migratetype, false, order, &can_steal);
 		if (fallback_mt == -1)
 			continue;
 
@@ -3735,6 +3742,56 @@ static inline void show_node(struct zone *zone)
 		printk("Node %d ", zone_to_nid(zone));
 }
 
+long si_mem_available(void)
+{
+	long available;
+	unsigned long pagecache;
+	unsigned long wmark_low = 0;
+	unsigned long pages[NR_LRU_LISTS];
+	struct zone *zone;
+	int lru;
+
+	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
+		pages[lru] = global_page_state(NR_LRU_BASE + lru);
+
+	for_each_zone(zone)
+		wmark_low += zone->watermark[WMARK_LOW];
+
+	/*
+	 * Estimate the amount of memory available for userspace allocations,
+	 * without causing swapping.
+	 */
+	available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+
+	/*
+	 * Not all the page cache can be freed, otherwise the system will
+	 * start swapping. Assume at least half of the page cache, or the
+	 * low watermark worth of cache, needs to stay.
+	 */
+	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
+	pagecache -= min(pagecache / 2, wmark_low);
+	available += pagecache;
+
+	/*
+	 * Part of the reclaimable slab consists of items that are in use,
+	 * and cannot be freed. Cap this estimate at the low watermark.
+	 */
+	available += global_page_state(NR_SLAB_RECLAIMABLE) -
+		     min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+
+	/*
+	 * Part of the kernel memory, which can be released under memory
+	 * pressure.
+	 */
+	available += global_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
+		     PAGE_SHIFT;
+
+	if (available < 0)
+		available = 0;
+	return available;
+}
+EXPORT_SYMBOL_GPL(si_mem_available);
+
 void si_meminfo(struct sysinfo *val)
 {
 	val->totalram = totalram_pages;
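
The behavioral change in can_steal_fallback() is easier to see as a decision table. Below is a minimal, hypothetical userspace model of the patched predicate, for spot-checking only: pageblock_order is assumed to be 9 (the usual value with 4K pages), the MIGRATE_* constants are stubbed with their kernel ordering, and page_group_by_mobility_disabled is assumed to be off.

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the kernel's migratetype constants, same ordering */
enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE };

static const unsigned int pageblock_order = 9;		/* assumption */
static const int page_group_by_mobility_disabled = 0;	/* assumption */

/* userspace mirror of the patched predicate above */
static bool can_steal_fallback(unsigned int current_order, unsigned int start_order,
			       int start_mt, int fallback_mt)
{
	if (current_order >= pageblock_order)
		return true;

	if ((start_mt != MIGRATE_UNMOVABLE && current_order >= pageblock_order / 2) ||
	    (start_mt == MIGRATE_UNMOVABLE && fallback_mt != MIGRATE_MOVABLE &&
	     current_order >= pageblock_order / 2) ||
	    start_mt == MIGRATE_RECLAIMABLE ||
	    (start_mt == MIGRATE_UNMOVABLE && start_order >= 5) ||
	    page_group_by_mobility_disabled)
		return true;

	return false;
}

int main(void)
{
	/* movable alloc, half a pageblock free: steals, as before -> 1 */
	printf("%d\n", can_steal_fallback(4, 0, MIGRATE_MOVABLE, MIGRATE_UNMOVABLE));
	/* small unmovable alloc falling back to a movable block: no longer steals -> 0 */
	printf("%d\n", can_steal_fallback(4, 0, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE));
	/* same alloc falling back to a reclaimable block: still steals -> 1 */
	printf("%d\n", can_steal_fallback(4, 0, MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE));
	/* large (start_order >= 5) unmovable alloc: steals even movable blocks -> 1 */
	printf("%d\n", can_steal_fallback(4, 5, MIGRATE_UNMOVABLE, MIGRATE_MOVABLE));
	return 0;
}

The second case is the point of the change: a small unmovable request no longer claims a movable pageblock merely because half of it happens to be free.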
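
Taken together, si_mem_available() estimates free pages minus reserved pages, plus the file-backed page cache and the reclaimable slab (each discounted by half, capped at the low watermark, as the portion assumed unfreeable), plus indirectly reclaimable kernel memory. In mainline kernels this estimate backs the MemAvailable field of /proc/meminfo; assuming this tree wires it up the same way, a minimal userspace consumer is:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		unsigned long kb;

		/* MemAvailable reports the kernel's si_mem_available() in kB */
		if (sscanf(line, "MemAvailable: %lu kB", &kb) == 1)
			printf("available: %lu kB\n", kb);
	}
	fclose(f);
	return 0;
}

Unlike the old MemFree-plus-Cached heuristic, this figure already accounts for watermarks and unreclaimable cache, so it can be consumed directly when deciding whether a workload fits in memory.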