Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  45
1 file changed, 35 insertions(+), 10 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index dba02dec7195..b4c33357f96e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -60,9 +60,9 @@ static void map_pages(struct list_head *list)
struct page *page;
list_for_each_entry(page, list, lru) {
+ kasan_alloc_pages(page, 0);
arch_alloc_page(page, 0);
kernel_map_pages(page, 1, 1);
- kasan_alloc_pages(page, 0);
}
}
@@ -625,21 +625,46 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}
-/* Similar to reclaim, but different enough that they don't share logic */
-static bool too_many_isolated(struct zone *zone)
+static bool __too_many_isolated(struct zone *zone, int safe)
{
unsigned long active, inactive, isolated;
- inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
- zone_page_state(zone, NR_INACTIVE_ANON);
- active = zone_page_state(zone, NR_ACTIVE_FILE) +
- zone_page_state(zone, NR_ACTIVE_ANON);
- isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
- zone_page_state(zone, NR_ISOLATED_ANON);
+ if (safe) {
+ inactive = zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
+ zone_page_state_snapshot(zone, NR_INACTIVE_ANON);
+ active = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
+ zone_page_state_snapshot(zone, NR_ACTIVE_ANON);
+ isolated = zone_page_state_snapshot(zone, NR_ISOLATED_FILE) +
+ zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
+ } else {
+ inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+ active = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_ACTIVE_ANON);
+ isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
+ zone_page_state(zone, NR_ISOLATED_ANON);
+ }
return isolated > (inactive + active) / 2;
}
+/* Similar to reclaim, but different enough that they don't share logic */
+static bool too_many_isolated(struct compact_control *cc)
+{
+ /*
+ * __too_many_isolated(safe=0) is fast but inaccurate, because it
+ * doesn't account for the vm_stat_diff[] counters. So if it looks
+ * like too_many_isolated() is about to return true, fall back to the
+ * slower, more accurate zone_page_state_snapshot().
+ */
+ if (unlikely(__too_many_isolated(cc->zone, 0))) {
+ if (cc->mode != MIGRATE_ASYNC)
+ return __too_many_isolated(cc->zone, 1);
+ }
+
+ return false;
+}
+
/**
* isolate_migratepages_block() - isolate all migrate-able pages within
* a single pageblock
@@ -676,7 +701,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* list by either parallel reclaimers or compaction. If there are,
* delay for some time until fewer pages are isolated
*/
- while (unlikely(too_many_isolated(zone))) {
+ while (unlikely(too_many_isolated(cc))) {
/* async migration should just abort */
if (cc->mode == MIGRATE_ASYNC)
return 0;
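
A minimal userspace sketch of the fast-check/snapshot-fallback pattern the patch introduces: a cheap read of a batched global counter, and a slower read that folds the per-CPU deltas back in, only taken when the cheap check trips and the caller can afford it. Names such as NCPUS, global_isolated, cpu_diff, isolated_fast and isolated_snapshot are illustrative stand-ins, not kernel symbols; in the kernel the accurate read is zone_page_state_snapshot(), which adds the per-CPU vm_stat_diff[] deltas to the zone counter.

/*
 * Standalone sketch (not kernel code) of the pattern used by the patch:
 * a fast but inaccurate counter read, plus a slower "snapshot" read that
 * folds per-CPU diffs back in, taken only when the fast check trips.
 */
#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

static long global_isolated;        /* coarse, batched counter */
static long cpu_diff[NCPUS];        /* per-CPU deltas not yet folded in */

/* Fast but inaccurate: ignores the per-CPU diffs. */
static long isolated_fast(void)
{
	return global_isolated;
}

/* Slow but accurate: sums the per-CPU diffs, like zone_page_state_snapshot(). */
static long isolated_snapshot(void)
{
	long x = global_isolated;

	for (int cpu = 0; cpu < NCPUS; cpu++)
		x += cpu_diff[cpu];
	return x < 0 ? 0 : x;
}

/* Mirrors the patch: only pay for the snapshot when the fast check trips. */
static bool too_many_isolated(long limit, bool can_sleep)
{
	if (isolated_fast() > limit) {
		if (can_sleep)          /* i.e. cc->mode != MIGRATE_ASYNC */
			return isolated_snapshot() > limit;
	}
	return false;
}

int main(void)
{
	global_isolated = 100;
	cpu_diff[0] = -60;      /* pages already put back, not yet folded in */

	/* Fast check trips, but the snapshot shows we are under the limit. */
	printf("%d\n", too_many_isolated(50, true));    /* prints 0 */
	return 0;
}

The usage in main() shows why the fallback matters: a stale batched counter can make the fast check fire even though the accurate total is below the limit, so a synchronous caller avoids a needless throttle by paying for the snapshot, while an async caller simply skips the expensive read.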