summaryrefslogtreecommitdiff
path: root/mm/compaction.c
diff options
context:
space:
mode:
authorVinayak Menon <vinmenon@codeaurora.org>2015-01-17 21:51:48 +0530
committerDavid Keitel <dkeitel@codeaurora.org>2016-03-22 11:03:54 -0700
commitd487a9f1f7291870ebeac3bc243f9fe65dcdfefb (patch)
tree3ae7492534ee2e17b44990a271f3d5d85f2b99eb /mm/compaction.c
parent910a8bd10838baff2a8894a4e8686a9d3eba5a35 (diff)
mm: compaction: fix the page state calculation in too_many_isolated
Commit "mm: vmscan: fix the page state calculation in too_many_isolated" fixed an issue where a number of tasks were blocked in reclaim path for seconds, because of vmstat_diff not being synced in time. A similar problem can happen in isolate_migratepages_block, where similar calculation is performed. This patch fixes that. Change-Id: Ie74f108ef770da688017b515fe37faea6f384589 Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c | 43
1 file changed, 34 insertions, 9 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index ad6ee6da933d..e0d4a58bcee4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -627,21 +627,46 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc)
mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}
-/* Similar to reclaim, but different enough that they don't share logic */
-static bool too_many_isolated(struct zone *zone)
+static bool __too_many_isolated(struct zone *zone, int safe)
{
unsigned long active, inactive, isolated;
- inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
- zone_page_state(zone, NR_INACTIVE_ANON);
- active = zone_page_state(zone, NR_ACTIVE_FILE) +
- zone_page_state(zone, NR_ACTIVE_ANON);
- isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
- zone_page_state(zone, NR_ISOLATED_ANON);
+ if (safe) {
+ inactive = zone_page_state_snapshot(zone, NR_INACTIVE_FILE) +
+ zone_page_state_snapshot(zone, NR_INACTIVE_ANON);
+ active = zone_page_state_snapshot(zone, NR_ACTIVE_FILE) +
+ zone_page_state_snapshot(zone, NR_ACTIVE_ANON);
+ isolated = zone_page_state_snapshot(zone, NR_ISOLATED_FILE) +
+ zone_page_state_snapshot(zone, NR_ISOLATED_ANON);
+ } else {
+ inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+ active = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_ACTIVE_ANON);
+ isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
+ zone_page_state(zone, NR_ISOLATED_ANON);
+ }
return isolated > (inactive + active) / 2;
}
+/* Similar to reclaim, but different enough that they don't share logic */
+static bool too_many_isolated(struct compact_control *cc)
+{
+ /*
+ * __too_many_isolated(safe=0) is fast but inaccurate, because it
+ * doesn't account for the vm_stat_diff[] counters. So if it looks
+ * like too_many_isolated() is about to return true, fall back to the
+ * slower, more accurate zone_page_state_snapshot().
+ */
+ if (unlikely(__too_many_isolated(cc->zone, 0))) {
+ if (cc->mode != MIGRATE_ASYNC)
+ return __too_many_isolated(cc->zone, 1);
+ }
+
+ return false;
+}
+
/**
* isolate_migratepages_block() - isolate all migrate-able pages within
* a single pageblock
@@ -678,7 +703,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
* list by either parallel reclaimers or compaction. If there are,
* delay for some time until fewer pages are isolated
*/
- while (unlikely(too_many_isolated(zone))) {
+ while (unlikely(too_many_isolated(cc))) {
/* async migration should just abort */
if (cc->mode == MIGRATE_ASYNC)
return 0;