summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--mm/Kconfig13
-rw-r--r--mm/vmscan.c30
2 files changed, 41 insertions, 2 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index 3ca6a255de03..7b7eae150c4e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -628,6 +628,19 @@ config BALANCE_ANON_FILE_RECLAIM
Swapping anonymous pages out to memory can be efficient enough to justify
treating anonymous and file backed pages equally.
+config KSWAPD_CPU_AFFINITY_MASK
+ string "kswapd cpu affinity mask"
+ depends on SMP
+ help
+ Set the cpu affinity for the kswapd task.
+ There can be power benefits on certain targets when limiting kswapd
+ to run only on certain cores.
+ The cpu affinity bitmask is represented by a hex string where commas
+ group hex digits into chunks. Each chunk defines exactly 32 bits of
+ the resultant bitmask.
+ For example, to limit kswapd to the first 4 cores, use the following:
+ CONFIG_KSWAPD_CPU_AFFINITY_MASK="f"
+
# For architectures that support deferred memory initialisation
config ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
bool
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 31713a87fa7b..194c534da9ba 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -147,6 +147,12 @@ int vm_swappiness = 60;
*/
unsigned long vm_total_pages;
+#ifdef CONFIG_KSWAPD_CPU_AFFINITY_MASK
+char *kswapd_cpu_mask = CONFIG_KSWAPD_CPU_AFFINITY_MASK;
+#else
+char *kswapd_cpu_mask = NULL;
+#endif
+
static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);
@@ -3485,7 +3491,7 @@ static int kswapd(void *p)
lockdep_set_current_reclaim_state(GFP_KERNEL);
- if (!cpumask_empty(cpumask))
+ if (kswapd_cpu_mask == NULL && !cpumask_empty(cpumask))
set_cpus_allowed_ptr(tsk, cpumask);
current->reclaim_state = &reclaim_state;
@@ -3655,6 +3661,22 @@ static int cpu_callback(struct notifier_block *nfb, unsigned long action,
return NOTIFY_OK;
}
+static int set_kswapd_cpu_mask(pg_data_t *pgdat)
+{
+ int ret = 0;
+ cpumask_t tmask;
+
+ if (!kswapd_cpu_mask)
+ return 0;
+
+ cpumask_clear(&tmask);
+ ret = cpumask_parse(kswapd_cpu_mask, &tmask);
+ if (ret)
+ return ret;
+
+ return set_cpus_allowed_ptr(pgdat->kswapd, &tmask);
+}
+
/*
* This kswapd start function will be called by init and node-hot-add.
* On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added.
@@ -3674,6 +3696,9 @@ int kswapd_run(int nid)
pr_err("Failed to start kswapd on node %d\n", nid);
ret = PTR_ERR(pgdat->kswapd);
pgdat->kswapd = NULL;
+ } else if (kswapd_cpu_mask) {
+ if (set_kswapd_cpu_mask(pgdat))
+ pr_warn("error setting kswapd cpu affinity mask\n");
}
return ret;
}
@@ -3699,7 +3724,8 @@ static int __init kswapd_init(void)
swap_setup();
for_each_node_state(nid, N_MEMORY)
kswapd_run(nid);
- hotcpu_notifier(cpu_callback, 0);
+ if (kswapd_cpu_mask == NULL)
+ hotcpu_notifier(cpu_callback, 0);
return 0;
}