-rw-r--r--  drivers/staging/android/lowmemorykiller.c   89
-rw-r--r--  include/linux/mmzone.h                        8
-rw-r--r--  mm/page_alloc.c                               5
3 files changed, 87 insertions(+), 15 deletions(-)
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index b0c2b3eb45be..14bdf880e551 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -107,16 +107,47 @@ static int test_task_flag(struct task_struct *p, int flag)
static DEFINE_MUTEX(scan_mutex);
+int can_use_cma_pages(gfp_t gfp_mask)
+{
+ int can_use = 0;
+ int mtype = gfpflags_to_migratetype(gfp_mask);
+ int i = 0;
+ int *mtype_fallbacks = get_migratetype_fallbacks(mtype);
+
+ if (is_migrate_cma(mtype)) {
+ can_use = 1;
+ } else {
+ for (i = 0;; i++) {
+ int fallbacktype = mtype_fallbacks[i];
+
+ if (is_migrate_cma(fallbacktype)) {
+ can_use = 1;
+ break;
+ }
+
+ if (fallbacktype == MIGRATE_TYPES)
+ break;
+ }
+ }
+ return can_use;
+}
+
void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
- int *other_free, int *other_file)
+ int *other_free, int *other_file,
+ int use_cma_pages)
{
struct zone *zone;
struct zoneref *zoneref;
int zone_idx;
for_each_zone_zonelist(zone, zoneref, zonelist, MAX_NR_ZONES) {
- if ((zone_idx = zonelist_zone_idx(zoneref)) == ZONE_MOVABLE)
+ zone_idx = zonelist_zone_idx(zoneref);
+ if (zone_idx == ZONE_MOVABLE) {
+ if (!use_cma_pages)
+ *other_free -=
+ zone_page_state(zone, NR_FREE_CMA_PAGES);
continue;
+ }
if (zone_idx > classzone_idx) {
if (other_free != NULL)
@@ -127,12 +158,22 @@ void tune_lmk_zone_param(struct zonelist *zonelist, int classzone_idx,
NR_FILE_PAGES)
- zone_page_state(zone, NR_SHMEM);
} else if (zone_idx < classzone_idx) {
- if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0))
+ if (zone_watermark_ok(zone, 0, 0, classzone_idx, 0)) {
+ if (!use_cma_pages) {
+ *other_free -= min(
+ zone->lowmem_reserve[classzone_idx] +
+ zone_page_state(
+ zone, NR_FREE_CMA_PAGES),
+ zone_page_state(
+ zone, NR_FREE_PAGES));
+ } else {
+ *other_free -=
+ zone->lowmem_reserve[classzone_idx];
+ }
+ } else {
*other_free -=
- zone->lowmem_reserve[classzone_idx];
- else
- *other_free -=
- zone_page_state(zone, NR_FREE_PAGES);
+ zone_page_state(zone, NR_FREE_PAGES);
+ }
}
}
}
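
A minimal sketch, not part of the patch, of the accounting rule the hunks above apply in tune_lmk_zone_param(): when the allocation that triggered the shrink cannot use CMA pageblocks, free CMA pages are excluded from the headroom the low memory killer sees. The helper name lmk_zone_free_for_alloc is hypothetical, and the lowmem_reserve and watermark handling of the real code is omitted.

/*
 * Illustrative only: simplified version of the adjustment made by
 * tune_lmk_zone_param()/tune_lmk_param(), without the lowmem_reserve
 * and zone_watermark_ok() details of the actual patch.
 */
static unsigned long lmk_zone_free_for_alloc(struct zone *zone,
					     int use_cma_pages)
{
	unsigned long free = zone_page_state(zone, NR_FREE_PAGES);

	/* An allocation that cannot fall back to MIGRATE_CMA gets no
	 * benefit from free CMA pages, so do not count them. */
	if (!use_cma_pages)
		free -= zone_page_state(zone, NR_FREE_CMA_PAGES);

	return free;
}
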
@@ -144,12 +185,14 @@ void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
struct zonelist *zonelist;
enum zone_type high_zoneidx, classzone_idx;
unsigned long balance_gap;
+ int use_cma_pages;
gfp_mask = sc->gfp_mask;
zonelist = node_zonelist(0, gfp_mask);
high_zoneidx = gfp_zone(gfp_mask);
first_zones_zonelist(zonelist, high_zoneidx, NULL, &preferred_zone);
classzone_idx = zone_idx(preferred_zone);
+ use_cma_pages = can_use_cma_pages(gfp_mask);
balance_gap = min(low_wmark_pages(preferred_zone),
(preferred_zone->present_pages +
@@ -161,22 +204,38 @@ void tune_lmk_param(int *other_free, int *other_file, struct shrink_control *sc)
balance_gap, 0, 0))) {
if (lmk_fast_run)
tune_lmk_zone_param(zonelist, classzone_idx, other_free,
- other_file);
+ other_file, use_cma_pages);
else
tune_lmk_zone_param(zonelist, classzone_idx, other_free,
- NULL);
-
- if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0))
- *other_free -=
- preferred_zone->lowmem_reserve[_ZONE];
- else
+ NULL, use_cma_pages);
+
+ if (zone_watermark_ok(preferred_zone, 0, 0, _ZONE, 0)) {
+ if (!use_cma_pages) {
+ *other_free -= min(
+ preferred_zone->lowmem_reserve[_ZONE]
+ + zone_page_state(
+ preferred_zone, NR_FREE_CMA_PAGES),
+ zone_page_state(
+ preferred_zone, NR_FREE_PAGES));
+ } else {
+ *other_free -=
+ preferred_zone->lowmem_reserve[_ZONE];
+ }
+ } else {
*other_free -= zone_page_state(preferred_zone,
NR_FREE_PAGES);
+ }
+
lowmem_print(4, "lowmem_shrink of kswapd tunning for highmem "
"ofree %d, %d\n", *other_free, *other_file);
} else {
tune_lmk_zone_param(zonelist, classzone_idx, other_free,
- other_file);
+ other_file, use_cma_pages);
+
+ if (!use_cma_pages) {
+ *other_free -=
+ zone_page_state(preferred_zone, NR_FREE_CMA_PAGES);
+ }
lowmem_print(4, "lowmem_shrink tunning for others ofree %d, "
"%d\n", *other_free, *other_file);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 423d214f708b..8a5894308eb2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,6 +63,14 @@ enum {
MIGRATE_TYPES
};
+/*
+ * Returns the list of migrate types to which an allocation falls
+ * back when the free list for the requested migrate type mtype
+ * is depleted.
+ * The end of the list is delimited by the type MIGRATE_TYPES.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
+
#ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
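
A minimal usage sketch for the helper declared above, assuming only the contract stated in its comment (the returned array is terminated by MIGRATE_TYPES). The caller name gfp_can_fall_back_to_cma is hypothetical and not part of the patch; it assumes <linux/gfp.h> and <linux/mmzone.h> are already included.

static bool gfp_can_fall_back_to_cma(gfp_t gfp_mask)
{
	int mtype = gfpflags_to_migratetype(gfp_mask);
	int *fallbacks = get_migratetype_fallbacks(mtype);
	int i;

	/* A CMA allocation can use CMA pageblocks directly. */
	if (is_migrate_cma(mtype))
		return true;

	/* Walk the fallback row until the MIGRATE_TYPES sentinel. */
	for (i = 0; fallbacks[i] != MIGRATE_TYPES; i++)
		if (is_migrate_cma(fallbacks[i]))
			return true;

	return false;
}
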
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d63689fda9b4..1eafd75f402e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1466,6 +1466,11 @@ static int fallbacks[MIGRATE_TYPES][4] = {
#endif
};
+int *get_migratetype_fallbacks(int mtype)
+{
+ return fallbacks[mtype];
+}
+
#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
unsigned int order)
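
For illustration only, a sketch of the kind of rows get_migratetype_fallbacks() exposes. The real contents are the fallbacks table defined just above this hunk and vary with CONFIG_CMA and the kernel version; the one property the new callers rely on is that each row ends with MIGRATE_TYPES.

static int example_fallbacks[MIGRATE_TYPES][4] = {
	/* Hypothetical rows, not the real table. */
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
};
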