-rw-r--r--   include/linux/mmzone.h    6
-rw-r--r--   mm/page_alloc.c          94
-rw-r--r--   mm/vmstat.c               2
3 files changed, 66 insertions, 36 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dfb8a6159997..ad4c3f186f61 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -39,8 +39,6 @@ enum {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
- MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
- MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -57,6 +55,8 @@ enum {
*/
MIGRATE_CMA,
#endif
+ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+ MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
@@ -74,9 +74,11 @@ extern int *get_migratetype_fallbacks(int mtype);
#ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define get_cma_migrate_type() MIGRATE_CMA
#else
# define is_cma_pageblock(page) false
# define is_migrate_cma(migratetype) false
+# define get_cma_migrate_type() MIGRATE_MOVABLE
#endif
#define for_each_migratetype_order(order, type) \
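With MIGRATE_CMA moved below MIGRATE_PCPTYPES, CMA pages now get their own per-CPU (pcp) free list, and the new get_cma_migrate_type() macro lets callers name that list without an #ifdef: it resolves to MIGRATE_CMA when CONFIG_CMA is enabled and degrades to MIGRATE_MOVABLE otherwise. A minimal sketch of how a call site might use it (movable_alloc_migratetype() is hypothetical, not part of the patch):

    /*
     * Hypothetical helper: pick the migrate type for a movable
     * allocation that is allowed to use CMA memory.  With
     * CONFIG_CMA=y this returns MIGRATE_CMA; otherwise it quietly
     * falls back to MIGRATE_MOVABLE, so the caller needs no #ifdef.
     */
    static inline int movable_alloc_migratetype(gfp_t gfp_flags)
    {
            if (gfp_flags & __GFP_CMA)
                    return get_cma_migrate_type();
            return MIGRATE_MOVABLE;
    }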
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2695ca00653e..f54a84fb5e6e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1822,22 +1822,13 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
return page;
}
-static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
- int migratetype, gfp_t gfp_flags)
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
struct page *page = 0;
-#ifdef CONFIG_CMA
- if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
- page = __rmqueue_cma_fallback(zone, order);
- else
-#endif
- page = __rmqueue_smallest(zone, order, migratetype);
-
- if (unlikely(!page)) {
- page = __rmqueue_fallback(zone, order, migratetype);
- }
-
- trace_mm_page_alloc_zone_locked(page, order, migratetype);
+ if (IS_ENABLED(CONFIG_CMA))
+ if (!zone->cma_alloc)
+ page = __rmqueue_cma_fallback(zone, order);
+ trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
return page;
}
@@ -1848,15 +1839,21 @@ static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
*/
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
- int migratetype, bool cold, int cma)
+ int migratetype, bool cold)
{
int i;
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page;
- if (cma)
- page = __rmqueue_cma(zone, order, migratetype, 0);
+
+ /*
+ * If migrate type CMA is being requested, only try to
+ * satisfy the request with CMA pages, to increase
+ * CMA utilization.
+ */
+ if (is_migrate_cma(migratetype))
+ page = __rmqueue_cma(zone, order);
else
page = __rmqueue(zone, order, migratetype, 0);
if (unlikely(page == NULL))
@@ -1885,6 +1882,28 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return i;
}
+/*
+ * Return the pcp list that corresponds to the migrate type, refilling it
+ * from the buddy allocator if it is empty.
+ * If the list cannot be populated, return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+ unsigned int order, struct per_cpu_pages *pcp,
+ int migratetype, int cold)
+{
+ struct list_head *list = &pcp->lists[migratetype];
+
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, order,
+ pcp->batch, list,
+ migratetype, cold);
+
+ if (list_empty(list))
+ list = NULL;
+ }
+ return list;
+}
+
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -2085,8 +2104,7 @@ void free_hot_cold_page(struct page *page, bool cold)
* excessively into the page allocator
*/
if (migratetype >= MIGRATE_PCPTYPES) {
- if (unlikely(is_migrate_isolate(migratetype)) ||
- is_migrate_cma(migratetype)) {
+ if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(zone, page, pfn, 0, migratetype);
goto out;
}
@@ -2235,22 +2253,32 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
gfp_t gfp_flags, int alloc_flags, int migratetype)
{
unsigned long flags;
- struct page *page;
+ struct page *page = NULL;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- struct list_head *list;
+ struct list_head *list = NULL;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, list,
- migratetype, cold,
- gfp_flags & __GFP_CMA);
- if (unlikely(list_empty(list)))
+
+ /* First try to get CMA pages */
+ if (migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA) {
+ list = get_populated_pcp_list(zone, 0, pcp,
+ get_cma_migrate_type(), cold);
+ }
+
+ if (list == NULL) {
+ /*
+ * Either CMA is not suitable or there are no free CMA
+ * pages.
+ */
+ list = get_populated_pcp_list(zone, 0, pcp,
+ migratetype, cold);
+ if (unlikely(list == NULL) ||
+ unlikely(list_empty(list)))
goto failed;
}
@@ -2283,13 +2311,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
- if (!page) {
- if (gfp_flags & __GFP_CMA)
- page = __rmqueue_cma(zone, order, migratetype, gfp_flags);
- else
+ if (!page && migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA)
+ page = __rmqueue_cma(zone, order);
+
+ if (!page)
page = __rmqueue(zone, order, migratetype, gfp_flags);
- }
spin_unlock(&zone->lock);
if (!page)
goto failed;
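Taken together, the page_alloc.c changes make the order-0 fast path prefer the per-CPU CMA list for movable allocations that pass __GFP_CMA, and fall back to the list for the requested migrate type only if the CMA list cannot be populated. A simplified sketch of that control flow, using the helper introduced above (sketch_order0_alloc() itself is hypothetical and omits the statistics and zone bookkeeping the real code does):

    /*
     * Hypothetical summary of the order-0 fast path after this patch.
     * A movable allocation that allows CMA first tries the per-CPU
     * CMA list; only if that list cannot be refilled does it fall
     * back to the list for the requested migrate type.
     */
    static struct page *sketch_order0_alloc(struct zone *zone,
                    struct per_cpu_pages *pcp, gfp_t gfp_flags,
                    int migratetype, bool cold)
    {
            struct list_head *list = NULL;
            struct page *page;

            if (migratetype == MIGRATE_MOVABLE && (gfp_flags & __GFP_CMA))
                    list = get_populated_pcp_list(zone, 0, pcp,
                                    get_cma_migrate_type(), cold);

            if (list == NULL)
                    list = get_populated_pcp_list(zone, 0, pcp,
                                    migratetype, cold);
            if (list == NULL)
                    return NULL;    /* caller falls back to the slow path */

            page = cold ? list_last_entry(list, struct page, lru)
                        : list_first_entry(list, struct page, lru);
            list_del(&page->lru);
            pcp->count--;
            return page;
    }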
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c0e67319d3e8..ca75eeecbad1 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -927,10 +927,10 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
"Unmovable",
"Movable",
"Reclaimable",
- "HighAtomic",
#ifdef CONFIG_CMA
"CMA",
#endif
+ "HighAtomic",
#ifdef CONFIG_MEMORY_ISOLATION
"Isolate",
#endif
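The vmstat.c hunk only reorders the strings so that migratetype_names[] stays index-for-index in sync with the reordered enum in mmzone.h. Nothing in the patch enforces that correspondence; it matters because vmstat prints the per-migrate-type statistics by index, roughly as in this illustrative loop (not taken from the patch):

    /*
     * Illustration only: /proc/pagetypeinfo output walks the migrate
     * types by index, so migratetype_names[mtype] must line up with
     * the enum or the printed labels are shifted by one entry.
     */
    static void sketch_print_migratetype_headers(struct seq_file *m)
    {
            int mtype;

            for (mtype = 0; mtype < MIGRATE_TYPES; mtype++)
                    seq_printf(m, "%12s ", migratetype_names[mtype]);
            seq_putc(m, '\n');
    }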