Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  128
1 file changed, 107 insertions, 21 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1874f2902237..d41925306d75 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -493,7 +493,8 @@ void prep_compound_page(struct page *page, unsigned int order)
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
-bool _debug_pagealloc_enabled __read_mostly;
+bool _debug_pagealloc_enabled __read_mostly
+ = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
bool _debug_guardpage_enabled __read_mostly;
static int __init early_debug_pagealloc(char *buf)
@@ -504,6 +505,9 @@ static int __init early_debug_pagealloc(char *buf)
if (strcmp(buf, "on") == 0)
_debug_pagealloc_enabled = true;
+ if (strcmp(buf, "off") == 0)
+ _debug_pagealloc_enabled = false;
+
return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);
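
Together with the hunk above, this lets a kernel built with CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y start with debug_pagealloc on by default while still honouring debug_pagealloc=off on the command line. A minimal userspace sketch of the resulting parsing logic, with the Kconfig default reduced to a plain define (the names below are illustrative stand-ins, not kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y in the kernel config. */
#define DEBUG_PAGEALLOC_ENABLE_DEFAULT 1

/* Compile-time default, overridable from the "command line" below. */
static bool debug_pagealloc_enabled = DEBUG_PAGEALLOC_ENABLE_DEFAULT;

/* Models early_debug_pagealloc(): only "on" and "off" are recognised. */
static void parse_debug_pagealloc(const char *buf)
{
	if (strcmp(buf, "on") == 0)
		debug_pagealloc_enabled = true;
	if (strcmp(buf, "off") == 0)
		debug_pagealloc_enabled = false;
}

int main(void)
{
	parse_debug_pagealloc("off");	/* e.g. debug_pagealloc=off on the command line */
	printf("debug_pagealloc: %s\n", debug_pagealloc_enabled ? "on" : "off");
	return 0;
}
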
@@ -994,7 +998,6 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
trace_mm_page_free(page, order);
kmemcheck_free_shadow(page, order);
- kasan_free_pages(page, order);
if (PageAnon(page))
page->mapping = NULL;
@@ -1017,6 +1020,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
}
arch_free_page(page, order);
kernel_map_pages(page, 1 << order, 0);
+ kasan_free_pages(page, order);
return true;
}
@@ -1037,8 +1041,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
local_irq_restore(flags);
}
-static void __init __free_pages_boot_core(struct page *page,
- unsigned long pfn, unsigned int order)
+static void __init __free_pages_boot_core(struct page *page, unsigned long pfn, unsigned int order)
{
unsigned int nr_pages = 1 << order;
struct page *p = page;
@@ -1110,7 +1113,7 @@ static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
#endif
-void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+void __free_pages_bootmem(struct page *page, unsigned long pfn,
unsigned int order)
{
if (early_page_uninitialised(pfn))
@@ -1288,6 +1291,11 @@ void __init page_alloc_init_late(void)
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
#ifdef CONFIG_CMA
+bool is_cma_pageblock(struct page *page)
+{
+ return get_pageblock_migratetype(page) == MIGRATE_CMA;
+}
+
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
@@ -1409,9 +1417,9 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
set_page_private(page, 0);
set_page_refcounted(page);
+ kasan_alloc_pages(page, order);
arch_alloc_page(page, order);
kernel_map_pages(page, 1 << order, 1);
- kasan_alloc_pages(page, order);
if (gfp_flags & __GFP_ZERO)
for (i = 0; i < (1 << order); i++)
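
This hunk is the allocation-side counterpart of the free_pages_prepare() change above: kasan_alloc_pages() now runs before arch_alloc_page() and kernel_map_pages(), while kasan_free_pages() runs after them, so the KASAN hooks sit at the outermost position on both paths and the other debug hooks see a page whose shadow state still matches its allocation state. A self-contained sketch of that symmetric ordering (the hooks are stubs; only the call order reflects the patch):

#include <stdio.h>

/* Stubs standing in for the kernel hooks; only the ordering matters here. */
static void kasan_unpoison(void)  { puts("kasan_alloc_pages"); }
static void kasan_poison(void)    { puts("kasan_free_pages"); }
static void arch_alloc(void)      { puts("arch_alloc_page"); }
static void arch_free(void)       { puts("arch_free_page"); }
static void map_pages(int enable) { printf("kernel_map_pages(%d)\n", enable); }

/* Allocation path after the patch: unpoison before any other hook runs. */
static void prep_new_page_order(void)
{
	kasan_unpoison();
	arch_alloc();
	map_pages(1);
}

/* Free path after the patch: poison only after the last hook has run. */
static void free_pages_prepare_order(void)
{
	arch_free();
	map_pages(0);
	kasan_poison();
}

int main(void)
{
	prep_new_page_order();
	free_pages_prepare_order();
	return 0;
}
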
@@ -1484,6 +1492,11 @@ static int fallbacks[MIGRATE_TYPES][4] = {
#endif
};
+int *get_migratetype_fallbacks(int mtype)
+{
+ return fallbacks[mtype];
+}
+
#ifdef CONFIG_CMA
static struct page *__rmqueue_cma_fallback(struct zone *zone,
unsigned int order)
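
get_migratetype_fallbacks() simply exposes a row of the static fallbacks[] table so that code outside page_alloc.c can inspect the fallback order for a migrate type. A hedged sketch of a possible caller; it assumes each row is terminated by MIGRATE_TYPES, as in the table this accessor exports, and the enum values are illustrative stand-ins:

#include <stdio.h>

/* Illustrative stand-ins for the kernel's migrate-type enum. */
enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_TYPES };

/* Model of the fallbacks[] table exported by get_migratetype_fallbacks(). */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
};

static int *get_migratetype_fallbacks(int mtype)
{
	return fallbacks[mtype];
}

int main(void)
{
	/* Walk the fallback order for movable allocations. */
	int *fb = get_migratetype_fallbacks(MIGRATE_MOVABLE);

	for (int i = 0; fb[i] != MIGRATE_TYPES; i++)
		printf("fallback %d: migratetype %d\n", i, fb[i]);
	return 0;
}
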
@@ -1828,17 +1841,30 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
- if (migratetype == MIGRATE_MOVABLE)
- page = __rmqueue_cma_fallback(zone, order);
-
- if (!page)
- page = __rmqueue_fallback(zone, order, migratetype);
+ page = __rmqueue_fallback(zone, order, migratetype);
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+ struct page *page = NULL;
+ if (IS_ENABLED(CONFIG_CMA))
+ if (!zone->cma_alloc)
+ page = __rmqueue_cma_fallback(zone, order);
+ trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
+ return page;
+}
+#else
+static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
+{
+ return NULL;
+}
+#endif
+
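
With the MOVABLE-to-CMA fallback removed from __rmqueue(), CMA pages are now handed out only through __rmqueue_cma(), and only while zone->cma_alloc is clear (alloc_contig_range() sets it in the last hunk of this patch while it carves out a contiguous range). A reduced model of that gate, with the zone trimmed down to the one field the check uses (stand-in names, not the kernel structures):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct page { int dummy; };

/* Reduced zone: only the field the gate cares about. */
struct zone {
	bool cma_alloc;		/* set while alloc_contig_range() is running */
};

static struct page cma_page;

/* Stand-in for __rmqueue_cma_fallback(): pretend a CMA page is always free. */
static struct page *rmqueue_cma_fallback(struct zone *zone, unsigned int order)
{
	return &cma_page;
}

/* Model of __rmqueue_cma(): no CMA pages while a contiguous allocation runs. */
static struct page *rmqueue_cma(struct zone *zone, unsigned int order)
{
	struct page *page = NULL;

	if (!zone->cma_alloc)
		page = rmqueue_cma_fallback(zone, order);
	return page;
}

int main(void)
{
	struct zone z = { .cma_alloc = false };

	printf("cma_alloc=0 -> %p\n", (void *)rmqueue_cma(&z, 0));
	z.cma_alloc = true;	/* alloc_contig_range() in progress */
	printf("cma_alloc=1 -> %p\n", (void *)rmqueue_cma(&z, 0));
	return 0;
}
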
/*
* Obtain a specified number of elements from the buddy allocator, all under
* a single hold of the lock, for efficiency. Add them to the supplied list.
@@ -1852,7 +1878,17 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
- struct page *page = __rmqueue(zone, order, migratetype, 0);
+ struct page *page;
+
+ /*
+ * If migrate type CMA is being requested, only try to
+ * satisfy the request with CMA pages, to increase
+ * CMA utilization.
+ */
+ if (is_migrate_cma(migratetype))
+ page = __rmqueue_cma(zone, order);
+ else
+ page = __rmqueue(zone, order, migratetype, 0);
if (unlikely(page == NULL))
break;
@@ -1879,6 +1915,28 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return i;
}
+/*
+ * Return the pcp list that corresponds to the migrate type if that list isn't
+ * empty, refilling it from the buddy allocator when it is.
+ * If the list is still empty after the refill attempt, return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+ unsigned int order, struct per_cpu_pages *pcp,
+ int migratetype, int cold)
+{
+ struct list_head *list = &pcp->lists[migratetype];
+
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, order,
+ pcp->batch, list,
+ migratetype, cold);
+
+ if (list_empty(list))
+ list = NULL;
+ }
+ return list;
+}
+
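
get_populated_pcp_list() factors out the refill-if-empty pattern that used to sit inline in buffered_rmqueue(): refill the per-cpu list from the buddy lists when it is empty, and return NULL if even the refill produced nothing, so the caller can fall back to another migrate type. The buffered_rmqueue() hunk further down uses it exactly that way, trying the CMA list first for movable __GFP_CMA allocations. A compact model of that two-step lookup (the per-cpu structure and the refill are stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_CMA, NR_LISTS };

/* Reduced per-cpu pages structure: one count per list instead of page lists. */
struct pcp_model {
	int count[NR_LISTS];
};

/* Stand-in refill: pretend the buddy allocator only has CMA pages left. */
static int refill_from_buddy(int migratetype)
{
	return migratetype == MIGRATE_CMA ? 4 : 0;
}

/* Model of get_populated_pcp_list(): refill when empty, NULL when still empty. */
static int *get_populated_list(struct pcp_model *pcp, int migratetype)
{
	if (pcp->count[migratetype] == 0)
		pcp->count[migratetype] = refill_from_buddy(migratetype);
	return pcp->count[migratetype] ? &pcp->count[migratetype] : NULL;
}

int main(void)
{
	struct pcp_model pcp = { { 0 } };
	bool wants_cma = true;		/* movable allocation with __GFP_CMA set */
	int *list = NULL;

	if (wants_cma)
		list = get_populated_list(&pcp, MIGRATE_CMA);
	if (!list)			/* CMA unsuitable or exhausted */
		list = get_populated_list(&pcp, MIGRATE_MOVABLE);

	printf("served from list with %d pages\n", list ? *list : 0);
	return 0;
}
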
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -2163,7 +2221,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
if (!is_migrate_isolate(mt)) {
/* Obey watermarks as if the page was being allocated */
watermark = low_wmark_pages(zone) + (1 << order);
- if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+ if (!is_migrate_cma(mt) &&
+ !zone_watermark_ok(zone, 0, watermark, 0, 0))
return 0;
__mod_zone_freepage_state(zone, -(1UL << order), mt);
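
__isolate_free_page() now skips the watermark check for pages sitting in CMA pageblocks, so isolating a reserved CMA page is not refused just because the zone is close to its low watermark. A small worked example of the check as changed (the numbers and the simplified watermark test are for illustration only):

#include <stdbool.h>
#include <stdio.h>

/* Models the watermark test in __isolate_free_page() after the patch. */
static bool may_isolate(long free_pages, long low_wmark, unsigned int order,
			bool page_is_cma)
{
	long watermark = low_wmark + (1L << order);

	return page_is_cma || free_pages >= watermark;
}

int main(void)
{
	/* 1000-page low watermark, 1005 pages free, order-3 (8-page) block. */
	printf("non-CMA page: %d\n", may_isolate(1005, 1000, 3, false));	/* 0: refused */
	printf("CMA page:     %d\n", may_isolate(1005, 1000, 3, true));		/* 1: allowed */
	return 0;
}
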
@@ -2227,21 +2286,32 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
gfp_t gfp_flags, int alloc_flags, int migratetype)
{
unsigned long flags;
- struct page *page;
+ struct page *page = NULL;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- struct list_head *list;
+ struct list_head *list = NULL;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, list,
- migratetype, cold);
- if (unlikely(list_empty(list)))
+
+ /* First try to get CMA pages */
+ if (migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA) {
+ list = get_populated_pcp_list(zone, 0, pcp,
+ get_cma_migrate_type(), cold);
+ }
+
+ if (list == NULL) {
+ /*
+ * Either CMA is not suitable or there are no free CMA
+ * pages.
+ */
+ list = get_populated_pcp_list(zone, 0, pcp,
+ migratetype, cold);
+ if (unlikely(list == NULL) ||
+ unlikely(list_empty(list)))
goto failed;
}
@@ -2274,8 +2344,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
+ if (!page && migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA)
+ page = __rmqueue_cma(zone, order);
+
if (!page)
page = __rmqueue(zone, order, migratetype, gfp_flags);
+
spin_unlock(&zone->lock);
if (!page)
goto failed;
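
For order > 0 allocations the same preference is applied under the zone lock: a movable request that carries __GFP_CMA (a flag present in this tree rather than in mainline) first tries __rmqueue_cma() and only then falls back to the normal __rmqueue() path. A trivial sketch of the gate, with stand-in flag values:

#include <stdbool.h>
#include <stdio.h>

#define GFP_MOVABLE_REQ 1	/* stand-in: request maps to MIGRATE_MOVABLE */
#define GFP_CMA         2	/* stand-in for this tree's __GFP_CMA flag */

/* Should this allocation be offered CMA pages first? Mirrors the new gate. */
static bool prefer_cma(bool migratetype_is_movable, unsigned int gfp_flags)
{
	return migratetype_is_movable && (gfp_flags & GFP_CMA);
}

int main(void)
{
	printf("movable + __GFP_CMA:   %d\n", prefer_cma(true, GFP_MOVABLE_REQ | GFP_CMA));
	printf("movable only:          %d\n", prefer_cma(true, GFP_MOVABLE_REQ));
	printf("unmovable + __GFP_CMA: %d\n", prefer_cma(false, GFP_CMA));
	return 0;
}
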
@@ -2438,6 +2513,14 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
return true;
for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+#ifdef CONFIG_CMA
+ /*
+ * Note that this check is needed only
+ * when MIGRATE_CMA < MIGRATE_PCPTYPES.
+ */
+ if (mt == MIGRATE_CMA)
+ continue;
+#endif
if (!list_empty(&area->free_list[mt]))
return true;
}
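
The free-list scan in __zone_watermark_ok() must not let a free CMA block make the zone look usable for a regular allocation, so MIGRATE_CMA is skipped; as the added comment says, this only matters when MIGRATE_CMA is ordered below MIGRATE_PCPTYPES, which the model below assumes (the enum is an illustrative stand-in):

#include <stdbool.h>
#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_CMA, MIGRATE_PCPTYPES };

/* Models the per-order free-list scan: CMA blocks must not satisfy it. */
static bool order_has_usable_block(const bool free_list_nonempty[MIGRATE_PCPTYPES])
{
	for (int mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
		if (mt == MIGRATE_CMA)		/* reserved for CMA allocations */
			continue;
		if (free_list_nonempty[mt])
			return true;
	}
	return false;
}

int main(void)
{
	/* Only the CMA free list has blocks at this order. */
	bool only_cma[MIGRATE_PCPTYPES] = { [MIGRATE_CMA] = true };

	printf("usable: %d\n", order_has_usable_block(only_cma));	/* 0 */
	return 0;
}
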
@@ -6764,6 +6847,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (ret)
return ret;
+ cc.zone->cma_alloc = 1;
+
ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret)
goto done;
@@ -6822,6 +6907,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
+ cc.zone->cma_alloc = 0;
return ret;
}
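
Finally, alloc_contig_range() brackets the contiguous allocation with zone->cma_alloc, so __rmqueue_cma() (earlier hunk) stops feeding CMA pages to ordinary movable allocations while a range is being migrated and isolated; the flag is cleared again on the common exit path at done:. A minimal sketch of that set/work/clear bracket, assuming cma_alloc is the new zone field introduced elsewhere in this series:

#include <stdio.h>

struct zone_model {
	int cma_alloc;		/* assumed new field; 1 while alloc_contig_range() runs */
};

static int do_contig_work(struct zone_model *zone)
{
	printf("migrating/isolating with cma_alloc=%d\n", zone->cma_alloc);
	return 0;		/* pretend the range was allocated */
}

/* Bracket pattern used by the patch: set the flag, do the work, always clear it. */
static int alloc_contig_model(struct zone_model *zone)
{
	int ret;

	zone->cma_alloc = 1;
	ret = do_contig_work(zone);
	zone->cma_alloc = 0;	/* cleared on the common exit path ("done:") */
	return ret;
}

int main(void)
{
	struct zone_model z = { 0 };

	return alloc_contig_model(&z);
}
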