 include/linux/gfp.h     | 11 +++++++++--
 include/linux/highmem.h | 15 +++++++++++++++
 include/linux/mmzone.h  |  3 +++
 mm/page_alloc.c         | 44 ++++++++++++++++++++++++++++++++++++--------
 4 files changed, 63 insertions(+), 10 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8942af0813e3..994f08fe426f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -36,6 +36,7 @@ struct vm_area_struct;
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_CMA 0x4000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -50,8 +51,9 @@ struct vm_area_struct;
#define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
+#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
+ __GFP_CMA)
/*
* Page mobility and placement hints
*
@@ -264,7 +266,12 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
+#ifndef CONFIG_CMA
return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+#else
+ return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
+ ((gfp_flags & __GFP_CMA) != 0);
+#endif
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
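
A minimal user-space sketch of the flag arithmetic above. The values of ___GFP_DMA through ___GFP_RECLAIMABLE and GFP_MOVABLE_SHIFT are assumed from mainline gfp.h of this era (they do not appear in the hunk); only ___GFP_CMA and the patched macros come from this diff. With the mainline enum where MIGRATE_MOVABLE == 1, note that the OR in gfpflags_to_migratetype() is a no-op for a request that is already movable: it only guarantees that a __GFP_CMA request is treated as at least movable, while the actual CMA routing happens in __rmqueue_cma() in mm/page_alloc.c below.

/* Standalone sketch; compile with any C compiler. */
#include <stdio.h>

typedef unsigned int gfp_t;

#define ___GFP_DMA		0x01u	/* assumed mainline values */
#define ___GFP_HIGHMEM		0x02u
#define ___GFP_DMA32		0x04u
#define ___GFP_MOVABLE		0x08u
#define ___GFP_RECLAIMABLE	0x10u
#define ___GFP_CMA		0x4000000u	/* new in this patch */

#define __GFP_MOVABLE		((gfp_t)___GFP_MOVABLE)
#define __GFP_CMA		((gfp_t)___GFP_CMA)

#define GFP_MOVABLE_MASK	(___GFP_RECLAIMABLE | ___GFP_MOVABLE)
#define GFP_MOVABLE_SHIFT	3

/* The CONFIG_CMA branch from the hunk above. */
static int gfpflags_to_migratetype(gfp_t gfp_flags)
{
	return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
		((gfp_flags & __GFP_CMA) != 0);
}

int main(void)
{
	printf("unmovable     -> %d\n", gfpflags_to_migratetype(0));
	printf("movable       -> %d\n", gfpflags_to_migratetype(__GFP_MOVABLE));
	printf("movable | cma -> %d\n",
	       gfpflags_to_migratetype(__GFP_MOVABLE | __GFP_CMA));
	return 0;
}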
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4c70716759a6..61aff324bd5e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -187,9 +187,24 @@ static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
unsigned long vaddr)
{
+#ifndef CONFIG_CMA
return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+#endif
}
+#ifdef CONFIG_CMA
+static inline struct page *
+alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+}
+#endif
+
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page);
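
With CONFIG_CMA enabled, the two helpers above now expand to the same call, so the _cma variant mainly gives call sites an explicit name for "movable and CMA-eligible". A hypothetical fault-path fragment (alloc_anon_page() is illustrative, not part of this patch):

/* Hypothetical caller; only the helper names come from this patch. */
static struct page *alloc_anon_page(struct vm_area_struct *vma,
				    unsigned long address)
{
#ifdef CONFIG_CMA
	/* Expands to __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, ...). */
	return alloc_zeroed_user_highpage_movable_cma(vma, address);
#else
	return alloc_zeroed_user_highpage_movable(vma, address);
#endif
}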
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 04030f756e7c..dfb8a6159997 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -376,6 +376,9 @@ struct zone {
* considered dirtyable memory.
*/
unsigned long dirty_balance_reserve;
+#ifdef CONFIG_CMA
+ bool cma_alloc;
+#endif
#ifndef CONFIG_SPARSEMEM
/*
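
The new zone->cma_alloc field is the hand-brake for the CMA allocation path: alloc_contig_range() sets it while it isolates and migrates a range, and __rmqueue_cma() refuses to hand out CMA pages while it is set. Both sides live in the mm/page_alloc.c hunks below; condensed here for reading:

/* Condensed from the page_alloc.c hunks below; not new code. */

/* Consumer side (__rmqueue_cma): skip CMA while a contiguous claim runs. */
if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
	page = __rmqueue_cma_fallback(zone, order);

/* Producer side (alloc_contig_range): bracket the claim. */
cc.zone->cma_alloc = true;
ret = __alloc_contig_migrate_range(&cc, start, end);
/* ... */
cc.zone->cma_alloc = false;

Since this is a plain bool rather than a counter, concurrent alloc_contig_range() calls against the same zone would appear to clear the flag as soon as the first one finishes; the patch evidently accepts that window.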
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1eafd75f402e..2695ca00653e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1815,11 +1815,26 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
- if (migratetype == MIGRATE_MOVABLE)
- page = __rmqueue_cma_fallback(zone, order);
+ page = __rmqueue_fallback(zone, order, migratetype);
+ }
- if (!page)
- page = __rmqueue_fallback(zone, order, migratetype);
+ trace_mm_page_alloc_zone_locked(page, order, migratetype);
+ return page;
+}
+
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
+ int migratetype, gfp_t gfp_flags)
+{
+ struct page *page = NULL;
+#ifdef CONFIG_CMA
+ if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
+ page = __rmqueue_cma_fallback(zone, order);
+ else
+#endif
+ page = __rmqueue_smallest(zone, order, migratetype);
+
+ if (unlikely(!page)) {
+ page = __rmqueue_fallback(zone, order, migratetype);
}
trace_mm_page_alloc_zone_locked(page, order, migratetype);
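
After this hunk, __rmqueue() never touches the CMA free lists; only __rmqueue_cma() does, and only for MIGRATE_MOVABLE requests while no contiguous claim is in flight. The dispatch between the two is open-coded at both call sites further down; it boils down to the following (rmqueue_pick() is an illustrative wrapper, not in the patch):

/* Illustrative wrapper; the patch open-codes this test in
 * rmqueue_bulk() and buffered_rmqueue() below. */
static struct page *rmqueue_pick(struct zone *zone, unsigned int order,
				 int migratetype, gfp_t gfp_flags)
{
	if (gfp_flags & __GFP_CMA)
		return __rmqueue_cma(zone, order, migratetype, gfp_flags);
	return __rmqueue(zone, order, migratetype, gfp_flags);
}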
@@ -1833,13 +1848,17 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
*/
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
- int migratetype, bool cold)
+ int migratetype, bool cold, int cma)
{
int i;
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
- struct page *page = __rmqueue(zone, order, migratetype, 0);
+ struct page *page;
+ if (cma)
+ page = __rmqueue_cma(zone, order, migratetype, 0);
+ else
+ page = __rmqueue(zone, order, migratetype, 0);
if (unlikely(page == NULL))
break;
@@ -2229,7 +2248,8 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
if (list_empty(list)) {
pcp->count += rmqueue_bulk(zone, 0,
pcp->batch, list,
- migratetype, cold);
+ migratetype, cold,
+ gfp_flags & __GFP_CMA);
if (unlikely(list_empty(list)))
goto failed;
}
@@ -2263,8 +2283,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
- if (!page)
+ if (!page) {
+ if (gfp_flags & __GFP_CMA)
+ page = __rmqueue_cma(zone, order, migratetype, gfp_flags);
+ else
page = __rmqueue(zone, order, migratetype, gfp_flags);
+
+ }
spin_unlock(&zone->lock);
if (!page)
goto failed;
@@ -6753,6 +6778,8 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (ret)
return ret;
+ cc.zone->cma_alloc = true;
+
ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret)
goto done;
@@ -6811,6 +6838,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
+ cc.zone->cma_alloc = false;
return ret;
}
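
The net effect for callers: a movable allocation is no longer implicitly CMA-eligible and has to opt in. A hedged usage sketch (grab_user_page() and its policy are hypothetical; only __GFP_CMA comes from this patch):

/* Hypothetical call site illustrating the opt-in. */
static struct page *grab_user_page(bool cma_ok)
{
	gfp_t gfp = GFP_HIGHUSER_MOVABLE;

	if (IS_ENABLED(CONFIG_CMA) && cma_ok)
		gfp |= __GFP_CMA;	/* may land in MIGRATE_CMA pageblocks */

	return alloc_pages(gfp, 0);	/* order-0 page; free with __free_pages() */
}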