Diffstat (limited to 'include/linux')
 include/linux/gfp.h     | 13 ++++++++++---
 include/linux/highmem.h | 15 +++++++++++++++
 include/linux/mmzone.h  | 18 ++++++++++++++++--
 include/linux/sched.h   |  2 +-
 4 files changed, 42 insertions(+), 6 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8942af0813e3..9796b4426710 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -36,6 +36,7 @@ struct vm_area_struct;
#define ___GFP_OTHER_NODE 0x800000u
#define ___GFP_WRITE 0x1000000u
#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_CMA 0x4000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -50,8 +51,9 @@ struct vm_area_struct;
#define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
+#define __GFP_CMA ((__force gfp_t)___GFP_CMA)
+#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
+ __GFP_CMA)
/*
* Page mobility and placement hints
*
@@ -183,7 +185,7 @@ struct vm_area_struct;
#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 27
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -264,7 +266,12 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
return MIGRATE_UNMOVABLE;
/* Group based on mobility */
+#ifndef CONFIG_CMA
return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+#else
+ return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
+ ((gfp_flags & __GFP_CMA) != 0);
+#endif
}
#undef GFP_MOVABLE_MASK
#undef GFP_MOVABLE_SHIFT
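
The new __GFP_CMA bit lets a caller opt in to pages served from CMA
pageblocks. A minimal, hypothetical caller might look like the sketch
below (illustration only; alloc_movable_page_maybe_cma is not part of
this patch):

#include <linux/gfp.h>

/* Request a movable page that may be satisfied from CMA pageblocks. */
static struct page *alloc_movable_page_maybe_cma(void)
{
	gfp_t gfp = GFP_HIGHUSER | __GFP_MOVABLE;

#ifdef CONFIG_CMA
	gfp |= __GFP_CMA;	/* opt in via gfpflags_to_migratetype() */
#endif
	return alloc_page(gfp);
}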
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4c70716759a6..61aff324bd5e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -187,9 +187,24 @@ static inline struct page *
alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
unsigned long vaddr)
{
+#ifndef CONFIG_CMA
return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+#endif
}
+#ifdef CONFIG_CMA
+static inline struct page *
+alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
+ unsigned long vaddr)
+{
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+ vaddr);
+}
+#endif
+
static inline void clear_highpage(struct page *page)
{
void *kaddr = kmap_atomic(page);
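
For illustration, a hypothetical anonymous-fault helper could pick the
new CMA-aware variant when it is available (new_anon_page is not part
of this patch):

#include <linux/highmem.h>

static struct page *new_anon_page(struct vm_area_struct *vma,
				  unsigned long address)
{
#ifdef CONFIG_CMA
	return alloc_zeroed_user_highpage_movable_cma(vma, address);
#else
	return alloc_zeroed_user_highpage_movable(vma, address);
#endif
}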
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 423d214f708b..ad4c3f186f61 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -39,8 +39,6 @@ enum {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
- MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
- MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
/*
* MIGRATE_CMA migration type is designed to mimic the way
@@ -57,18 +55,30 @@ enum {
*/
MIGRATE_CMA,
#endif
+ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+ MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
#endif
MIGRATE_TYPES
};
+/*
+ * Returns a list which contains the migrate types on to which
+ * an allocation falls back when the free list for the migrate
+ * type mtype is depleted.
+ * The end of the list is delimited by the type MIGRATE_TYPES.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
+
#ifdef CONFIG_CMA
bool is_cma_pageblock(struct page *page);
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define get_cma_migrate_type() MIGRATE_CMA
#else
# define is_cma_pageblock(page) false
# define is_migrate_cma(migratetype) false
+# define get_cma_migrate_type() MIGRATE_MOVABLE
#endif
#define for_each_migratetype_order(order, type) \
@@ -159,6 +169,7 @@ enum zone_stat_item {
WORKINGSET_NODERECLAIM,
NR_ANON_TRANSPARENT_HUGEPAGES,
NR_FREE_CMA_PAGES,
+ NR_SWAPCACHE,
NR_VM_ZONE_STAT_ITEMS };
/*
@@ -367,6 +378,9 @@ struct zone {
* considered dirtyable memory.
*/
unsigned long dirty_balance_reserve;
+#ifdef CONFIG_CMA
+ bool cma_alloc;
+#endif
#ifndef CONFIG_SPARSEMEM
/*
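
The comment on get_migratetype_fallbacks() above promises a
MIGRATE_TYPES-terminated array, so a consumer would walk it as in this
hypothetical sketch (can_fall_back_to is not part of this patch):

#include <linux/mmzone.h>

static bool can_fall_back_to(int mtype, int target)
{
	int *fallbacks = get_migratetype_fallbacks(mtype);
	int i;

	/* The list ends with the sentinel value MIGRATE_TYPES. */
	for (i = 0; fallbacks[i] != MIGRATE_TYPES; i++)
		if (fallbacks[i] == target)
			return true;
	return false;
}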
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e963ff30a7f6..7ece18efd02b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2740,7 +2740,7 @@ static inline void mmdrop(struct mm_struct * mm)
}
/* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
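
mmput() now returns int. Its definition is not shown in this diff; the
sketch below assumes a nonzero return means this call dropped the final
reference (put_mm_verbose is hypothetical):

#include <linux/sched.h>
#include <linux/printk.h>

static void put_mm_verbose(struct mm_struct *mm)
{
	/* Assumption: nonzero return == this mmput() freed the mm. */
	if (mmput(mm))
		pr_debug("mmput dropped the last reference\n");
}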