From 21f86651a66abefcfce33f4611c2de7863a8642b Mon Sep 17 00:00:00 2001
From: Liam Mark
Date: Thu, 7 Feb 2013 14:31:36 -0800
Subject: android/lowmemorykiller: Ignore tasks with freed mm

A killed task can stay in the task list long after its memory has been
returned to the system; therefore, ignore any tasks whose mm struct has
been freed.

Change-Id: I76394b203b4ab2312437c839976f0ecb7b6dde4e
CRs-fixed: 450383
Signed-off-by: Liam Mark
---
 include/linux/sched.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/sched.h b/include/linux/sched.h
index e963ff30a7f6..7ece18efd02b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2740,7 +2740,7 @@ static inline void mmdrop(struct mm_struct * mm)
 }
 
 /* mmput gets rid of the mappings and all user-space */
-extern void mmput(struct mm_struct *);
+extern int mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
 /*
--
cgit v1.2.3


From 92c1fefed56e839edbcce0d3cc734a53ed477394 Mon Sep 17 00:00:00 2001
From: Liam Mark
Date: Wed, 27 Mar 2013 12:34:51 -0700
Subject: android/lowmemorykiller: Selectively count free CMA pages

In certain memory configurations there can be a large number of CMA
pages which are not suitable to satisfy certain memory requests. This
large number of unsuitable pages can cause the lowmemorykiller to not
kill any tasks because the lowmemorykiller counts all free pages.

To ensure the lowmemorykiller properly evaluates the free memory, only
count the free CMA pages if they are suitable for satisfying the memory
request.

Change-Id: I7f06d53e2d8cfe7439e5561fe6e5209ce73b1c90
CRs-fixed: 437016
Signed-off-by: Liam Mark
---
 include/linux/mmzone.h | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 423d214f708b..8a5894308eb2 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -63,6 +63,14 @@ enum {
 	MIGRATE_TYPES
 };
 
+/*
+ * Returns a list which contains the migrate types on to which
+ * an allocation falls back when the free list for the migrate
+ * type mtype is depleted.
+ * The end of the list is delimited by the type MIGRATE_TYPES.
+ */
+extern int *get_migratetype_fallbacks(int mtype);
+
 #ifdef CONFIG_CMA
 bool is_cma_pageblock(struct page *page);
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
--
cgit v1.2.3
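
A sketch of how a consumer of the two patches above might behave. This is not
the actual lowmemorykiller change (the driver lives outside include/linux, so
only the header side is shown in this listing), and both helper names are
hypothetical; it only illustrates the two ideas: skip a task whose mm struct
is already gone, and count free CMA pages only when the request could
actually be satisfied from CMA.

#include <linux/sched.h>
#include <linux/gfp.h>
#include <linux/vmstat.h>

/* Hypothetical helper: true if the task's mm has already been freed. */
static bool task_mm_freed(struct task_struct *tsk)
{
	struct mm_struct *mm = get_task_mm(tsk);

	if (!mm)
		return true;	/* nothing left to reclaim from this task */
	mmput(mm);		/* returns int after the patch above */
	return false;
}

/* Hypothetical helper: free pages usable for a request with this gfp mask. */
static unsigned long usable_free_pages(gfp_t gfp_mask)
{
	unsigned long free = global_page_state(NR_FREE_PAGES);

	/* Unmovable requests cannot be placed on CMA pageblocks. */
	if (IS_ENABLED(CONFIG_CMA) && !(gfp_mask & __GFP_MOVABLE))
		free -= global_page_state(NR_FREE_CMA_PAGES);

	return free;
}
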
From 1426d1f8d93d9a4eea97c91eb5ebd1357afe7b16 Mon Sep 17 00:00:00 2001
From: Liam Mark
Date: Fri, 27 Feb 2015 12:59:00 -0800
Subject: lowmemorykiller: Don't count swap cache pages twice

The lowmem_shrink function discounts all the swap cache pages from the
file cache count. The zone aware code also discounts all file cache
pages from a certain zone. This results in some swap cache pages being
discounted twice, which can make the low memory killer unnecessarily
aggressive.

Fix the low memory killer to only discount the swap cache pages once.

Change-Id: I650bbfbf0fbbabd01d82bdb3502b57ff59c3e14f
Signed-off-by: Liam Mark
Signed-off-by: Vinayak Menon
---
 include/linux/mmzone.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include/linux')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8a5894308eb2..04030f756e7c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -167,6 +167,7 @@ enum zone_stat_item {
 	WORKINGSET_NODERECLAIM,
 	NR_ANON_TRANSPARENT_HUGEPAGES,
 	NR_FREE_CMA_PAGES,
+	NR_SWAPCACHE,
 	NR_VM_ZONE_STAT_ITEMS };
 
 /*
--
cgit v1.2.3


From d491cf59f01c82ba8c91ff80d984a1bee9186b0d Mon Sep 17 00:00:00 2001
From: Heesub Shin
Date: Mon, 7 Jan 2013 11:10:13 +0900
Subject: cma: redirect page allocation to CMA

CMA pages are designed to be used as fallback for movable allocations
and cannot be used for non-movable allocations. If CMA pages are
utilized poorly, non-movable allocations may end up getting starved if
all regular movable pages are allocated and the only pages left are
CMA. Always using CMA pages first creates unacceptable performance
problems. As a midway alternative, use CMA pages for certain userspace
allocations. These userspace pages can be migrated or dropped quickly,
which gives decent utilization.

Change-Id: I6165dda01b705309eebabc6dfa67146b7a95c174
CRs-Fixed: 452508
Signed-off-by: Kyungmin Park
Signed-off-by: Heesub Shin
[lmark@codeaurora.org: resolve conflicts relating to MIGRATE_HIGHATOMIC
 and some other trivial merge conflicts]
Signed-off-by: Liam Mark
---
 include/linux/gfp.h     | 11 +++++++++--
 include/linux/highmem.h | 15 +++++++++++++++
 include/linux/mmzone.h  |  3 +++
 3 files changed, 27 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8942af0813e3..994f08fe426f 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -36,6 +36,7 @@ struct vm_area_struct;
 #define ___GFP_OTHER_NODE	0x800000u
 #define ___GFP_WRITE		0x1000000u
 #define ___GFP_KSWAPD_RECLAIM	0x2000000u
+#define ___GFP_CMA		0x4000000u
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -50,8 +51,9 @@ struct vm_area_struct;
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* Page is movable */
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
-#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-
+#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
+#define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE| \
+			__GFP_CMA)
 /*
  * Page mobility and placement hints
  *
@@ -264,7 +266,12 @@ static inline int gfpflags_to_migratetype(const gfp_t gfp_flags)
 		return MIGRATE_UNMOVABLE;
 
 	/* Group based on mobility */
+#ifndef CONFIG_CMA
 	return (gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT;
+#else
+	return ((gfp_flags & GFP_MOVABLE_MASK) >> GFP_MOVABLE_SHIFT) |
+		((gfp_flags & __GFP_CMA) != 0);
+#endif
 }
 #undef GFP_MOVABLE_MASK
 #undef GFP_MOVABLE_SHIFT
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 4c70716759a6..61aff324bd5e 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -187,9 +187,24 @@ static inline struct page *
 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 					unsigned long vaddr)
 {
+#ifndef CONFIG_CMA
 	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+#else
+	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+						vaddr);
+#endif
 }
 
+#ifdef CONFIG_CMA
+static inline struct page *
+alloc_zeroed_user_highpage_movable_cma(struct vm_area_struct *vma,
+					unsigned long vaddr)
+{
+	return __alloc_zeroed_user_highpage(__GFP_MOVABLE|__GFP_CMA, vma,
+						vaddr);
+}
+#endif
+
 static inline void clear_highpage(struct page *page)
 {
 	void *kaddr = kmap_atomic(page);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 04030f756e7c..dfb8a6159997 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -376,6 +376,9 @@ struct zone {
 	 * considered dirtyable memory.
 	 */
 	unsigned long dirty_balance_reserve;
+#ifdef CONFIG_CMA
+	bool cma_alloc;
+#endif
 
 #ifndef CONFIG_SPARSEMEM
 	/*
--
cgit v1.2.3
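
A hedged usage sketch for the __GFP_CMA flag introduced above: movable
userspace pages, which can be migrated or dropped quickly, opt in to CMA,
while kernel allocations simply never pass the flag. The wrapper name below is
hypothetical; only __GFP_CMA, alloc_zeroed_user_highpage_movable() and
alloc_zeroed_user_highpage_movable_cma() come from the patch itself.

#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical wrapper for an anonymous-page fault path. */
static struct page *alloc_user_zero_page(struct vm_area_struct *vma,
					 unsigned long vaddr)
{
#ifdef CONFIG_CMA
	/* Userspace data page: allowed to land on a CMA pageblock. */
	return alloc_zeroed_user_highpage_movable_cma(vma, vaddr);
#else
	return alloc_zeroed_user_highpage_movable(vma, vaddr);
#endif
}
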
From 50050f2a1d798b65e049e8e2a910c593d29083fb Mon Sep 17 00:00:00 2001
From: Liam Mark
Date: Mon, 23 Jun 2014 14:13:47 -0700
Subject: mm: add cma pcp list

Add a cma pcp list in order to increase cma memory utilization.
Increased cma memory utilization will improve overall memory
utilization because free cma pages are ignored when memory reclaim is
done with gfp mask GFP_KERNEL.

Since most memory reclaim is done by kswapd, which uses a gfp mask of
GFP_KERNEL, increasing cma memory utilization therefore ensures that
less aggressive memory reclaim takes place.

Increased cma memory utilization will improve performance, for example
by increasing app concurrency.

Change-Id: I809589a25c6abca51f1c963f118adfc78e955cf9
Signed-off-by: Liam Mark
---
 include/linux/mmzone.h | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dfb8a6159997..ad4c3f186f61 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -39,8 +39,6 @@ enum {
 	MIGRATE_UNMOVABLE,
 	MIGRATE_MOVABLE,
 	MIGRATE_RECLAIMABLE,
-	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
-	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #ifdef CONFIG_CMA
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -57,6 +55,8 @@ enum {
 	 */
 	MIGRATE_CMA,
 #endif
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #ifdef CONFIG_MEMORY_ISOLATION
 	MIGRATE_ISOLATE,	/* can't allocate from here */
 #endif
@@ -74,9 +74,11 @@ extern int *get_migratetype_fallbacks(int mtype);
 #ifdef CONFIG_CMA
 bool is_cma_pageblock(struct page *page);
 # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+# define get_cma_migrate_type() MIGRATE_CMA
 #else
 # define is_cma_pageblock(page) false
 # define is_migrate_cma(migratetype) false
+# define get_cma_migrate_type() MIGRATE_MOVABLE
 #endif
 
 #define for_each_migratetype_order(order, type) \
--
cgit v1.2.3
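
The reordering above moves MIGRATE_CMA below MIGRATE_PCPTYPES, which is what
gives CMA pages their own per-cpu (pcp) free list, and get_cma_migrate_type()
lets callers request it without #ifdefs. A hypothetical compile-time check of
those two invariants; the function name is an assumption, not part of the
patch:

#include <linux/bug.h>
#include <linux/mmzone.h>

static inline void cma_pcp_sanity_check(void)
{
#ifdef CONFIG_CMA
	/* CMA must be a pcp type for the per-cpu CMA list to exist. */
	BUILD_BUG_ON(MIGRATE_CMA >= MIGRATE_PCPTYPES);
	/* Callers asking for CMA get MIGRATE_CMA on CMA-enabled kernels. */
	BUILD_BUG_ON(get_cma_migrate_type() != MIGRATE_CMA);
#endif
}
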
From 41728295e3ac3464aefa309c8247fd5cc75f5d90 Mon Sep 17 00:00:00 2001
From: Laura Abbott
Date: Thu, 9 Jan 2014 14:30:11 -0800
Subject: mm: Increase number of GFP masks

The __GFP_CMA mask is now placed after all available GFP masks. With
this we need to increase the total number of GFP flags. Do so
accordingly.

CRs-Fixed: 648978
Change-Id: I53f5f064ac16a50ee10c84ff2bb50fdb7e085bd0
Signed-off-by: Laura Abbott
[lmark@codeaurora.org: resolve trivial merge conflicts]
Signed-off-by: Liam Mark
---
 include/linux/gfp.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 994f08fe426f..9796b4426710 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -185,7 +185,7 @@ struct vm_area_struct;
 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
 
 /* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 27
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /*
--
cgit v1.2.3
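
For reference, the arithmetic behind the change above: ___GFP_CMA was added as
0x4000000u, i.e. bit 26, so 27 flag bits are now in use and __GFP_BITS_MASK
must cover them. A hypothetical compile-time check; the function name is an
assumption, not part of the patch:

#include <linux/bug.h>
#include <linux/gfp.h>

static inline void gfp_bits_sanity_check(void)
{
	BUILD_BUG_ON(___GFP_CMA != 0x4000000u);			/* bit 26 */
	BUILD_BUG_ON(__GFP_BITS_SHIFT < 27);			/* room for bits 0..26 */
	BUILD_BUG_ON((__GFP_BITS_MASK & __GFP_CMA) != __GFP_CMA);	/* mask covers the flag */
}
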