author    Liam Mark <lmark@codeaurora.org>    2014-06-23 14:13:47 -0700
committer Jeevan Shriram <jshriram@codeaurora.org>    2016-04-13 11:11:40 -0700
commit    50050f2a1d798b65e049e8e2a910c593d29083fb
tree      32af0a27e24f0017e0ba58e8d472f917345668b3 /mm/page_alloc.c
parent    d491cf59f01c82ba8c91ff80d984a1bee9186b0d
mm: add cma pcp list
Add a CMA pcp list in order to increase CMA memory utilization. Better CMA utilization improves overall memory utilization, because free CMA pages are ignored when memory reclaim is done with the GFP_KERNEL gfp mask. Since most reclaim is done by kswapd, which uses a GFP_KERNEL gfp mask, increasing CMA utilization therefore ensures that less aggressive memory reclaim takes place. This in turn improves performance, for example by increasing app concurrency.

Change-Id: I809589a25c6abca51f1c963f118adfc78e955cf9
Signed-off-by: Liam Mark <lmark@codeaurora.org>
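The allocation ordering this patch introduces can be illustrated outside the kernel. The sketch below is a minimal userspace model, not kernel code, of what buffered_rmqueue() does after the change: an order-0 MIGRATE_MOVABLE request that allows CMA (__GFP_CMA) is first served from the per-cpu CMA list and only falls back to the list for the requested migrate type when no free CMA pages exist. The struct, the list counters and the refill step are invented stand-ins for per_cpu_pages, the pcp lists and rmqueue_bulk().

/*
 * Minimal userspace sketch (not kernel code) of the CMA-first pcp lookup.
 * All types and numbers here are illustrative stand-ins only.
 */
#include <stdio.h>
#include <stdbool.h>

enum { MT_MOVABLE, MT_CMA, MT_NR };

struct pcp_model {
	int count[MT_NR];	/* pages currently on each pcp list */
};

/* Stand-in for get_populated_pcp_list(): refill an empty list once. */
static bool populated(struct pcp_model *pcp, int mt, int batch_refill)
{
	if (pcp->count[mt] == 0)
		pcp->count[mt] += batch_refill;	/* rmqueue_bulk() stand-in */
	return pcp->count[mt] > 0;
}

/* Mirrors the new ordering: try the CMA pcp list, then the movable list. */
static const char *alloc_movable(struct pcp_model *pcp, bool gfp_cma,
				 int cma_refill, int movable_refill)
{
	if (gfp_cma && populated(pcp, MT_CMA, cma_refill)) {
		pcp->count[MT_CMA]--;
		return "CMA pcp list";
	}
	if (populated(pcp, MT_MOVABLE, movable_refill)) {
		pcp->count[MT_MOVABLE]--;
		return "movable pcp list";
	}
	return "failed";
}

int main(void)
{
	struct pcp_model with_cma = { {0, 0} };
	struct pcp_model no_cma = { {0, 0} };

	/* Free CMA pages available: the CMA list satisfies the request. */
	printf("with free CMA pages:    %s\n",
	       alloc_movable(&with_cma, true, 4, 4));

	/* No free CMA pages: fall back to the movable pcp list. */
	printf("without free CMA pages: %s\n",
	       alloc_movable(&no_cma, true, 0, 4));
	return 0;
}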
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  94
1 file changed, 61 insertions(+), 33 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2695ca00653e..f54a84fb5e6e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1822,22 +1822,13 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
return page;
}
-static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
- int migratetype, gfp_t gfp_flags)
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order)
{
struct page *page = 0;
-#ifdef CONFIG_CMA
- if (migratetype == MIGRATE_MOVABLE && !zone->cma_alloc)
- page = __rmqueue_cma_fallback(zone, order);
- else
-#endif
- page = __rmqueue_smallest(zone, order, migratetype);
-
- if (unlikely(!page)) {
- page = __rmqueue_fallback(zone, order, migratetype);
- }
-
- trace_mm_page_alloc_zone_locked(page, order, migratetype);
+ if (IS_ENABLED(CONFIG_CMA))
+ if (!zone->cma_alloc)
+ page = __rmqueue_cma_fallback(zone, order);
+ trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
return page;
}
@@ -1848,15 +1839,21 @@ static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
*/
static int rmqueue_bulk(struct zone *zone, unsigned int order,
unsigned long count, struct list_head *list,
- int migratetype, bool cold, int cma)
+ int migratetype, bool cold)
{
int i;
spin_lock(&zone->lock);
for (i = 0; i < count; ++i) {
struct page *page;
- if (cma)
- page = __rmqueue_cma(zone, order, migratetype, 0);
+
+ /*
+ * If migrate type CMA is being requested, only try to
+ * satisfy the request with CMA pages in order to increase
+ * CMA utilization.
+ */
+ if (is_migrate_cma(migratetype))
+ page = __rmqueue_cma(zone, order);
else
page = __rmqueue(zone, order, migratetype, 0);
if (unlikely(page == NULL))
@@ -1885,6 +1882,28 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
return i;
}
+/*
+ * Return the pcp list that corresponds to the migrate type if that list isn't
+ * empty.
+ * If the list is empty return NULL.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+ unsigned int order, struct per_cpu_pages *pcp,
+ int migratetype, int cold)
+{
+ struct list_head *list = &pcp->lists[migratetype];
+
+ if (list_empty(list)) {
+ pcp->count += rmqueue_bulk(zone, order,
+ pcp->batch, list,
+ migratetype, cold);
+
+ if (list_empty(list))
+ list = NULL;
+ }
+ return list;
+}
+
#ifdef CONFIG_NUMA
/*
* Called from the vmstat counter updater to drain pagesets of this
@@ -2085,8 +2104,7 @@ void free_hot_cold_page(struct page *page, bool cold)
* excessively into the page allocator
*/
if (migratetype >= MIGRATE_PCPTYPES) {
- if (unlikely(is_migrate_isolate(migratetype)) ||
- is_migrate_cma(migratetype)) {
+ if (unlikely(is_migrate_isolate(migratetype))) {
free_one_page(zone, page, pfn, 0, migratetype);
goto out;
}
@@ -2235,22 +2253,32 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
gfp_t gfp_flags, int alloc_flags, int migratetype)
{
unsigned long flags;
- struct page *page;
+ struct page *page = NULL;
bool cold = ((gfp_flags & __GFP_COLD) != 0);
if (likely(order == 0)) {
struct per_cpu_pages *pcp;
- struct list_head *list;
+ struct list_head *list = NULL;
local_irq_save(flags);
pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
- if (list_empty(list)) {
- pcp->count += rmqueue_bulk(zone, 0,
- pcp->batch, list,
- migratetype, cold,
- gfp_flags & __GFP_CMA);
- if (unlikely(list_empty(list)))
+
+ /* First try to get CMA pages */
+ if (migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA) {
+ list = get_populated_pcp_list(zone, 0, pcp,
+ get_cma_migrate_type(), cold);
+ }
+
+ if (list == NULL) {
+ /*
+ * Either CMA is not suitable or there are no free CMA
+ * pages.
+ */
+ list = get_populated_pcp_list(zone, 0, pcp,
+ migratetype, cold);
+ if (unlikely(list == NULL) ||
+ unlikely(list_empty(list)))
goto failed;
}
@@ -2283,13 +2311,13 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
if (page)
trace_mm_page_alloc_zone_locked(page, order, migratetype);
}
- if (!page) {
- if (gfp_flags & __GFP_CMA)
- page = __rmqueue_cma(zone, order, migratetype, gfp_flags);
- else
+ if (!page && migratetype == MIGRATE_MOVABLE &&
+ gfp_flags & __GFP_CMA)
+ page = __rmqueue_cma(zone, order);
+
+ if (!page)
page = __rmqueue(zone, order, migratetype, gfp_flags);
- }
spin_unlock(&zone->lock);
if (!page)
goto failed;