author     Michael Bestas <mkbestas@lineageos.org>    2021-02-28 03:22:47 +0200
committer  Michael Bestas <mkbestas@lineageos.org>    2021-02-28 03:22:47 +0200
commit     3d9b6f5ee02e712b6cd1d50db3c2e68064fc2eca (patch)
tree       3d6a43bfb8e720a34f8f22c180b67b9776512975 /mm
parent     32ed4c6cace37c492b7deacdf3fa223618b1ad2e (diff)
parent     4fd124d1546d8a4300e1cdd637818af038e29fb2 (diff)
Merge branch 'android-4.4-p' of https://android.googlesource.com/kernel/common into lineage-18.1-caf-msm8998
This brings LA.UM.9.2.r1-02500-SDMxx0.0 up to date with
https://android.googlesource.com/kernel/common/ android-4.4-p at commit:
4fd124d1546d8 Merge 4.4.258 into android-4.4-p

Change-Id: Idbae7489bc1d831a378dd60993f46139e5e28c4c
Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c |  1
-rw-r--r--  mm/hugetlb.c     |  9
-rw-r--r--  mm/memblock.c    | 48
3 files changed, 11 insertions(+), 47 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index b12a49bf78de..39fc46c13950 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -21,6 +21,7 @@ struct backing_dev_info noop_backing_dev_info = {
 EXPORT_SYMBOL_GPL(noop_backing_dev_info);
 
 static struct class *bdi_class;
+const char *bdi_unknown_name = "(unknown)";
 
 /*
  * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
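
Note on the hunk above (not part of the patch): bdi_unknown_name gives writeback paths a safe fallback string once a bdi's device has gone away. A minimal sketch of how such a fallback is typically consumed; the helper name here is hypothetical (mainline later added a similar bdi_dev_name()):

/* Illustrative helper: never dereferences a NULL bdi->dev. */
static const char *bdi_name_or_unknown(struct backing_dev_info *bdi)
{
        if (!bdi || !bdi->dev)
                return bdi_unknown_name;        /* "(unknown)" */
        return dev_name(bdi->dev);
}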
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7a2379223085..dc877712ef1f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1184,12 +1184,11 @@ struct hstate *size_to_hstate(unsigned long size)
  */
 bool page_huge_active(struct page *page)
 {
-        VM_BUG_ON_PAGE(!PageHuge(page), page);
-        return PageHead(page) && PagePrivate(&page[1]);
+        return PageHeadHuge(page) && PagePrivate(&page[1]);
 }
 
 /* never called for tail page */
-static void set_page_huge_active(struct page *page)
+void set_page_huge_active(struct page *page)
 {
         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
         SetPagePrivate(&page[1]);
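
Note: dropping static from set_page_huge_active() makes it visible outside mm/hugetlb.c, which implies a matching declaration in a shared header. A sketch of that declaration; the exact header location is an assumption (upstream uses include/linux/hugetlb.h):

/* Assumed to live in include/linux/hugetlb.h alongside other hugetlb hooks. */
bool page_huge_active(struct page *page);
void set_page_huge_active(struct page *page);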
@@ -4544,9 +4543,9 @@ bool isolate_huge_page(struct page *page, struct list_head *list)
 {
         bool ret = true;
 
-        VM_BUG_ON_PAGE(!PageHead(page), page);
         spin_lock(&hugetlb_lock);
-        if (!page_huge_active(page) || !get_page_unless_zero(page)) {
+        if (!PageHeadHuge(page) || !page_huge_active(page) ||
+            !get_page_unless_zero(page)) {
                 ret = false;
                 goto unlock;
         }
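
What the re-ordered checks buy, as a hedged sketch (the wrapper below is hypothetical, not kernel code): page_huge_active() no longer asserts PageHuge(), and isolate_huge_page() revalidates the page type under hugetlb_lock, so a caller may probe an arbitrary page without tripping a BUG if the page is freed or dissolved concurrently.

/* Hypothetical caller: safe on any struct page after this patch. */
static bool try_isolate_hugetlb(struct page *page, struct list_head *list)
{
        /* Unlocked pre-check; a non-hugetlb page simply reads as inactive. */
        if (!page_huge_active(page))
                return false;
        /* isolate_huge_page() re-checks everything under hugetlb_lock. */
        return isolate_huge_page(page, list);
}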
diff --git a/mm/memblock.c b/mm/memblock.c
index e39ef2fe5c17..7e6c7fec3683 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -193,14 +193,6 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
- * When allocation direction is bottom-up, the @start should be greater
- * than the end of the kernel image. Otherwise, it will be trimmed. The
- * reason is that we want the bottom-up allocation just near the kernel
- * image so it is highly likely that the allocated memory and the kernel
- * will reside in the same node.
- *
- * If bottom-up allocation failed, will try to allocate memory top-down.
- *
  * RETURNS:
  * Found address on success, 0 on failure.
  */
@@ -208,8 +200,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
                                         phys_addr_t align, phys_addr_t start,
                                         phys_addr_t end, int nid, ulong flags)
 {
-        phys_addr_t kernel_end, ret;
-
         /* pump up @end */
         if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                 end = memblock.current_limit;
@@ -217,39 +207,13 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
         /* avoid allocating the first page */
         start = max_t(phys_addr_t, start, PAGE_SIZE);
         end = max(start, end);
-        kernel_end = __pa_symbol(_end);
-
-        /*
-         * try bottom-up allocation only when bottom-up mode
-         * is set and @end is above the kernel image.
-         */
-        if (memblock_bottom_up() && end > kernel_end) {
-                phys_addr_t bottom_up_start;
-
-                /* make sure we will allocate above the kernel */
-                bottom_up_start = max(start, kernel_end);
-                /* ok, try bottom-up allocation first */
-                ret = __memblock_find_range_bottom_up(bottom_up_start, end,
-                                                      size, align, nid, flags);
-                if (ret)
-                        return ret;
-
-                /*
-                 * we always limit bottom-up allocation above the kernel,
-                 * but top-down allocation doesn't have the limit, so
-                 * retrying top-down allocation may succeed when bottom-up
-                 * allocation failed.
-                 *
-                 * bottom-up allocation is expected to be fail very rarely,
-                 * so we use WARN_ONCE() here to see the stack trace if
-                 * fail happens.
-                 */
-                WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
-        }
-
-        return __memblock_find_range_top_down(start, end, size, align, nid,
-                                              flags);
+        if (memblock_bottom_up())
+                return __memblock_find_range_bottom_up(start, end, size, align,
+                                                       nid, flags);
+        else
+                return __memblock_find_range_top_down(start, end, size, align,
+                                                      nid, flags);
 }
 
 /**
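
Usage note (illustrative, not from this diff): memblock_find_in_range_node() now honors the global allocation direction over the caller's full [start, end) range, with no trim to above the kernel image and no silent top-down fallback. A sketch of how early boot code picks the direction with the existing memblock API; the hook function and its flag are hypothetical:

/* Hypothetical early-boot hook using the real memblock_set_bottom_up(). */
void __init arch_setup_memblock_direction(bool favor_hotplug)
{
        /*
         * Allocate low-to-high so memory that may be hot-removed later
         * (typically at higher addresses) is left untouched.
         */
        if (favor_hotplug)
                memblock_set_bottom_up(true);
}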