-rw-r--r--  include/linux/mm.h |  1
-rw-r--r--  mm/page_alloc.c    |  9
-rw-r--r--  mm/page_ext.c      |  3
-rw-r--r--  mm/page_poison.c   | 73
4 files changed, 15 insertions, 71 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d9fe27c8a57e..3c10d4638646 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2306,7 +2306,6 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a4cfa64634b4..170c1486e5c9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1397,24 +1397,21 @@ static inline int check_new_page(struct page *page)
return 0;
}
-static inline bool free_pages_prezeroed(bool poisoned)
+static inline bool free_pages_prezeroed(void)
{
return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
- page_poisoning_enabled() && poisoned;
+ page_poisoning_enabled();
}
static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
int alloc_flags)
{
int i;
- bool poisoned = true;
for (i = 0; i < (1 << order); i++) {
struct page *p = page + i;
if (unlikely(check_new_page(p)))
return 1;
- if (poisoned)
- poisoned &= page_is_poisoned(p);
}
set_page_private(page, 0);
@@ -1425,7 +1422,7 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
kernel_map_pages(page, 1 << order, 1);
kernel_poison_pages(page, 1 << order, 1);
- if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
+ if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
for (i = 0; i < (1 << order); i++)
clear_highpage(page + i);
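
The page_alloc.c hunk drops the per-page "poisoned" bookkeeping: with CONFIG_PAGE_POISONING_ZERO, every page freed to the buddy allocator is filled with zeros, so prep_new_page() can skip the __GFP_ZERO clear whenever poisoning is active, without consulting per-page state. A minimal userspace sketch of that decision, with the config option and runtime check reduced to plain booleans (illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) and the runtime check. */
static const bool config_page_poisoning_zero = true;
static bool page_poisoning_enabled(void) { return true; }

/* Mirrors the simplified helper: no per-page "poisoned" flag is consulted. */
static bool free_pages_prezeroed(void)
{
	return config_page_poisoning_zero && page_poisoning_enabled();
}

int main(void)
{
	bool gfp_zero = true;	/* caller asked for a zeroed page (__GFP_ZERO) */

	if (!free_pages_prezeroed() && gfp_zero)
		puts("clear_highpage() would run for each page");
	else
		puts("pages are already zero-poisoned, clearing skipped");
	return 0;
}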
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 2d864e64f7fe..916accfec86a 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -54,9 +54,6 @@
static struct page_ext_operations *page_ext_ops[] = {
&debug_guardpage_ops,
-#ifdef CONFIG_PAGE_POISONING
- &page_poisoning_ops,
-#endif
#ifdef CONFIG_PAGE_OWNER
&page_owner_ops,
#endif
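
Page poisoning no longer needs a page_ext client because it no longer tracks a per-page poison bit. For context, the clients that remain in this array pair a .need() query with an .init() hook, as the removed page_poisoning_ops did. A simplified userspace sketch of that registration pattern follows; the struct, names, and dispatch order here are illustrative, not the kernel's actual definitions or sequencing:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative analogue of struct page_ext_operations (.need / .init). */
struct ext_ops {
	const char *name;
	bool (*need)(void);
	void (*init)(void);
};

static bool guardpage_need(void) { return false; }
static void guardpage_init(void) { puts("debug_guardpage: init"); }

static bool owner_need(void) { return true; }
static void owner_init(void) { puts("page_owner: init"); }

static struct ext_ops debug_guardpage_ops = { "debug_guardpage", guardpage_need, guardpage_init };
static struct ext_ops page_owner_ops = { "page_owner", owner_need, owner_init };

static struct ext_ops *ext_ops[] = {
	&debug_guardpage_ops,
	&page_owner_ops,
};

int main(void)
{
	/* Only clients that report themselves as needed get initialized. */
	for (unsigned i = 0; i < sizeof(ext_ops) / sizeof(ext_ops[0]); i++)
		if (ext_ops[i]->need())
			ext_ops[i]->init();
	return 0;
}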
diff --git a/mm/page_poison.c b/mm/page_poison.c
index 09192467efc9..eb3c4f1aade3 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -6,7 +6,6 @@
#include <linux/poison.h>
#include <linux/ratelimit.h>
-static bool __page_poisoning_enabled __read_mostly;
static bool want_page_poisoning __read_mostly;
static int early_page_poison_param(char *buf)
@@ -25,70 +24,22 @@ early_param("page_poison", early_page_poison_param);
bool page_poisoning_enabled(void)
{
- return __page_poisoning_enabled;
-}
-
-static bool need_page_poisoning(void)
-{
- return want_page_poisoning;
-}
-
-static void init_page_poisoning(void)
-{
/*
- * page poisoning is debug page alloc for some arches. If either
- * of those options are enabled, enable poisoning
+ * Assumes that debug_pagealloc_enabled is set before
+ * free_all_bootmem.
+ * Page poisoning is debug page alloc for some arches. If
+ * either of those options are enabled, enable poisoning.
*/
- if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
- if (!want_page_poisoning && !debug_pagealloc_enabled())
- return;
- } else {
- if (!want_page_poisoning)
- return;
- }
-
- __page_poisoning_enabled = true;
-}
-
-struct page_ext_operations page_poisoning_ops = {
- .need = need_page_poisoning,
- .init = init_page_poisoning,
-};
-
-static inline void set_page_poison(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- __set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-static inline void clear_page_poison(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- __clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
-}
-
-bool page_is_poisoned(struct page *page)
-{
- struct page_ext *page_ext;
-
- page_ext = lookup_page_ext(page);
- if (!page_ext)
- return false;
-
- return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
+ return (want_page_poisoning ||
+ (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
+ debug_pagealloc_enabled()));
}
static void poison_page(struct page *page)
{
void *addr = kmap_atomic(page);
- set_page_poison(page);
memset(addr, PAGE_POISON, PAGE_SIZE);
- mark_addr_rdonly(addr);
kunmap_atomic(addr);
}
@@ -145,13 +96,13 @@ static void unpoison_page(struct page *page)
{
void *addr;
- if (!page_is_poisoned(page))
- return;
-
addr = kmap_atomic(page);
+ /*
+ * Page poisoning when enabled poisons each and every page
+ * that is freed to buddy. Thus no extra check is done to
+ * see if a page was poisoned.
+ */
check_poison_mem(page, addr, PAGE_SIZE);
- mark_addr_rdwrite(addr);
- clear_page_poison(page);
kunmap_atomic(addr);
}
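
Because every page freed to the buddy allocator is now poisoned unconditionally while the feature is on, unpoison_page() can verify the pattern without consulting a per-page flag. A userspace analogue of the poison-then-check cycle; check_poison() here is a simplified stand-in for the kernel's check_poison_mem(), and 0xaa is the PAGE_POISON fill byte from include/linux/poison.h:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_POISON 0xaa	/* poison fill byte */
#define PAGE_SIZE   4096

/* Report the first byte that no longer holds the poison pattern. */
static void check_poison(const unsigned char *addr, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		if (addr[i] != PAGE_POISON) {
			printf("poison overwritten at offset %zu: 0x%02x\n",
			       i, addr[i]);
			return;
		}
	}
	puts("poison intact");
}

int main(void)
{
	static unsigned char page[PAGE_SIZE];

	memset(page, PAGE_POISON, PAGE_SIZE);	/* poison_page() equivalent   */
	page[128] = 0x00;			/* simulate a stray write     */
	check_poison(page, PAGE_SIZE);		/* unpoison_page() equivalent */
	return 0;
}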