Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--  include/linux/mm.h | 48 +++++++++++++++++++++++++++++++++++++++---------
1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6424748733eb..34fcdede4604 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -29,6 +29,7 @@ struct file_ra_state;
struct user_struct;
struct writeback_control;
struct bdi_writeback;
+struct super_block;

#ifndef CONFIG_NEED_MULTIPLE_NODES /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -393,16 +394,16 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-static inline int is_vmalloc_addr(const void *x)
-{
-#ifdef CONFIG_MMU
- unsigned long addr = (unsigned long)x;
- return addr >= VMALLOC_START && addr < VMALLOC_END;
+#ifdef CONFIG_MMU
+extern int is_vmalloc_addr(const void *x);
#else
+static inline int is_vmalloc_addr(const void *x)
+{
return 0;
-#endif
}
+#endif
+
#ifdef CONFIG_MMU
extern int is_vmalloc_or_module_addr(const void *x);
#else
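
On MMU kernels is_vmalloc_addr() is now only declared here and defined out of line. A minimal sketch of what the out-of-line body could look like, assuming it keeps the bounds check from the inline version removed above (the real definition is not part of this hunk and may differ):

int is_vmalloc_addr(const void *x)
{
        unsigned long addr = (unsigned long)x;

        /* Same check as the removed inline: is addr inside the vmalloc range? */
        return addr >= VMALLOC_START && addr < VMALLOC_END;
}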
@@ -536,7 +537,6 @@ void put_page(struct page *page);
void put_pages_list(struct list_head *pages);

void split_page(struct page *page, unsigned int order);
-int split_free_page(struct page *page);

/*
* Compound pages have a destructor function. Provide a
@@ -1002,6 +1002,7 @@ static inline int page_mapped(struct page *page)
{
return atomic_read(&(page)->_mapcount) >= 0;
}
+struct address_space *page_mapping(struct page *page);

/*
* Return true only if the page has been allocated with
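
page_mapping() gains a prototype here, so callers can look up the address_space a page belongs to. An illustrative caller (hypothetical helper name):

/* Anonymous pages have no backing address_space, so this returns false for them. */
static bool example_page_has_mapping(struct page *page)
{
        return page_mapping(page) != NULL;
}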
@@ -1971,8 +1972,11 @@ vm_unmapped_area(struct vm_unmapped_area_info *info)

/* truncate.c */
extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_fill_zero(struct address_space *, loff_t);
extern void truncate_inode_pages_range(struct address_space *,
loff_t lstart, loff_t lend);
+extern void truncate_inode_pages_range_fill_zero(struct address_space *,
+ loff_t lstart, loff_t lend);
extern void truncate_inode_pages_final(struct address_space *);

/* generic vm_area_ops exported for stackable file systems */
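
The _fill_zero variants are only declared in this hunk; their bodies are not shown. Assuming they mirror the existing pair, where truncate_inode_pages() is a whole-file wrapper around truncate_inode_pages_range(), the new non-range form would reduce to something like this sketch (the zero-filling behaviour itself is an assumption from the name):

/* Hypothetical sketch: the whole-file form delegates to the range form,
 * exactly as truncate_inode_pages() delegates to its _range variant. */
void truncate_inode_pages_fill_zero(struct address_space *mapping,
                                    loff_t lstart)
{
        truncate_inode_pages_range_fill_zero(mapping, lstart, (loff_t)-1);
}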
@@ -1985,7 +1989,7 @@ int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);

/* readahead.c */
-#define VM_MAX_READAHEAD 128 /* kbytes */
+#define VM_MAX_READAHEAD 512 /* kbytes */
#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */

int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
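
With the common 4 KB page size, this raises the default maximum readahead window from 32 pages (128 KB) to 128 pages (512 KB); VM_MIN_READAHEAD is unchanged at 16 KB, i.e. 4 pages.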
@@ -2146,6 +2150,17 @@ static inline void vm_stat_account(struct mm_struct *mm,
}
#endif /* CONFIG_PROC_FS */

+#ifdef CONFIG_PAGE_POISONING
+extern bool page_poisoning_enabled(void);
+extern void kernel_poison_pages(struct page *page, int numpages, int enable);
+extern bool page_is_poisoned(struct page *page);
+#else
+static inline bool page_poisoning_enabled(void) { return false; }
+static inline void kernel_poison_pages(struct page *page, int numpages,
+ int enable) { }
+static inline bool page_is_poisoned(struct page *page) { return false; }
+#endif
+
#ifdef CONFIG_DEBUG_PAGEALLOC
extern bool _debug_pagealloc_enabled;
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
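
Both poisoning helpers compile to no-ops when CONFIG_PAGE_POISONING is off, so callers need no #ifdefs. A sketch of the usual call pattern in the page allocator, which is an assumption about the callers rather than part of this diff:

/* Hypothetical call sites: poison on free, verify/unpoison on allocation. */
static void example_free_path(struct page *page, unsigned int order)
{
        kernel_poison_pages(page, 1 << order, 0);  /* 0: write the poison pattern */
}

static void example_alloc_path(struct page *page, unsigned int order)
{
        kernel_poison_pages(page, 1 << order, 1);  /* 1: check and clear it */
}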
@@ -2203,6 +2218,8 @@ int drop_caches_sysctl_handler(struct ctl_table *, int,
void drop_slab(void);
void drop_slab_node(int nid);

+void drop_pagecache_sb(struct super_block *sb, void *unused);
+
#ifndef CONFIG_MMU
#define randomize_va_space 0
#else
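
Exposing drop_pagecache_sb() is why struct super_block gains a forward declaration in the first hunk. Its signature matches the iterate_supers() callback used by fs/drop_caches.c, so an external caller could look like this sketch (hypothetical helper name):

/* Walk every mounted superblock and drop its clean page cache;
 * NULL is the unused second callback argument. */
static void example_drop_all_pagecache(void)
{
        iterate_supers(drop_pagecache_sb, NULL);  /* iterate_supers() is in fs.h */
}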
@@ -2297,7 +2314,6 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */

extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;

#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
@@ -2335,5 +2351,19 @@ void __init setup_nr_node_ids(void);
static inline void setup_nr_node_ids(void) {}
#endif

+#ifdef CONFIG_PROCESS_RECLAIM
+struct reclaim_param {
+ struct vm_area_struct *vma;
+ /* Number of pages scanned */
+ int nr_scanned;
+ /* max pages to reclaim */
+ int nr_to_reclaim;
+ /* pages reclaimed */
+ int nr_reclaimed;
+};
+extern struct reclaim_param reclaim_task_anon(struct task_struct *task,
+ int nr_to_reclaim);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
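
reclaim_task_anon() returns its bookkeeping by value in a struct reclaim_param. A usage sketch, assuming it scans the task's anonymous VMAs and stops after nr_to_reclaim pages, as the field comments suggest (hypothetical caller):

/* Ask for up to 32 of the task's anonymous pages back and report
 * how many were actually reclaimed. */
static int example_reclaim(struct task_struct *task)
{
        struct reclaim_param rp = reclaim_task_anon(task, 32);

        return rp.nr_reclaimed;
}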