author    Kees Cook <keescook@chromium.org>            2016-06-07 11:05:33 -0700
committer Sami Tolvanen <samitolvanen@google.com>      2016-09-06 15:53:36 +0000
commit    d677b3104d3c8e927211f20083a6c2e97e003b87 (patch)
tree      71c92b3b1f79d7f6d2a46b5c19d35a547b29b24c /include/linux
parent    c30d7340ee0b167a45cd8d6d5c48add8f62db9a5 (diff)
BACKPORT: mm: Hardened usercopy
This is the start of porting PAX_USERCOPY into the mainline kernel. This
is the first set of features, controlled by CONFIG_HARDENED_USERCOPY. The
work is based on code by PaX Team and Brad Spengler, and an earlier port
from Casey Schaufler. Additional non-slab page tests are from Rik van
Riel.

This patch contains the logic for validating several conditions when
performing copy_to_user() and copy_from_user() on the kernel object
being copied to/from:
- address range doesn't wrap around
- address range isn't NULL or zero-allocated (with a non-zero copy size)
- if on the slab allocator:
  - copy size must be less than or equal to object size (when the check
    is implemented in the allocator, which appears in subsequent patches)
- otherwise, object must not span page allocations (excepting Reserved
  and CMA ranges)
- if on the stack:
  - object must not extend before/after the current process stack
  - object must be contained by a valid stack frame (when there is
    arch/build support for identifying stack frames)
- object must not overlap with kernel text

Signed-off-by: Kees Cook <keescook@chromium.org>
Tested-by: Valdis Kletnieks <valdis.kletnieks@vt.edu>
Tested-by: Michael Ellerman <mpe@ellerman.id.au>
Change-Id: Iff3b5f1ddb04acd99ccf9a9046c7797363962b2a
(cherry picked from commit f5509cc18daa7f82bcc553be70df2117c8eedc16)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
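As a rough sketch of the control flow those conditions imply (the helper
below is an illustrative stand-in, not the exact function body; the real
logic lives in the mm/usercopy.c file this patch adds):

/*
 * Illustrative-only outline of the entry-point checks described in the
 * commit message; error reporting is reduced to BUG() for brevity.
 */
static void sketch_check_object_size(const void *ptr, unsigned long n,
				     bool to_user)
{
	unsigned long addr = (unsigned long)ptr;

	if (!n)				/* zero-size copies always pass */
		return;

	/* NULL/zero-allocated pointer with a non-zero size is rejected */
	if (ZERO_OR_NULL_PTR(ptr))
		BUG();

	/* the address range must not wrap around */
	if (addr + n < addr)
		BUG();

	/* then: slab bounds, page-span, stack-frame, kernel-text checks */
}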
Diffstat (limited to 'include/linux')
 include/linux/slab.h        | 12 ++++++++++++
 include/linux/thread_info.h | 15 +++++++++++++++
 2 files changed, 27 insertions(+), 0 deletions(-)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 2037a861e367..4ef384b172e0 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -144,6 +144,18 @@ void kfree(const void *);
 void kzfree(const void *);
 size_t ksize(const void *);
 
+#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
+const char *__check_heap_object(const void *ptr, unsigned long n,
+				struct page *page);
+#else
+static inline const char *__check_heap_object(const void *ptr,
+					      unsigned long n,
+					      struct page *page)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
  * alignment larger than the alignment of a 64-bit integer.
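The hook above lets an allocator veto a usercopy: it returns NULL when
the range stays inside one slab object, or a cache name to identify the
offender. The real SLAB/SLUB implementations land in subsequent patches;
the following is only a simplified sketch, assuming SLUB-style
struct kmem_cache fields and no redzone handling:

/* Illustrative sketch only: reject copies that straddle slab objects. */
const char *__check_heap_object(const void *ptr, unsigned long n,
				struct page *page)
{
	struct kmem_cache *s = page->slab_cache;	/* owning cache */
	unsigned long offset;

	/* offset of the pointer within its containing object */
	offset = (ptr - page_address(page)) % s->size;

	/* allow ranges that fall entirely within the usable object */
	if (offset <= s->object_size && n <= s->object_size - offset)
		return NULL;

	return s->name;		/* cache name identifies the violation */
}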
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 5ecb68e86968..0ae29ff9ccfd 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -154,6 +154,21 @@ static inline int arch_within_stack_frames(const void * const stack,
 }
 #endif
 
+#ifdef CONFIG_HARDENED_USERCOPY
+extern void __check_object_size(const void *ptr, unsigned long n,
+				bool to_user);
+
+static inline void check_object_size(const void *ptr, unsigned long n,
+				     bool to_user)
+{
+	__check_object_size(ptr, n, to_user);
+}
+#else
+static inline void check_object_size(const void *ptr, unsigned long n,
+				     bool to_user)
+{ }
+#endif /* CONFIG_HARDENED_USERCOPY */
+
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
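Callers on the copy_*_user() paths invoke check_object_size() before
moving data; wiring this into the arch uaccess routines happens in later
patches of the series. A minimal sketch of such a call site (the
surrounding function is a hypothetical stand-in, not an actual arch
routine):

/* Illustrative call site: validate the kernel buffer before copying. */
static inline unsigned long
sketch_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	check_object_size(to, n, false);   /* false: copying *from* user */
	return __copy_from_user(to, from, n);
}

With CONFIG_HARDENED_USERCOPY disabled, the empty static inline stub
lets the compiler discard the check entirely, so the hook costs nothing
on unhardened configurations.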