author     Srinivasarao P <spathi@codeaurora.org>  2018-01-09 16:30:10 +0530
committer  Srinivasarao P <spathi@codeaurora.org>  2018-01-18 12:50:51 +0530
commit     de3efc405c531961da26b6b256389ca2f3350460 (patch)
tree       644748430b32dea7d5d0178bde8e88007e2788bf /include/linux
parent     fb9c0ae7a810d2724ee293d26462fcaaa33a4703 (diff)
parent     5cc8c2ec619e3ce3439b3ae19b22d487e0e3a86b (diff)
Merge android-4.4.110 (5cc8c2e) into msm-4.4
* refs/heads/tmp-5cc8c2e
  Linux 4.4.110
  kaiser: Set _PAGE_NX only if supported
  x86/kasan: Clear kasan_zero_page after TLB flush
  x86/vdso: Get pvclock data from the vvar VMA instead of the fixmap
  x86, vdso, pvclock: Simplify and speed up the vdso pvclock reader
  KPTI: Report when enabled
  KPTI: Rename to PAGE_TABLE_ISOLATION
  x86/kaiser: Move feature detection up
  kaiser: disabled on Xen PV
  x86/kaiser: Reenable PARAVIRT
  x86/paravirt: Dont patch flush_tlb_single
  kaiser: kaiser_flush_tlb_on_return_to_user() check PCID
  kaiser: asm/tlbflush.h handle noPGE at lower level
  kaiser: drop is_atomic arg to kaiser_pagetable_walk()
  kaiser: use ALTERNATIVE instead of x86_cr3_pcid_noflush
  x86/kaiser: Check boottime cmdline params
  x86/kaiser: Rename and simplify X86_FEATURE_KAISER handling
  kaiser: add "nokaiser" boot option, using ALTERNATIVE
  kaiser: fix unlikely error in alloc_ldt_struct()
  kaiser: _pgd_alloc() without __GFP_REPEAT to avoid stalls
  kaiser: paranoid_entry pass cr3 need to paranoid_exit
  kaiser: x86_cr3_pcid_noflush and x86_cr3_pcid_user
  kaiser: PCID 0 for kernel and 128 for user
  kaiser: load_new_mm_cr3() let SWITCH_USER_CR3 flush user
  kaiser: enhanced by kernel and user PCIDs
  kaiser: vmstat show NR_KAISERTABLE as nr_overhead
  kaiser: delete KAISER_REAL_SWITCH option
  kaiser: name that 0x1000 KAISER_SHADOW_PGD_OFFSET
  kaiser: cleanups while trying for gold link
  kaiser: kaiser_remove_mapping() move along the pgd
  kaiser: tidied up kaiser_add/remove_mapping slightly
  kaiser: tidied up asm/kaiser.h somewhat
  kaiser: ENOMEM if kaiser_pagetable_walk() NULL
  kaiser: fix perf crashes
  kaiser: fix regs to do_nmi() ifndef CONFIG_KAISER
  kaiser: KAISER depends on SMP
  kaiser: fix build and FIXME in alloc_ldt_struct()
  kaiser: stack map PAGE_SIZE at THREAD_SIZE-PAGE_SIZE
  kaiser: do not set _PAGE_NX on pgd_none
  kaiser: merged update
  KAISER: Kernel Address Isolation
  x86/boot: Add early cmdline parsing for options with arguments
  ANDROID: sdcardfs: Add default_normal option
  ANDROID: sdcardfs: notify lower file of opens

Conflicts:
	kernel/fork.c

Change-Id: I9c8c12e63321d79dc2c89fb470ca8de587366911
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/kaiser.h        52
-rw-r--r--  include/linux/mmzone.h         3
-rw-r--r--  include/linux/percpu-defs.h   32
3 files changed, 85 insertions(+), 2 deletions(-)
diff --git a/include/linux/kaiser.h b/include/linux/kaiser.h
new file mode 100644
index 000000000000..58c55b1589d0
--- /dev/null
+++ b/include/linux/kaiser.h
@@ -0,0 +1,52 @@
+#ifndef _LINUX_KAISER_H
+#define _LINUX_KAISER_H
+
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#include <asm/kaiser.h>
+
+static inline int kaiser_map_thread_stack(void *stack)
+{
+	/*
+	 * Map that page of kernel stack on which we enter from user context.
+	 */
+	return kaiser_add_mapping((unsigned long)stack +
+			THREAD_SIZE - PAGE_SIZE, PAGE_SIZE, __PAGE_KERNEL);
+}
+
+static inline void kaiser_unmap_thread_stack(void *stack)
+{
+	/*
+	 * Note: may be called even when kaiser_map_thread_stack() failed.
+	 */
+	kaiser_remove_mapping((unsigned long)stack +
+			THREAD_SIZE - PAGE_SIZE, PAGE_SIZE);
+}
+#else
+
+/*
+ * These stubs are used whenever CONFIG_PAGE_TABLE_ISOLATION is off, which
+ * includes architectures that support KAISER, but have it disabled.
+ */
+
+static inline void kaiser_init(void)
+{
+}
+static inline int kaiser_add_mapping(unsigned long addr,
+				     unsigned long size, unsigned long flags)
+{
+	return 0;
+}
+static inline void kaiser_remove_mapping(unsigned long start,
+					 unsigned long size)
+{
+}
+static inline int kaiser_map_thread_stack(void *stack)
+{
+	return 0;
+}
+static inline void kaiser_unmap_thread_stack(void *stack)
+{
+}
+
+#endif /* !CONFIG_PAGE_TABLE_ISOLATION */
+#endif /* _LINUX_KAISER_H */
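Usage context (illustrative): the non-stub helpers above are meant to be called from the thread-stack allocation and free paths, which is why this merge records a conflict in kernel/fork.c. The sketch below uses hypothetical function names, not the literal fork.c hunks from this merge, and assumes <linux/kaiser.h> and <linux/sched.h> are available.

/* Illustrative sketch only -- not the literal kernel/fork.c change in this merge. */
#include <linux/kaiser.h>
#include <linux/sched.h>

static int example_setup_thread_stack(struct task_struct *tsk, void *stack)
{
	int err;

	tsk->stack = stack;
	/* Expose only the top stack page (the one entered from user context)
	 * to the user page tables. */
	err = kaiser_map_thread_stack(tsk->stack);
	if (err)
		return err;	/* caller frees the freshly allocated stack */
	return 0;
}

static void example_release_thread_stack(void *stack)
{
	/* Safe even if kaiser_map_thread_stack() failed or KAISER is disabled. */
	kaiser_unmap_thread_stack(stack);
}

With CONFIG_PAGE_TABLE_ISOLATION disabled, both calls compile away to the stubs, so callers need no #ifdefs.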
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 721bdb0226bd..0db2f3cb1b6c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -148,8 +148,9 @@ enum zone_stat_item {
 	NR_SLAB_RECLAIMABLE,
 	NR_SLAB_UNRECLAIMABLE,
 	NR_PAGETABLE,		/* used for pagetables */
-	NR_KERNEL_STACK,
 	/* Second 128 byte cacheline */
+	NR_KERNEL_STACK,
+	NR_KAISERTABLE,
 	NR_UNSTABLE_NFS,	/* NFS unstable pages */
 	NR_BOUNCE,
 	NR_VMSCAN_WRITE,
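The new NR_KAISERTABLE zone counter (shown in vmstat as nr_overhead, per the "kaiser: vmstat show NR_KAISERTABLE as nr_overhead" patch listed above) is accounted like any other zone_stat_item. A hedged sketch of that accounting pattern follows; the helper names are illustrative, not the exact arch/x86/mm/kaiser.c code:

#include <linux/mm.h>
#include <linux/vmstat.h>

/* Illustrative only: charge one freshly allocated shadow-pagetable page. */
static void example_account_kaiser_table(void *table_page)
{
	inc_zone_page_state(virt_to_page(table_page), NR_KAISERTABLE);
}

/* ...and the matching uncharge when that page is freed. */
static void example_unaccount_kaiser_table(void *table_page)
{
	dec_zone_page_state(virt_to_page(table_page), NR_KAISERTABLE);
}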
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 8f16299ca068..8902f23bb770 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -35,6 +35,12 @@
 #endif
+#ifdef CONFIG_PAGE_TABLE_ISOLATION
+#define USER_MAPPED_SECTION "..user_mapped"
+#else
+#define USER_MAPPED_SECTION ""
+#endif
+
 /*
  * Base implementations of per-CPU variable declarations and definitions, where
  * the section in which the variable is to be placed is provided by the
@@ -115,6 +121,12 @@
 #define DEFINE_PER_CPU(type, name)					\
 	DEFINE_PER_CPU_SECTION(type, name, "")
+#define DECLARE_PER_CPU_USER_MAPPED(type, name)			\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
+
+#define DEFINE_PER_CPU_USER_MAPPED(type, name)				\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION)
+
 /*
  * Declaration/definition used for per-CPU variables that must come first in
  * the set of variables.
@@ -144,6 +156,14 @@
 	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION)	\
 	____cacheline_aligned_in_smp
+#define DECLARE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)		\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED_USER_MAPPED(type, name)		\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
+
 #define DECLARE_PER_CPU_ALIGNED(type, name)				\
 	DECLARE_PER_CPU_SECTION(type, name, PER_CPU_ALIGNED_SECTION)	\
 	____cacheline_aligned
@@ -162,11 +182,21 @@
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name)			\
 	DEFINE_PER_CPU_SECTION(type, name, "..page_aligned")		\
 	__aligned(PAGE_SIZE)
+/*
+ * Declaration/definition used for per-CPU variables that must be page aligned and need to be mapped in user mode.
+ */
+#define DECLARE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)		\
+	DECLARE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
+	__aligned(PAGE_SIZE)
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(type, name)		\
+	DEFINE_PER_CPU_SECTION(type, name, USER_MAPPED_SECTION"..page_aligned") \
+	__aligned(PAGE_SIZE)
 /*
  * Declaration/definition used for per-CPU variables that must be read mostly.
  */
-#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
+#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
 	DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)				\
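Usage sketch for the new user-mapped per-CPU macros: they behave exactly like their plain counterparts, except that with CONFIG_PAGE_TABLE_ISOLATION enabled the variable is placed in the "..user_mapped" per-CPU section so it can remain visible in the user (shadow) page tables. The variable and struct names below are illustrative, not taken from this merge.

#include <linux/percpu-defs.h>

/* In a header -- illustrative declaration: */
DECLARE_PER_CPU_USER_MAPPED(unsigned long, example_entry_scratch);

/* In a .c file -- matching definition; with PAGE_TABLE_ISOLATION off,
 * USER_MAPPED_SECTION is "" and this is an ordinary per-CPU variable. */
DEFINE_PER_CPU_USER_MAPPED(unsigned long, example_entry_scratch);

/* Page-aligned variant, for larger per-CPU state that the entry code
 * must be able to reach before switching CR3: */
struct example_entry_area {
	unsigned long scratch[64];
};
DEFINE_PER_CPU_PAGE_ALIGNED_USER_MAPPED(struct example_entry_area, example_entry_area);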