| field | value | |
|---|---|---|
| author | Srinivasarao P <spathi@codeaurora.org> | 2018-01-24 12:19:03 +0530 |
| committer | Srinivasarao P <spathi@codeaurora.org> | 2018-01-24 12:20:03 +0530 |
| commit | 1f4bd7c8ff243f52e0a23e3b9b981b59ec288d90 | |
| tree | 08bc508546470164cf08b1ecd4f7bc25d834cb42 | /arch/arm64/kernel |
| parent | a5cabe9334b684f828dab1d28167d2b154536851 | |
| parent | f8518889ffe879b6266657e83b6cf6ace8111cdc | |
Merge android-4.4.111 (f851888) into msm-4.4
* refs/heads/tmp-f851888
Linux 4.4.111
Fix build error in vma.c
Map the vsyscall page with _PAGE_USER
proc: much faster /proc/vmstat
module: Issue warnings when tainting kernel
module: keep percpu symbols in module's symtab
genksyms: Handle string literals with spaces in reference files
x86/tlb: Drop the _GPL from the cpu_tlbstate export
parisc: Fix alignment of pa_tlb_lock in assembly on 32-bit SMP kernel
x86/microcode/AMD: Add support for fam17h microcode loading
Input: elantech - add new icbody type 15
ARC: uaccess: dont use "l" gcc inline asm constraint modifier
kernel/signal.c: remove the no longer needed SIGNAL_UNKILLABLE check in complete_signal()
kernel/signal.c: protect the SIGNAL_UNKILLABLE tasks from !sig_kernel_only() signals
kernel/signal.c: protect the traced SIGNAL_UNKILLABLE tasks from SIGKILL
kernel: make groups_sort calling a responsibility group_info allocators
fscache: Fix the default for fscache_maybe_release_page()
sunxi-rsb: Include OF based modalias in device uevent
crypto: pcrypt - fix freeing pcrypt instances
crypto: chacha20poly1305 - validate the digest size
crypto: n2 - cure use after free
kernel/acct.c: fix the acct->needcheck check in check_free_space()
x86/kasan: Write protect kasan zero shadow
clocksource: arch_timer: make virtual counter access configurable
arm64: issue isb when trapping CNTVCT_EL0 access
BACKPORT: arm64: Add CNTFRQ_EL0 trap handler
BACKPORT: arm64: Add CNTVCT_EL0 trap handler
ANDROID: sdcardfs: Fix missing break on default_normal
ANDROID: usb: f_fs: Prevent gadget unbind if it is already unbound
arm64: Kconfig: Reword UNMAP_KERNEL_AT_EL0 kconfig entry
arm64: use RET instruction for exiting the trampoline
FROMLIST: arm64: kaslr: Put kernel vectors address in separate data page
FROMLIST: arm64: mm: Introduce TTBR_ASID_MASK for getting at the ASID in the TTBR
FROMLIST: arm64: Kconfig: Add CONFIG_UNMAP_KERNEL_AT_EL0
FROMLIST: arm64: entry: Add fake CPU feature for unmapping the kernel at EL0
FROMLIST: arm64: tls: Avoid unconditional zeroing of tpidrro_el0 for native tasks
FROMLIST: arm64: erratum: Work around Falkor erratum #E1003 in trampoline code
FROMLIST: arm64: entry: Hook up entry trampoline to exception vectors
FROMLIST: arm64: entry: Explicitly pass exception level to kernel_ventry macro
FROMLIST: arm64: mm: Map entry trampoline into trampoline and kernel page tables
FROMLIST: arm64: entry: Add exception trampoline page for exceptions from EL0
FROMLIST: arm64: mm: Invalidate both kernel and user ASIDs when performing TLBI
FROMLIST: arm64: mm: Add arm64_kernel_unmapped_at_el0 helper
FROMLIST: arm64: mm: Allocate ASIDs in pairs
FROMLIST: arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN
FROMLIST: arm64: mm: Move ASID from TTBR0 to TTBR1
FROMLIST: arm64: mm: Temporarily disable ARM64_SW_TTBR0_PAN
FROMLIST: arm64: mm: Use non-global mappings for kernel space
UPSTREAM: arm64: factor out entry stack manipulation
UPSTREAM: arm64: tlbflush.h: add __tlbi() macro
Conflicts:
arch/arm64/include/asm/cpufeature.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/vmlinux.lds.S
drivers/clocksource/Kconfig
drivers/clocksource/arm_arch_timer.c
drivers/usb/gadget/function/f_fs.c
Change-Id: I41e84762e30c9a7b1e283850c3f780f3dbe86f44
Signed-off-by: Srinivasarao P <spathi@codeaurora.org>
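
The FROMLIST patches in this merge implement kernel page-table isolation (KPTI) for arm64: kernel mappings become non-global, each `mm` gets a pair of hardware ASIDs whose values differ only in the low ASID bit (carried in TTBR1_EL1[63:48]), and EL0 exceptions enter through a dedicated trampoline page. A minimal C sketch of the ASID pairing, assuming the bit-48 TTBR encoding used by this series (the helper names here are hypothetical, not from the patches):

```c
/*
 * Illustration of the split-ASID scheme from "arm64: mm: Allocate ASIDs
 * in pairs": each mm owns two hardware ASIDs differing only in bit 0,
 * so kernel and user TLB entries can never alias each other.
 */
#include <stdint.h>
#include <stdio.h>

#define TTBR_ASID_SHIFT 48                          /* ASID field: TTBR1_EL1[63:48] */
#define USER_ASID_FLAG  (1ULL << TTBR_ASID_SHIFT)   /* bit 0 of the ASID */

static uint64_t ttbr_with_kernel_asid(uint64_t asid, uint64_t baddr)
{
	/* the kernel runs on the even ASID of the pair */
	return (asid << TTBR_ASID_SHIFT) | baddr;
}

static uint64_t ttbr_with_user_asid(uint64_t ttbr)
{
	/* userspace runs on the odd ASID: flip bit 0 of the ASID field on */
	return ttbr | USER_ASID_FLAG;
}

int main(void)
{
	uint64_t ttbr = ttbr_with_kernel_asid(2, 0x40000000ULL); /* made-up values */

	printf("kernel ASID %llu, user ASID %llu\n",
	       (unsigned long long)(ttbr >> TTBR_ASID_SHIFT),
	       (unsigned long long)(ttbr_with_user_asid(ttbr) >> TTBR_ASID_SHIFT));
	return 0;
}
```

Because the paired ASIDs can never collide, switching between the user and kernel views of the address space needs no TLB invalidation; that is what the add/subtract-and-flip sequences in `tramp_map_kernel`/`tramp_unmap_kernel` in the entry.S hunk below rely on.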
Diffstat (limited to 'arch/arm64/kernel')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/arm64/kernel/asm-offsets.c | 5 |
| -rw-r--r-- | arch/arm64/kernel/cpufeature.c | 39 |
| -rw-r--r-- | arch/arm64/kernel/entry.S | 216 |
| -rw-r--r-- | arch/arm64/kernel/process.c | 14 |
| -rw-r--r-- | arch/arm64/kernel/vmlinux.lds.S | 21 |
5 files changed, 252 insertions, 43 deletions
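
One piece of the diff below is worth isolating before reading it: cpufeature.c registers KPTI as a detectable CPU capability whose matcher honours a `kpti=` boot parameter and otherwise falls back to a KASLR-based default. A plain-C sketch of that decision logic, for illustration only (the authoritative code is `unmap_kernel_at_el0()` in the hunk below):

```c
/*
 * Sketch of the KPTI enable decision: an explicit kpti= command-line
 * override always wins; with no override, KPTI defaults to on whenever
 * KASLR (CONFIG_RANDOMIZE_BASE) is enabled, since hiding the kernel
 * mappings also makes KASLR leaks harder.
 */
#include <stdbool.h>
#include <stdio.h>

/* 0: not forced, >0: forced on, <0: forced off -- set from "kpti=" */
static int kpti_forced;

static bool want_kpti(bool kaslr_enabled)
{
	if (kpti_forced)
		return kpti_forced > 0;

	/* Useful for KASLR robustness */
	return kaslr_enabled;
}

int main(void)
{
	kpti_forced = -1; /* as if the user booted with kpti=0 */
	printf("forced off: %d\n", want_kpti(true));

	kpti_forced = 0;  /* no override */
	printf("KASLR default: %d\n", want_kpti(true));
	return 0;
}
```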
```diff
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 36c4307c4af3..41649c509557 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -23,6 +23,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/kvm_host.h>
 #include <linux/suspend.h>
+#include <asm/fixmap.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/smp_plat.h>
@@ -159,5 +160,9 @@ int main(void)
   DEFINE(HIBERN_PBE_ORIG,	offsetof(struct pbe, orig_address));
   DEFINE(HIBERN_PBE_ADDR,	offsetof(struct pbe, address));
   DEFINE(HIBERN_PBE_NEXT,	offsetof(struct pbe, next));
+  BLANK();
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+  DEFINE(TRAMP_VALIAS,		TRAMP_VALIAS);
+#endif
   return 0;
 }
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3beb2b5cad6f..6b01f876d8e6 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -656,6 +656,39 @@ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
 	return is_kernel_in_hyp_mode();
 }
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
+
+static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry)
+{
+	/* Forced on command line? */
+	if (__kpti_forced) {
+		pr_info_once("kernel page table isolation forced %s by command line option\n",
+			     __kpti_forced > 0 ? "ON" : "OFF");
+		return __kpti_forced > 0;
+	}
+
+	/* Useful for KASLR robustness */
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
+		return true;
+
+	return false;
+}
+
+static int __init parse_kpti(char *str)
+{
+	bool enabled;
+	int ret = strtobool(str, &enabled);
+
+	if (ret)
+		return ret;
+
+	__kpti_forced = enabled ? 1 : -1;
+	return 0;
+}
+__setup("kpti=", parse_kpti);
+#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -713,6 +746,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
 		.matches = runs_at_el2,
 	},
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	{
+		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
+		.matches = unmap_kernel_at_el0,
+	},
+#endif
 	{},
 };
 
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index c4a6865a2d4e..20be80f427f2 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,6 +29,7 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
+#include <asm/mmu.h>
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/uaccess.h>
@@ -70,8 +71,31 @@
 #define BAD_FIQ		2
 #define BAD_ERROR	3
 
-	.macro	kernel_entry, el, regsize = 64
+	.macro kernel_ventry, el, label, regsize = 64
+	.align 7
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+alternative_if ARM64_UNMAP_KERNEL_AT_EL0
+	.if	\el == 0
+	.if	\regsize == 64
+	mrs	x30, tpidrro_el0
+	msr	tpidrro_el0, xzr
+	.else
+	mov	x30, xzr
+	.endif
+	.endif
+alternative_else_nop_endif
+#endif
+	sub	sp, sp, #S_FRAME_SIZE
+	b	el\()\el\()_\label
+	.endm
+
+	.macro tramp_alias, dst, sym
+	mov_q	\dst, TRAMP_VALIAS
+	add	\dst, \dst, #(\sym - .entry.tramp.text)
+	.endm
+
+	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
 	.endif
@@ -140,8 +164,8 @@ alternative_if ARM64_HAS_PAN
 alternative_else_nop_endif
 
 	.if	\el != 0
-	mrs	x21, ttbr0_el1
-	tst	x21, #0xffff << 48		// Check for the reserved ASID
+	mrs	x21, ttbr1_el1
+	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
 	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 	b.eq	1f				// TTBR0 access already disabled
 	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
@@ -208,7 +232,7 @@ alternative_else_nop_endif
 	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 	.endif
 
-	__uaccess_ttbr0_enable x0
+	__uaccess_ttbr0_enable x0, x1
 
 	.if	\el == 0
 	/*
@@ -229,24 +253,20 @@ alternative_else_nop_endif
 	.if	\el == 0
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
+	tst	x22, #PSR_MODE32_BIT		// native task?
+	b.eq	3f
+
 #ifdef CONFIG_ARM64_ERRATUM_845719
-alternative_if_not ARM64_WORKAROUND_845719
-	nop
-	nop
-#ifdef CONFIG_PID_IN_CONTEXTIDR
-	nop
-#endif
-alternative_else
-	tbz	x22, #4, 1f
+alternative_if ARM64_WORKAROUND_845719
 #ifdef CONFIG_PID_IN_CONTEXTIDR
 	mrs	x29, contextidr_el1
 	msr	contextidr_el1, x29
 #else
 	msr	contextidr_el1, xzr
 #endif
-1:
-alternative_endif
+alternative_else_nop_endif
 #endif
+3:
 	.endif
 
 	msr	elr_el1, x21			// set up the return data
@@ -268,7 +288,21 @@ alternative_endif
 	ldp	x28, x29, [sp, #16 * 14]
 	ldr	lr, [sp, #S_LR]
 	add	sp, sp, #S_FRAME_SIZE		// restore sp
-	eret					// return to kernel
+
+	.if	\el == 0
+alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	bne	4f
+	msr	far_el1, x30
+	tramp_alias	x30, tramp_exit_native
+	br	x30
+4:
+	tramp_alias	x30, tramp_exit_compat
+	br	x30
+#endif
+	.else
+	eret
+	.endif
 	.endm
 
 	.macro	irq_stack_entry
@@ -346,31 +380,31 @@ tsk	.req	x28		// current thread_info
 
 	.align	11
 ENTRY(vectors)
-	ventry	el1_sync_invalid		// Synchronous EL1t
-	ventry	el1_irq_invalid			// IRQ EL1t
-	ventry	el1_fiq_invalid			// FIQ EL1t
-	ventry	el1_error_invalid		// Error EL1t
+	kernel_ventry	1, sync_invalid			// Synchronous EL1t
+	kernel_ventry	1, irq_invalid			// IRQ EL1t
+	kernel_ventry	1, fiq_invalid			// FIQ EL1t
+	kernel_ventry	1, error_invalid		// Error EL1t
 
-	ventry	el1_sync			// Synchronous EL1h
-	ventry	el1_irq				// IRQ EL1h
-	ventry	el1_fiq_invalid			// FIQ EL1h
-	ventry	el1_error_invalid		// Error EL1h
+	kernel_ventry	1, sync				// Synchronous EL1h
+	kernel_ventry	1, irq				// IRQ EL1h
+	kernel_ventry	1, fiq_invalid			// FIQ EL1h
+	kernel_ventry	1, error_invalid		// Error EL1h
 
-	ventry	el0_sync			// Synchronous 64-bit EL0
-	ventry	el0_irq				// IRQ 64-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
-	ventry	el0_error_invalid		// Error 64-bit EL0
+	kernel_ventry	0, sync				// Synchronous 64-bit EL0
+	kernel_ventry	0, irq				// IRQ 64-bit EL0
+	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
+	kernel_ventry	0, error_invalid		// Error 64-bit EL0
 
 #ifdef CONFIG_COMPAT
-	ventry	el0_sync_compat			// Synchronous 32-bit EL0
-	ventry	el0_irq_compat			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
-	ventry	el0_error_invalid_compat	// Error 32-bit EL0
+	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid_compat, 32	// Error 32-bit EL0
 #else
-	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
-	ventry	el0_irq_invalid			// IRQ 32-bit EL0
-	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
-	ventry	el0_error_invalid		// Error 32-bit EL0
+	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
+	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
+	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
+	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
 #endif
 END(vectors)
 
@@ -963,6 +997,118 @@ __ni_sys_trace:
 	b	__sys_trace_return
 
 	.popsection				// .entry.text
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+/*
+ * Exception vectors trampoline.
+ */
+	.pushsection ".entry.tramp.text", "ax"
+
+	.macro tramp_map_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	sub	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	bic	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+#ifdef CONFIG_ARCH_MSM8996
+	/* ASID already in \tmp[63:48] */
+	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
+	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
+	/* 2MB boundary containing the vectors, so we nobble the walk cache */
+	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
+	isb
+	tlbi	vae1, \tmp
+	dsb	nsh
+#endif /* CONFIG_ARCH_MSM8996 */
+	.endm
+
+	.macro tramp_unmap_kernel, tmp
+	mrs	\tmp, ttbr1_el1
+	add	\tmp, \tmp, #(SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE)
+	orr	\tmp, \tmp, #USER_ASID_FLAG
+	msr	ttbr1_el1, \tmp
+	/*
+	 * We avoid running the post_ttbr_update_workaround here because the
+	 * user and kernel ASIDs don't have conflicting mappings, so any
+	 * "blessing" as described in:
+	 *
+	 *   http://lkml.kernel.org/r/56BB848A.6060603@caviumnetworks.com
+	 *
+	 * will not hurt correctness. Whilst this may partially defeat the
+	 * point of using split ASIDs in the first place, it avoids
+	 * the hit of invalidating the entire I-cache on every return to
+	 * userspace.
+	 */
+	.endm
+
+	.macro tramp_ventry, regsize = 64
+	.align	7
+1:
+	.if	\regsize == 64
+	msr	tpidrro_el0, x30	// Restored in kernel_ventry
+	.endif
+	bl	2f
+	b	.
+2:
+	tramp_map_kernel	x30
+#ifdef CONFIG_RANDOMIZE_BASE
+	adr	x30, tramp_vectors + PAGE_SIZE
+#ifndef CONFIG_ARCH_MSM8996
+	isb
+#endif
+	ldr	x30, [x30]
+#else
+	ldr	x30, =vectors
+#endif
+	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
+	msr	vbar_el1, x30
+	add	x30, x30, #(1b - tramp_vectors)
+	isb
+	ret
+	.endm
+
+	.macro tramp_exit, regsize = 64
+	adr	x30, tramp_vectors
+	msr	vbar_el1, x30
+	tramp_unmap_kernel	x30
+	.if	\regsize == 64
+	mrs	x30, far_el1
+	.endif
+	eret
+	.endm
+
+	.align	11
+ENTRY(tramp_vectors)
+	.space	0x400
+
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+	tramp_ventry
+
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+	tramp_ventry	32
+END(tramp_vectors)
+
+ENTRY(tramp_exit_native)
+	tramp_exit
+END(tramp_exit_native)
+
+ENTRY(tramp_exit_compat)
+	tramp_exit	32
+END(tramp_exit_compat)
+
+	.ltorg
+	.popsection				// .entry.tramp.text
+#ifdef CONFIG_RANDOMIZE_BASE
+	.pushsection ".rodata", "a"
+	.align PAGE_SHIFT
+	.globl	__entry_tramp_data_start
+__entry_tramp_data_start:
+	.quad	vectors
+	.popsection				// .rodata
+#endif /* CONFIG_RANDOMIZE_BASE */
+#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
 /*
  * Special system call wrappers.
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6d9203c78dc7..6df738c01f42 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -380,19 +380,17 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
 static void tls_thread_switch(struct task_struct *next)
 {
-	unsigned long tpidr, tpidrro;
+	unsigned long tpidr;
 
 	asm("mrs %0, tpidr_el0" : "=r" (tpidr));
 	*task_user_tls(current) = tpidr;
 
-	tpidr = *task_user_tls(next);
-	tpidrro = is_compat_thread(task_thread_info(next)) ?
-		  next->thread.tp_value : 0;
+	if (is_compat_thread(task_thread_info(next)))
+		write_sysreg(next->thread.tp_value, tpidrro_el0);
+	else if (!arm64_kernel_unmapped_at_el0())
+		write_sysreg(0, tpidrro_el0);
 
-	asm(
-	"	msr	tpidr_el0, %0\n"
-	"	msr	tpidrro_el0, %1"
-	: : "r" (tpidr), "r" (tpidrro));
+	write_sysreg(*task_user_tls(next), tpidr_el0);
 }
 
 /* Restore the UAO state depending on next's addr_limit */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 96765dc2e449..42a6c4f35af2 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -57,6 +57,17 @@ jiffies = jiffies_64;
 #define HIBERNATE_TEXT
 #endif
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+#define TRAMP_TEXT					\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_start) = .;	\
+	*(.entry.tramp.text)				\
+	. = ALIGN(PAGE_SIZE);				\
+	VMLINUX_SYMBOL(__entry_tramp_text_end) = .;
+#else
+#define TRAMP_TEXT
+#endif
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -128,6 +139,7 @@ SECTIONS
 			HYPERVISOR_TEXT
 			IDMAP_TEXT
 			HIBERNATE_TEXT
+			TRAMP_TEXT
 			*(.fixup)
 			*(.gnu.warning)
 		. = ALIGN(16);
@@ -201,6 +213,11 @@ SECTIONS
 	. += RESERVED_TTBR0_SIZE;
 #endif
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+	tramp_pg_dir = .;
+	. += PAGE_SIZE;
+#endif
+
 	_end = .;
 
 	STABS_DEBUG
@@ -221,6 +238,10 @@ ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
 	<= SZ_4K, "Hibernate exit text too big or misaligned")
 #endif
 
+#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
+ASSERT((__entry_tramp_text_end - __entry_tramp_text_start) == PAGE_SIZE,
+	"Entry trampoline text too big")
+#endif
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
  */
```
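
A note on the trampoline return path in the entry.S hunk above: `kernel_exit` does not branch to `tramp_exit_native`/`tramp_exit_compat` at their link-time addresses, but at their alias inside the fixmap, which is the only mapping still present once the kernel half of the address space is unmapped. A C sketch of the arithmetic the `tramp_alias` macro performs (all names and addresses below are illustrative, not taken from the patch):

```c
/*
 * The .entry.tramp.text section is mapped twice: once in the kernel
 * image and once at a fixed virtual alias (TRAMP_VALIAS) that remains
 * present in the user page tables. A symbol inside the section is
 * therefore entered via:
 *     TRAMP_VALIAS + (sym - start of .entry.tramp.text)
 */
#include <stdint.h>
#include <stdio.h>

static uintptr_t tramp_alias(uintptr_t sym,
			     uintptr_t tramp_text_start,
			     uintptr_t tramp_valias)
{
	return tramp_valias + (sym - tramp_text_start);
}

int main(void)
{
	uintptr_t text  = 0xffffff8008aa0000ULL; /* link address of .entry.tramp.text (made up) */
	uintptr_t alias = 0xffffffffffe00000ULL; /* TRAMP_VALIAS fixmap slot (made up) */
	uintptr_t sym   = text + 0x420;          /* e.g. offset of tramp_exit_native */

	printf("enter via %#llx\n", (unsigned long long)tramp_alias(sym, text, alias));
	return 0;
}
```

This aliasing is also why the vmlinux.lds.S hunk keeps `.entry.tramp.text` page-aligned and asserts it is exactly one page: the fixmap alias covers a single page.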
