-rw-r--r--  arch/arm64/include/asm/processor.h |  4
-rw-r--r--  arch/arm64/include/asm/signal32.h  |  2
-rw-r--r--  arch/arm64/kernel/signal32.c       |  5
-rw-r--r--  arch/arm64/kernel/vdso.c           | 82
4 files changed, 60 insertions, 33 deletions
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index a0053d23b35a..7766635158df 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -40,9 +40,9 @@
 #ifdef __KERNEL__
 #define STACK_TOP_MAX           TASK_SIZE_64
 #ifdef CONFIG_COMPAT
-#define AARCH32_VECTORS_BASE            0xffff0000
+#define AARCH32_KUSER_HELPERS_BASE      0xffff0000
 #define STACK_TOP               (test_thread_flag(TIF_32BIT) ? \
-                                AARCH32_VECTORS_BASE : STACK_TOP_MAX)
+                                AARCH32_KUSER_HELPERS_BASE : STACK_TOP_MAX)
 #else
 #define STACK_TOP               STACK_TOP_MAX
 #endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h
index 81abea0b7650..58e288aaf0ba 100644
--- a/arch/arm64/include/asm/signal32.h
+++ b/arch/arm64/include/asm/signal32.h
@@ -20,8 +20,6 @@
 #ifdef CONFIG_COMPAT
 #include <linux/compat.h>
 
-#define AARCH32_KERN_SIGRET_CODE_OFFSET 0x500
-
 int compat_setup_frame(int usig, struct ksignal *ksig, sigset_t *set,
                        struct pt_regs *regs);
 int compat_setup_rt_frame(int usig, struct ksignal *ksig, sigset_t *set,
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 107335637390..074950a11fae 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -484,14 +484,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
                retcode = ptr_to_compat(ka->sa.sa_restorer);
        } else {
                /* Set up sigreturn pointer */
+               void *sigreturn_base = current->mm->context.vdso;
                unsigned int idx = thumb << 1;
 
                if (ka->sa.sa_flags & SA_SIGINFO)
                        idx += 3;
 
-               retcode = AARCH32_VECTORS_BASE +
-                               AARCH32_KERN_SIGRET_CODE_OFFSET +
-                               (idx << 2) + thumb;
+               retcode = ptr_to_compat(sigreturn_base) + (idx << 2) + thumb;
        }
 
        regs->regs[0] = usig;
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index a6f1df69c0c3..70f465ce58f2 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -1,5 +1,7 @@
 /*
- * VDSO implementation for AArch64 and vector page setup for AArch32.
+ * Additional userspace pages setup for AArch64 and AArch32.
+ * - AArch64: vDSO pages setup, vDSO data page update.
+ * - AArch32: sigreturn and kuser helpers pages setup.
  *
  * Copyright (C) 2012 ARM Limited
  *
@@ -53,32 +55,51 @@ struct vdso_data *vdso_data = &vdso_data_store.data;
 /*
  * Create and map the vectors page for AArch32 tasks.
  */
-static struct page *vectors_page[1];
+static struct page *vectors_page[] __ro_after_init;
+static const struct vm_special_mapping compat_vdso_spec[] = {
+       {
+               /* Must be named [sigpage] for compatibility with arm. */
+               .name   = "[sigpage]",
+               .pages  = &vectors_page[0],
+       },
+       {
+               .name   = "[kuserhelpers]",
+               .pages  = &vectors_page[1],
+       },
+};
+static struct page *vectors_page[ARRAY_SIZE(compat_vdso_spec)] __ro_after_init;
 
 static int __init alloc_vectors_page(void)
 {
        extern char __kuser_helper_start[], __kuser_helper_end[];
-       extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
-       int kuser_sz = __kuser_helper_end - __kuser_helper_start;
-       int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
-       unsigned long vpage;
+       size_t kuser_sz = __kuser_helper_end - __kuser_helper_start;
+       unsigned long kuser_vpage;
 
-       vpage = get_zeroed_page(GFP_ATOMIC);
+       extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+       size_t sigret_sz =
+               __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+       unsigned long sigret_vpage;
 
-       if (!vpage)
+       sigret_vpage = get_zeroed_page(GFP_ATOMIC);
+       if (!sigret_vpage)
                return -ENOMEM;
 
-       /* kuser helpers */
-       memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
-               kuser_sz);
+       kuser_vpage = get_zeroed_page(GFP_ATOMIC);
+       if (!kuser_vpage) {
+               free_page(sigret_vpage);
+               return -ENOMEM;
+       }
 
        /* sigreturn code */
-       memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
-              __aarch32_sigret_code_start, sigret_sz);
+       memcpy((void *)sigret_vpage, __aarch32_sigret_code_start, sigret_sz);
+       flush_icache_range(sigret_vpage, sigret_vpage + PAGE_SIZE);
+       vectors_page[0] = virt_to_page(sigret_vpage);
 
-       flush_icache_range(vpage, vpage + PAGE_SIZE);
-       vectors_page[0] = virt_to_page(vpage);
+       /* kuser helpers */
+       memcpy((void *)kuser_vpage + 0x1000 - kuser_sz, __kuser_helper_start,
+              kuser_sz);
+       flush_icache_range(kuser_vpage, kuser_vpage + PAGE_SIZE);
+       vectors_page[1] = virt_to_page(kuser_vpage);
 
        return 0;
 }
@@ -87,22 +108,31 @@ arch_initcall(alloc_vectors_page);
 int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
-       unsigned long addr = AARCH32_VECTORS_BASE;
-       static const struct vm_special_mapping spec = {
-               .name   = "[vectors]",
-               .pages  = vectors_page,
-
-       };
+       unsigned long addr;
        void *ret;
 
        down_write(&mm->mmap_sem);
-       current->mm->context.vdso = (void *)addr;
+       addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+       if (IS_ERR_VALUE(addr)) {
+               ret = ERR_PTR(addr);
+               goto out;
+       }
 
-       /* Map vectors page at the high address. */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
-                                      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-                                      &spec);
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                      &compat_vdso_spec[0]);
+       if (IS_ERR(ret))
+               goto out;
 
+       current->mm->context.vdso = (void *)addr;
+
+       /* Map the kuser helpers at the ABI-defined high address. */
+       ret = _install_special_mapping(mm, AARCH32_KUSER_HELPERS_BASE,
+                                      PAGE_SIZE,
+                                      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+                                      &compat_vdso_spec[1]);
+out:
        up_write(&mm->mmap_sem);
 
        return PTR_ERR_OR_ZERO(ret);
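Illustration (not part of the patch): with this change the compat sigreturn trampolines are reached through the [sigpage] mapping recorded in current->mm->context.vdso, while the fixed 0xffff0000 page (AARCH32_KUSER_HELPERS_BASE) carries only the kuser helpers. The user-space sketch below mirrors the selection logic in the compat_setup_return() hunk above; compat_sigreturn_addr() and the base value 0x76f21000 are hypothetical stand-ins used only for this example.

#include <stdbool.h>
#include <stdio.h>

/*
 * Mirrors the trampoline selection in compat_setup_return(): idx picks one
 * of the ARM/Thumb (rt_)sigreturn snippets, (idx << 2) turns it into a byte
 * offset from the start of the sigreturn page, and bit 0 of the resulting
 * address selects Thumb mode.
 */
static unsigned int compat_sigreturn_addr(unsigned int sigpage_base,
                                          bool thumb, bool siginfo)
{
        unsigned int idx = thumb << 1;

        if (siginfo)
                idx += 3;

        return sigpage_base + (idx << 2) + thumb;
}

int main(void)
{
        unsigned int base = 0x76f21000; /* hypothetical [sigpage] address */

        printf("ARM sigreturn:      %#x\n", compat_sigreturn_addr(base, false, false));
        printf("ARM rt_sigreturn:   %#x\n", compat_sigreturn_addr(base, false, true));
        printf("Thumb sigreturn:    %#x\n", compat_sigreturn_addr(base, true, false));
        printf("Thumb rt_sigreturn: %#x\n", compat_sigreturn_addr(base, true, true));
        return 0;
}

Because the sigreturn page is now placed by get_unmapped_area() and installed as a named special mapping, it should appear as "[sigpage]" in /proc/<pid>/maps (and the helpers page as "[kuserhelpers]"), matching the name 32-bit arm userspace already expects.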
