Diffstat (limited to 'arch/arm/kernel')
 arch/arm/kernel/calls.S         | 409
 arch/arm/kernel/cpuidle.c       |   2
 arch/arm/kernel/entry-common.S  |  86
 arch/arm/kernel/irq.c           |   1
 arch/arm/kernel/module.c        |  11
 arch/arm/kernel/perf_event_v7.c |  15
 arch/arm/kernel/process.c       |  73
 arch/arm/kernel/psci_smp.c      |   4
 arch/arm/kernel/ptrace.c        |  13
 arch/arm/kernel/setup.c         |  20
 arch/arm/kernel/signal.c        |   7
 arch/arm/kernel/smp.c           |  67
 arch/arm/kernel/stacktrace.c    |   6
 arch/arm/kernel/topology.c      | 282
 arch/arm/kernel/traps.c         |  37
 arch/arm/kernel/vdso.c          |  28
16 files changed, 464 insertions(+), 597 deletions(-)
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
deleted file mode 100644
index ac368bb068d1..000000000000
--- a/arch/arm/kernel/calls.S
+++ /dev/null
@@ -1,409 +0,0 @@
-/*
- *  linux/arch/arm/kernel/calls.S
- *
- *  Copyright (C) 1995-2005 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  This file is included thrice in entry-common.S
- */
-/* 0 */		CALL(sys_restart_syscall)
-		CALL(sys_exit)
-		CALL(sys_fork)
-		CALL(sys_read)
-		CALL(sys_write)
-/* 5 */		CALL(sys_open)
-		CALL(sys_close)
-		CALL(sys_ni_syscall)		/* was sys_waitpid */
-		CALL(sys_creat)
-		CALL(sys_link)
-/* 10 */	CALL(sys_unlink)
-		CALL(sys_execve)
-		CALL(sys_chdir)
-		CALL(OBSOLETE(sys_time))	/* used by libc4 */
-		CALL(sys_mknod)
-/* 15 */	CALL(sys_chmod)
-		CALL(sys_lchown16)
-		CALL(sys_ni_syscall)		/* was sys_break */
-		CALL(sys_ni_syscall)		/* was sys_stat */
-		CALL(sys_lseek)
-/* 20 */	CALL(sys_getpid)
-		CALL(sys_mount)
-		CALL(OBSOLETE(sys_oldumount))	/* used by libc4 */
-		CALL(sys_setuid16)
-		CALL(sys_getuid16)
-/* 25 */	CALL(OBSOLETE(sys_stime))
-		CALL(sys_ptrace)
-		CALL(OBSOLETE(sys_alarm))	/* used by libc4 */
-		CALL(sys_ni_syscall)		/* was sys_fstat */
-		CALL(sys_pause)
-/* 30 */	CALL(OBSOLETE(sys_utime))	/* used by libc4 */
-		CALL(sys_ni_syscall)		/* was sys_stty */
-		CALL(sys_ni_syscall)		/* was sys_getty */
-		CALL(sys_access)
-		CALL(sys_nice)
-/* 35 */	CALL(sys_ni_syscall)		/* was sys_ftime */
-		CALL(sys_sync)
-		CALL(sys_kill)
-		CALL(sys_rename)
-		CALL(sys_mkdir)
-/* 40 */	CALL(sys_rmdir)
-		CALL(sys_dup)
-		CALL(sys_pipe)
-		CALL(sys_times)
-		CALL(sys_ni_syscall)		/* was sys_prof */
-/* 45 */	CALL(sys_brk)
-		CALL(sys_setgid16)
-		CALL(sys_getgid16)
-		CALL(sys_ni_syscall)		/* was sys_signal */
-		CALL(sys_geteuid16)
-/* 50 */	CALL(sys_getegid16)
-		CALL(sys_acct)
-		CALL(sys_umount)
-		CALL(sys_ni_syscall)		/* was sys_lock */
-		CALL(sys_ioctl)
-/* 55 */	CALL(sys_fcntl)
-		CALL(sys_ni_syscall)		/* was sys_mpx */
-		CALL(sys_setpgid)
-		CALL(sys_ni_syscall)		/* was sys_ulimit */
-		CALL(sys_ni_syscall)		/* was sys_olduname */
-/* 60 */	CALL(sys_umask)
-		CALL(sys_chroot)
-		CALL(sys_ustat)
-		CALL(sys_dup2)
-		CALL(sys_getppid)
-/* 65 */	CALL(sys_getpgrp)
-		CALL(sys_setsid)
-		CALL(sys_sigaction)
-		CALL(sys_ni_syscall)		/* was sys_sgetmask */
-		CALL(sys_ni_syscall)		/* was sys_ssetmask */
-/* 70 */	CALL(sys_setreuid16)
-		CALL(sys_setregid16)
-		CALL(sys_sigsuspend)
-		CALL(sys_sigpending)
-		CALL(sys_sethostname)
-/* 75 */	CALL(sys_setrlimit)
-		CALL(OBSOLETE(sys_old_getrlimit)) /* used by libc4 */
-		CALL(sys_getrusage)
-		CALL(sys_gettimeofday)
-		CALL(sys_settimeofday)
-/* 80 */	CALL(sys_getgroups16)
-		CALL(sys_setgroups16)
-		CALL(OBSOLETE(sys_old_select))	/* used by libc4 */
-		CALL(sys_symlink)
-		CALL(sys_ni_syscall)		/* was sys_lstat */
-/* 85 */	CALL(sys_readlink)
-		CALL(sys_uselib)
-		CALL(sys_swapon)
-		CALL(sys_reboot)
-		CALL(OBSOLETE(sys_old_readdir))	/* used by libc4 */
-/* 90 */	CALL(OBSOLETE(sys_old_mmap))	/* used by libc4 */
-		CALL(sys_munmap)
-		CALL(sys_truncate)
-		CALL(sys_ftruncate)
-		CALL(sys_fchmod)
-/* 95 */	CALL(sys_fchown16)
-		CALL(sys_getpriority)
-		CALL(sys_setpriority)
-		CALL(sys_ni_syscall)		/* was sys_profil */
-		CALL(sys_statfs)
-/* 100 */	CALL(sys_fstatfs)
-		CALL(sys_ni_syscall)		/* sys_ioperm */
-		CALL(OBSOLETE(ABI(sys_socketcall, sys_oabi_socketcall)))
-		CALL(sys_syslog)
-		CALL(sys_setitimer)
-/* 105 */	CALL(sys_getitimer)
-		CALL(sys_newstat)
-		CALL(sys_newlstat)
-		CALL(sys_newfstat)
-		CALL(sys_ni_syscall)		/* was sys_uname */
-/* 110 */	CALL(sys_ni_syscall)		/* was sys_iopl */
-		CALL(sys_vhangup)
-		CALL(sys_ni_syscall)
-		CALL(OBSOLETE(sys_syscall))	/* call a syscall */
-		CALL(sys_wait4)
-/* 115 */	CALL(sys_swapoff)
-		CALL(sys_sysinfo)
-		CALL(OBSOLETE(ABI(sys_ipc, sys_oabi_ipc)))
-		CALL(sys_fsync)
-		CALL(sys_sigreturn_wrapper)
-/* 120 */	CALL(sys_clone)
-		CALL(sys_setdomainname)
-		CALL(sys_newuname)
-		CALL(sys_ni_syscall)		/* modify_ldt */
-		CALL(sys_adjtimex)
-/* 125 */	CALL(sys_mprotect)
-		CALL(sys_sigprocmask)
-		CALL(sys_ni_syscall)		/* was sys_create_module */
-		CALL(sys_init_module)
-		CALL(sys_delete_module)
-/* 130 */	CALL(sys_ni_syscall)		/* was sys_get_kernel_syms */
-		CALL(sys_quotactl)
-		CALL(sys_getpgid)
-		CALL(sys_fchdir)
-		CALL(sys_bdflush)
-/* 135 */	CALL(sys_sysfs)
-		CALL(sys_personality)
-		CALL(sys_ni_syscall)		/* reserved for afs_syscall */
-		CALL(sys_setfsuid16)
-		CALL(sys_setfsgid16)
-/* 140 */	CALL(sys_llseek)
-		CALL(sys_getdents)
-		CALL(sys_select)
-		CALL(sys_flock)
-		CALL(sys_msync)
-/* 145 */	CALL(sys_readv)
-		CALL(sys_writev)
-		CALL(sys_getsid)
-		CALL(sys_fdatasync)
-		CALL(sys_sysctl)
-/* 150 */	CALL(sys_mlock)
-		CALL(sys_munlock)
-		CALL(sys_mlockall)
-		CALL(sys_munlockall)
-		CALL(sys_sched_setparam)
-/* 155 */	CALL(sys_sched_getparam)
-		CALL(sys_sched_setscheduler)
-		CALL(sys_sched_getscheduler)
-		CALL(sys_sched_yield)
-		CALL(sys_sched_get_priority_max)
-/* 160 */	CALL(sys_sched_get_priority_min)
-		CALL(sys_sched_rr_get_interval)
-		CALL(sys_nanosleep)
-		CALL(sys_mremap)
-		CALL(sys_setresuid16)
-/* 165 */	CALL(sys_getresuid16)
-		CALL(sys_ni_syscall)		/* vm86 */
-		CALL(sys_ni_syscall)		/* was sys_query_module */
-		CALL(sys_poll)
-		CALL(sys_ni_syscall)		/* was nfsservctl */
-/* 170 */	CALL(sys_setresgid16)
-		CALL(sys_getresgid16)
-		CALL(sys_prctl)
-		CALL(sys_rt_sigreturn_wrapper)
-		CALL(sys_rt_sigaction)
-/* 175 */	CALL(sys_rt_sigprocmask)
-		CALL(sys_rt_sigpending)
-		CALL(sys_rt_sigtimedwait)
-		CALL(sys_rt_sigqueueinfo)
-		CALL(sys_rt_sigsuspend)
-/* 180 */	CALL(ABI(sys_pread64, sys_oabi_pread64))
-		CALL(ABI(sys_pwrite64, sys_oabi_pwrite64))
-		CALL(sys_chown16)
-		CALL(sys_getcwd)
-		CALL(sys_capget)
-/* 185 */	CALL(sys_capset)
-		CALL(sys_sigaltstack)
-		CALL(sys_sendfile)
-		CALL(sys_ni_syscall)		/* getpmsg */
-		CALL(sys_ni_syscall)		/* putpmsg */
-/* 190 */	CALL(sys_vfork)
-		CALL(sys_getrlimit)
-		CALL(sys_mmap2)
-		CALL(ABI(sys_truncate64, sys_oabi_truncate64))
-		CALL(ABI(sys_ftruncate64, sys_oabi_ftruncate64))
-/* 195 */	CALL(ABI(sys_stat64, sys_oabi_stat64))
-		CALL(ABI(sys_lstat64, sys_oabi_lstat64))
-		CALL(ABI(sys_fstat64, sys_oabi_fstat64))
-		CALL(sys_lchown)
-		CALL(sys_getuid)
-/* 200 */	CALL(sys_getgid)
-		CALL(sys_geteuid)
-		CALL(sys_getegid)
-		CALL(sys_setreuid)
-		CALL(sys_setregid)
-/* 205 */	CALL(sys_getgroups)
-		CALL(sys_setgroups)
-		CALL(sys_fchown)
-		CALL(sys_setresuid)
-		CALL(sys_getresuid)
-/* 210 */	CALL(sys_setresgid)
-		CALL(sys_getresgid)
-		CALL(sys_chown)
-		CALL(sys_setuid)
-		CALL(sys_setgid)
-/* 215 */	CALL(sys_setfsuid)
-		CALL(sys_setfsgid)
-		CALL(sys_getdents64)
-		CALL(sys_pivot_root)
-		CALL(sys_mincore)
-/* 220 */	CALL(sys_madvise)
-		CALL(ABI(sys_fcntl64, sys_oabi_fcntl64))
-		CALL(sys_ni_syscall)		/* TUX */
-		CALL(sys_ni_syscall)
-		CALL(sys_gettid)
-/* 225 */	CALL(ABI(sys_readahead, sys_oabi_readahead))
-		CALL(sys_setxattr)
-		CALL(sys_lsetxattr)
-		CALL(sys_fsetxattr)
-		CALL(sys_getxattr)
-/* 230 */	CALL(sys_lgetxattr)
-		CALL(sys_fgetxattr)
-		CALL(sys_listxattr)
-		CALL(sys_llistxattr)
-		CALL(sys_flistxattr)
-/* 235 */	CALL(sys_removexattr)
-		CALL(sys_lremovexattr)
-		CALL(sys_fremovexattr)
-		CALL(sys_tkill)
-		CALL(sys_sendfile64)
-/* 240 */	CALL(sys_futex)
-		CALL(sys_sched_setaffinity)
-		CALL(sys_sched_getaffinity)
-		CALL(sys_io_setup)
-		CALL(sys_io_destroy)
-/* 245 */	CALL(sys_io_getevents)
-		CALL(sys_io_submit)
-		CALL(sys_io_cancel)
-		CALL(sys_exit_group)
-		CALL(sys_lookup_dcookie)
-/* 250 */	CALL(sys_epoll_create)
-		CALL(ABI(sys_epoll_ctl, sys_oabi_epoll_ctl))
-		CALL(ABI(sys_epoll_wait, sys_oabi_epoll_wait))
-		CALL(sys_remap_file_pages)
-		CALL(sys_ni_syscall)		/* sys_set_thread_area */
-/* 255 */	CALL(sys_ni_syscall)		/* sys_get_thread_area */
-		CALL(sys_set_tid_address)
-		CALL(sys_timer_create)
-		CALL(sys_timer_settime)
-		CALL(sys_timer_gettime)
-/* 260 */	CALL(sys_timer_getoverrun)
-		CALL(sys_timer_delete)
-		CALL(sys_clock_settime)
-		CALL(sys_clock_gettime)
-		CALL(sys_clock_getres)
-/* 265 */	CALL(sys_clock_nanosleep)
-		CALL(sys_statfs64_wrapper)
-		CALL(sys_fstatfs64_wrapper)
-		CALL(sys_tgkill)
-		CALL(sys_utimes)
-/* 270 */	CALL(sys_arm_fadvise64_64)
-		CALL(sys_pciconfig_iobase)
-		CALL(sys_pciconfig_read)
-		CALL(sys_pciconfig_write)
-		CALL(sys_mq_open)
-/* 275 */	CALL(sys_mq_unlink)
-		CALL(sys_mq_timedsend)
-		CALL(sys_mq_timedreceive)
-		CALL(sys_mq_notify)
-		CALL(sys_mq_getsetattr)
-/* 280 */	CALL(sys_waitid)
-		CALL(sys_socket)
-		CALL(ABI(sys_bind, sys_oabi_bind))
-		CALL(ABI(sys_connect, sys_oabi_connect))
-		CALL(sys_listen)
-/* 285 */	CALL(sys_accept)
-		CALL(sys_getsockname)
-		CALL(sys_getpeername)
-		CALL(sys_socketpair)
-		CALL(sys_send)
-/* 290 */	CALL(ABI(sys_sendto, sys_oabi_sendto))
-		CALL(sys_recv)
-		CALL(sys_recvfrom)
-		CALL(sys_shutdown)
-		CALL(sys_setsockopt)
-/* 295 */	CALL(sys_getsockopt)
-		CALL(ABI(sys_sendmsg, sys_oabi_sendmsg))
-		CALL(sys_recvmsg)
-		CALL(ABI(sys_semop, sys_oabi_semop))
-		CALL(sys_semget)
-/* 300 */	CALL(sys_semctl)
-		CALL(sys_msgsnd)
-		CALL(sys_msgrcv)
-		CALL(sys_msgget)
-		CALL(sys_msgctl)
-/* 305 */	CALL(sys_shmat)
-		CALL(sys_shmdt)
-		CALL(sys_shmget)
-		CALL(sys_shmctl)
-		CALL(sys_add_key)
-/* 310 */	CALL(sys_request_key)
-		CALL(sys_keyctl)
-		CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
-/* vserver */	CALL(sys_ni_syscall)
-		CALL(sys_ioprio_set)
-/* 315 */	CALL(sys_ioprio_get)
-		CALL(sys_inotify_init)
-		CALL(sys_inotify_add_watch)
-		CALL(sys_inotify_rm_watch)
-		CALL(sys_mbind)
-/* 320 */	CALL(sys_get_mempolicy)
-		CALL(sys_set_mempolicy)
-		CALL(sys_openat)
-		CALL(sys_mkdirat)
-		CALL(sys_mknodat)
-/* 325 */	CALL(sys_fchownat)
-		CALL(sys_futimesat)
-		CALL(ABI(sys_fstatat64, sys_oabi_fstatat64))
-		CALL(sys_unlinkat)
-		CALL(sys_renameat)
-/* 330 */	CALL(sys_linkat)
-		CALL(sys_symlinkat)
-		CALL(sys_readlinkat)
-		CALL(sys_fchmodat)
-		CALL(sys_faccessat)
-/* 335 */	CALL(sys_pselect6)
-		CALL(sys_ppoll)
-		CALL(sys_unshare)
-		CALL(sys_set_robust_list)
-		CALL(sys_get_robust_list)
-/* 340 */	CALL(sys_splice)
-		CALL(sys_sync_file_range2)
-		CALL(sys_tee)
-		CALL(sys_vmsplice)
-		CALL(sys_move_pages)
-/* 345 */	CALL(sys_getcpu)
-		CALL(sys_epoll_pwait)
-		CALL(sys_kexec_load)
-		CALL(sys_utimensat)
-		CALL(sys_signalfd)
-/* 350 */	CALL(sys_timerfd_create)
-		CALL(sys_eventfd)
-		CALL(sys_fallocate)
-		CALL(sys_timerfd_settime)
-		CALL(sys_timerfd_gettime)
-/* 355 */	CALL(sys_signalfd4)
-		CALL(sys_eventfd2)
-		CALL(sys_epoll_create1)
-		CALL(sys_dup3)
-		CALL(sys_pipe2)
-/* 360 */	CALL(sys_inotify_init1)
-		CALL(sys_preadv)
-		CALL(sys_pwritev)
-		CALL(sys_rt_tgsigqueueinfo)
-		CALL(sys_perf_event_open)
-/* 365 */	CALL(sys_recvmmsg)
-		CALL(sys_accept4)
-		CALL(sys_fanotify_init)
-		CALL(sys_fanotify_mark)
-		CALL(sys_prlimit64)
-/* 370 */	CALL(sys_name_to_handle_at)
-		CALL(sys_open_by_handle_at)
-		CALL(sys_clock_adjtime)
-		CALL(sys_syncfs)
-		CALL(sys_sendmmsg)
-/* 375 */	CALL(sys_setns)
-		CALL(sys_process_vm_readv)
-		CALL(sys_process_vm_writev)
-		CALL(sys_kcmp)
-		CALL(sys_finit_module)
-/* 380 */	CALL(sys_sched_setattr)
-		CALL(sys_sched_getattr)
-		CALL(sys_renameat2)
-		CALL(sys_seccomp)
-		CALL(sys_getrandom)
-/* 385 */	CALL(sys_memfd_create)
-		CALL(sys_bpf)
-		CALL(sys_execveat)
-		CALL(sys_userfaultfd)
-		CALL(sys_membarrier)
-		CALL(sys_mlock2)
-#ifndef syscalls_counted
-.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
-#define syscalls_counted
-#endif
-.rept syscalls_padding
-		CALL(sys_ni_syscall)
-.endr
diff --git a/arch/arm/kernel/cpuidle.c b/arch/arm/kernel/cpuidle.c
index 318da33465f4..703926e7007b 100644
--- a/arch/arm/kernel/cpuidle.c
+++ b/arch/arm/kernel/cpuidle.c
@@ -56,7 +56,7 @@ int arm_cpuidle_suspend(int index)
 	int cpu = smp_processor_id();
 
 	if (cpuidle_ops[cpu].suspend)
-		ret = cpuidle_ops[cpu].suspend(cpu, index);
+		ret = cpuidle_ops[cpu].suspend(index);
 
 	return ret;
 }
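The cpuidle.c change above narrows the per-CPU suspend callback: the op always runs on the CPU that is entering idle, so the CPU number argument is redundant and only the state index is passed. A minimal C sketch of the dispatch after the change (the struct and array here are illustrative stand-ins, not the exact kernel declarations):

#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/threads.h>

/* Illustrative stand-in for the ops table in arch/arm/kernel/cpuidle.c */
struct sketch_cpuidle_ops {
	int (*suspend)(unsigned long index);	/* was (int cpu, unsigned long index) */
};

static struct sketch_cpuidle_ops cpuidle_ops[NR_CPUS];

static int sketch_cpuidle_suspend(unsigned long index)
{
	int ret = -EOPNOTSUPP;
	int cpu = smp_processor_id();	/* the callback runs on this CPU anyway */

	if (cpuidle_ops[cpu].suspend)
		ret = cpuidle_ops[cpu].suspend(index);

	return ret;
}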
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index d31f0300d2c9..9cf480b07565 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -12,6 +12,12 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/unwind.h>
+#include <asm/memory.h>
+#ifdef CONFIG_AEABI
+#include <asm/unistd-oabi.h>
+#endif
+
+	.equ	NR_syscalls, __NR_syscalls
 
 #ifdef CONFIG_NEED_RET_TO_USER
 #include <mach/entry-macro.S>
@@ -35,6 +41,9 @@ ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
 	disable_irq_notrace			@ disable interrupts
+	ldr	r2, [tsk, #TI_ADDR_LIMIT]
+	cmp	r2, #TASK_SIZE
+	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
 	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 	bne	fast_work_pending
@@ -61,6 +70,9 @@ ret_fast_syscall:
 UNWIND(.cantunwind	)
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
 	disable_irq_notrace			@ disable interrupts
+	ldr	r2, [tsk, #TI_ADDR_LIMIT]
+	cmp	r2, #TASK_SIZE
+	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
 	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
 	beq	no_work_pending
@@ -93,6 +105,9 @@ ENTRY(ret_to_user)
 ret_slow_syscall:
 	disable_irq_notrace			@ disable interrupts
 ENTRY(ret_to_user_from_irq)
+	ldr	r2, [tsk, #TI_ADDR_LIMIT]
+	cmp	r2, #TASK_SIZE
+	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]
 	tst	r1, #_TIF_WORK_MASK
 	bne	slow_work_pending
@@ -120,21 +135,6 @@ ENTRY(ret_from_fork)
 	b	ret_slow_syscall
 ENDPROC(ret_from_fork)
 
-	.equ NR_syscalls,0
-#define CALL(x) .equ NR_syscalls,NR_syscalls+1
-#include "calls.S"
-
-/*
- * Ensure that the system call table is equal to __NR_syscalls,
- * which is the value the rest of the system sees
- */
-.ifne NR_syscalls - __NR_syscalls
-.error "__NR_syscalls is not equal to the size of the syscall table"
-.endif
-
-#undef CALL
-#define CALL(x) .long x
-
 /*=============================================================================
  * SWI handler
  *-----------------------------------------------------------------------------
@@ -282,22 +282,48 @@ __cr_alignment:
 #endif
 	.ltorg
 
+	.macro	syscall_table_start, sym
+	.equ	__sys_nr, 0
+	.type	\sym, #object
+ENTRY(\sym)
+	.endm
+
+	.macro	syscall, nr, func
+	.ifgt	__sys_nr - \nr
+	.error	"Duplicated/unordered system call entry"
+	.endif
+	.rept	\nr - __sys_nr
+	.long	sys_ni_syscall
+	.endr
+	.long	\func
+	.equ	__sys_nr, \nr + 1
+	.endm
+
+	.macro	syscall_table_end, sym
+	.ifgt	__sys_nr - __NR_syscalls
+	.error	"System call table too big"
+	.endif
+	.rept	__NR_syscalls - __sys_nr
+	.long	sys_ni_syscall
+	.endr
+	.size	\sym, . - \sym
+	.endm
+
+#define NATIVE(nr, func) syscall nr, func
+
 /*
  * This is the syscall table declaration for native ABI syscalls.
  * With EABI a couple syscalls are obsolete and defined as sys_ni_syscall.
  */
-#define ABI(native, compat) native
+	syscall_table_start sys_call_table
+#define COMPAT(nr, native, compat) syscall nr, native
 #ifdef CONFIG_AEABI
-#define OBSOLETE(syscall) sys_ni_syscall
+#include <calls-eabi.S>
 #else
-#define OBSOLETE(syscall) syscall
+#include <calls-oabi.S>
 #endif
-
-	.type	sys_call_table, #object
-ENTRY(sys_call_table)
-#include "calls.S"
-#undef ABI
-#undef OBSOLETE
+#undef COMPAT
+	syscall_table_end sys_call_table
 
 /*============================================================================
  * Special system call wrappers
@@ -402,14 +428,10 @@ ENDPROC(sys_oabi_readahead)
  * Let's declare a second syscall table for old ABI binaries
  * using the compatibility syscall entries.
  */
-#define ABI(native, compat) compat
-#define OBSOLETE(syscall) syscall
-
-	.type	sys_oabi_call_table, #object
-ENTRY(sys_oabi_call_table)
-#include "calls.S"
-#undef ABI
-#undef OBSOLETE
+	syscall_table_start sys_oabi_call_table
+#define COMPAT(nr, native, compat) syscall nr, compat
+#include <calls-oabi.S>
+	syscall_table_end sys_oabi_call_table
 
 #endif
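The syscall_table_start/syscall/syscall_table_end macros added above replace the old trick of including calls.S three times: each table entry is emitted at its declared number, any gap is padded with sys_ni_syscall, and the table is sized to exactly __NR_syscalls. A user-space C analogue of that gap-filling logic, purely for illustration (the tiny table, names and numbers below are made up):

#include <stdio.h>

#define NR_SYSCALLS 8				/* stand-in for __NR_syscalls */

typedef long (*call_t)(void);

static long sys_ni_syscall(void) { return -38; }	/* -ENOSYS */
static long my_read(void)  { return 0; }
static long my_write(void) { return 1; }

static call_t table[NR_SYSCALLS];
static int next_nr;				/* mirrors the macros' __sys_nr */

/* C analogue of ".macro syscall nr, func": pad holes, then place func */
static void place(int nr, call_t func)
{
	while (next_nr < nr)
		table[next_nr++] = sys_ni_syscall;
	table[next_nr++] = func;
}

int main(void)
{
	place(3, my_read);			/* slots 0..2 become ni_syscall */
	place(4, my_write);
	while (next_nr < NR_SYSCALLS)		/* "syscall_table_end" padding */
		table[next_nr++] = sys_ni_syscall;

	printf("slot 1 -> %ld, slot 3 -> %ld\n", table[1](), table[3]());
	return 0;
}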
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 900c591913d5..b64e3df58b6e 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -36,6 +36,7 @@
 #include <linux/kallsyms.h>
 #include <linux/proc_fs.h>
 #include <linux/export.h>
+#include <linux/cpumask.h>
 
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/hardware/cache-uniphier.h>
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index efdddcb97dd1..953f4619a6d2 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -40,8 +40,15 @@
 #ifdef CONFIG_MMU
 void *module_alloc(unsigned long size)
 {
-	void *p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+	gfp_t gfp_mask = GFP_KERNEL;
+	void *p;
+
+	/* Silence the initial allocation */
+	if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS))
+		gfp_mask |= __GFP_NOWARN;
+
+	p = __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				gfp_mask, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
 				__builtin_return_address(0));
 	if (!IS_ENABLED(CONFIG_ARM_MODULE_PLTS) || p)
 		return p;
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 126dc679b230..0938911e4df0 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -600,6 +600,11 @@ static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 #define ARMV7_EXCLUDE_USER	(1 << 30)
 #define ARMV7_INCLUDE_HYP	(1 << 27)
 
+/*
+ * Secure debug enable reg
+ */
+#define ARMV7_SDER_SUNIDEN	BIT(1)	/* Permit non-invasive debug */
+
 static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
@@ -982,7 +987,13 @@ static int armv7pmu_set_event_filter(struct hw_perf_event *event,
 static void armv7pmu_reset(void *info)
 {
 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
-	u32 idx, nb_cnt = cpu_pmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events, val;
+
+	if (cpu_pmu->secure_access) {
+		asm volatile("mrc p15, 0, %0, c1, c1, 1" : "=r" (val));
+		val |= ARMV7_SDER_SUNIDEN;
+		asm volatile("mcr p15, 0, %0, c1, c1, 1" : : "r" (val));
+	}
 
 	/* The counter and interrupt enable registers are unknown at reset. */
 	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
@@ -991,7 +1002,7 @@ static void armv7pmu_reset(void *info)
 	}
 
 	/* Initialize & Reset PMNC: C and P bits */
-	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_P | ARMV7_PMNC_C);
 }
 
 static int armv7_a8_map_event(struct perf_event *event)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index e06307ad1d1c..fd739fe04834 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -93,77 +93,6 @@ void arch_cpu_idle_exit(void)
 	idle_notifier_call_chain(IDLE_END);
 }
 
-/*
- * dump a block of kernel memory from around the given address
- */
-static void show_data(unsigned long addr, int nbytes, const char *name)
-{
-	int	i, j;
-	int	nlines;
-	u32	*p;
-
-	/*
-	 * don't attempt to dump non-kernel addresses or
-	 * values that are probably just small negative numbers
-	 */
-	if (addr < PAGE_OFFSET || addr > -256UL)
-		return;
-
-	printk("\n%s: %#lx:\n", name, addr);
-
-	/*
-	 * round address down to a 32 bit boundary
-	 * and always dump a multiple of 32 bytes
-	 */
-	p = (u32 *)(addr & ~(sizeof(u32) - 1));
-	nbytes += (addr & (sizeof(u32) - 1));
-	nlines = (nbytes + 31) / 32;
-
-	for (i = 0; i < nlines; i++) {
-		/*
-		 * just display low 16 bits of address to keep
-		 * each line of the dump < 80 characters
-		 */
-		printk("%04lx ", (unsigned long)p & 0xffff);
-		for (j = 0; j < 8; j++) {
-			u32	data;
-			if (probe_kernel_address(p, data)) {
-				printk(" ********");
-			} else {
-				printk(" %08x", data);
-			}
-			++p;
-		}
-		printk("\n");
-	}
-}
-
-static void show_extra_register_data(struct pt_regs *regs, int nbytes)
-{
-	mm_segment_t fs;
-
-	fs = get_fs();
-	set_fs(KERNEL_DS);
-	show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
-	show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
-	show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
-	show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
-	show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
-	show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
-	show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
-	show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
-	show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
-	show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
-	show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
-	show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
-	show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
-	show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
-	show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
-	show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
-	set_fs(fs);
-}
-
 void __show_regs(struct pt_regs *regs)
 {
 	unsigned long flags;
@@ -251,8 +180,6 @@ void __show_regs(struct pt_regs *regs)
 		printk("Control: %08x%s\n", ctrl, buf);
 	}
 #endif
-
-	show_extra_register_data(regs, 128);
 }
 
 void show_regs(struct pt_regs * regs)
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index 9d479b2ea40d..aaf7a2561427 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -98,12 +98,12 @@ int psci_cpu_kill(unsigned int cpu)
 	for (i = 0; i < 10; i++) {
 		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
 		if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
-			pr_info("CPU%d killed.\n", cpu);
+			pr_debug("CPU%d killed.\n", cpu);
 			return 1;
 		}
 
 		msleep(10);
-		pr_info("Retrying again to check for CPU kill\n");
+		pr_debug("Retrying again to check for CPU kill\n");
 	}
 
 	pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 711d854ea13a..364985c96a92 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -932,18 +932,19 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
 {
 	current_thread_info()->syscall = scno;
 
-	/* Do the secure computing check first; failures should be fast. */
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+	/* Do seccomp after ptrace; syscall may have changed. */
 #ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-	if (secure_computing() == -1)
+	if (secure_computing(NULL) == -1)
 		return -1;
 #else
 	/* XXX: remove this once OABI gets fixed */
-	secure_computing_strict(scno);
+	secure_computing_strict(current_thread_info()->syscall);
 #endif
 
-	if (test_thread_flag(TIF_SYSCALL_TRACE))
-		tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
-
+	/* Tracer or seccomp may have changed syscall. */
 	scno = current_thread_info()->syscall;
 
 	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 05062273604f..18013f7e2b31 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -111,6 +111,15 @@
 unsigned int elf_hwcap2 __read_mostly;
 EXPORT_SYMBOL(elf_hwcap2);
 
+char* (*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
+
+unsigned int boot_reason;
+EXPORT_SYMBOL(boot_reason);
+
+unsigned int cold_boot;
+EXPORT_SYMBOL(cold_boot);
+
 #ifdef MULTI_CPU
 struct processor processor __read_mostly;
 #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
@@ -944,6 +953,8 @@ void __init hyp_mode_check(void)
 #endif
 }
 
+void __init __weak init_random_pool(void) { }
+
 void __init setup_arch(char **cmdline_p)
 {
 	const struct machine_desc *mdesc;
@@ -1023,6 +1034,8 @@ void __init setup_arch(char **cmdline_p)
 
 	if (mdesc->init_early)
 		mdesc->init_early();
+
+	init_random_pool();
 }
 
@@ -1038,7 +1051,7 @@ static int __init topology_init(void)
 	return 0;
 }
 
-subsys_initcall(topology_init);
+postcore_initcall(topology_init);
 
 #ifdef CONFIG_HAVE_PROC_CPU
 static int __init proc_cpu_init(void)
@@ -1147,7 +1160,10 @@ static int c_show(struct seq_file *m, void *v)
 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
 	}
 
-	seq_printf(m, "Hardware\t: %s\n", machine_name);
+	if (!arch_read_hardware_id)
+		seq_printf(m, "Hardware\t: %s\n", machine_name);
+	else
+		seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
 	seq_printf(m, "Revision\t: %04x\n", system_rev);
 	seq_printf(m, "Serial\t\t: %s\n", system_serial);
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 180c1782ad63..f82a1ac22164 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/tracehook.h>
 #include <linux/uprobes.h>
+#include <linux/syscalls.h>
 
 #include <asm/elf.h>
 #include <asm/cacheflush.h>
@@ -642,3 +643,9 @@ struct page *get_signal_page(void)
 
 	return page;
 }
+
+/* Defer to generic check */
+asmlinkage void addr_limit_check_failed(void)
+{
+	addr_limit_user_check();
+}
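The setup.c hunks above add an arch_read_hardware_id hook that, when non-NULL, overrides the machine_name-based "Hardware" line in /proc/cpuinfo. A hypothetical board file could install it like this (the SoC string and function names are invented for the example):

#include <linux/init.h>

extern char *(*arch_read_hardware_id)(void);	/* added in setup.c above */

static char *example_soc_hardware_id(void)
{
	return "Example SoC (illustrative)";
}

static int __init example_soc_hwid_init(void)
{
	arch_read_hardware_id = example_soc_hardware_id;
	return 0;
}
early_initcall(example_soc_hwid_init);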
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d2033d09125f..35a4633d5f1a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -417,12 +417,12 @@ asmlinkage void secondary_start_kernel(void)
 	if (smp_ops.smp_secondary_init)
 		smp_ops.smp_secondary_init(cpu);
 
+	smp_store_cpu_info(cpu);
+
 	notify_cpu_starting(cpu);
 
 	calibrate_delay();
 
-	smp_store_cpu_info(cpu);
-
 	/*
 	 * OK, now it's safe to let the boot CPU continue.  Wait for
 	 * the CPU migration code to notice that the CPU is online
@@ -498,6 +498,18 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 }
 
 static void (*__smp_cross_call)(const struct cpumask *, unsigned int);
+DEFINE_PER_CPU(bool, pending_ipi);
+
+static void smp_cross_call_common(const struct cpumask *cpumask,
+				  unsigned int func)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpumask)
+		per_cpu(pending_ipi, cpu) = true;
+
+	__smp_cross_call(cpumask, func);
+}
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
@@ -505,6 +517,21 @@ void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 		__smp_cross_call = fn;
 }
 
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call_common(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
+{
+	smp_cross_call_common(mask, IPI_WAKEUP);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+	smp_cross_call_common(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+}
+
 static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -519,6 +546,11 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
+	unsigned int cpu;
+
+	for_each_cpu(cpu, target)
+		per_cpu(pending_ipi, cpu) = true;
+
 	trace_ipi_raise(target, ipi_types[ipinr]);
 	__smp_cross_call(target, ipinr);
 }
@@ -549,21 +581,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	return sum;
 }
 
-void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_CALL_FUNC);
-}
-
-void arch_send_wakeup_ipi_mask(const struct cpumask *mask)
-{
-	smp_cross_call(mask, IPI_WAKEUP);
-}
-
-void arch_send_call_function_single_ipi(int cpu)
-{
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
-}
-
 #ifdef CONFIG_IRQ_WORK
 void arch_irq_work_raise(void)
 {
@@ -594,7 +611,7 @@ static void ipi_cpu_stop(unsigned int cpu)
 		raw_spin_unlock(&stop_lock);
 	}
 
-	set_cpu_online(cpu, false);
+	set_cpu_active(cpu, false);
 
 	local_fiq_disable();
 	local_irq_disable();
@@ -698,12 +715,16 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
 	if ((unsigned)ipinr < NR_IPI)
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
+
+	per_cpu(pending_ipi, cpu) = false;
+
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
+	BUG_ON(cpu_is_offline(cpu));
+	smp_cross_call_common(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
@@ -714,14 +735,14 @@ void smp_send_stop(void)
 	cpumask_copy(&mask, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &mask);
 	if (!cpumask_empty(&mask))
-		smp_cross_call(&mask, IPI_CPU_STOP);
+		smp_cross_call_common(&mask, IPI_CPU_STOP);
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
-	while (num_online_cpus() > 1 && timeout--)
+	while (num_active_cpus() > 1 && timeout--)
 		udelay(1);
 
-	if (num_online_cpus() > 1)
+	if (num_active_cpus() > 1)
 		pr_warn("SMP: failed to stop secondary CPUs\n");
 }
 
@@ -811,7 +832,7 @@ static void raise_nmi(cpumask_t *mask)
 	if (cpumask_test_cpu(smp_processor_id(), mask) && irqs_disabled())
 		nmi_cpu_backtrace(NULL);
 
-	smp_cross_call(mask, IPI_CPU_BACKTRACE);
+	smp_cross_call_common(mask, IPI_CPU_BACKTRACE);
 }
 
 void arch_trigger_all_cpu_backtrace(bool include_self)
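The IPI senders in smp.c now funnel through smp_cross_call_common(), which sets the new pending_ipi per-CPU flag for every target before ringing the doorbell; handle_IPI() clears the flag once the IPI has been serviced. No consumer of the flag appears in this diff; one plausible (hypothetical) user is a low-power-mode driver that declines a deep idle state while an IPI is still in flight:

#include <linux/percpu.h>
#include <linux/smp.h>

DECLARE_PER_CPU(bool, pending_ipi);	/* defined in arch/arm/kernel/smp.c */

/* Hypothetical consumer: entering a deep idle state would be pointless
 * (and would add wakeup latency) if a cross-call is already on its way. */
static bool can_enter_deep_idle(void)
{
	return !per_cpu(pending_ipi, smp_processor_id());
}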
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 6e8a50de40e2..b75332da3bb1 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -1,4 +1,5 @@
 #include <linux/export.h>
+#include <linux/kasan.h>
 #include <linux/sched.h>
 #include <linux/stacktrace.h>
 
@@ -58,12 +59,16 @@ int notrace unwind_frame(struct stackframe *frame)
 	if (fp < low + 12 || fp > high - 4)
 		return -EINVAL;
 
+	kasan_disable_current();
+
 	/* restore the registers from the stack frame */
 	frame->fp = *(unsigned long *)(fp - 12);
 	frame->sp = *(unsigned long *)(fp - 8);
 	frame->pc = *(unsigned long *)(fp - 4);
 #endif
 
+	kasan_enable_current();
+
 	return 0;
 }
 #endif
@@ -194,6 +199,7 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
 	__save_stack_trace(tsk, trace, 1);
 }
+EXPORT_SYMBOL(save_stack_trace_tsk);
 
 void save_stack_trace(struct stack_trace *trace)
 {
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 4f2c51ef162d..d7533f0e227b 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -58,6 +58,151 @@ static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
 	per_cpu(cpu_scale, cpu) = capacity;
 }
 
+static int __init get_cpu_for_node(struct device_node *node)
+{
+	struct device_node *cpu_node;
+	int cpu;
+
+	cpu_node = of_parse_phandle(node, "cpu", 0);
+	if (!cpu_node)
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+			of_node_put(cpu_node);
+			return cpu;
+		}
+	}
+
+	pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+
+	of_node_put(cpu_node);
+	return -EINVAL;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+			     int core_id)
+{
+	char name[10];
+	bool leaf = true;
+	int i = 0;
+	int cpu;
+	struct device_node *t;
+
+	do {
+		snprintf(name, sizeof(name), "thread%d", i);
+		t = of_get_child_by_name(core, name);
+		if (t) {
+			leaf = false;
+			cpu = get_cpu_for_node(t);
+			if (cpu >= 0) {
+				cpu_topology[cpu].cluster_id = cluster_id;
+				cpu_topology[cpu].core_id = core_id;
+				cpu_topology[cpu].thread_id = i;
+			} else {
+				pr_err("%s: Can't get CPU for thread\n",
+				       t->full_name);
+				of_node_put(t);
+				return -EINVAL;
+			}
+			of_node_put(t);
+		}
+		i++;
+	} while (t);
+
+	cpu = get_cpu_for_node(core);
+	if (cpu >= 0) {
+		if (!leaf) {
+			pr_err("%s: Core has both threads and CPU\n",
+			       core->full_name);
+			return -EINVAL;
+		}
+
+		cpu_topology[cpu].cluster_id = cluster_id;
+		cpu_topology[cpu].core_id = core_id;
+	} else if (leaf) {
+		pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+	char name[10];
+	bool leaf = true;
+	bool has_cores = false;
+	struct device_node *c;
+	int core_id = 0;
+	int i, ret;
+	static int cluster_id __initdata;
+
+	/*
+	 * First check for child clusters; we currently ignore any
+	 * information about the nesting of clusters and present the
+	 * scheduler with a flat list of them.
+	 */
+	i = 0;
+	do {
+		snprintf(name, sizeof(name), "cluster%d", i);
+		c = of_get_child_by_name(cluster, name);
+		if (c) {
+			leaf = false;
+			ret = parse_cluster(c, depth + 1);
+			of_node_put(c);
+			if (ret != 0)
+				return ret;
+		}
+		i++;
+	} while (c);
+
+	/* Now check for cores */
+	i = 0;
+	do {
+		snprintf(name, sizeof(name), "core%d", i);
+		c = of_get_child_by_name(cluster, name);
+		if (c) {
+			has_cores = true;
+
+			if (depth == 0) {
+				pr_err("%s: cpu-map children should be clusters\n",
+				       c->full_name);
+				of_node_put(c);
+				return -EINVAL;
+			}
+
+			if (leaf) {
+				ret = parse_core(c, cluster_id, core_id++);
+			} else {
+				pr_err("%s: Non-leaf cluster with core %s\n",
+				       cluster->full_name, name);
+				ret = -EINVAL;
+			}
+
+			of_node_put(c);
+			if (ret != 0)
+				return ret;
+		}
+		i++;
+	} while (c);
+
+	if (leaf && !has_cores)
+		pr_warn("%s: empty cluster\n", cluster->full_name);
+
+	if (leaf)
+		cluster_id++;
+
+	return 0;
+}
+
+static DEFINE_PER_CPU(unsigned long, cpu_efficiency) = SCHED_CAPACITY_SCALE;
+
+unsigned long arch_get_cpu_efficiency(int cpu)
+{
+	return per_cpu(cpu_efficiency, cpu);
+}
+
 #ifdef CONFIG_OF
 struct cpu_efficiency {
 	const char *compatible;
@@ -93,21 +238,48 @@ static unsigned long middle_capacity = 1;
  * 'average' CPU is of middle capacity. Also see the comments near
  * table_efficiency[] and update_cpu_capacity().
  */
-static void __init parse_dt_topology(void)
+static int __init parse_dt_topology(void)
 {
 	const struct cpu_efficiency *cpu_eff;
-	struct device_node *cn = NULL;
+	struct device_node *cn = NULL, *map;
 	unsigned long min_capacity = ULONG_MAX;
 	unsigned long max_capacity = 0;
 	unsigned long capacity = 0;
-	int cpu = 0;
+	int cpu = 0, ret = 0;
 
 	__cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
 				 GFP_NOWAIT);
 
+	cn = of_find_node_by_path("/cpus");
+	if (!cn) {
+		pr_err("No CPU information found in DT\n");
+		return 0;
+	}
+
+	/*
+	 * When topology is provided cpu-map is essentially a root
+	 * cluster with restricted subnodes.
+	 */
+	map = of_get_child_by_name(cn, "cpu-map");
+	if (!map)
+		goto out;
+
+	ret = parse_cluster(map, 0);
+	if (ret != 0)
+		goto out_map;
+
+	/*
+	 * Check that all cores are in the topology; the SMP code will
+	 * only mark cores described in the DT as possible.
+	 */
+	for_each_possible_cpu(cpu)
+		if (cpu_topology[cpu].cluster_id == -1)
+			ret = -EINVAL;
+
 	for_each_possible_cpu(cpu) {
 		const u32 *rate;
 		int len;
+		u32 efficiency;
 
 		/* too early to use cpu->of_node */
 		cn = of_get_cpu_node(cpu, NULL);
@@ -116,12 +288,26 @@ static void __init parse_dt_topology(void)
 			continue;
 		}
 
-		for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
-			if (of_device_is_compatible(cn, cpu_eff->compatible))
-				break;
+		/*
+		 * The CPU efficiency value passed from the device tree
+		 * overrides the value defined in the table_efficiency[]
+		 */
+		if (of_property_read_u32(cn, "efficiency", &efficiency) < 0) {
+
+			for (cpu_eff = table_efficiency;
+			     cpu_eff->compatible; cpu_eff++)
-		if (cpu_eff->compatible == NULL)
-			continue;
+				if (of_device_is_compatible(cn,
+						cpu_eff->compatible))
+					break;
+
+			if (cpu_eff->compatible == NULL)
+				continue;
+
+			efficiency = cpu_eff->efficiency;
+		}
+
+		per_cpu(cpu_efficiency, cpu) = efficiency;
 
 		rate = of_get_property(cn, "clock-frequency", &len);
 		if (!rate || len != 4) {
@@ -130,7 +316,7 @@ static int __init parse_dt_topology(void)
 			continue;
 		}
 
-		capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+		capacity = ((be32_to_cpup(rate)) >> 20) * efficiency;
 
 		/* Save min capacity of the system */
 		if (capacity < min_capacity)
@@ -156,7 +342,11 @@ static int __init parse_dt_topology(void)
 	else
 		middle_capacity = ((max_capacity / 3)
 				>> (SCHED_CAPACITY_SHIFT-1)) + 1;
-
+out_map:
+	of_node_put(map);
+out:
+	of_node_put(cn);
+	return ret;
 }
 
 static const struct sched_group_energy * const cpu_core_energy(int cpu);
@@ -182,7 +372,7 @@ static void update_cpu_capacity(unsigned int cpu)
 }
 
 #else
-static inline void parse_dt_topology(void) {}
+static inline int parse_dt_topology(void) {}
 static inline void update_cpu_capacity(unsigned int cpuid) {}
 #endif
 
@@ -215,7 +405,7 @@ static void update_siblings_masks(unsigned int cpuid)
 	for_each_possible_cpu(cpu) {
 		cpu_topo = &cpu_topology[cpu];
 
-		if (cpuid_topo->socket_id != cpu_topo->socket_id)
+		if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
 			continue;
 
 		cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
@@ -242,9 +432,8 @@ void store_cpu_topology(unsigned int cpuid)
 	struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid];
 	unsigned int mpidr;
 
-	/* If the cpu topology has been already set, just return */
 	if (cpuid_topo->core_id != -1)
-		return;
+		goto topology_populated;
 
 	mpidr = read_cpuid_mpidr();
 
@@ -259,12 +448,12 @@ void store_cpu_topology(unsigned int cpuid)
 		/* core performance interdependency */
 		cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 		cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-		cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
 	} else {
 		/* largely independent cores */
 		cpuid_topo->thread_id = -1;
 		cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-		cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+		cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 	}
 	} else {
 		/*
@@ -274,17 +463,17 @@ void store_cpu_topology(unsigned int cpuid)
 		 */
 		cpuid_topo->thread_id = -1;
 		cpuid_topo->core_id = 0;
-		cpuid_topo->socket_id = -1;
+		cpuid_topo->cluster_id = -1;
 	}
 
-	update_siblings_masks(cpuid);
-
-	update_cpu_capacity(cpuid);
-
-	pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+	pr_info("CPU%u: thread %d, cpu %d, cluster %d, mpidr %x\n",
 		cpuid, cpu_topology[cpuid].thread_id,
 		cpu_topology[cpuid].core_id,
-		cpu_topology[cpuid].socket_id, mpidr);
+		cpu_topology[cpuid].cluster_id, mpidr);
+
+topology_populated:
+	update_siblings_masks(cpuid);
+	update_cpu_capacity(cpuid);
 }
 
 /*
@@ -397,14 +586,14 @@ static struct sched_group_energy energy_core_a15 = {
 static inline
 const struct sched_group_energy * const cpu_cluster_energy(int cpu)
 {
-	return cpu_topology[cpu].socket_id ? &energy_cluster_a7 :
+	return cpu_topology[cpu].cluster_id ? &energy_cluster_a7 :
 			&energy_cluster_a15;
 }
 
 static inline
 const struct sched_group_energy * const cpu_core_energy(int cpu)
 {
-	return cpu_topology[cpu].socket_id ? &energy_core_a7 :
+	return cpu_topology[cpu].cluster_id ? &energy_core_a7 :
 			&energy_core_a15;
 }
 
@@ -422,29 +611,50 @@ static struct sched_domain_topology_level arm_topology[] = {
 	{ NULL, },
 };
 
-/*
- * init_cpu_topology is called at boot when only one cpu is running
- * which prevent simultaneous write access to cpu_topology array
- */
-void __init init_cpu_topology(void)
+static void __init reset_cpu_topology(void)
 {
 	unsigned int cpu;
 
-	/* init core mask and capacity */
 	for_each_possible_cpu(cpu) {
-		struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]);
+		struct cputopo_arm *cpu_topo = &cpu_topology[cpu];
 
 		cpu_topo->thread_id = -1;
-		cpu_topo->core_id =  -1;
-		cpu_topo->socket_id = -1;
+		cpu_topo->core_id = -1;
+		cpu_topo->cluster_id = -1;
+
 		cpumask_clear(&cpu_topo->core_sibling);
 		cpumask_clear(&cpu_topo->thread_sibling);
+	}
+}
+
+static void __init reset_cpu_capacity(void)
+{
+	unsigned int cpu;
 
+	for_each_possible_cpu(cpu)
 		set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
-	}
+}
+
+/*
+ * init_cpu_topology is called at boot when only one cpu is running,
+ * which prevents simultaneous write access to the cpu_topology array
+ */
+void __init init_cpu_topology(void)
+{
+	unsigned int cpu;
+
+	/* init core mask and capacity */
+	reset_cpu_topology();
+	reset_cpu_capacity();
 
 	smp_wmb();
 
-	parse_dt_topology();
+	if (parse_dt_topology()) {
+		reset_cpu_topology();
+		reset_cpu_capacity();
+	}
+
+	for_each_possible_cpu(cpu)
+		update_siblings_masks(cpu);
 
 	/* Set scheduler topology descriptor */
 	set_sched_topology(arm_topology);
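To make the cpu-map parsing above concrete: for the hypothetical two-cluster fragment sketched in the comment below, parse_cluster()/parse_core() would label the CPUs as shown (both the fragment and the numbering are illustrative, not taken from this diff):

/*
 * Hypothetical device-tree fragment walked by parse_cluster():
 *
 *	cpu-map {
 *		cluster0 { core0 { cpu = <&cpu0>; }; core1 { cpu = <&cpu1>; }; };
 *		cluster1 { core0 { cpu = <&cpu2>; }; };
 *	};
 */
struct sketch_topo { int cluster_id, core_id, thread_id; };

/* Leaf clusters receive consecutive cluster_ids, cores count per cluster,
 * and thread_id stays -1 because no threadN nodes are present: */
static const struct sketch_topo expected[] = {
	{ .cluster_id = 0, .core_id = 0, .thread_id = -1 },	/* cpu0 */
	{ .cluster_id = 0, .core_id = 1, .thread_id = -1 },	/* cpu1 */
	{ .cluster_id = 1, .core_id = 0, .thread_id = -1 },	/* cpu2 */
};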
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 306a2a581785..191f5fd87da3 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -29,6 +29,7 @@
 #include <linux/irq.h>
 #include <linux/atomic.h>
 
+#include <asm/arch_timer.h>
 #include <asm/cacheflush.h>
 #include <asm/exception.h>
 #include <asm/unistd.h>
@@ -708,6 +709,42 @@ late_initcall(arm_mrc_hook_init);
 
 #endif
 
+static int get_pct_trap(struct pt_regs *regs, unsigned int instr)
+{
+	u64 cntpct;
+	unsigned int res;
+	int rd = (instr >> 12) & 0xF;
+	int rn = (instr >> 16) & 0xF;
+
+	res = arm_check_condition(instr, regs->ARM_cpsr);
+	if (res == ARM_OPCODE_CONDTEST_FAIL) {
+		regs->ARM_pc += 4;
+		return 0;
+	}
+
+	if (rd == 15 || rn == 15)
+		return 1;
+
+	cntpct = arch_counter_get_cntpct();
+	regs->uregs[rd] = cntpct;
+	regs->uregs[rn] = cntpct >> 32;
+	regs->ARM_pc += 4;
+	return 0;
+}
+
+static struct undef_hook get_pct_hook = {
+	.instr_mask	= 0x0ff00fff,
+	.instr_val	= 0x0c500f0e,
+	.cpsr_mask	= MODE_MASK,
+	.cpsr_val	= USR_MODE,
+	.fn		= get_pct_trap,
+};
+
+void get_pct_hook_init(void)
+{
+	register_undef_hook(&get_pct_hook);
+}
+EXPORT_SYMBOL(get_pct_hook_init);
+
 /*
  * A data abort trap was taken, but we did not handle the instruction.
  * Try to abort the user program, or panic if it was the kernel.
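The undef hook registered above matches the 64-bit physical counter read "mrrc p15, 0, Rt, Rt2, c14" (CNTPCT) when it traps from user mode, and emulates it with arch_counter_get_cntpct(). Assuming a driver has called get_pct_hook_init(), a user program can then read the counter directly; a minimal sketch:

#include <stdint.h>
#include <stdio.h>

/* Read CNTPCT from user space. Where CNTKCTL denies user access the
 * instruction is undefined, and the kernel's get_pct_trap() hook fills
 * in the low word (Rt) and high word (Rt2) before skipping the insn. */
static uint64_t read_cntpct(void)
{
	uint32_t lo, hi;

	asm volatile("mrrc p15, 0, %0, %1, c14" : "=r" (lo), "=r" (hi));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("CNTPCT: %llu\n", (unsigned long long)read_cntpct());
	return 0;
}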
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 21246ffc65cc..c203b112047d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -175,6 +175,8 @@ static void __init patch_vdso(void *ehdr)
 		if (!cntvct_ok) {
 			vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
 			vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
+			vdso_nullpatch_one(&einfo, "__vdso_clock_getres");
+			/* do not zero out __vdso_time, no cntvct_ok dependency */
 		}
 	}
 
@@ -189,7 +191,7 @@ static int __init vdso_init(void)
 	}
 
 	text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
-	pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
+	pr_debug("vdso: %i text pages at base %pK\n", text_pages, vdso_start);
 
 	/* Allocate the VDSO text pagelist */
 	vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
@@ -260,14 +262,14 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
 
 static void vdso_write_begin(struct vdso_data *vdata)
 {
-	++vdso_data->seq_count;
+	++vdso_data->tb_seq_count;
 	smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
 }
 
 static void vdso_write_end(struct vdso_data *vdata)
 {
 	smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
-	++vdso_data->seq_count;
+	++vdso_data->tb_seq_count;
 }
 
 static bool tk_is_cntvct(const struct timekeeper *tk)
@@ -291,10 +293,10 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
  * counter again, making it even, indicating to userspace that the
  * update is finished.
  *
- * Userspace is expected to sample seq_count before reading any other
- * fields from the data page.  If seq_count is odd, userspace is
+ * Userspace is expected to sample tb_seq_count before reading any other
+ * fields from the data page.  If tb_seq_count is odd, userspace is
  * expected to wait until it becomes even.  After copying data from
- * the page, userspace must sample seq_count again; if it has changed
+ * the page, userspace must sample tb_seq_count again; if it has changed
  * from its previous value, userspace must retry the whole sequence.
 *
 * Calls to update_vsyscall are serialized by the timekeeping core.
@@ -312,20 +314,28 @@ void update_vsyscall(struct timekeeper *tk)
 
 	vdso_write_begin(vdso_data);
 
-	vdso_data->tk_is_cntvct			= tk_is_cntvct(tk);
+	vdso_data->use_syscall			= !tk_is_cntvct(tk);
 	vdso_data->xtime_coarse_sec		= tk->xtime_sec;
 	vdso_data->xtime_coarse_nsec		= (u32)(tk->tkr_mono.xtime_nsec >>
 							tk->tkr_mono.shift);
 	vdso_data->wtm_clock_sec		= wtm->tv_sec;
 	vdso_data->wtm_clock_nsec		= wtm->tv_nsec;
 
-	if (vdso_data->tk_is_cntvct) {
+	if (!vdso_data->use_syscall) {
+		struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
 		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
+		vdso_data->raw_time_sec		= tk->raw_sec;
+		vdso_data->raw_time_nsec	= tk->tkr_raw.xtime_nsec;
 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
 		vdso_data->xtime_clock_snsec	= tk->tkr_mono.xtime_nsec;
-		vdso_data->cs_mult		= tk->tkr_mono.mult;
+		vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
+		vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
+		/* tkr_mono.shift == tkr_raw.shift */
 		vdso_data->cs_shift		= tk->tkr_mono.shift;
 		vdso_data->cs_mask		= tk->tkr_mono.mask;
+		vdso_data->btm_sec		= btm.tv_sec;
+		vdso_data->btm_nsec		= btm.tv_nsec;
 	}
 
 	vdso_write_end(vdso_data);
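The renamed tb_seq_count keeps the seqlock discipline described in the comment block above: the writer makes the count odd, updates the page, then makes it even again. A simplified sketch of the matching user-space read loop (the struct below is an illustrative subset, not the kernel's exact vdso_data layout, and the explicit barriers are elided):

#include <stdint.h>

struct sketch_vdso_data {
	volatile uint32_t tb_seq_count;	/* odd while an update is in flight */
	uint64_t xtime_clock_sec;
};

static uint64_t read_xtime_sec(const struct sketch_vdso_data *vd)
{
	uint32_t seq;
	uint64_t sec;

	do {
		while ((seq = vd->tb_seq_count) & 1)
			;			/* writer active: wait for even */
		sec = vd->xtime_clock_sec;	/* real code pairs smp_rmb() here */
	} while (vd->tb_seq_count != seq);	/* changed underneath us: retry */

	return sec;
}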
