Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile         |  6
-rw-r--r--  arch/arm/kernel/entry-armv.S     |  8
-rw-r--r--  arch/arm/kernel/head.S           |  6
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c  | 16
-rw-r--r--  arch/arm/kernel/perf_callchain.c | 10
-rw-r--r--  arch/arm/kernel/ptrace.c         |  4
-rw-r--r--  arch/arm/kernel/return_address.c |  4
-rw-r--r--  arch/arm/kernel/setup.c          | 16
-rw-r--r--  arch/arm/kernel/signal.c         | 14
-rw-r--r--  arch/arm/kernel/sleep.S          | 12
-rw-r--r--  arch/arm/kernel/stacktrace.c     | 23
-rw-r--r--  arch/arm/kernel/vdso.c           | 28
12 files changed, 100 insertions, 47 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 649bc3300c93..a43601d61ce0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -16,10 +16,14 @@ CFLAGS_REMOVE_return_address.o = -pg
# Object file lists.
obj-y := elf.o entry-common.o irq.o opcodes.o \
- process.o ptrace.o reboot.o return_address.o \
+ process.o ptrace.o reboot.o \
setup.o signal.o sigreturn_codes.o \
stacktrace.o sys_arm.o time.o traps.o
+ifneq ($(CONFIG_ARM_UNWIND),y)
+obj-$(CONFIG_FRAME_POINTER) += return_address.o
+endif
+
obj-$(CONFIG_ATAGS) += atags_parse.o
obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 3ce377f7251f..618ceb6fe674 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -625,11 +625,9 @@ call_fpe:
tstne r0, #0x04000000 @ bit 26 set on both ARM and Thumb-2
reteq lr
and r8, r0, #0x00000f00 @ mask out CP number
- THUMB( lsr r8, r8, #8 )
mov r7, #1
- add r6, r10, #TI_USED_CP
- ARM( strb r7, [r6, r8, lsr #8] ) @ set appropriate used_cp[]
- THUMB( strb r7, [r6, r8] ) @ set appropriate used_cp[]
+ add r6, r10, r8, lsr #8 @ add used_cp[] array offset first
+ strb r7, [r6, #TI_USED_CP] @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
@ Test if we need to give access to iWMMXt coprocessors
ldr r5, [r10, #TI_FLAGS]
@@ -638,7 +636,7 @@ call_fpe:
bcs iwmmxt_task_enable
#endif
ARM( add pc, pc, r8, lsr #6 )
- THUMB( lsl r8, r8, #2 )
+ THUMB( lsr r8, r8, #6 )
THUMB( add pc, r8 )
nop
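
Read as C, the rewritten used_cp[] update adds the coprocessor index to the thread_info pointer first and then applies the TI_USED_CP offset in the store, so the same two instructions serve both ARM and Thumb-2. A minimal sketch of the equivalent logic (the helper name and parameter types are assumptions for illustration; TI_USED_CP is the kernel's asm-offsets constant):

/*
 * Sketch only: mirrors the assembly above.  "insn" is the trapped
 * coprocessor instruction, "ti" points at the current thread_info.
 */
static inline void mark_used_cp(unsigned char *ti, unsigned int insn,
				unsigned int ti_used_cp)
{
	unsigned int cp = (insn & 0x00000f00) >> 8;	/* mask out CP number */

	/* add the used_cp[] index first, then the TI_USED_CP offset */
	ti[cp + ti_used_cp] = 1;			/* set used_cp[cp] */
}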
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 04286fd9e09c..2e336acd68b0 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -674,11 +674,7 @@ ARM_BE8(rev16 ip, ip)
bcc 1b
bx lr
#else
-#ifdef CONFIG_CPU_ENDIAN_BE8
- moveq r0, #0x00004000 @ set bit 22, mov to mvn instruction
-#else
moveq r0, #0x400000 @ set bit 22, mov to mvn instruction
-#endif
b 2f
1: ldr ip, [r7, r3]
#ifdef CONFIG_CPU_ENDIAN_BE8
@@ -687,7 +683,7 @@ ARM_BE8(rev16 ip, ip)
tst ip, #0x000f0000 @ check the rotation field
orrne ip, ip, r6, lsl #24 @ mask in offset bits 31-24
biceq ip, ip, #0x00004000 @ clear bit 22
- orreq ip, ip, r0 @ mask in offset bits 7-0
+ orreq ip, ip, r0, ror #8 @ mask in offset bits 7-0
#else
bic ip, ip, #0x000000ff
tst ip, #0xf00 @ check the rotation field
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index abcbea1ae30b..89fd86301242 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -631,7 +631,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
info->address &= ~alignment_mask;
info->ctrl.len <<= offset;
- if (!bp->overflow_handler) {
+ if (is_default_overflow_handler(bp)) {
/*
* Mismatch breakpoints are required for single-stepping
* breakpoints.
@@ -747,6 +747,15 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
}
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
+
+ /*
+ * If we triggered a user watchpoint from a uaccess routine,
+ * then handle the stepping ourselves since userspace really
+ * can't help us with this.
+ */
+ if (watchpoint_fault_on_uaccess(regs, info))
+ enable_single_step(wp, instruction_pointer(regs));
+
perf_bp_event(wp, regs);
/*
@@ -754,9 +763,8 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
* mismatch breakpoint so we can single-step over the
* watchpoint trigger.
*/
- if (!wp->overflow_handler)
+ if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
-
unlock:
rcu_read_unlock();
}
@@ -1146,4 +1154,4 @@ int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
unsigned long val, void *data)
{
return NOTIFY_DONE;
-}
+}
\ No newline at end of file
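
The new watchpoint_fault_on_uaccess() gate decides when the kernel must do the single-stepping itself; its body is not part of this hunk. Given the comment above, one plausible shape is a check that a user-privilege watchpoint fired while the CPU was in kernel mode, i.e. inside a uaccess routine (a guess for illustration, not the patch's actual definition):

/* Illustration only: a guessed shape for the helper used above. */
static bool watchpoint_fault_on_uaccess(struct pt_regs *regs,
					struct arch_hw_breakpoint *info)
{
	/* kernel-mode access tripped a watchpoint owned by userspace */
	return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
}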
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 4e02ae5950ff..bc552e813e7b 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -31,7 +31,7 @@ struct frame_tail {
*/
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
- struct perf_callchain_entry *entry)
+ struct perf_callchain_entry_ctx *entry)
{
struct frame_tail buftail;
unsigned long err;
@@ -59,7 +59,7 @@ user_backtrace(struct frame_tail __user *tail,
}
void
-perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct frame_tail __user *tail;
@@ -75,7 +75,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
tail = (struct frame_tail __user *)regs->ARM_fp - 1;
- while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ while ((entry->entry->nr < entry->max_stack) &&
tail && !((unsigned long)tail & 0x3))
tail = user_backtrace(tail, entry);
}
@@ -89,13 +89,13 @@ static int
callchain_trace(struct stackframe *fr,
void *data)
{
- struct perf_callchain_entry *entry = data;
+ struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, fr->pc);
return 0;
}
void
-perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
+perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
struct stackframe fr;
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index ae738a6319f6..364985c96a92 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -227,8 +227,8 @@ static struct undef_hook arm_break_hook = {
};
static struct undef_hook thumb_break_hook = {
- .instr_mask = 0xffff,
- .instr_val = 0xde01,
+ .instr_mask = 0xffffffff,
+ .instr_val = 0x0000de01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 36ed35073289..f945742dea44 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -10,8 +10,6 @@
*/
#include <linux/export.h>
#include <linux/ftrace.h>
-
-#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
#include <linux/sched.h>
#include <asm/stacktrace.h>
@@ -56,6 +54,4 @@ void *return_address(unsigned int level)
return NULL;
}
-#endif /* if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) */
-
EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1ad40fc316b2..18013f7e2b31 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -488,9 +488,11 @@ void notrace cpu_init(void)
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif
/*
@@ -512,15 +514,15 @@ void notrace cpu_init(void)
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 7abc908ebea0..f82a1ac22164 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -626,18 +626,20 @@ struct page *get_signal_page(void)
addr = page_address(page);
+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 0f6c1000582c..c8569390e7e7 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -119,6 +119,14 @@ ENDPROC(cpu_resume_after_mmu)
.text
.align
+#ifdef CONFIG_MCPM
+ .arm
+THUMB( .thumb )
+ENTRY(cpu_resume_no_hyp)
+ARM_BE8(setend be) @ ensure we are in BE mode
+ b no_hyp
+#endif
+
#ifdef CONFIG_MMU
.arm
ENTRY(cpu_resume_arm)
@@ -134,6 +142,7 @@ ARM_BE8(setend be) @ ensure we are in BE mode
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r1
+no_hyp:
mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f)
@@ -163,6 +172,9 @@ ENDPROC(cpu_resume)
#ifdef CONFIG_MMU
ENDPROC(cpu_resume_arm)
#endif
+#ifdef CONFIG_MCPM
+ENDPROC(cpu_resume_no_hyp)
+#endif
.align 2
_sleep_save_sp:
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index a1898c6092d1..30bb8c972553 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -20,6 +20,19 @@
* A simple function epilogue looks like this:
* ldm sp, {fp, sp, pc}
*
+ * When compiled with clang, pc and sp are not pushed. A simple function
+ * prologue looks like this when built with clang:
+ *
+ * stmdb {..., fp, lr}
+ * add fp, sp, #x
+ * sub sp, sp, #y
+ *
+ * A simple function epilogue looks like this when built with clang:
+ *
+ * sub sp, fp, #x
+ * ldm {..., fp, pc}
+ *
+ *
* Note that with framepointer enabled, even the leaf functions have the same
* prologue and epilogue, therefore we can ignore the LR value in this case.
*/
@@ -32,6 +45,15 @@ int notrace unwind_frame(struct stackframe *frame)
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
+#ifdef CONFIG_CC_IS_CLANG
+ /* check current frame pointer is within bounds */
+ if (fp < low + 4 || fp > high - 4)
+ return -EINVAL;
+
+ frame->sp = frame->fp;
+ frame->fp = *(unsigned long *)(fp);
+ frame->pc = *(unsigned long *)(fp + 4);
+#else
/* check current frame pointer is within bounds */
if (fp < low + 12 || fp > high - 4)
return -EINVAL;
@@ -42,6 +64,7 @@ int notrace unwind_frame(struct stackframe *frame)
frame->fp = *(unsigned long *)(fp - 12);
frame->sp = *(unsigned long *)(fp - 8);
frame->pc = *(unsigned long *)(fp - 4);
+#endif
kasan_enable_current();
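
In other words, unwind_frame() now has to cope with two frame record layouts. A sketch of what sits around the saved frame pointer in each case, taken from the comments and loads above (offsets in bytes):

/*
 * GCC (APCS-style frame):            Clang:
 *   fp[-12] = caller's fp              fp[0] = caller's fp
 *   fp[-8]  = caller's sp              fp[4] = return address (pc)
 *   fp[-4]  = return address (pc)      caller's sp = current fp
 */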
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index 9d500067a25a..c203b112047d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -86,6 +86,8 @@ static bool __init cntvct_functional(void)
*/
np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
if (!np)
+ np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
+ if (!np)
goto out_put;
if (of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
@@ -173,6 +175,8 @@ static void __init patch_vdso(void *ehdr)
if (!cntvct_ok) {
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
+ vdso_nullpatch_one(&einfo, "__vdso_clock_getres");
+ /* do not zero out __vdso_time, no cntvct_ok dependency */
}
}
@@ -258,14 +262,14 @@ void arm_install_vdso(struct mm_struct *mm, unsigned long addr)
static void vdso_write_begin(struct vdso_data *vdata)
{
- ++vdso_data->seq_count;
+ ++vdso_data->tb_seq_count;
smp_wmb(); /* Pairs with smp_rmb in vdso_read_retry */
}
static void vdso_write_end(struct vdso_data *vdata)
{
smp_wmb(); /* Pairs with smp_rmb in vdso_read_begin */
- ++vdso_data->seq_count;
+ ++vdso_data->tb_seq_count;
}
static bool tk_is_cntvct(const struct timekeeper *tk)
@@ -289,10 +293,10 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
* counter again, making it even, indicating to userspace that the
* update is finished.
*
- * Userspace is expected to sample seq_count before reading any other
- * fields from the data page. If seq_count is odd, userspace is
+ * Userspace is expected to sample tb_seq_count before reading any other
+ * fields from the data page. If tb_seq_count is odd, userspace is
* expected to wait until it becomes even. After copying data from
- * the page, userspace must sample seq_count again; if it has changed
+ * the page, userspace must sample tb_seq_count again; if it has changed
* from its previous value, userspace must retry the whole sequence.
*
* Calls to update_vsyscall are serialized by the timekeeping core.
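
For reference, the read side described in the comment above looks roughly like the following sketch. Only the tb_seq_count field name is taken from this patch; the helper names and types are assumptions about the user-side VDSO code:

/* Sketch of the userspace sequence-count protocol, illustration only. */
static u32 vdso_read_begin_sketch(const struct vdso_data *vdata)
{
	u32 seq;

	do {
		seq = READ_ONCE(vdata->tb_seq_count);
	} while (seq & 1);		/* odd: update in progress, wait */

	smp_rmb();			/* pairs with smp_wmb in vdso_write_end */
	return seq;
}

static bool vdso_read_retry_sketch(const struct vdso_data *vdata, u32 start)
{
	smp_rmb();			/* pairs with smp_wmb in vdso_write_begin */
	return READ_ONCE(vdata->tb_seq_count) != start;	/* changed: retry */
}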
@@ -310,20 +314,28 @@ void update_vsyscall(struct timekeeper *tk)
vdso_write_begin(vdso_data);
- vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
+ vdso_data->use_syscall = !tk_is_cntvct(tk);
vdso_data->xtime_coarse_sec = tk->xtime_sec;
vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
tk->tkr_mono.shift);
vdso_data->wtm_clock_sec = wtm->tv_sec;
vdso_data->wtm_clock_nsec = wtm->tv_nsec;
- if (vdso_data->tk_is_cntvct) {
+ if (!vdso_data->use_syscall) {
+ struct timespec btm = ktime_to_timespec(tk->offs_boot);
+
vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+ vdso_data->raw_time_sec = tk->raw_sec;
+ vdso_data->raw_time_nsec = tk->tkr_raw.xtime_nsec;
vdso_data->xtime_clock_sec = tk->xtime_sec;
vdso_data->xtime_clock_snsec = tk->tkr_mono.xtime_nsec;
- vdso_data->cs_mult = tk->tkr_mono.mult;
+ vdso_data->cs_mono_mult = tk->tkr_mono.mult;
+ vdso_data->cs_raw_mult = tk->tkr_raw.mult;
+ /* tkr_mono.shift == tkr_raw.shift */
vdso_data->cs_shift = tk->tkr_mono.shift;
vdso_data->cs_mask = tk->tkr_mono.mask;
+ vdso_data->btm_sec = btm.tv_sec;
+ vdso_data->btm_nsec = btm.tv_nsec;
}
vdso_write_end(vdso_data);