| author | Patrick Fay <pfay@codeaurora.org> | 2017-05-25 13:04:48 -0700 |
|---|---|---|
| committer | Patrick Fay <pfay@codeaurora.org> | 2017-05-25 16:48:16 -0700 |
| commit | 1255505e97817cbcdd71283b1103d1eef92fad3e (patch) | |
| tree | e6cdc01e8c95413e9a56a9fa9aa5bb6492dacd94 | |
| parent | 75a9d0fee5b264c89afdc8b155848625fcbe9ca0 (diff) | |
Perf: ARM: Support 32bit armv8-pmuv3 driver
Currently perf_event.c is 64-bit specific because of its inline
assembly, but the driver needs to support 32-bit as well. Move the
assembly into asm/perf_event.h so that perf_event.c is 32/64-bit
agnostic, and move perf_event.c out of the 64-bit-specific arch
directory into drivers/perf.
Change-Id: Ic5cd188700938e7a37120065a2d781e03bf99017
Signed-off-by: Patrick Fay <pfay@codeaurora.org>
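The technique here is a common kernel pattern: keep the shared driver free of architecture-specific instructions by routing every PMU register access through a small named accessor, and let each architecture's asm/perf_event.h supply that accessor's inline assembly (mrc/mcr coprocessor ops on 32-bit ARM, mrs/msr system-register ops on arm64). Below is a minimal self-contained sketch of the pattern; the name pmu_ctrl_read() and the host fallback are hypothetical, chosen only for illustration, and note that reading PMCR from user space traps unless the kernel has enabled user access via PMUSERENR.

```c
/*
 * Sketch of the accessor-abstraction pattern used by this patch.
 * pmu_ctrl_read() is a hypothetical stand-in for the patch's
 * armv8pmu_pmcr_read_reg(); the real helpers live in each arch's
 * asm/perf_event.h.
 */
#include <stdint.h>
#include <stdio.h>

#if defined(__aarch64__)
/* arm64: the PMU control register is a system register (mrs/msr). */
static inline uint32_t pmu_ctrl_read(void)
{
	uint32_t val;

	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
	return val;
}
#elif defined(__arm__)
/* 32-bit ARM: the same register sits in coprocessor 15 (mrc/mcr). */
static inline uint32_t pmu_ctrl_read(void)
{
	uint32_t val;

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	return val;
}
#else
/* Host fallback so the sketch compiles anywhere. */
static inline uint32_t pmu_ctrl_read(void)
{
	return 0;
}
#endif

/* Generic caller: identical source on both architectures. */
int main(void)
{
	printf("PMCR = %#x\n", pmu_ctrl_read());
	return 0;
}
```

The generic caller compiles unchanged on both targets; only the header body differs, which is what lets perf_event.c drop its 64-bit assumption and move to drivers/perf.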
| -rw-r--r-- | arch/arm/include/asm/perf_event.h | 87 |
| -rw-r--r-- | arch/arm64/include/asm/perf_event.h | 87 |
| -rw-r--r-- | arch/arm64/kernel/Makefile | 3 |
| -rw-r--r-- | drivers/perf/Makefile | 1 |
| -rw-r--r-- | drivers/perf/perf_event_armv8.c (renamed from arch/arm64/kernel/perf_event.c) | 37 |

5 files changed, 194 insertions(+), 21 deletions(-)
```diff
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index 4f9dec489931..306c4f4e778e 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -26,4 +26,91 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 	(regs)->ARM_cpsr = SVC_MODE;	\
 }
 
+static inline u32 armv8pmu_pmcr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmccntr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmxevcntr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmovsclr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
+	return val;
+}
+
+static inline void armv8pmu_pmcr_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r" (val));
+}
+
+static inline void armv8pmu_pmselr_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+}
+
+static inline void armv8pmu_pmccntr_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (val));
+}
+
+static inline void armv8pmu_pmxevcntr_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (val));
+}
+
+static inline void armv8pmu_pmxevtyper_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
+}
+
+static inline void armv8pmu_pmcntenset_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
+}
+
+static inline void armv8pmu_pmcntenclr_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
+}
+
+static inline void armv8pmu_pmintenset_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
+}
+
+static inline void armv8pmu_pmintenclr_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
+}
+
+static inline void armv8pmu_pmovsclr_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
+}
+
+static inline void armv8pmu_pmuserenr_write_reg(u32 val)
+{
+	asm volatile("mcr p15, 0, %0, c9, c14, 0" : : "r" (val));
+}
+
 #endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 7bd3cdb533ea..da4397e14e0d 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -31,4 +31,91 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs);
 	(regs)->pstate = PSR_MODE_EL1h;	\
 }
 
+static inline u32 armv8pmu_pmcr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmccntr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrs %0, pmccntr_el0" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmxevcntr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrs %0, pmxevcntr_el0" : "=r" (val));
+	return val;
+}
+
+static inline u32 armv8pmu_pmovsclr_read_reg(void)
+{
+	u32 val;
+
+	asm volatile("mrs %0, pmovsclr_el0" : "=r" (val));
+	return val;
+}
+
+static inline void armv8pmu_pmcr_write_reg(u32 val)
+{
+	asm volatile("msr pmcr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmselr_write_reg(u32 val)
+{
+	asm volatile("msr pmselr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmccntr_write_reg(u32 val)
+{
+	asm volatile("msr pmccntr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmxevcntr_write_reg(u32 val)
+{
+	asm volatile("msr pmxevcntr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmxevtyper_write_reg(u32 val)
+{
+	asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmcntenset_write_reg(u32 val)
+{
+	asm volatile("msr pmcntenset_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmcntenclr_write_reg(u32 val)
+{
+	asm volatile("msr pmcntenclr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmintenset_write_reg(u32 val)
+{
+	asm volatile("msr pmintenset_el1, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmintenclr_write_reg(u32 val)
+{
+	asm volatile("msr pmintenclr_el1, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmovsclr_write_reg(u32 val)
+{
+	asm volatile("msr pmovsclr_el0, %0" :: "r" (val));
+}
+
+static inline void armv8pmu_pmuserenr_write_reg(u32 val)
+{
+	asm volatile("msr pmuserenr_el0, %0" :: "r" (val));
+}
+
 #endif
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 99f4410833b4..bc1b1b0ed6ff 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -31,8 +31,7 @@ arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
 arm64-obj-$(CONFIG_ARM64_MODULE_PLTS)	+= module-plts.o
 arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o perf_callchain.o
-arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o perf_debug.o \
-					   perf_trace_counters.o \
+arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_debug.o perf_trace_counters.o \
 					   perf_trace_user.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
 arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index acd2397ded94..14a13007a973 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1 +1,2 @@
 obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_armv8.o
diff --git a/arch/arm64/kernel/perf_event.c b/drivers/perf/perf_event_armv8.c
index eccd8c49ad69..443538a16aea 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/drivers/perf/perf_event_armv8.c
@@ -20,6 +20,7 @@
  */
 
 #include <asm/irq_regs.h>
+#include <asm/perf_event.h>
 
 #include <linux/of.h>
 #include <linux/perf/arm_pmu.h>
@@ -239,16 +240,14 @@ struct arm_pmu_and_idle_nb {
 
 static inline u32 armv8pmu_pmcr_read(void)
 {
-	u32 val;
-	asm volatile("mrs %0, pmcr_el0" : "=r" (val));
-	return val;
+	return armv8pmu_pmcr_read_reg();
 }
 
 inline void armv8pmu_pmcr_write(u32 val)
 {
 	val &= ARMV8_PMCR_MASK;
 	isb();
-	asm volatile("msr pmcr_el0, %0" :: "r" (val));
+	armv8pmu_pmcr_write_reg(val);
 }
 
 static inline int armv8pmu_has_overflowed(u32 pmovsr)
@@ -270,7 +269,7 @@ static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
 static inline int armv8pmu_select_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmselr_el0, %0" :: "r" (counter));
+	armv8pmu_pmselr_write_reg(counter);
 	isb();
 
 	return idx;
@@ -287,9 +286,9 @@ static inline u32 armv8pmu_read_counter(struct perf_event *event)
 		pr_err("CPU%u reading wrong counter %d\n",
 			smp_processor_id(), idx);
 	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
-		asm volatile("mrs %0, pmccntr_el0" : "=r" (value));
+		value = armv8pmu_pmccntr_read_reg();
 	else if (armv8pmu_select_counter(idx) == idx)
-		asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value));
+		value = armv8pmu_pmxevcntr_read_reg();
 
 	return value;
 }
@@ -304,47 +303,47 @@ static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
 	else if (idx == ARMV8_IDX_CYCLE_COUNTER)
-		asm volatile("msr pmccntr_el0, %0" :: "r" (value));
+		armv8pmu_pmccntr_write_reg(value);
 	else if (armv8pmu_select_counter(idx) == idx)
-		asm volatile("msr pmxevcntr_el0, %0" :: "r" (value));
+		armv8pmu_pmxevcntr_write_reg(value);
 }
 
 inline void armv8pmu_write_evtype(int idx, u32 val)
 {
 	if (armv8pmu_select_counter(idx) == idx) {
 		val &= ARMV8_EVTYPE_MASK;
-		asm volatile("msr pmxevtyper_el0, %0" :: "r" (val));
+		armv8pmu_pmxevtyper_write_reg(val);
 	}
 }
 
 inline int armv8pmu_enable_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter)));
+	armv8pmu_pmcntenset_write_reg(BIT(counter));
 	return idx;
 }
 
 inline int armv8pmu_disable_counter(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter)));
+	armv8pmu_pmcntenclr_write_reg(BIT(counter));
 	return idx;
 }
 
 inline int armv8pmu_enable_intens(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter)));
+	armv8pmu_pmintenset_write_reg(BIT(counter));
 	return idx;
 }
 
 inline int armv8pmu_disable_intens(int idx)
 {
 	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
-	asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter)));
+	armv8pmu_pmintenclr_write_reg(BIT(counter));
 	isb();
 	/* Clear the overflow flag in case an interrupt is pending. */
-	asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter)));
+	armv8pmu_pmovsclr_write_reg(BIT(counter));
 	isb();
 
 	return idx;
@@ -355,11 +354,11 @@ inline u32 armv8pmu_getreset_flags(void)
 	u32 value;
 
 	/* Read */
-	asm volatile("mrs %0, pmovsclr_el0" : "=r" (value));
+	value = armv8pmu_pmovsclr_read_reg();
 
 	/* Write to clear flags */
 	value &= ARMV8_OVSR_MASK;
-	asm volatile("msr pmovsclr_el0, %0" :: "r" (value));
+	armv8pmu_pmovsclr_write_reg(value);
 
 	return value;
 }
@@ -566,14 +565,14 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 
 static void armv8pmu_init_usermode(void)
 {
 	/* Enable access from userspace. */
-	asm volatile("msr pmuserenr_el0, %0" :: "r" (0xF));
+	armv8pmu_pmuserenr_write_reg(0xF);
 }
 #else
 static inline void armv8pmu_init_usermode(void)
 {
 	/* Disable access from userspace. */
-	asm volatile("msr pmuserenr_el0, %0" :: "r" (0));
+	armv8pmu_pmuserenr_write_reg(0);
 }
 #endif
 
```
