Diffstat (limited to 'drivers/perf')
 -rw-r--r--  drivers/perf/Makefile            |   1
 -rw-r--r--  drivers/perf/arm_pmu.c           | 267
 -rw-r--r--  drivers/perf/perf_event_armv8.c  | 762
 3 files changed, 970 insertions(+), 60 deletions(-)
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index acd2397ded94..14a13007a973 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_ARM_PMU) += arm_pmu.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_armv8.o
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index e24e77da2fd9..d6d671a925e1 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -23,6 +23,7 @@
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
+#include <linux/debugfs.h>
#include <asm/cputype.h>
#include <asm/irq_regs.h>
@@ -372,6 +373,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
return err;
}
+ armpmu->pmu_state = ARM_PMU_STATE_RUNNING;
+
return 0;
}
@@ -555,17 +558,10 @@ static void armpmu_init(struct arm_pmu *armpmu)
.stop = armpmu_stop,
.read = armpmu_read,
.filter_match = armpmu_filter_match,
+ .events_across_hotplug = 1,
};
}
-int armpmu_register(struct arm_pmu *armpmu, int type)
-{
- armpmu_init(armpmu);
- pr_info("enabled with %s PMU driver, %d counters available\n",
- armpmu->name, armpmu->num_events);
- return perf_pmu_register(&armpmu->pmu, armpmu->name, type);
-}
-
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;
@@ -613,10 +609,12 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
struct platform_device *pmu_device = cpu_pmu->plat_device;
struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;
+ cpu_pmu->pmu_state = ARM_PMU_STATE_GOING_DOWN;
+
irqs = min(pmu_device->num_resources, num_possible_cpus());
irq = platform_get_irq(pmu_device, 0);
- if (irq >= 0 && irq_is_percpu(irq)) {
+ if (irq > 0 && irq_is_percpu(irq)) {
on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
free_percpu_irq(irq, &hw_events->percpu_pmu);
} else {
@@ -629,10 +627,11 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
+ if (irq > 0)
free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
}
}
+ cpu_pmu->pmu_state = ARM_PMU_STATE_OFF;
}
static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
@@ -651,7 +650,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
}
irq = platform_get_irq(pmu_device, 0);
- if (irq >= 0 && irq_is_percpu(irq)) {
+ if (irq > 0 && irq_is_percpu(irq)) {
err = request_percpu_irq(irq, handler, "arm-pmu",
&hw_events->percpu_pmu);
if (err) {
@@ -660,6 +659,7 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return err;
}
on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
+ cpu_pmu->percpu_irq = irq;
} else {
for (i = 0; i < irqs; ++i) {
int cpu = i;
@@ -699,31 +699,12 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
return 0;
}
-/*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
- void *hcpu)
-{
- int cpu = (unsigned long)hcpu;
- struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
-
- if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
- return NOTIFY_DONE;
-
- if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
- return NOTIFY_DONE;
-
- if (pmu->reset)
- pmu->reset(pmu);
- else
- return NOTIFY_DONE;
-
- return NOTIFY_OK;
-}
+struct cpu_pm_pmu_args {
+ struct arm_pmu *armpmu;
+ unsigned long cmd;
+ int cpu;
+ int ret;
+};
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
@@ -771,25 +752,26 @@ static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
}
}
-static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
- void *v)
+static void cpu_pm_pmu_common(void *info)
{
- struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+ struct cpu_pm_pmu_args *data = info;
+ struct arm_pmu *armpmu = data->armpmu;
+ unsigned long cmd = data->cmd;
+ int cpu = data->cpu;
struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
- if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
- return NOTIFY_DONE;
+ if (!cpumask_test_cpu(cpu, &armpmu->supported_cpus)) {
+ data->ret = NOTIFY_DONE;
+ return;
+ }
- /*
- * Always reset the PMU registers on power-up even if
- * there are no events running.
- */
- if (cmd == CPU_PM_EXIT && armpmu->reset)
- armpmu->reset(armpmu);
+ if (!enabled) {
+ data->ret = NOTIFY_OK;
+ return;
+ }
- if (!enabled)
- return NOTIFY_OK;
+ data->ret = NOTIFY_OK;
switch (cmd) {
case CPU_PM_ENTER:
@@ -802,10 +784,31 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
armpmu->start(armpmu);
break;
default:
- return NOTIFY_DONE;
+ data->ret = NOTIFY_DONE;
+ break;
}
- return NOTIFY_OK;
+ return;
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+ void *v)
+{
+ struct cpu_pm_pmu_args data = {
+ .armpmu = container_of(b, struct arm_pmu, cpu_pm_nb),
+ .cmd = cmd,
+ .cpu = smp_processor_id(),
+ };
+
+ /*
+ * Always reset the PMU registers on power-up even if
+ * there are no events running.
+ */
+ if (cmd == CPU_PM_EXIT && data.armpmu->reset)
+ data.armpmu->reset(data.armpmu);
+
+ cpu_pm_pmu_common(&data);
+ return data.ret;
}
static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
@@ -821,8 +824,63 @@ static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+static inline void cpu_pm_pmu_common(void *info) { }
#endif
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+ void *hcpu)
+{
+ int irq = -1;
+ unsigned long masked_action = (action & ~CPU_TASKS_FROZEN);
+ struct cpu_pm_pmu_args data = {
+ .armpmu = container_of(b, struct arm_pmu, hotplug_nb),
+ .cpu = (unsigned long)hcpu,
+ };
+
+ if (!cpumask_test_cpu(data.cpu, &data.armpmu->supported_cpus))
+ return NOTIFY_DONE;
+
+ switch (masked_action) {
+ case CPU_STARTING:
+ case CPU_DOWN_FAILED:
+ /*
+ * Always reset the PMU registers on power-up even if
+ * there are no events running.
+ */
+ if (data.armpmu->reset)
+ data.armpmu->reset(data.armpmu);
+ if (data.armpmu->pmu_state == ARM_PMU_STATE_RUNNING) {
+ if (data.armpmu->plat_device)
+ irq = data.armpmu->percpu_irq;
+ /* Arm the PMU IRQ before appearing. */
+ if (irq > 0 && irq_is_percpu(irq))
+ cpu_pmu_enable_percpu_irq(&irq);
+ data.cmd = CPU_PM_EXIT;
+ cpu_pm_pmu_common(&data);
+ }
+ return NOTIFY_OK;
+ case CPU_DYING:
+ if (data.armpmu->pmu_state != ARM_PMU_STATE_OFF) {
+ data.cmd = CPU_PM_ENTER;
+ cpu_pm_pmu_common(&data);
+ /* Disarm the PMU IRQ before disappearing. */
+ if (data.armpmu->plat_device)
+ irq = data.armpmu->percpu_irq;
+ if (irq > 0 && irq_is_percpu(irq))
+ cpu_pmu_disable_percpu_irq(&irq);
+ }
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
int err;
@@ -921,7 +979,7 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
/* Check the IRQ type and prohibit a mix of PPIs and SPIs */
irq = platform_get_irq(pdev, i);
- if (irq >= 0) {
+ if (irq > 0) {
bool spi = !irq_is_percpu(irq);
if (i > 0 && spi != using_spi) {
@@ -1000,14 +1058,29 @@ int arm_pmu_device_probe(struct platform_device *pdev,
return -ENOMEM;
}
+ armpmu_init(pmu);
+
if (!__oprofile_cpu_pmu)
__oprofile_cpu_pmu = pmu;
pmu->plat_device = pdev;
+ ret = cpu_pmu_init(pmu);
+ if (ret)
+ goto out_free;
+
if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
init_fn = of_id->data;
+ pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
+ "secure-reg-access");
+
+ /* arm64 systems boot only as non-secure */
+ if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
+ pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
+ pmu->secure_access = false;
+ }
+
ret = of_pmu_irq_cfg(pmu);
if (!ret)
ret = init_fn(pmu);
@@ -1017,24 +1090,98 @@ int arm_pmu_device_probe(struct platform_device *pdev,
}
if (ret) {
- pr_info("failed to probe PMU!\n");
- goto out_free;
+ pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
+ goto out_destroy;
}
- ret = cpu_pmu_init(pmu);
- if (ret)
- goto out_free;
-
- ret = armpmu_register(pmu, -1);
+ ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
if (ret)
goto out_destroy;
+ pmu->pmu_state = ARM_PMU_STATE_OFF;
+ pmu->percpu_irq = -1;
+
+ pr_info("enabled with %s PMU driver, %d counters available\n",
+ pmu->name, pmu->num_events);
+
return 0;
out_destroy:
cpu_pmu_destroy(pmu);
out_free:
- pr_info("failed to register PMU devices!\n");
+ pr_info("%s: failed to register PMU devices!\n",
+ of_node_full_name(node));
kfree(pmu);
return ret;
}
+
+static struct dentry *perf_debug_dir;
+
+struct dentry *perf_create_debug_dir(void)
+{
+ if (!perf_debug_dir)
+ perf_debug_dir = debugfs_create_dir("msm_perf", NULL);
+ return perf_debug_dir;
+}
+
+#ifdef CONFIG_PERF_EVENTS_RESET_PMU_DEBUGFS
+static __ref void reset_pmu_force(void)
+{
+ int cpu, ret;
+ u32 save_online_mask = 0;
+
+ for_each_possible_cpu(cpu) {
+ if (!cpu_online(cpu)) {
+ save_online_mask |= BIT(cpu);
+ ret = cpu_up(cpu);
+ if (ret)
+ pr_err("Failed to bring up CPU: %d, ret: %d\n",
+ cpu, ret);
+ }
+ }
+ if (cpu_pmu && cpu_pmu->reset)
+ on_each_cpu(cpu_pmu->reset, NULL, 1);
+ if (cpu_pmu && cpu_pmu->plat_device)
+ armpmu_release_hardware(cpu_pmu);
+ for_each_possible_cpu(cpu) {
+ if ((save_online_mask & BIT(cpu)) && cpu_online(cpu)) {
+ ret = cpu_down(cpu);
+ if (ret)
+ pr_err("Failed to bring down CPU: %d, ret: %d\n",
+ cpu, ret);
+ }
+ }
+}
+
+static int write_enabled_perfpmu_action(void *data, u64 val)
+{
+ if (val != 0)
+ reset_pmu_force();
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_pmuaction,
+ NULL, write_enabled_perfpmu_action, "%llu\n");
+
+int __init init_pmu_actions(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+ unsigned int value = 1;
+
+ dir = perf_create_debug_dir();
+ if (!dir)
+ return -ENOMEM;
+ file = debugfs_create_file("resetpmu", 0220, dir,
+ &value, &fops_pmuaction);
+ if (!file)
+ return -ENOMEM;
+ return 0;
+}
+#else
+int __init init_pmu_actions(void)
+{
+ return 0;
+}
+#endif
+late_initcall(init_pmu_actions);
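
[Editor's note, not part of the patch] The debugfs interface added above exposes a single write-only file (mode 0220), <debugfs>/msm_perf/resetpmu, and writing any non-zero value invokes reset_pmu_force(). A minimal user-space sketch of driving it, assuming debugfs is mounted at the usual /sys/kernel/debug (the helper program itself is hypothetical):

/* Hypothetical helper: pokes the resetpmu debugfs file added in this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumes debugfs is mounted at /sys/kernel/debug. */
	int fd = open("/sys/kernel/debug/msm_perf/resetpmu", O_WRONLY);

	if (fd < 0) {
		perror("open resetpmu");
		return 1;
	}
	/* Any non-zero value triggers reset_pmu_force(). */
	if (write(fd, "1", 1) != 1)
		perror("write resetpmu");
	close(fd);
	return 0;
}

From a root shell this is equivalent to: echo 1 > /sys/kernel/debug/msm_perf/resetpmu
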
diff --git a/drivers/perf/perf_event_armv8.c b/drivers/perf/perf_event_armv8.c
new file mode 100644
index 000000000000..6b722b7a95d2
--- /dev/null
+++ b/drivers/perf/perf_event_armv8.c
@@ -0,0 +1,762 @@
+/*
+ * PMU support
+ *
+ * Copyright (C) 2012 ARM Limited
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This code is based heavily on the ARMv7 perf event code.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/irq_regs.h>
+#include <asm/perf_event.h>
+
+#include <linux/of.h>
+#include <linux/perf/arm_pmu.h>
+#include <linux/platform_device.h>
+
+/*
+ * ARMv8 PMUv3 Performance Events handling code.
+ * Common event types.
+ */
+enum armv8_pmuv3_perf_types {
+ /* Required events. */
+ ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00,
+ ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03,
+ ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04,
+ ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
+ ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11,
+ ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12,
+
+ /* At least one of the following is required. */
+ ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08,
+ ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B,
+
+ /* Common architectural events. */
+ ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06,
+ ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07,
+ ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09,
+ ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A,
+ ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B,
+ ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C,
+ ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D,
+ ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E,
+ ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
+ ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C,
+
+ /* Common microarchitectural events. */
+ ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01,
+ ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02,
+ ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05,
+ ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13,
+ ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14,
+ ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15,
+ ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16,
+ ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17,
+ ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18,
+ ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19,
+ ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A,
+ ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D,
+};
+
+/* ARMv8 Cortex-A53 specific event types. */
+enum armv8_a53_pmu_perf_types {
+ ARMV8_A53_PERFCTR_PREFETCH_LINEFILL = 0xC2,
+};
+
+/* ARMv8 Cortex-A57 specific event types. */
+enum armv8_a57_perf_types {
+ ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD = 0x40,
+ ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST = 0x41,
+ ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD = 0x42,
+ ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST = 0x43,
+ ARMV8_A57_PERFCTR_DTLB_REFILL_LD = 0x4c,
+ ARMV8_A57_PERFCTR_DTLB_REFILL_ST = 0x4d,
+};
+
+/* PMUv3 HW events mapping. */
+const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/* ARM Cortex-A53 HW events mapping. */
+static const unsigned armv8_a53_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_PC_WRITE,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
+};
+
+static const unsigned armv8_a57_perf_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES,
+ [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ [PERF_COUNT_HW_BUS_CYCLES] = ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
+};
+
+const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL,
+ [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREFETCH_LINEFILL,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ PERF_CACHE_MAP_ALL_UNSUPPORTED,
+
+ [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_LD,
+ [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_LD,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_A57_PERFCTR_L1_DCACHE_ACCESS_ST,
+ [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_L1_DCACHE_REFILL_ST,
+
+ [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS,
+ [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL,
+
+ [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_LD,
+ [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_A57_PERFCTR_DTLB_REFILL_ST,
+
+ [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_ITLB_REFILL,
+
+ [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED,
+ [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+
+/*
+ * Perf Events' indices
+ */
+#define ARMV8_IDX_CYCLE_COUNTER 0
+#define ARMV8_IDX_COUNTER0 1
+#define ARMV8_IDX_COUNTER_LAST(cpu_pmu) \
+ (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define ARMV8_MAX_COUNTERS 32
+#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1)
+
+/*
+ * ARMv8 low level PMU access
+ */
+
+/*
+ * Perf Event to low level counters mapping
+ */
+#define ARMV8_IDX_TO_COUNTER(x) \
+ (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */
+#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */
+#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */
+#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */
+#define ARMV8_PMCR_N_MASK 0x1f
+#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_EXCLUDE_EL1 (1 << 31)
+#define ARMV8_EXCLUDE_EL0 (1 << 30)
+#define ARMV8_INCLUDE_EL2 (1 << 27)
+
+struct arm_pmu_and_idle_nb {
+ struct arm_pmu *cpu_pmu;
+ struct notifier_block perf_cpu_idle_nb;
+};
+
+static inline u32 armv8pmu_pmcr_read(void)
+{
+ return armv8pmu_pmcr_read_reg();
+}
+
+inline void armv8pmu_pmcr_write(u32 val)
+{
+ val &= ARMV8_PMCR_MASK;
+ isb();
+ armv8pmu_pmcr_write_reg(val);
+}
+
+static inline int armv8pmu_has_overflowed(u32 pmovsr)
+{
+ return pmovsr & ARMV8_OVERFLOWED_MASK;
+}
+
+static inline int armv8pmu_counter_valid(struct arm_pmu *cpu_pmu, int idx)
+{
+ return idx >= ARMV8_IDX_CYCLE_COUNTER &&
+ idx <= ARMV8_IDX_COUNTER_LAST(cpu_pmu);
+}
+
+static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
+{
+ return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
+}
+
+static inline int armv8pmu_select_counter(int idx)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ armv8pmu_pmselr_write_reg(counter);
+ isb();
+
+ return idx;
+}
+
+static inline u32 armv8pmu_read_counter(struct perf_event *event)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+ u32 value = 0;
+
+ if (!armv8pmu_counter_valid(cpu_pmu, idx))
+ pr_err("CPU%u reading wrong counter %d\n",
+ smp_processor_id(), idx);
+ else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ value = armv8pmu_pmccntr_read_reg();
+ else if (armv8pmu_select_counter(idx) == idx)
+ value = armv8pmu_pmxevcntr_read_reg();
+
+ return value;
+}
+
+static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
+{
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ if (!armv8pmu_counter_valid(cpu_pmu, idx))
+ pr_err("CPU%u writing wrong counter %d\n",
+ smp_processor_id(), idx);
+ else if (idx == ARMV8_IDX_CYCLE_COUNTER)
+ armv8pmu_pmccntr_write_reg(value);
+ else if (armv8pmu_select_counter(idx) == idx)
+ armv8pmu_pmxevcntr_write_reg(value);
+}
+
+inline void armv8pmu_write_evtype(int idx, u32 val)
+{
+ if (armv8pmu_select_counter(idx) == idx) {
+ val &= ARMV8_EVTYPE_MASK;
+ armv8pmu_pmxevtyper_write_reg(val);
+ }
+}
+
+inline int armv8pmu_enable_counter(int idx)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ armv8pmu_pmcntenset_write_reg(BIT(counter));
+ return idx;
+}
+
+inline int armv8pmu_disable_counter(int idx)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ armv8pmu_pmcntenclr_write_reg(BIT(counter));
+ return idx;
+}
+
+inline int armv8pmu_enable_intens(int idx)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ armv8pmu_pmintenset_write_reg(BIT(counter));
+ return idx;
+}
+
+inline int armv8pmu_disable_intens(int idx)
+{
+ u32 counter = ARMV8_IDX_TO_COUNTER(idx);
+ armv8pmu_pmintenclr_write_reg(BIT(counter));
+ isb();
+ /* Clear the overflow flag in case an interrupt is pending. */
+ armv8pmu_pmovsclr_write_reg(BIT(counter));
+ isb();
+
+ return idx;
+}
+
+inline u32 armv8pmu_getreset_flags(void)
+{
+ u32 value;
+
+ /* Read */
+ value = armv8pmu_pmovsclr_read_reg();
+
+ /* Write to clear flags */
+ value &= ARMV8_OVSR_MASK;
+ armv8pmu_pmovsclr_write_reg(value);
+
+ return value;
+}
+
+static void armv8pmu_enable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ int idx = hwc->idx;
+
+ /*
+ * Enable counter and interrupt, and set the counter to count
+ * the event that we're interested in.
+ */
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /*
+ * Disable counter
+ */
+ armv8pmu_disable_counter(idx);
+
+ /*
+ * Set event (if destined for PMNx counters).
+ */
+ armv8pmu_write_evtype(idx, hwc->config_base);
+
+ /*
+ * Enable interrupt for this counter
+ */
+ armv8pmu_enable_intens(idx);
+
+ /*
+ * Enable counter
+ */
+ armv8pmu_enable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_disable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct hw_perf_event *hwc = &event->hw;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+ int idx = hwc->idx;
+
+ /*
+ * Disable counter and interrupt
+ */
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /*
+ * Disable counter
+ */
+ armv8pmu_disable_counter(idx);
+
+ /*
+ * Disable interrupt for this counter
+ */
+ armv8pmu_disable_intens(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
+{
+ u32 pmovsr;
+ struct perf_sample_data data;
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * Get and reset the IRQ flags
+ */
+ pmovsr = armv8pmu_getreset_flags();
+
+ /*
+ * Did an overflow occur?
+ */
+ if (!armv8pmu_has_overflowed(pmovsr))
+ return IRQ_NONE;
+
+ /*
+ * Handle the counter(s) overflow(s)
+ */
+ regs = get_irq_regs();
+
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ /* Ignore if we don't have an event. */
+ if (!event)
+ continue;
+
+ /*
+ * We have a single interrupt for all counters. Check that
+ * each counter has overflowed before we process it.
+ */
+ if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hwc->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ cpu_pmu->disable(event);
+ }
+
+ /*
+ * Handle the pending perf events.
+ *
+ * Note: this call *must* be run with interrupts disabled. For
+ * platforms that can have the PMU interrupts raised as an NMI, this
+ * will not work.
+ */
+ irq_work_run();
+
+ return IRQ_HANDLED;
+}
+
+static void armv8pmu_start(struct arm_pmu *cpu_pmu)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Enable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
+{
+ unsigned long flags;
+ struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+ /* Disable all counters */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E);
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
+ struct perf_event *event)
+{
+ int idx;
+ struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hwc = &event->hw;
+ unsigned long evtype = hwc->config_base & ARMV8_EVTYPE_EVENT;
+
+ /* Place the first cycle counter request into the cycle counter. */
+ if (evtype == ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) {
+ if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
+ return ARMV8_IDX_CYCLE_COUNTER;
+ }
+
+ /*
+ * For anything other than a cycle counter, try and use
+ * the events counters
+ */
+ for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+ if (!test_and_set_bit(idx, cpuc->used_mask))
+ return idx;
+ }
+
+ /* The counters are all in use. */
+ return -EAGAIN;
+}
+
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv8pmu_set_event_filter(struct hw_perf_event *event,
+ struct perf_event_attr *attr)
+{
+ unsigned long config_base = 0;
+
+ if (attr->exclude_user)
+ config_base |= ARMV8_EXCLUDE_EL0;
+ if (attr->exclude_kernel)
+ config_base |= ARMV8_EXCLUDE_EL1;
+ if (!attr->exclude_hv)
+ config_base |= ARMV8_INCLUDE_EL2;
+
+ /*
+ * Install the filter into config_base as this is used to
+ * construct the event type.
+ */
+ event->config_base = config_base;
+
+ return 0;
+}
+
+#ifdef CONFIG_PERF_EVENTS_USERMODE
+static void armv8pmu_init_usermode(void)
+{
+ /* Enable access from userspace. */
+ armv8pmu_pmuserenr_write_reg(0xF);
+
+}
+#else
+static inline void armv8pmu_init_usermode(void)
+{
+ /* Disable access from userspace. */
+ armv8pmu_pmuserenr_write_reg(0);
+
+}
+#endif
+
+
+static void armv8pmu_idle_update(struct arm_pmu *cpu_pmu)
+{
+ struct pmu_hw_events *hw_events;
+ struct perf_event *event;
+ int idx;
+
+ if (!cpu_pmu)
+ return;
+
+ hw_events = this_cpu_ptr(cpu_pmu->hw_events);
+
+ if (!hw_events)
+ return;
+
+ for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
+
+ if (!test_bit(idx, hw_events->used_mask))
+ continue;
+
+ event = hw_events->events[idx];
+
+ if (!event || !event->attr.exclude_idle ||
+ event->state != PERF_EVENT_STATE_ACTIVE)
+ continue;
+
+ cpu_pmu->pmu.read(event);
+ }
+}
+
+static void armv8pmu_reset(void *info)
+{
+ struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
+ u32 idx, nb_cnt = cpu_pmu->num_events;
+
+ /* The counter and interrupt enable registers are unknown at reset. */
+ for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+ armv8pmu_disable_counter(idx);
+ armv8pmu_disable_intens(idx);
+ }
+
+ /* Initialize & Reset PMNC: C and P bits. */
+ armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_P | ARMV8_PMCR_C);
+
+ armv8pmu_init_usermode();
+}
+
+static int armv8_pmuv3_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv8_pmuv3_perf_map,
+ &armv8_pmuv3_perf_cache_map,
+ ARMV8_EVTYPE_EVENT);
+}
+
+static int armv8_a53_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv8_a53_perf_map,
+ &armv8_a53_perf_cache_map,
+ ARMV8_EVTYPE_EVENT);
+}
+
+static int armv8_a57_map_event(struct perf_event *event)
+{
+ return armpmu_map_event(event, &armv8_a57_perf_map,
+ &armv8_a57_perf_cache_map,
+ ARMV8_EVTYPE_EVENT);
+}
+
+static void armv8pmu_read_num_pmnc_events(void *info)
+{
+ int *nb_cnt = info;
+
+ /* Read the nb of CNTx counters supported from PMNC */
+ *nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK;
+
+ /* Add the CPU cycles counter */
+ *nb_cnt += 1;
+}
+
+static int perf_cpu_idle_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct arm_pmu_and_idle_nb *pmu_nb = container_of(nb,
+ struct arm_pmu_and_idle_nb, perf_cpu_idle_nb);
+
+ if (action == IDLE_START)
+ armv8pmu_idle_update(pmu_nb->cpu_pmu);
+
+ return NOTIFY_OK;
+}
+
+int armv8pmu_probe_num_events(struct arm_pmu *arm_pmu)
+{
+ int ret;
+ struct arm_pmu_and_idle_nb *pmu_idle_nb;
+
+ pmu_idle_nb = devm_kzalloc(&arm_pmu->plat_device->dev,
+ sizeof(*pmu_idle_nb), GFP_KERNEL);
+ if (!pmu_idle_nb)
+ return -ENOMEM;
+
+ pmu_idle_nb->cpu_pmu = arm_pmu;
+ pmu_idle_nb->perf_cpu_idle_nb.notifier_call = perf_cpu_idle_notifier;
+ idle_notifier_register(&pmu_idle_nb->perf_cpu_idle_nb);
+
+ ret = smp_call_function_any(&arm_pmu->supported_cpus,
+ armv8pmu_read_num_pmnc_events,
+ &arm_pmu->num_events, 1);
+ if (ret)
+ idle_notifier_unregister(&pmu_idle_nb->perf_cpu_idle_nb);
+ return ret;
+
+
+}
+
+void armv8_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ cpu_pmu->handle_irq = armv8pmu_handle_irq,
+ cpu_pmu->enable = armv8pmu_enable_event,
+ cpu_pmu->disable = armv8pmu_disable_event,
+ cpu_pmu->read_counter = armv8pmu_read_counter,
+ cpu_pmu->write_counter = armv8pmu_write_counter,
+ cpu_pmu->get_event_idx = armv8pmu_get_event_idx,
+ cpu_pmu->start = armv8pmu_start,
+ cpu_pmu->stop = armv8pmu_stop,
+ cpu_pmu->reset = armv8pmu_reset,
+ cpu_pmu->max_period = (1LLU << 32) - 1,
+ cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
+}
+
+static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
+{
+ armv8_pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv8_pmuv3";
+ cpu_pmu->map_event = armv8_pmuv3_map_event;
+ return armv8pmu_probe_num_events(cpu_pmu);
+}
+
+static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv8_pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv8_cortex_a53";
+ cpu_pmu->map_event = armv8_a53_map_event;
+ return armv8pmu_probe_num_events(cpu_pmu);
+}
+
+static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
+{
+ armv8_pmu_init(cpu_pmu);
+ cpu_pmu->name = "armv8_cortex_a57";
+ cpu_pmu->map_event = armv8_a57_map_event;
+ return armv8pmu_probe_num_events(cpu_pmu);
+}
+
+static const struct of_device_id armv8_pmu_of_device_ids[] = {
+ {.compatible = "arm,armv8-pmuv3", .data = armv8_pmuv3_init},
+ {.compatible = "arm,cortex-a53-pmu", .data = armv8_a53_pmu_init},
+ {.compatible = "arm,cortex-a57-pmu", .data = armv8_a57_pmu_init},
+#ifdef CONFIG_ARCH_MSM8996
+ {.compatible = "qcom,kryo-pmuv3", .data = kryo_pmu_init},
+#endif
+ {},
+};
+
+static int armv8_pmu_device_probe(struct platform_device *pdev)
+{
+ return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
+}
+
+static struct platform_driver armv8_pmu_driver = {
+ .driver = {
+ .name = "armv8-pmu",
+ .of_match_table = armv8_pmu_of_device_ids,
+ .suppress_bind_attrs = true,
+ },
+ .probe = armv8_pmu_device_probe,
+};
+
+static int __init register_armv8_pmu_driver(void)
+{
+ return platform_driver_register(&armv8_pmu_driver);
+}
+device_initcall(register_armv8_pmu_driver);
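
[Editor's note, not part of the patch] As a usage sketch rather than driver code: the generic mappings in armv8_pmuv3_perf_map are reached through the standard perf_event_open() syscall, and the exclude_kernel attribute flows into armv8pmu_set_event_filter() as ARMV8_EXCLUDE_EL1. The example below counts CPU cycles (PERF_COUNT_HW_CPU_CYCLES, mapped above to ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES) around a busy loop; it uses only the public perf ABI, no driver internals:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t cycles;
	volatile unsigned long i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* -> ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES */
	attr.disabled = 1;
	attr.exclude_kernel = 1;		/* -> ARMV8_EXCLUDE_EL1 via set_event_filter */

	/* Count on the calling thread, on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 1000000; i++)
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &cycles, sizeof(cycles)) == sizeof(cycles))
		printf("cycles: %llu\n", (unsigned long long)cycles);
	close(fd);
	return 0;
}
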