| field | value | date |
|---|---|---|
| author | Dave Airlie <airlied@redhat.com> | 2015-04-20 11:32:26 +1000 |
| committer | Dave Airlie <airlied@redhat.com> | 2015-04-20 13:05:20 +1000 |
| commit | 2c33ce009ca2389dbf0535d0672214d09738e35e (patch) | |
| tree | 6186a6458c3c160385d794a23eaf07c786a9e61b /arch/mips/kernel/perf_event_mipsxx.c | |
| parent | cec32a47010647e8b0603726ebb75b990a4057a4 (diff) | |
| parent | 09d51602cf84a1264946711dd4ea0dddbac599a1 (diff) | |
Merge Linus master into drm-next
The merge is clean, but the ARM build fails afterwards,
due to API changes in the regulator tree.
I've included the patch in the merge to fix the build.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'arch/mips/kernel/perf_event_mipsxx.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/mips/kernel/perf_event_mipsxx.c | 83 |

1 file changed, 78 insertions(+), 5 deletions(-)
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 9466184d0039..cc1b6fadf089 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -558,8 +558,10 @@ static int mipspmu_get_irq(void)
 	if (mipspmu.irq >= 0) {
 		/* Request my own irq handler. */
 		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
-				  IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
-				  "mips_perf_pmu", NULL);
+				  IRQF_PERCPU | IRQF_NOBALANCING |
+				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
+				  IRQF_SHARED,
+				  "mips_perf_pmu", &mipspmu);
 		if (err) {
 			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
 				mipspmu.irq);
@@ -582,7 +584,7 @@ static int mipspmu_get_irq(void)
 static void mipspmu_free_irq(void)
 {
 	if (mipspmu.irq >= 0)
-		free_irq(mipspmu.irq, NULL);
+		free_irq(mipspmu.irq, &mipspmu);
 	else if (cp0_perfcount_irq < 0)
 		perf_irq = save_perf_irq;
 }
@@ -775,6 +777,7 @@ static int n_counters(void)
 
 	case CPU_R12000:
 	case CPU_R14000:
+	case CPU_R16000:
 		counters = 4;
 		break;
 
@@ -822,6 +825,13 @@ static const struct mips_perf_event mipsxxcore_event_map2
 	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
 };
 
+static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
+	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
+	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
+	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
+};
+
 static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
 	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
@@ -1005,6 +1015,61 @@ static const struct mips_perf_event mipsxxcore_cache_map2
 },
 };
 
+static const struct mips_perf_event loongson3_cache_map
+				[PERF_COUNT_HW_CACHE_MAX]
+				[PERF_COUNT_HW_CACHE_OP_MAX]
+				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+	/*
+	 * Like some other architectures (e.g. ARM), the performance
+	 * counters don't differentiate between read and write
+	 * accesses/misses, so this isn't strictly correct, but it's the
+	 * best we can do. Writes and reads get combined.
+	 */
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+	},
+},
+[C(L1I)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+	},
+},
+[C(DTLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+	},
+},
+[C(ITLB)] = {
+	[C(OP_READ)] = {
+		[C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+	},
+},
+[C(BPU)] = {
+	/* Using the same code for *HW_BRANCH* */
+	[C(OP_READ)] = {
+		[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN },
+		[C(RESULT_MISS)] = { 0x02, CNTR_ODD },
+	},
+	[C(OP_WRITE)] = {
+		[C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN },
+		[C(RESULT_MISS)] = { 0x02, CNTR_ODD },
+	},
+},
+};
+
 /* BMIPS5000 */
 static const struct mips_perf_event bmips5000_cache_map
 				[PERF_COUNT_HW_CACHE_MAX]
@@ -1539,6 +1604,10 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 		else
 			raw_event.cntr_mask =
 				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+		break;
+	case CPU_LOONGSON3:
+		raw_event.cntr_mask = raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+		break;
 	}
 
 	raw_event.event_id = base_id;
@@ -1615,8 +1684,7 @@ init_hw_perf_events(void)
 
 	if (get_c0_perfcount_int)
 		irq = get_c0_perfcount_int();
-	else if ((cp0_perfcount_irq >= 0) &&
-		 (cp0_compare_irq != cp0_perfcount_irq))
+	else if (cp0_perfcount_irq >= 0)
 		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
 	else
 		irq = -1;
@@ -1669,6 +1737,11 @@ init_hw_perf_events(void)
 		mipspmu.general_event_map = &mipsxxcore_event_map;
 		mipspmu.cache_event_map = &mipsxxcore_cache_map;
 		break;
+	case CPU_LOONGSON3:
+		mipspmu.name = "mips/loongson3";
+		mipspmu.general_event_map = &loongson3_event_map;
+		mipspmu.cache_event_map = &loongson3_cache_map;
+		break;
 	case CPU_CAVIUM_OCTEON:
 	case CPU_CAVIUM_OCTEON_PLUS:
 	case CPU_CAVIUM_OCTEON2:
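Not part of the commit itself: a minimal userspace sketch showing how the Loongson-3 mappings added above (for example PERF_COUNT_HW_BRANCH_MISSES) would be exercised through the generic perf_event_open(2) interface. The test program and its workload loop are hypothetical and assume a MIPS/Loongson-3 system running a kernel with this change.

```c
/*
 * Hypothetical check of the new Loongson-3 event mappings: count
 * PERF_COUNT_HW_BRANCH_MISSES around a busy loop in userspace.
 * perf_event_open(2) has no glibc wrapper, so it is invoked via syscall(2).
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	volatile int i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_BRANCH_MISSES;	/* maps to event 0x01, odd counter */
	attr.disabled = 1;
	attr.exclude_kernel = 1;

	/* Monitor the calling task on any CPU. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 1000000; i++)
		;	/* placeholder workload */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("branch misses: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
```

Once the PMU registers under the name "mips/loongson3", the same event should also be reachable through the standard tooling, e.g. `perf stat -e branch-misses <cmd>`.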
