| field | value | date |
|---|---|---|
| author | Dave Airlie <airlied@redhat.com> | 2015-04-20 11:32:26 +1000 |
| committer | Dave Airlie <airlied@redhat.com> | 2015-04-20 13:05:20 +1000 |
| commit | 2c33ce009ca2389dbf0535d0672214d09738e35e | |
| tree | 6186a6458c3c160385d794a23eaf07c786a9e61b /drivers/irqchip | |
| parent | cec32a47010647e8b0603726ebb75b990a4057a4 | |
| parent | 09d51602cf84a1264946711dd4ea0dddbac599a1 | |
Merge Linus master into drm-next
The merge is clean, but the ARM build fails afterwards due to API changes
in the regulator tree. I've included the patch in the merge to fix the build.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/irqchip')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/irqchip/Kconfig | 12 |
| -rw-r--r-- | drivers/irqchip/Makefile | 4 |
| -rw-r--r-- | drivers/irqchip/irq-armada-370-xp.c | 89 |
| -rw-r--r-- | drivers/irqchip/irq-bcm7038-l1.c | 335 |
| -rw-r--r-- | drivers/irqchip/irq-bcm7120-l2.c | 193 |
| -rw-r--r-- | drivers/irqchip/irq-brcmstb-l2.c | 9 |
| -rw-r--r-- | drivers/irqchip/irq-crossbar.c | 210 |
| -rw-r--r-- | drivers/irqchip/irq-digicolor.c | 4 |
| -rw-r--r-- | drivers/irqchip/irq-gic-v3.c | 83 |
| -rw-r--r-- | drivers/irqchip/irq-gic.c | 129 |
| -rw-r--r-- | drivers/irqchip/irq-mips-gic.c | 61 |
| -rw-r--r-- | drivers/irqchip/irq-renesas-irqc.c | 58 |
| -rw-r--r-- | drivers/irqchip/irq-st.c | 206 |
| -rw-r--r-- | drivers/irqchip/irq-tegra.c | 377 |
| -rw-r--r-- | drivers/irqchip/irq-vf610-mscm-ir.c | 212 |

15 files changed, 1717 insertions(+), 265 deletions(-)
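The new controllers added in this pull (irq-bcm7038-l1.c, irq-st.c, irq-tegra.c, irq-vf610-mscm-ir.c) all follow the registration pattern that the Kconfig and Makefile hunks below wire up: a hidden Kconfig symbol selected by the platform, a Makefile rule keyed on that symbol, and a device-tree init hook registered with IRQCHIP_DECLARE(). A minimal sketch of that skeleton follows; the "acme" names are hypothetical and only illustrate the shape, not any driver in this merge.

```c
/*
 * Hypothetical skeleton of the irqchip registration pattern used by the
 * new drivers in this merge (all "acme" names are illustrative only).
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include "irqchip.h"		/* provides IRQCHIP_DECLARE() in this tree */

#define ACME_NR_IRQS	32

static int acme_intc_map(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	/*
	 * A real driver installs its own struct irq_chip here; the stock
	 * dummy_irq_chip is used only to keep the sketch self-contained.
	 */
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static const struct irq_domain_ops acme_intc_ops = {
	.xlate	= irq_domain_xlate_onecell,
	.map	= acme_intc_map,
};

static int __init acme_intc_of_init(struct device_node *np,
				    struct device_node *parent)
{
	void __iomem *base = of_iomap(np, 0);	/* first "reg" entry */

	if (!base)
		return -ENOMEM;

	/* one linear domain covering every hardware interrupt line */
	if (!irq_domain_add_linear(np, ACME_NR_IRQS, &acme_intc_ops, base))
		return -ENOMEM;

	return 0;
}
IRQCHIP_DECLARE(acme_intc, "acme,intc", acme_intc_of_init);
```

A full driver, such as irq-bcm7038-l1.c in the diff below, additionally supplies its own struct irq_chip with mask/unmask (and optionally set_affinity) callbacks, caches the mask state per word, and hooks a chained handler on each parent interrupt it maps.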
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index cc79d2a5a8c2..6de62a96e79c 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -60,6 +60,11 @@ config ATMEL_AIC5_IRQ select MULTI_IRQ_HANDLER select SPARSE_IRQ +config BCM7038_L1_IRQ + bool + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + config BCM7120_L2_IRQ bool select GENERIC_IRQ_CHIP @@ -110,6 +115,13 @@ config RENESAS_IRQC bool select IRQ_DOMAIN +config ST_IRQCHIP + bool + select REGMAP + select MFD_SYSCON + help + Enables SysCfg Controlled IRQs on STi based platforms. + config TB10X_IRQC bool select IRQ_DOMAIN diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 42965d2476bb..dda4927e47a6 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o obj-$(CONFIG_ARCH_MMP) += irq-mmp.o obj-$(CONFIG_ARCH_MVEBU) += irq-armada-370-xp.o obj-$(CONFIG_ARCH_MXS) += irq-mxs.o +obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o obj-$(CONFIG_ARCH_S3C24XX) += irq-s3c24xx.o obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o obj-$(CONFIG_METAG) += irq-metag-ext.o @@ -33,10 +34,13 @@ obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o +obj-$(CONFIG_ST_IRQCHIP) += irq-st.o obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o obj-$(CONFIG_XTENSA_MX) += irq-xtensa-mx.o obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o +obj-$(CONFIG_SOC_VF610) += irq-vf610-mscm-ir.o +obj-$(CONFIG_BCM7038_L1_IRQ) += irq-bcm7038-l1.o obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 4387dae14e45..daccc8bdbb42 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -38,6 +38,8 @@ /* Interrupt Controller Registers Map */ #define ARMADA_370_XP_INT_SET_MASK_OFFS (0x48) #define ARMADA_370_XP_INT_CLEAR_MASK_OFFS (0x4C) +#define ARMADA_370_XP_INT_FABRIC_MASK_OFFS (0x54) +#define ARMADA_370_XP_INT_CAUSE_PERF(cpu) (1 << cpu) #define ARMADA_370_XP_INT_CONTROL (0x00) #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) @@ -56,6 +58,7 @@ #define ARMADA_370_XP_MAX_PER_CPU_IRQS (28) #define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5) +#define ARMADA_370_XP_FABRIC_IRQ (3) #define IPI_DOORBELL_START (0) #define IPI_DOORBELL_END (8) @@ -77,6 +80,17 @@ static DEFINE_MUTEX(msi_used_lock); static phys_addr_t msi_doorbell_addr; #endif +static inline bool is_percpu_irq(irq_hw_number_t irq) +{ + switch (irq) { + case ARMADA_370_XP_TIMER0_PER_CPU_IRQ: + case ARMADA_370_XP_FABRIC_IRQ: + return true; + default: + return false; + } +} + /* * In SMP mode: * For shared global interrupts, mask/unmask global enable bit @@ -86,7 +100,7 @@ static void armada_370_xp_irq_mask(struct irq_data *d) { irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + if (!is_percpu_irq(hwirq)) writel(hwirq, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS); else @@ -98,7 +112,7 @@ static void armada_370_xp_irq_unmask(struct irq_data *d) { irq_hw_number_t hwirq = irqd_to_hwirq(d); - if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + if (!is_percpu_irq(hwirq)) writel(hwirq, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); else @@ -281,20 +295,21 @@ static struct irq_chip armada_370_xp_irq_chip = { #ifdef CONFIG_SMP 
.irq_set_affinity = armada_xp_set_affinity, #endif + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, }; static int armada_370_xp_mpic_irq_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { armada_370_xp_irq_mask(irq_get_irq_data(virq)); - if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ) + if (!is_percpu_irq(hw)) writel(hw, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); else writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS); irq_set_status_flags(virq, IRQ_LEVEL); - if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) { + if (is_percpu_irq(hw)) { irq_set_percpu_devid(virq); irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip, handle_percpu_devid_irq); @@ -308,28 +323,6 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h, return 0; } -#ifdef CONFIG_SMP -static void armada_mpic_send_doorbell(const struct cpumask *mask, - unsigned int irq) -{ - int cpu; - unsigned long map = 0; - - /* Convert our logical CPU mask into a physical one. */ - for_each_cpu(cpu, mask) - map |= 1 << cpu_logical_map(cpu); - - /* - * Ensure that stores to Normal memory are visible to the - * other CPUs before issuing the IPI. - */ - dsb(); - - /* submit softirq */ - writel((map << 8) | irq, main_int_base + - ARMADA_370_XP_SW_TRIG_INT_OFFS); -} - static void armada_xp_mpic_smp_cpu_init(void) { u32 control; @@ -352,11 +345,44 @@ static void armada_xp_mpic_smp_cpu_init(void) writel(0, per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS); } +static void armada_xp_mpic_perf_init(void) +{ + unsigned long cpuid = cpu_logical_map(smp_processor_id()); + + /* Enable Performance Counter Overflow interrupts */ + writel(ARMADA_370_XP_INT_CAUSE_PERF(cpuid), + per_cpu_int_base + ARMADA_370_XP_INT_FABRIC_MASK_OFFS); +} + +#ifdef CONFIG_SMP +static void armada_mpic_send_doorbell(const struct cpumask *mask, + unsigned int irq) +{ + int cpu; + unsigned long map = 0; + + /* Convert our logical CPU mask into a physical one. */ + for_each_cpu(cpu, mask) + map |= 1 << cpu_logical_map(cpu); + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. 
+ */ + dsb(); + + /* submit softirq */ + writel((map << 8) | irq, main_int_base + + ARMADA_370_XP_SW_TRIG_INT_OFFS); +} + static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) { + armada_xp_mpic_perf_init(); armada_xp_mpic_smp_cpu_init(); + } return NOTIFY_OK; } @@ -369,8 +395,10 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { static int mpic_cascaded_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) + if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) { + armada_xp_mpic_perf_init(); enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); + } return NOTIFY_OK; } @@ -379,7 +407,6 @@ static struct notifier_block mpic_cascaded_cpu_notifier = { .notifier_call = mpic_cascaded_secondary_init, .priority = 100, }; - #endif /* CONFIG_SMP */ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { @@ -588,9 +615,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, BUG_ON(!armada_370_xp_mpic_domain); -#ifdef CONFIG_SMP + /* Setup for the boot CPU */ + armada_xp_mpic_perf_init(); armada_xp_mpic_smp_cpu_init(); -#endif armada_370_xp_msi_init(node, main_int_res.start); diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c new file mode 100644 index 000000000000..d3b8c8be15f6 --- /dev/null +++ b/drivers/irqchip/irq-bcm7038-l1.c @@ -0,0 +1,335 @@ +/* + * Broadcom BCM7038 style Level 1 interrupt controller driver + * + * Copyright (C) 2014 Broadcom Corporation + * Author: Kevin Cernekee + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/bitops.h> +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/types.h> +#include <linux/irqchip/chained_irq.h> + +#include "irqchip.h" + +#define IRQS_PER_WORD 32 +#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4) +#define MAX_WORDS 8 + +struct bcm7038_l1_cpu; + +struct bcm7038_l1_chip { + raw_spinlock_t lock; + unsigned int n_words; + struct irq_domain *domain; + struct bcm7038_l1_cpu *cpus[NR_CPUS]; + u8 affinity[MAX_WORDS * IRQS_PER_WORD]; +}; + +struct bcm7038_l1_cpu { + void __iomem *map_base; + u32 mask_cache[0]; +}; + +/* + * STATUS/MASK_STATUS/MASK_SET/MASK_CLEAR are packed one right after another: + * + * 7038: + * 0x1000_1400: W0_STATUS + * 0x1000_1404: W1_STATUS + * 0x1000_1408: W0_MASK_STATUS + * 0x1000_140c: W1_MASK_STATUS + * 0x1000_1410: W0_MASK_SET + * 0x1000_1414: W1_MASK_SET + * 0x1000_1418: W0_MASK_CLEAR + * 0x1000_141c: W1_MASK_CLEAR + * + * 7445: + * 0xf03e_1500: W0_STATUS + * 0xf03e_1504: W1_STATUS + * 0xf03e_1508: W2_STATUS + * 0xf03e_150c: W3_STATUS + * 0xf03e_1510: W4_STATUS + * 0xf03e_1514: W0_MASK_STATUS + * 0xf03e_1518: W1_MASK_STATUS + * [...] 
+ */ + +static inline unsigned int reg_status(struct bcm7038_l1_chip *intc, + unsigned int word) +{ + return (0 * intc->n_words + word) * sizeof(u32); +} + +static inline unsigned int reg_mask_status(struct bcm7038_l1_chip *intc, + unsigned int word) +{ + return (1 * intc->n_words + word) * sizeof(u32); +} + +static inline unsigned int reg_mask_set(struct bcm7038_l1_chip *intc, + unsigned int word) +{ + return (2 * intc->n_words + word) * sizeof(u32); +} + +static inline unsigned int reg_mask_clr(struct bcm7038_l1_chip *intc, + unsigned int word) +{ + return (3 * intc->n_words + word) * sizeof(u32); +} + +static inline u32 l1_readl(void __iomem *reg) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + return ioread32be(reg); + else + return readl(reg); +} + +static inline void l1_writel(u32 val, void __iomem *reg) +{ + if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) + iowrite32be(val, reg); + else + writel(val, reg); +} + +static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc) +{ + struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc); + struct bcm7038_l1_cpu *cpu; + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int idx; + +#ifdef CONFIG_SMP + cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; +#else + cpu = intc->cpus[0]; +#endif + + chained_irq_enter(chip, desc); + + for (idx = 0; idx < intc->n_words; idx++) { + int base = idx * IRQS_PER_WORD; + unsigned long pending, flags; + int hwirq; + + raw_spin_lock_irqsave(&intc->lock, flags); + pending = l1_readl(cpu->map_base + reg_status(intc, idx)) & + ~cpu->mask_cache[idx]; + raw_spin_unlock_irqrestore(&intc->lock, flags); + + for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { + generic_handle_irq(irq_find_mapping(intc->domain, + base + hwirq)); + } + } + + chained_irq_exit(chip, desc); +} + +static void __bcm7038_l1_unmask(struct irq_data *d, unsigned int cpu_idx) +{ + struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); + u32 word = d->hwirq / IRQS_PER_WORD; + u32 mask = BIT(d->hwirq % IRQS_PER_WORD); + + intc->cpus[cpu_idx]->mask_cache[word] &= ~mask; + l1_writel(mask, intc->cpus[cpu_idx]->map_base + + reg_mask_clr(intc, word)); +} + +static void __bcm7038_l1_mask(struct irq_data *d, unsigned int cpu_idx) +{ + struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); + u32 word = d->hwirq / IRQS_PER_WORD; + u32 mask = BIT(d->hwirq % IRQS_PER_WORD); + + intc->cpus[cpu_idx]->mask_cache[word] |= mask; + l1_writel(mask, intc->cpus[cpu_idx]->map_base + + reg_mask_set(intc, word)); +} + +static void bcm7038_l1_unmask(struct irq_data *d) +{ + struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); + unsigned long flags; + + raw_spin_lock_irqsave(&intc->lock, flags); + __bcm7038_l1_unmask(d, intc->affinity[d->hwirq]); + raw_spin_unlock_irqrestore(&intc->lock, flags); +} + +static void bcm7038_l1_mask(struct irq_data *d) +{ + struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); + unsigned long flags; + + raw_spin_lock_irqsave(&intc->lock, flags); + __bcm7038_l1_mask(d, intc->affinity[d->hwirq]); + raw_spin_unlock_irqrestore(&intc->lock, flags); +} + +static int bcm7038_l1_set_affinity(struct irq_data *d, + const struct cpumask *dest, + bool force) +{ + struct bcm7038_l1_chip *intc = irq_data_get_irq_chip_data(d); + unsigned long flags; + irq_hw_number_t hw = d->hwirq; + u32 word = hw / IRQS_PER_WORD; + u32 mask = BIT(hw % IRQS_PER_WORD); + unsigned int first_cpu = cpumask_any_and(dest, cpu_online_mask); + bool was_disabled; + + 
raw_spin_lock_irqsave(&intc->lock, flags); + + was_disabled = !!(intc->cpus[intc->affinity[hw]]->mask_cache[word] & + mask); + __bcm7038_l1_mask(d, intc->affinity[hw]); + intc->affinity[hw] = first_cpu; + if (!was_disabled) + __bcm7038_l1_unmask(d, first_cpu); + + raw_spin_unlock_irqrestore(&intc->lock, flags); + return 0; +} + +static int __init bcm7038_l1_init_one(struct device_node *dn, + unsigned int idx, + struct bcm7038_l1_chip *intc) +{ + struct resource res; + resource_size_t sz; + struct bcm7038_l1_cpu *cpu; + unsigned int i, n_words, parent_irq; + + if (of_address_to_resource(dn, idx, &res)) + return -EINVAL; + sz = resource_size(&res); + n_words = sz / REG_BYTES_PER_IRQ_WORD; + + if (n_words > MAX_WORDS) + return -EINVAL; + else if (!intc->n_words) + intc->n_words = n_words; + else if (intc->n_words != n_words) + return -EINVAL; + + cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32), + GFP_KERNEL); + if (!cpu) + return -ENOMEM; + + cpu->map_base = ioremap(res.start, sz); + if (!cpu->map_base) + return -ENOMEM; + + for (i = 0; i < n_words; i++) { + l1_writel(0xffffffff, cpu->map_base + reg_mask_set(intc, i)); + cpu->mask_cache[i] = 0xffffffff; + } + + parent_irq = irq_of_parse_and_map(dn, idx); + if (!parent_irq) { + pr_err("failed to map parent interrupt %d\n", parent_irq); + return -EINVAL; + } + irq_set_handler_data(parent_irq, intc); + irq_set_chained_handler(parent_irq, bcm7038_l1_irq_handle); + + return 0; +} + +static struct irq_chip bcm7038_l1_irq_chip = { + .name = "bcm7038-l1", + .irq_mask = bcm7038_l1_mask, + .irq_unmask = bcm7038_l1_unmask, + .irq_set_affinity = bcm7038_l1_set_affinity, +}; + +static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw_irq) +{ + irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq); + irq_set_chip_data(virq, d->host_data); + return 0; +} + +static const struct irq_domain_ops bcm7038_l1_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = bcm7038_l1_map, +}; + +int __init bcm7038_l1_of_init(struct device_node *dn, + struct device_node *parent) +{ + struct bcm7038_l1_chip *intc; + int idx, ret; + + intc = kzalloc(sizeof(*intc), GFP_KERNEL); + if (!intc) + return -ENOMEM; + + raw_spin_lock_init(&intc->lock); + for_each_possible_cpu(idx) { + ret = bcm7038_l1_init_one(dn, idx, intc); + if (ret < 0) { + if (idx) + break; + pr_err("failed to remap intc L1 registers\n"); + goto out_free; + } + } + + intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words, + &bcm7038_l1_domain_ops, + intc); + if (!intc->domain) { + ret = -ENOMEM; + goto out_unmap; + } + + pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n", + intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words); + + return 0; + +out_unmap: + for_each_possible_cpu(idx) { + struct bcm7038_l1_cpu *cpu = intc->cpus[idx]; + + if (cpu) { + if (cpu->map_base) + iounmap(cpu->map_base); + kfree(cpu); + } + } +out_free: + kfree(intc); + return ret; +} + +IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init); diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c index 8eec8e1201d9..3ba5cc780fcb 100644 --- a/drivers/irqchip/irq-bcm7120-l2.c +++ b/drivers/irqchip/irq-bcm7120-l2.c @@ -14,6 +14,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/kconfig.h> +#include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/of_irq.h> @@ -34,15 +35,21 @@ #define IRQSTAT 0x04 #define MAX_WORDS 4 +#define MAX_MAPPINGS 
(MAX_WORDS * 2) #define IRQS_PER_WORD 32 struct bcm7120_l2_intc_data { unsigned int n_words; - void __iomem *base[MAX_WORDS]; + void __iomem *map_base[MAX_MAPPINGS]; + void __iomem *pair_base[MAX_WORDS]; + int en_offset[MAX_WORDS]; + int stat_offset[MAX_WORDS]; struct irq_domain *domain; bool can_wake; u32 irq_fwd_mask[MAX_WORDS]; u32 irq_map_mask[MAX_WORDS]; + int num_parent_irqs; + const __be32 *map_mask_prop; }; static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) @@ -61,7 +68,8 @@ static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) int hwirq; irq_gc_lock(gc); - pending = irq_reg_readl(gc, IRQSTAT) & gc->mask_cache; + pending = irq_reg_readl(gc, b->stat_offset[idx]) & + gc->mask_cache; irq_gc_unlock(gc); for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { @@ -76,27 +84,30 @@ static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc) static void bcm7120_l2_intc_suspend(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); struct bcm7120_l2_intc_data *b = gc->private; irq_gc_lock(gc); if (b->can_wake) - irq_reg_writel(gc, gc->mask_cache | gc->wake_active, IRQEN); + irq_reg_writel(gc, gc->mask_cache | gc->wake_active, + ct->regs.mask); irq_gc_unlock(gc); } static void bcm7120_l2_intc_resume(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); /* Restore the saved mask */ irq_gc_lock(gc); - irq_reg_writel(gc, gc->mask_cache, IRQEN); + irq_reg_writel(gc, gc->mask_cache, ct->regs.mask); irq_gc_unlock(gc); } static int bcm7120_l2_intc_init_one(struct device_node *dn, struct bcm7120_l2_intc_data *data, - int irq, const __be32 *map_mask) + int irq) { int parent_irq; unsigned int idx; @@ -110,9 +121,15 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn, /* For multiple parent IRQs with multiple words, this looks like: * <irq0_w0 irq0_w1 irq1_w0 irq1_w1 ...> */ - for (idx = 0; idx < data->n_words; idx++) - data->irq_map_mask[idx] |= - be32_to_cpup(map_mask + irq * data->n_words + idx); + for (idx = 0; idx < data->n_words; idx++) { + if (data->map_mask_prop) { + data->irq_map_mask[idx] |= + be32_to_cpup(data->map_mask_prop + + irq * data->n_words + idx); + } else { + data->irq_map_mask[idx] = 0xffffffff; + } + } irq_set_handler_data(parent_irq, data); irq_set_chained_handler(parent_irq, bcm7120_l2_intc_irq_handle); @@ -120,68 +137,107 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn, return 0; } -int __init bcm7120_l2_intc_of_init(struct device_node *dn, - struct device_node *parent) +static int __init bcm7120_l2_intc_iomap_7120(struct device_node *dn, + struct bcm7120_l2_intc_data *data) +{ + int ret; + + data->map_base[0] = of_iomap(dn, 0); + if (!data->map_base[0]) { + pr_err("unable to map registers\n"); + return -ENOMEM; + } + + data->pair_base[0] = data->map_base[0]; + data->en_offset[0] = IRQEN; + data->stat_offset[0] = IRQSTAT; + data->n_words = 1; + + ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask", + data->irq_fwd_mask, data->n_words); + if (ret != 0 && ret != -EINVAL) { + /* property exists but has the wrong number of words */ + pr_err("invalid brcm,int-fwd-mask property\n"); + return -EINVAL; + } + + data->map_mask_prop = of_get_property(dn, "brcm,int-map-mask", &ret); + if (!data->map_mask_prop || + (ret != (sizeof(__be32) * data->num_parent_irqs * data->n_words))) { + pr_err("invalid brcm,int-map-mask 
property\n"); + return -EINVAL; + } + + return 0; +} + +static int __init bcm7120_l2_intc_iomap_3380(struct device_node *dn, + struct bcm7120_l2_intc_data *data) +{ + unsigned int gc_idx; + + for (gc_idx = 0; gc_idx < MAX_WORDS; gc_idx++) { + unsigned int map_idx = gc_idx * 2; + void __iomem *en = of_iomap(dn, map_idx + 0); + void __iomem *stat = of_iomap(dn, map_idx + 1); + void __iomem *base = min(en, stat); + + data->map_base[map_idx + 0] = en; + data->map_base[map_idx + 1] = stat; + + if (!base) + break; + + data->pair_base[gc_idx] = base; + data->en_offset[gc_idx] = en - base; + data->stat_offset[gc_idx] = stat - base; + } + + if (!gc_idx) { + pr_err("unable to map registers\n"); + return -EINVAL; + } + + data->n_words = gc_idx; + return 0; +} + +int __init bcm7120_l2_intc_probe(struct device_node *dn, + struct device_node *parent, + int (*iomap_regs_fn)(struct device_node *, + struct bcm7120_l2_intc_data *), + const char *intc_name) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; struct bcm7120_l2_intc_data *data; struct irq_chip_generic *gc; struct irq_chip_type *ct; - const __be32 *map_mask; - int num_parent_irqs; - int ret = 0, len; + int ret = 0; unsigned int idx, irq, flags; data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - for (idx = 0; idx < MAX_WORDS; idx++) { - data->base[idx] = of_iomap(dn, idx); - if (!data->base[idx]) - break; - data->n_words = idx + 1; - } - if (!data->n_words) { - pr_err("failed to remap intc L2 registers\n"); - ret = -ENOMEM; - goto out_unmap; - } - - /* Enable all interrupts specified in the interrupt forward mask; - * disable all others. If the property doesn't exist (-EINVAL), - * assume all zeroes. - */ - ret = of_property_read_u32_array(dn, "brcm,int-fwd-mask", - data->irq_fwd_mask, data->n_words); - if (ret == 0 || ret == -EINVAL) { - for (idx = 0; idx < data->n_words; idx++) - __raw_writel(data->irq_fwd_mask[idx], - data->base[idx] + IRQEN); - } else { - /* property exists but has the wrong number of words */ - pr_err("invalid int-fwd-mask property\n"); - ret = -EINVAL; - goto out_unmap; - } - - num_parent_irqs = of_irq_count(dn); - if (num_parent_irqs <= 0) { + data->num_parent_irqs = of_irq_count(dn); + if (data->num_parent_irqs <= 0) { pr_err("invalid number of parent interrupts\n"); ret = -ENOMEM; goto out_unmap; } - map_mask = of_get_property(dn, "brcm,int-map-mask", &len); - if (!map_mask || - (len != (sizeof(*map_mask) * num_parent_irqs * data->n_words))) { - pr_err("invalid brcm,int-map-mask property\n"); - ret = -EINVAL; + ret = iomap_regs_fn(dn, data); + if (ret < 0) goto out_unmap; + + for (idx = 0; idx < data->n_words; idx++) { + __raw_writel(data->irq_fwd_mask[idx], + data->pair_base[idx] + + data->en_offset[idx]); } - for (irq = 0; irq < num_parent_irqs; irq++) { - ret = bcm7120_l2_intc_init_one(dn, data, irq, map_mask); + for (irq = 0; irq < data->num_parent_irqs; irq++) { + ret = bcm7120_l2_intc_init_one(dn, data, irq); if (ret) goto out_unmap; } @@ -215,11 +271,12 @@ int __init bcm7120_l2_intc_of_init(struct device_node *dn, gc = irq_get_domain_generic_chip(data->domain, irq); gc->unused = 0xffffffff & ~data->irq_map_mask[idx]; - gc->reg_base = data->base[idx]; gc->private = data; ct = gc->chip_types; - ct->regs.mask = IRQEN; + gc->reg_base = data->pair_base[idx]; + ct->regs.mask = data->en_offset[idx]; + ct->chip.irq_mask = irq_gc_mask_clr_bit; ct->chip.irq_unmask = irq_gc_mask_set_bit; ct->chip.irq_ack = irq_gc_noop; @@ -236,20 +293,38 @@ int __init bcm7120_l2_intc_of_init(struct 
device_node *dn, } } - pr_info("registered BCM7120 L2 intc (mem: 0x%p, parent IRQ(s): %d)\n", - data->base[0], num_parent_irqs); + pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n", + intc_name, data->map_base[0], data->num_parent_irqs); return 0; out_free_domain: irq_domain_remove(data->domain); out_unmap: - for (idx = 0; idx < MAX_WORDS; idx++) { - if (data->base[idx]) - iounmap(data->base[idx]); + for (idx = 0; idx < MAX_MAPPINGS; idx++) { + if (data->map_base[idx]) + iounmap(data->map_base[idx]); } kfree(data); return ret; } + +int __init bcm7120_l2_intc_probe_7120(struct device_node *dn, + struct device_node *parent) +{ + return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_7120, + "BCM7120 L2"); +} + +int __init bcm7120_l2_intc_probe_3380(struct device_node *dn, + struct device_node *parent) +{ + return bcm7120_l2_intc_probe(dn, parent, bcm7120_l2_intc_iomap_3380, + "BCM3380 L2"); +} + IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc", - bcm7120_l2_intc_of_init); + bcm7120_l2_intc_probe_7120); + +IRQCHIP_DECLARE(bcm3380_l2_intc, "brcm,bcm3380-l2-intc", + bcm7120_l2_intc_probe_3380); diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index 313c2c64498a..d6bcc6be0777 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -136,7 +136,11 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np, /* Disable all interrupts by default */ writel(0xffffffff, data->base + CPU_MASK_SET); - writel(0xffffffff, data->base + CPU_CLEAR); + + /* Wakeup interrupts may be retained from S5 (cold boot) */ + data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake"); + if (!data->can_wake) + writel(0xffffffff, data->base + CPU_CLEAR); data->parent_irq = irq_of_parse_and_map(np, 0); if (!data->parent_irq) { @@ -188,8 +192,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np, ct->chip.irq_suspend = brcmstb_l2_intc_suspend; ct->chip.irq_resume = brcmstb_l2_intc_resume; - if (of_property_read_bool(np, "brcm,irq-can-wake")) { - data->can_wake = true; + if (data->can_wake) { /* This IRQ chip can wake the system, set all child interrupts * in wake_enabled mask */ diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index bbbaf5de65d2..692fe2bc8197 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c @@ -11,11 +11,12 @@ */ #include <linux/err.h> #include <linux/io.h> +#include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/slab.h> -#include <linux/irqchip/arm-gic.h> -#include <linux/irqchip/irq-crossbar.h> + +#include "irqchip.h" #define IRQ_FREE -1 #define IRQ_RESERVED -2 @@ -24,6 +25,7 @@ /** * struct crossbar_device - crossbar device description + * @lock: spinlock serializing access to @irq_map * @int_max: maximum number of supported interrupts * @safe_map: safe default value to initialize the crossbar * @max_crossbar_sources: Maximum number of crossbar sources @@ -33,6 +35,7 @@ * @write: register write function pointer */ struct crossbar_device { + raw_spinlock_t lock; uint int_max; uint safe_map; uint max_crossbar_sources; @@ -44,72 +47,101 @@ struct crossbar_device { static struct crossbar_device *cb; -static inline void crossbar_writel(int irq_no, int cb_no) +static void crossbar_writel(int irq_no, int cb_no) { writel(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); } -static inline void crossbar_writew(int irq_no, int cb_no) +static void crossbar_writew(int irq_no, int cb_no) { writew(cb_no, 
cb->crossbar_base + cb->register_offsets[irq_no]); } -static inline void crossbar_writeb(int irq_no, int cb_no) +static void crossbar_writeb(int irq_no, int cb_no) { writeb(cb_no, cb->crossbar_base + cb->register_offsets[irq_no]); } -static inline int get_prev_map_irq(int cb_no) -{ - int i; - - for (i = cb->int_max - 1; i >= 0; i--) - if (cb->irq_map[i] == cb_no) - return i; - - return -ENODEV; -} +static struct irq_chip crossbar_chip = { + .name = "CBAR", + .irq_eoi = irq_chip_eoi_parent, + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_wake = irq_chip_set_wake_parent, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif +}; -static inline int allocate_free_irq(int cb_no) +static int allocate_gic_irq(struct irq_domain *domain, unsigned virq, + irq_hw_number_t hwirq) { + struct of_phandle_args args; int i; + int err; + raw_spin_lock(&cb->lock); for (i = cb->int_max - 1; i >= 0; i--) { if (cb->irq_map[i] == IRQ_FREE) { - cb->irq_map[i] = cb_no; - return i; + cb->irq_map[i] = hwirq; + break; } } + raw_spin_unlock(&cb->lock); - return -ENODEV; -} + if (i < 0) + return -ENODEV; -static inline bool needs_crossbar_write(irq_hw_number_t hw) -{ - int cb_no; + args.np = domain->parent->of_node; + args.args_count = 3; + args.args[0] = 0; /* SPI */ + args.args[1] = i; + args.args[2] = IRQ_TYPE_LEVEL_HIGH; - if (hw > GIC_IRQ_START) { - cb_no = cb->irq_map[hw - GIC_IRQ_START]; - if (cb_no != IRQ_RESERVED && cb_no != IRQ_SKIP) - return true; - } + err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); + if (err) + cb->irq_map[i] = IRQ_FREE; + else + cb->write(i, hwirq); - return false; + return err; } -static int crossbar_domain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hw) +static int crossbar_domain_alloc(struct irq_domain *d, unsigned int virq, + unsigned int nr_irqs, void *data) { - if (needs_crossbar_write(hw)) - cb->write(hw - GIC_IRQ_START, cb->irq_map[hw - GIC_IRQ_START]); + struct of_phandle_args *args = data; + irq_hw_number_t hwirq; + int i; + + if (args->args_count != 3) + return -EINVAL; /* Not GIC compliant */ + if (args->args[0] != 0) + return -EINVAL; /* No PPI should point to this domain */ + + hwirq = args->args[1]; + if ((hwirq + nr_irqs) > cb->max_crossbar_sources) + return -EINVAL; /* Can't deal with this */ + + for (i = 0; i < nr_irqs; i++) { + int err = allocate_gic_irq(d, virq + i, hwirq + i); + + if (err) + return err; + + irq_domain_set_hwirq_and_chip(d, virq + i, hwirq + i, + &crossbar_chip, NULL); + } return 0; } /** - * crossbar_domain_unmap - unmap a crossbar<->irq connection - * @d: domain of irq to unmap - * @irq: virq number + * crossbar_domain_free - unmap/free a crossbar<->irq connection + * @domain: domain of irq to unmap + * @virq: virq number + * @nr_irqs: number of irqs to free * * We do not maintain a use count of total number of map/unmap * calls for a particular irq to find out if a irq can be really @@ -117,14 +149,20 @@ static int crossbar_domain_map(struct irq_domain *d, unsigned int irq, * after which irq is anyways unusable. So an explicit map has to be called * after that. 
*/ -static void crossbar_domain_unmap(struct irq_domain *d, unsigned int irq) +static void crossbar_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) { - irq_hw_number_t hw = irq_get_irq_data(irq)->hwirq; + int i; - if (needs_crossbar_write(hw)) { - cb->irq_map[hw - GIC_IRQ_START] = IRQ_FREE; - cb->write(hw - GIC_IRQ_START, cb->safe_map); + raw_spin_lock(&cb->lock); + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_domain_reset_irq_data(d); + cb->irq_map[d->hwirq] = IRQ_FREE; + cb->write(d->hwirq, cb->safe_map); } + raw_spin_unlock(&cb->lock); } static int crossbar_domain_xlate(struct irq_domain *d, @@ -133,44 +171,22 @@ static int crossbar_domain_xlate(struct irq_domain *d, unsigned long *out_hwirq, unsigned int *out_type) { - int ret; - int req_num = intspec[1]; - int direct_map_num; - - if (req_num >= cb->max_crossbar_sources) { - direct_map_num = req_num - cb->max_crossbar_sources; - if (direct_map_num < cb->int_max) { - ret = cb->irq_map[direct_map_num]; - if (ret == IRQ_RESERVED || ret == IRQ_SKIP) { - /* We use the interrupt num as h/w irq num */ - ret = direct_map_num; - goto found; - } - } - - pr_err("%s: requested crossbar number %d > max %d\n", - __func__, req_num, cb->max_crossbar_sources); - return -EINVAL; - } - - ret = get_prev_map_irq(req_num); - if (ret >= 0) - goto found; - - ret = allocate_free_irq(req_num); - - if (ret < 0) - return ret; - -found: - *out_hwirq = ret + GIC_IRQ_START; + if (d->of_node != controller) + return -EINVAL; /* Shouldn't happen, really... */ + if (intsize != 3) + return -EINVAL; /* Not GIC compliant */ + if (intspec[0] != 0) + return -EINVAL; /* No PPI should point to this domain */ + + *out_hwirq = intspec[1]; + *out_type = intspec[2]; return 0; } -static const struct irq_domain_ops routable_irq_domain_ops = { - .map = crossbar_domain_map, - .unmap = crossbar_domain_unmap, - .xlate = crossbar_domain_xlate +static const struct irq_domain_ops crossbar_domain_ops = { + .alloc = crossbar_domain_alloc, + .free = crossbar_domain_free, + .xlate = crossbar_domain_xlate, }; static int __init crossbar_of_init(struct device_node *node) @@ -293,7 +309,8 @@ static int __init crossbar_of_init(struct device_node *node) cb->write(i, cb->safe_map); } - register_routable_domain_ops(&routable_irq_domain_ops); + raw_spin_lock_init(&cb->lock); + return 0; err_reg_offset: @@ -309,18 +326,37 @@ err_cb: return ret; } -static const struct of_device_id crossbar_match[] __initconst = { - { .compatible = "ti,irq-crossbar" }, - {} -}; - -int __init irqcrossbar_init(void) +static int __init irqcrossbar_init(struct device_node *node, + struct device_node *parent) { - struct device_node *np; - np = of_find_matching_node(NULL, crossbar_match); - if (!np) + struct irq_domain *parent_domain, *domain; + int err; + + if (!parent) { + pr_err("%s: no parent, giving up\n", node->full_name); return -ENODEV; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("%s: unable to obtain parent domain\n", node->full_name); + return -ENXIO; + } + + err = crossbar_of_init(node); + if (err) + return err; + + domain = irq_domain_add_hierarchy(parent_domain, 0, + cb->max_crossbar_sources, + node, &crossbar_domain_ops, + NULL); + if (!domain) { + pr_err("%s: failed to allocated domain\n", node->full_name); + return -ENOMEM; + } - crossbar_of_init(np); return 0; } + +IRQCHIP_DECLARE(ti_irqcrossbar, "ti,irq-crossbar", irqcrossbar_init); diff --git a/drivers/irqchip/irq-digicolor.c 
b/drivers/irqchip/irq-digicolor.c index 930a2a2fac7f..3cbc658afe27 100644 --- a/drivers/irqchip/irq-digicolor.c +++ b/drivers/irqchip/irq-digicolor.c @@ -55,8 +55,8 @@ static void __exception_irq_entry digicolor_handle_irq(struct pt_regs *regs) } while (1); } -static void digicolor_set_gc(void __iomem *reg_base, unsigned irq_base, - unsigned en_reg, unsigned ack_reg) +static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base, + unsigned en_reg, unsigned ack_reg) { struct irq_chip_generic *gc; diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index fd8850def1b8..4f2fb62e6f37 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -195,6 +195,19 @@ static void gic_enable_redist(bool enable) /* * Routines to disable, enable, EOI and route interrupts */ +static int gic_peek_irq(struct irq_data *d, u32 offset) +{ + u32 mask = 1 << (gic_irq(d) % 32); + void __iomem *base; + + if (gic_irq_in_rdist(d)) + base = gic_data_rdist_sgi_base(); + else + base = gic_data.dist_base; + + return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); +} + static void gic_poke_irq(struct irq_data *d, u32 offset) { u32 mask = 1 << (gic_irq(d) % 32); @@ -223,6 +236,61 @@ static void gic_unmask_irq(struct irq_data *d) gic_poke_irq(d, GICD_ISENABLER); } +static int gic_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool val) +{ + u32 reg; + + if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + reg = val ? GICD_ISPENDR : GICD_ICPENDR; + break; + + case IRQCHIP_STATE_ACTIVE: + reg = val ? GICD_ISACTIVER : GICD_ICACTIVER; + break; + + case IRQCHIP_STATE_MASKED: + reg = val ? GICD_ICENABLER : GICD_ISENABLER; + break; + + default: + return -EINVAL; + } + + gic_poke_irq(d, reg); + return 0; +} + +static int gic_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + *val = gic_peek_irq(d, GICD_ISPENDR); + break; + + case IRQCHIP_STATE_ACTIVE: + *val = gic_peek_irq(d, GICD_ISACTIVER); + break; + + case IRQCHIP_STATE_MASKED: + *val = !gic_peek_irq(d, GICD_ISENABLER); + break; + + default: + return -EINVAL; + } + + return 0; +} + static void gic_eoi_irq(struct irq_data *d) { gic_write_eoir(gic_irq(d)); @@ -418,19 +486,6 @@ static void gic_cpu_init(void) } #ifdef CONFIG_SMP -static int gic_peek_irq(struct irq_data *d, u32 offset) -{ - u32 mask = 1 << (gic_irq(d) % 32); - void __iomem *base; - - if (gic_irq_in_rdist(d)) - base = gic_data_rdist_sgi_base(); - else - base = gic_data.dist_base; - - return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); -} - static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, void *hcpu) { @@ -601,6 +656,8 @@ static struct irq_chip gic_chip = { .irq_eoi = gic_eoi_irq, .irq_set_type = gic_set_type, .irq_set_affinity = gic_set_affinity, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, }; #define GIC_ID_NR (1U << gic_data.rdists.id_bits) diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 471e1cdc1933..a6ce3476834e 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -151,13 +151,24 @@ static inline unsigned int gic_irq(struct irq_data *d) /* * Routines to acknowledge, disable and enable interrupts */ -static void 
gic_mask_irq(struct irq_data *d) +static void gic_poke_irq(struct irq_data *d, u32 offset) { u32 mask = 1 << (gic_irq(d) % 32); + writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4); +} + +static int gic_peek_irq(struct irq_data *d, u32 offset) +{ + u32 mask = 1 << (gic_irq(d) % 32); + return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask); +} + +static void gic_mask_irq(struct irq_data *d) +{ unsigned long flags; raw_spin_lock_irqsave(&irq_controller_lock, flags); - writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); + gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR); if (gic_arch_extn.irq_mask) gic_arch_extn.irq_mask(d); raw_spin_unlock_irqrestore(&irq_controller_lock, flags); @@ -165,13 +176,12 @@ static void gic_mask_irq(struct irq_data *d) static void gic_unmask_irq(struct irq_data *d) { - u32 mask = 1 << (gic_irq(d) % 32); unsigned long flags; raw_spin_lock_irqsave(&irq_controller_lock, flags); if (gic_arch_extn.irq_unmask) gic_arch_extn.irq_unmask(d); - writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); + gic_poke_irq(d, GIC_DIST_ENABLE_SET); raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } @@ -186,6 +196,55 @@ static void gic_eoi_irq(struct irq_data *d) writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); } +static int gic_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool val) +{ + u32 reg; + + switch (which) { + case IRQCHIP_STATE_PENDING: + reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR; + break; + + case IRQCHIP_STATE_ACTIVE: + reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR; + break; + + case IRQCHIP_STATE_MASKED: + reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET; + break; + + default: + return -EINVAL; + } + + gic_poke_irq(d, reg); + return 0; +} + +static int gic_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + switch (which) { + case IRQCHIP_STATE_PENDING: + *val = gic_peek_irq(d, GIC_DIST_PENDING_SET); + break; + + case IRQCHIP_STATE_ACTIVE: + *val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET); + break; + + case IRQCHIP_STATE_MASKED: + *val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET); + break; + + default: + return -EINVAL; + } + + return 0; +} + static int gic_set_type(struct irq_data *d, unsigned int type) { void __iomem *base = gic_dist_base(d); @@ -329,6 +388,8 @@ static struct irq_chip gic_chip = { .irq_set_affinity = gic_set_affinity, #endif .irq_set_wake = gic_set_wake, + .irq_get_irqchip_state = gic_irq_get_irqchip_state, + .irq_set_irqchip_state = gic_irq_set_irqchip_state, }; void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) @@ -353,7 +414,7 @@ static u8 gic_get_cpumask(struct gic_chip_data *gic) break; } - if (!mask) + if (!mask && num_possible_cpus() > 1) pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); return mask; @@ -802,15 +863,12 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, handle_fasteoi_irq, NULL, NULL); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - - gic_routable_irq_domain_ops->map(d, irq, hw); } return 0; } static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) { - gic_routable_irq_domain_ops->unmap(d, irq); } static int gic_irq_domain_xlate(struct irq_domain *d, @@ -829,16 +887,8 @@ static int gic_irq_domain_xlate(struct irq_domain *d, *out_hwirq = intspec[1] + 16; /* For SPIs, we need to add 16 more to get 
the GIC irq ID number */ - if (!intspec[0]) { - ret = gic_routable_irq_domain_ops->xlate(d, controller, - intspec, - intsize, - out_hwirq, - out_type); - - if (IS_ERR_VALUE(ret)) - return ret; - } + if (!intspec[0]) + *out_hwirq += 16; *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; @@ -895,37 +945,11 @@ static const struct irq_domain_ops gic_irq_domain_ops = { .xlate = gic_irq_domain_xlate, }; -/* Default functions for routable irq domain */ -static int gic_routable_irq_domain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hw) +void gic_set_irqchip_flags(unsigned long flags) { - return 0; + gic_chip.flags |= flags; } -static void gic_routable_irq_domain_unmap(struct irq_domain *d, - unsigned int irq) -{ -} - -static int gic_routable_irq_domain_xlate(struct irq_domain *d, - struct device_node *controller, - const u32 *intspec, unsigned int intsize, - unsigned long *out_hwirq, - unsigned int *out_type) -{ - *out_hwirq += 16; - return 0; -} - -static const struct irq_domain_ops gic_default_routable_irq_domain_ops = { - .map = gic_routable_irq_domain_map, - .unmap = gic_routable_irq_domain_unmap, - .xlate = gic_routable_irq_domain_xlate, -}; - -const struct irq_domain_ops *gic_routable_irq_domain_ops = - &gic_default_routable_irq_domain_ops; - void __init gic_init_bases(unsigned int gic_nr, int irq_start, void __iomem *dist_base, void __iomem *cpu_base, u32 percpu_offset, struct device_node *node) @@ -933,7 +957,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, irq_hw_number_t hwirq_base; struct gic_chip_data *gic; int gic_irqs, irq_base, i; - int nr_routable_irqs; BUG_ON(gic_nr >= MAX_GIC_NR); @@ -989,15 +1012,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, gic->gic_irqs = gic_irqs; if (node) { /* DT case */ - const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops; - - if (!of_property_read_u32(node, "arm,routable-irqs", - &nr_routable_irqs)) { - ops = &gic_irq_domain_ops; - gic_irqs = nr_routable_irqs; - } - - gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic); + gic->domain = irq_domain_add_linear(node, gic_irqs, + &gic_irq_domain_hierarchy_ops, + gic); } else { /* Non-DT case */ /* * For primary GICs, skip over SGIs. diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 9acdc080e7ec..bc48b7dc89ec 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -166,6 +166,27 @@ cycle_t gic_read_compare(void) return (((cycle_t) hi) << 32) + lo; } + +void gic_start_count(void) +{ + u32 gicconfig; + + /* Start the counter */ + gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); + gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF); + gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); +} + +void gic_stop_count(void) +{ + u32 gicconfig; + + /* Stop the counter */ + gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG)); + gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF; + gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig); +} + #endif static bool gic_local_irq_is_routable(int intr) @@ -218,7 +239,7 @@ int gic_get_c0_compare_int(void) int gic_get_c0_perfcount_int(void) { if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) { - /* Is the erformance counter shared with the timer? */ + /* Is the performance counter shared with the timer? 
*/ if (cp0_perfcount_irq < 0) return -1; return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; @@ -227,6 +248,29 @@ int gic_get_c0_perfcount_int(void) GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR)); } +int gic_get_c0_fdc_int(void) +{ + if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) { + /* Is the FDC IRQ even present? */ + if (cp0_fdc_irq < 0) + return -1; + return MIPS_CPU_IRQ_BASE + cp0_fdc_irq; + } + + /* + * Some cores claim the FDC is routable but it doesn't actually seem to + * be connected. + */ + switch (current_cpu_type()) { + case CPU_INTERAPTIV: + case CPU_PROAPTIV: + return -1; + } + + return irq_create_mapping(gic_irq_domain, + GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC)); +} + static void gic_handle_shared_int(void) { unsigned int i, intr, virq; @@ -592,15 +636,20 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, * of the MIPS kernel code does not use the percpu IRQ API for * the CP0 timer and performance counter interrupts. */ - if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) { + switch (intr) { + case GIC_LOCAL_INT_TIMER: + case GIC_LOCAL_INT_PERFCTR: + case GIC_LOCAL_INT_FDC: + irq_set_chip_and_handler(virq, + &gic_all_vpes_local_irq_controller, + handle_percpu_irq); + break; + default: irq_set_chip_and_handler(virq, &gic_local_irq_controller, handle_percpu_devid_irq); irq_set_percpu_devid(virq); - } else { - irq_set_chip_and_handler(virq, - &gic_all_vpes_local_irq_controller, - handle_percpu_irq); + break; } spin_lock_irqsave(&gic_lock, flags); diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c index 384e6ed61d7c..cdf80b7794cd 100644 --- a/drivers/irqchip/irq-renesas-irqc.c +++ b/drivers/irqchip/irq-renesas-irqc.c @@ -17,6 +17,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include <linux/clk.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/spinlock.h> @@ -29,15 +30,26 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/platform_data/irq-renesas-irqc.h> +#include <linux/pm_runtime.h> -#define IRQC_IRQ_MAX 32 /* maximum 32 interrupts per driver instance */ +#define IRQC_IRQ_MAX 32 /* maximum 32 interrupts per driver instance */ -#define IRQC_REQ_STS 0x00 -#define IRQC_EN_STS 0x04 -#define IRQC_EN_SET 0x08 +#define IRQC_REQ_STS 0x00 /* Interrupt Request Status Register */ +#define IRQC_EN_STS 0x04 /* Interrupt Enable Status Register */ +#define IRQC_EN_SET 0x08 /* Interrupt Enable Set Register */ #define IRQC_INT_CPU_BASE(n) (0x000 + ((n) * 0x10)) -#define DETECT_STATUS 0x100 + /* SYS-CPU vs. RT-CPU */ +#define DETECT_STATUS 0x100 /* IRQn Detect Status Register */ +#define MONITOR 0x104 /* IRQn Signal Level Monitor Register */ +#define HLVL_STS 0x108 /* IRQn High Level Detect Status Register */ +#define LLVL_STS 0x10c /* IRQn Low Level Detect Status Register */ +#define S_R_EDGE_STS 0x110 /* IRQn Sync Rising Edge Detect Status Reg. */ +#define S_F_EDGE_STS 0x114 /* IRQn Sync Falling Edge Detect Status Reg. */ +#define A_R_EDGE_STS 0x118 /* IRQn Async Rising Edge Detect Status Reg. */ +#define A_F_EDGE_STS 0x11c /* IRQn Async Falling Edge Detect Status Reg. 
*/ +#define CHTEN_STS 0x120 /* Chattering Reduction Status Register */ #define IRQC_CONFIG(n) (0x180 + ((n) * 0x04)) + /* IRQn Configuration Register */ struct irqc_irq { int hw_irq; @@ -55,6 +67,7 @@ struct irqc_priv { struct platform_device *pdev; struct irq_chip irq_chip; struct irq_domain *irq_domain; + struct clk *clk; }; static void irqc_dbg(struct irqc_irq *i, char *str) @@ -94,7 +107,7 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type) struct irqc_priv *p = irq_data_get_irq_chip_data(d); int hw_irq = irqd_to_hwirq(d); unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK]; - unsigned long tmp; + u32 tmp; irqc_dbg(&p->irq[hw_irq], "sense"); @@ -108,11 +121,26 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type) return 0; } +static int irqc_irq_set_wake(struct irq_data *d, unsigned int on) +{ + struct irqc_priv *p = irq_data_get_irq_chip_data(d); + + if (!p->clk) + return 0; + + if (on) + clk_enable(p->clk); + else + clk_disable(p->clk); + + return 0; +} + static irqreturn_t irqc_irq_handler(int irq, void *dev_id) { struct irqc_irq *i = dev_id; struct irqc_priv *p = i->p; - unsigned long bit = BIT(i->hw_irq); + u32 bit = BIT(i->hw_irq); irqc_dbg(i, "demux1"); @@ -170,6 +198,15 @@ static int irqc_probe(struct platform_device *pdev) p->pdev = pdev; platform_set_drvdata(pdev, p); + p->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(p->clk)) { + dev_warn(&pdev->dev, "unable to get clock\n"); + p->clk = NULL; + } + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + /* get hold of manadatory IOMEM */ io = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!io) { @@ -210,7 +247,8 @@ static int irqc_probe(struct platform_device *pdev) irq_chip->irq_mask = irqc_irq_disable; irq_chip->irq_unmask = irqc_irq_enable; irq_chip->irq_set_type = irqc_irq_set_type; - irq_chip->flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND; + irq_chip->irq_set_wake = irqc_irq_set_wake; + irq_chip->flags = IRQCHIP_MASK_ON_SUSPEND; p->irq_domain = irq_domain_add_simple(pdev->dev.of_node, p->number_of_irqs, @@ -250,6 +288,8 @@ err3: err2: iounmap(p->iomem); err1: + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); kfree(p); err0: return ret; @@ -265,6 +305,8 @@ static int irqc_remove(struct platform_device *pdev) irq_domain_remove(p->irq_domain); iounmap(p->iomem); + pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); kfree(p); return 0; } diff --git a/drivers/irqchip/irq-st.c b/drivers/irqchip/irq-st.c new file mode 100644 index 000000000000..9af48a85c16f --- /dev/null +++ b/drivers/irqchip/irq-st.c @@ -0,0 +1,206 @@ +/* + * Copyright (C) 2014 STMicroelectronics – All Rights Reserved + * + * Author: Lee Jones <lee.jones@linaro.org> + * + * This is a re-write of Christophe Kerello's PMU driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <dt-bindings/interrupt-controller/irq-st.h> +#include <linux/err.h> +#include <linux/mfd/syscon.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#define STIH415_SYSCFG_642 0x0a8 +#define STIH416_SYSCFG_7543 0x87c +#define STIH407_SYSCFG_5102 0x198 +#define STID127_SYSCFG_734 0x088 + +#define ST_A9_IRQ_MASK 0x001FFFFF +#define ST_A9_IRQ_MAX_CHANS 2 + +#define ST_A9_IRQ_EN_CTI_0 BIT(0) +#define ST_A9_IRQ_EN_CTI_1 BIT(1) +#define ST_A9_IRQ_EN_PMU_0 BIT(2) +#define ST_A9_IRQ_EN_PMU_1 BIT(3) +#define ST_A9_IRQ_EN_PL310_L2 BIT(4) +#define ST_A9_IRQ_EN_EXT_0 BIT(5) +#define ST_A9_IRQ_EN_EXT_1 BIT(6) +#define ST_A9_IRQ_EN_EXT_2 BIT(7) + +#define ST_A9_FIQ_N_SEL(dev, chan) (dev << (8 + (chan * 3))) +#define ST_A9_IRQ_N_SEL(dev, chan) (dev << (14 + (chan * 3))) +#define ST_A9_EXTIRQ_INV_SEL(dev) (dev << 20) + +struct st_irq_syscfg { + struct regmap *regmap; + unsigned int syscfg; + unsigned int config; + bool ext_inverted; +}; + +static const struct of_device_id st_irq_syscfg_match[] = { + { + .compatible = "st,stih415-irq-syscfg", + .data = (void *)STIH415_SYSCFG_642, + }, + { + .compatible = "st,stih416-irq-syscfg", + .data = (void *)STIH416_SYSCFG_7543, + }, + { + .compatible = "st,stih407-irq-syscfg", + .data = (void *)STIH407_SYSCFG_5102, + }, + { + .compatible = "st,stid127-irq-syscfg", + .data = (void *)STID127_SYSCFG_734, + }, + {} +}; + +static int st_irq_xlate(struct platform_device *pdev, + int device, int channel, bool irq) +{ + struct st_irq_syscfg *ddata = dev_get_drvdata(&pdev->dev); + + /* Set the device enable bit. */ + switch (device) { + case ST_IRQ_SYSCFG_EXT_0: + ddata->config |= ST_A9_IRQ_EN_EXT_0; + break; + case ST_IRQ_SYSCFG_EXT_1: + ddata->config |= ST_A9_IRQ_EN_EXT_1; + break; + case ST_IRQ_SYSCFG_EXT_2: + ddata->config |= ST_A9_IRQ_EN_EXT_2; + break; + case ST_IRQ_SYSCFG_CTI_0: + ddata->config |= ST_A9_IRQ_EN_CTI_0; + break; + case ST_IRQ_SYSCFG_CTI_1: + ddata->config |= ST_A9_IRQ_EN_CTI_1; + break; + case ST_IRQ_SYSCFG_PMU_0: + ddata->config |= ST_A9_IRQ_EN_PMU_0; + break; + case ST_IRQ_SYSCFG_PMU_1: + ddata->config |= ST_A9_IRQ_EN_PMU_1; + break; + case ST_IRQ_SYSCFG_pl310_L2: + ddata->config |= ST_A9_IRQ_EN_PL310_L2; + break; + case ST_IRQ_SYSCFG_DISABLED: + return 0; + default: + dev_err(&pdev->dev, "Unrecognised device %d\n", device); + return -EINVAL; + } + + /* Select IRQ/FIQ channel for device. */ + ddata->config |= irq ? + ST_A9_IRQ_N_SEL(device, channel) : + ST_A9_FIQ_N_SEL(device, channel); + + return 0; +} + +static int st_irq_syscfg_enable(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct st_irq_syscfg *ddata = dev_get_drvdata(&pdev->dev); + int channels, ret, i; + u32 device, invert; + + channels = of_property_count_u32_elems(np, "st,irq-device"); + if (channels != ST_A9_IRQ_MAX_CHANS) { + dev_err(&pdev->dev, "st,enable-irq-device must have 2 elems\n"); + return -EINVAL; + } + + channels = of_property_count_u32_elems(np, "st,fiq-device"); + if (channels != ST_A9_IRQ_MAX_CHANS) { + dev_err(&pdev->dev, "st,enable-fiq-device must have 2 elems\n"); + return -EINVAL; + } + + for (i = 0; i < ST_A9_IRQ_MAX_CHANS; i++) { + of_property_read_u32_index(np, "st,irq-device", i, &device); + + ret = st_irq_xlate(pdev, device, i, true); + if (ret) + return ret; + + of_property_read_u32_index(np, "st,fiq-device", i, &device); + + ret = st_irq_xlate(pdev, device, i, false); + if (ret) + return ret; + } + + /* External IRQs may be inverted. 
*/ + of_property_read_u32(np, "st,invert-ext", &invert); + ddata->config |= ST_A9_EXTIRQ_INV_SEL(invert); + + return regmap_update_bits(ddata->regmap, ddata->syscfg, + ST_A9_IRQ_MASK, ddata->config); +} + +static int st_irq_syscfg_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *match; + struct st_irq_syscfg *ddata; + + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + match = of_match_device(st_irq_syscfg_match, &pdev->dev); + if (!match) + return -ENODEV; + + ddata->syscfg = (unsigned int)match->data; + + ddata->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg"); + if (IS_ERR(ddata->regmap)) { + dev_err(&pdev->dev, "syscfg phandle missing\n"); + return PTR_ERR(ddata->regmap); + } + + dev_set_drvdata(&pdev->dev, ddata); + + return st_irq_syscfg_enable(pdev); +} + +static int st_irq_syscfg_resume(struct device *dev) +{ + struct st_irq_syscfg *ddata = dev_get_drvdata(dev); + + return regmap_update_bits(ddata->regmap, ddata->syscfg, + ST_A9_IRQ_MASK, ddata->config); +} + +static SIMPLE_DEV_PM_OPS(st_irq_syscfg_pm_ops, NULL, st_irq_syscfg_resume); + +static struct platform_driver st_irq_syscfg_driver = { + .driver = { + .name = "st_irq_syscfg", + .pm = &st_irq_syscfg_pm_ops, + .of_match_table = st_irq_syscfg_match, + }, + .probe = st_irq_syscfg_probe, +}; + +static int __init st_irq_syscfg_init(void) +{ + return platform_driver_register(&st_irq_syscfg_driver); +} +core_initcall(st_irq_syscfg_init); diff --git a/drivers/irqchip/irq-tegra.c b/drivers/irqchip/irq-tegra.c new file mode 100644 index 000000000000..51c485d9a877 --- /dev/null +++ b/drivers/irqchip/irq-tegra.c @@ -0,0 +1,377 @@ +/* + * Driver code for Tegra's Legacy Interrupt Controller + * + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * Heavily based on the original arch/arm/mach-tegra/irq.c code: + * Copyright (C) 2011 Google, Inc. + * + * Author: + * Colin Cross <ccross@android.com> + * + * Copyright (C) 2010,2013, NVIDIA Corporation + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/syscore_ops.h> + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +#include "irqchip.h" + +#define ICTLR_CPU_IEP_VFIQ 0x08 +#define ICTLR_CPU_IEP_FIR 0x14 +#define ICTLR_CPU_IEP_FIR_SET 0x18 +#define ICTLR_CPU_IEP_FIR_CLR 0x1c + +#define ICTLR_CPU_IER 0x20 +#define ICTLR_CPU_IER_SET 0x24 +#define ICTLR_CPU_IER_CLR 0x28 +#define ICTLR_CPU_IEP_CLASS 0x2C + +#define ICTLR_COP_IER 0x30 +#define ICTLR_COP_IER_SET 0x34 +#define ICTLR_COP_IER_CLR 0x38 +#define ICTLR_COP_IEP_CLASS 0x3c + +#define TEGRA_MAX_NUM_ICTLRS 6 + +static unsigned int num_ictlrs; + +struct tegra_ictlr_soc { + unsigned int num_ictlrs; +}; + +static const struct tegra_ictlr_soc tegra20_ictlr_soc = { + .num_ictlrs = 4, +}; + +static const struct tegra_ictlr_soc tegra30_ictlr_soc = { + .num_ictlrs = 5, +}; + +static const struct tegra_ictlr_soc tegra210_ictlr_soc = { + .num_ictlrs = 6, +}; + +static const struct of_device_id ictlr_matches[] = { + { .compatible = "nvidia,tegra210-ictlr", .data = &tegra210_ictlr_soc }, + { .compatible = "nvidia,tegra30-ictlr", .data = &tegra30_ictlr_soc }, + { .compatible = "nvidia,tegra20-ictlr", .data = &tegra20_ictlr_soc }, + { } +}; + +struct tegra_ictlr_info { + void __iomem *base[TEGRA_MAX_NUM_ICTLRS]; +#ifdef CONFIG_PM_SLEEP + u32 cop_ier[TEGRA_MAX_NUM_ICTLRS]; + u32 cop_iep[TEGRA_MAX_NUM_ICTLRS]; + u32 cpu_ier[TEGRA_MAX_NUM_ICTLRS]; + u32 cpu_iep[TEGRA_MAX_NUM_ICTLRS]; + + u32 ictlr_wake_mask[TEGRA_MAX_NUM_ICTLRS]; +#endif +}; + +static struct tegra_ictlr_info *lic; + +static inline void tegra_ictlr_write_mask(struct irq_data *d, unsigned long reg) +{ + void __iomem *base = d->chip_data; + u32 mask; + + mask = BIT(d->hwirq % 32); + writel_relaxed(mask, base + reg); +} + +static void tegra_mask(struct irq_data *d) +{ + tegra_ictlr_write_mask(d, ICTLR_CPU_IER_CLR); + irq_chip_mask_parent(d); +} + +static void tegra_unmask(struct irq_data *d) +{ + tegra_ictlr_write_mask(d, ICTLR_CPU_IER_SET); + irq_chip_unmask_parent(d); +} + +static void tegra_eoi(struct irq_data *d) +{ + tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_CLR); + irq_chip_eoi_parent(d); +} + +static int tegra_retrigger(struct irq_data *d) +{ + tegra_ictlr_write_mask(d, ICTLR_CPU_IEP_FIR_SET); + return irq_chip_retrigger_hierarchy(d); +} + +#ifdef CONFIG_PM_SLEEP +static int tegra_set_wake(struct irq_data *d, unsigned int enable) +{ + u32 irq = d->hwirq; + u32 index, mask; + + index = (irq / 32); + mask = BIT(irq % 32); + if (enable) + lic->ictlr_wake_mask[index] |= mask; + else + lic->ictlr_wake_mask[index] &= ~mask; + + /* + * Do *not* call into the parent, as the GIC doesn't have any + * wake-up facility... 
+ */ + return 0; +} + +static int tegra_ictlr_suspend(void) +{ + unsigned long flags; + unsigned int i; + + local_irq_save(flags); + for (i = 0; i < num_ictlrs; i++) { + void __iomem *ictlr = lic->base[i]; + + /* Save interrupt state */ + lic->cpu_ier[i] = readl_relaxed(ictlr + ICTLR_CPU_IER); + lic->cpu_iep[i] = readl_relaxed(ictlr + ICTLR_CPU_IEP_CLASS); + lic->cop_ier[i] = readl_relaxed(ictlr + ICTLR_COP_IER); + lic->cop_iep[i] = readl_relaxed(ictlr + ICTLR_COP_IEP_CLASS); + + /* Disable COP interrupts */ + writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR); + + /* Disable CPU interrupts */ + writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR); + + /* Enable the wakeup sources of ictlr */ + writel_relaxed(lic->ictlr_wake_mask[i], ictlr + ICTLR_CPU_IER_SET); + } + local_irq_restore(flags); + + return 0; +} + +static void tegra_ictlr_resume(void) +{ + unsigned long flags; + unsigned int i; + + local_irq_save(flags); + for (i = 0; i < num_ictlrs; i++) { + void __iomem *ictlr = lic->base[i]; + + writel_relaxed(lic->cpu_iep[i], + ictlr + ICTLR_CPU_IEP_CLASS); + writel_relaxed(~0ul, ictlr + ICTLR_CPU_IER_CLR); + writel_relaxed(lic->cpu_ier[i], + ictlr + ICTLR_CPU_IER_SET); + writel_relaxed(lic->cop_iep[i], + ictlr + ICTLR_COP_IEP_CLASS); + writel_relaxed(~0ul, ictlr + ICTLR_COP_IER_CLR); + writel_relaxed(lic->cop_ier[i], + ictlr + ICTLR_COP_IER_SET); + } + local_irq_restore(flags); +} + +static struct syscore_ops tegra_ictlr_syscore_ops = { + .suspend = tegra_ictlr_suspend, + .resume = tegra_ictlr_resume, +}; + +static void tegra_ictlr_syscore_init(void) +{ + register_syscore_ops(&tegra_ictlr_syscore_ops); +} +#else +#define tegra_set_wake NULL +static inline void tegra_ictlr_syscore_init(void) {} +#endif + +static struct irq_chip tegra_ictlr_chip = { + .name = "LIC", + .irq_eoi = tegra_eoi, + .irq_mask = tegra_mask, + .irq_unmask = tegra_unmask, + .irq_retrigger = tegra_retrigger, + .irq_set_wake = tegra_set_wake, + .flags = IRQCHIP_MASK_ON_SUSPEND, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif +}; + +static int tegra_ictlr_domain_xlate(struct irq_domain *domain, + struct device_node *controller, + const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + if (domain->of_node != controller) + return -EINVAL; /* Shouldn't happen, really... 
*/ + if (intsize != 3) + return -EINVAL; /* Not GIC compliant */ + if (intspec[0] != GIC_SPI) + return -EINVAL; /* No PPI should point to this domain */ + + *out_hwirq = intspec[1]; + *out_type = intspec[2]; + return 0; +} + +static int tegra_ictlr_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct of_phandle_args *args = data; + struct of_phandle_args parent_args; + struct tegra_ictlr_info *info = domain->host_data; + irq_hw_number_t hwirq; + unsigned int i; + + if (args->args_count != 3) + return -EINVAL; /* Not GIC compliant */ + if (args->args[0] != GIC_SPI) + return -EINVAL; /* No PPI should point to this domain */ + + hwirq = args->args[1]; + if (hwirq >= (num_ictlrs * 32)) + return -EINVAL; + + for (i = 0; i < nr_irqs; i++) { + int ictlr = (hwirq + i) / 32; + + irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, + &tegra_ictlr_chip, + &info->base[ictlr]); + } + + parent_args = *args; + parent_args.np = domain->parent->of_node; + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &parent_args); +} + +static void tegra_ictlr_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + unsigned int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + irq_domain_reset_irq_data(d); + } +} + +static const struct irq_domain_ops tegra_ictlr_domain_ops = { + .xlate = tegra_ictlr_domain_xlate, + .alloc = tegra_ictlr_domain_alloc, + .free = tegra_ictlr_domain_free, +}; + +static int __init tegra_ictlr_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *parent_domain, *domain; + const struct of_device_id *match; + const struct tegra_ictlr_soc *soc; + unsigned int i; + int err; + + if (!parent) { + pr_err("%s: no parent, giving up\n", node->full_name); + return -ENODEV; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("%s: unable to obtain parent domain\n", node->full_name); + return -ENXIO; + } + + match = of_match_node(ictlr_matches, node); + if (!match) /* Should never happen... 
*/ + return -ENODEV; + + soc = match->data; + + lic = kzalloc(sizeof(*lic), GFP_KERNEL); + if (!lic) + return -ENOMEM; + + for (i = 0; i < TEGRA_MAX_NUM_ICTLRS; i++) { + void __iomem *base; + + base = of_iomap(node, i); + if (!base) + break; + + lic->base[i] = base; + + /* Disable all interrupts */ + writel_relaxed(~0UL, base + ICTLR_CPU_IER_CLR); + /* All interrupts target IRQ */ + writel_relaxed(0, base + ICTLR_CPU_IEP_CLASS); + + num_ictlrs++; + } + + if (!num_ictlrs) { + pr_err("%s: no valid regions, giving up\n", node->full_name); + err = -ENOMEM; + goto out_free; + } + + WARN(num_ictlrs != soc->num_ictlrs, + "%s: Found %u interrupt controllers in DT; expected %u.\n", + node->full_name, num_ictlrs, soc->num_ictlrs); + + + domain = irq_domain_add_hierarchy(parent_domain, 0, num_ictlrs * 32, + node, &tegra_ictlr_domain_ops, + lic); + if (!domain) { + pr_err("%s: failed to allocated domain\n", node->full_name); + err = -ENOMEM; + goto out_unmap; + } + + tegra_ictlr_syscore_init(); + + pr_info("%s: %d interrupts forwarded to %s\n", + node->full_name, num_ictlrs * 32, parent->full_name); + + return 0; + +out_unmap: + for (i = 0; i < num_ictlrs; i++) + iounmap(lic->base[i]); +out_free: + kfree(lic); + return err; +} + +IRQCHIP_DECLARE(tegra20_ictlr, "nvidia,tegra20-ictlr", tegra_ictlr_init); +IRQCHIP_DECLARE(tegra30_ictlr, "nvidia,tegra30-ictlr", tegra_ictlr_init); +IRQCHIP_DECLARE(tegra210_ictlr, "nvidia,tegra210-ictlr", tegra_ictlr_init); diff --git a/drivers/irqchip/irq-vf610-mscm-ir.c b/drivers/irqchip/irq-vf610-mscm-ir.c new file mode 100644 index 000000000000..9521057d4744 --- /dev/null +++ b/drivers/irqchip/irq-vf610-mscm-ir.c @@ -0,0 +1,212 @@ +/* + * Copyright (C) 2014-2015 Toradex AG + * Author: Stefan Agner <stefan@agner.ch> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * + * IRQ chip driver for MSCM interrupt router available on Vybrid SoC's. + * The interrupt router is between the CPU's interrupt controller and the + * peripheral. The router allows to route the peripheral interrupts to + * one of the two available CPU's on Vybrid VF6xx SoC's (Cortex-A5 or + * Cortex-M4). The router will be configured transparently on a IRQ + * request. + * + * o All peripheral interrupts of the Vybrid SoC can be routed to + * CPU 0, CPU 1 or both. The routing is useful for dual-core + * variants of Vybrid SoC such as VF6xx. This driver routes the + * requested interrupt to the CPU currently running on. + * + * o It is required to setup the interrupt router even on single-core + * variants of Vybrid. 
+ */ + +#include <linux/cpu_pm.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/syscon.h> +#include <dt-bindings/interrupt-controller/arm-gic.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/slab.h> +#include <linux/regmap.h> + +#include "irqchip.h" + +#define MSCM_CPxNUM 0x4 + +#define MSCM_IRSPRC(n) (0x80 + 2 * (n)) +#define MSCM_IRSPRC_CPEN_MASK 0x3 + +#define MSCM_IRSPRC_NUM 112 + +struct vf610_mscm_ir_chip_data { + void __iomem *mscm_ir_base; + u16 cpu_mask; + u16 saved_irsprc[MSCM_IRSPRC_NUM]; +}; + +static struct vf610_mscm_ir_chip_data *mscm_ir_data; + +static inline void vf610_mscm_ir_save(struct vf610_mscm_ir_chip_data *data) +{ + int i; + + for (i = 0; i < MSCM_IRSPRC_NUM; i++) + data->saved_irsprc[i] = readw_relaxed(data->mscm_ir_base + MSCM_IRSPRC(i)); +} + +static inline void vf610_mscm_ir_restore(struct vf610_mscm_ir_chip_data *data) +{ + int i; + + for (i = 0; i < MSCM_IRSPRC_NUM; i++) + writew_relaxed(data->saved_irsprc[i], data->mscm_ir_base + MSCM_IRSPRC(i)); +} + +static int vf610_mscm_ir_notifier(struct notifier_block *self, + unsigned long cmd, void *v) +{ + switch (cmd) { + case CPU_CLUSTER_PM_ENTER: + vf610_mscm_ir_save(mscm_ir_data); + break; + case CPU_CLUSTER_PM_ENTER_FAILED: + case CPU_CLUSTER_PM_EXIT: + vf610_mscm_ir_restore(mscm_ir_data); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block mscm_ir_notifier_block = { + .notifier_call = vf610_mscm_ir_notifier, +}; + +static void vf610_mscm_ir_enable(struct irq_data *data) +{ + irq_hw_number_t hwirq = data->hwirq; + struct vf610_mscm_ir_chip_data *chip_data = data->chip_data; + u16 irsprc; + + irsprc = readw_relaxed(chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq)); + irsprc &= MSCM_IRSPRC_CPEN_MASK; + + WARN_ON(irsprc & ~chip_data->cpu_mask); + + writew_relaxed(chip_data->cpu_mask, + chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq)); + + irq_chip_unmask_parent(data); +} + +static void vf610_mscm_ir_disable(struct irq_data *data) +{ + irq_hw_number_t hwirq = data->hwirq; + struct vf610_mscm_ir_chip_data *chip_data = data->chip_data; + + writew_relaxed(0x0, chip_data->mscm_ir_base + MSCM_IRSPRC(hwirq)); + + irq_chip_mask_parent(data); +} + +static struct irq_chip vf610_mscm_ir_irq_chip = { + .name = "mscm-ir", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_enable = vf610_mscm_ir_enable, + .irq_disable = vf610_mscm_ir_disable, + .irq_retrigger = irq_chip_retrigger_hierarchy, + .irq_set_affinity = irq_chip_set_affinity_parent, +}; + +static int vf610_mscm_ir_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i; + irq_hw_number_t hwirq; + struct of_phandle_args *irq_data = arg; + struct of_phandle_args gic_data; + + if (irq_data->args_count != 2) + return -EINVAL; + + hwirq = irq_data->args[0]; + for (i = 0; i < nr_irqs; i++) + irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, + &vf610_mscm_ir_irq_chip, + domain->host_data); + + gic_data.np = domain->parent->of_node; + gic_data.args_count = 3; + gic_data.args[0] = GIC_SPI; + gic_data.args[1] = irq_data->args[0]; + gic_data.args[2] = irq_data->args[1]; + return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data); +} + +static const struct irq_domain_ops mscm_irq_domain_ops = { + .xlate = irq_domain_xlate_twocell, + .alloc = vf610_mscm_ir_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int __init vf610_mscm_ir_of_init(struct 
device_node *node, + struct device_node *parent) +{ + struct irq_domain *domain, *domain_parent; + struct regmap *mscm_cp_regmap; + int ret, cpuid; + + domain_parent = irq_find_host(parent); + if (!domain_parent) { + pr_err("vf610_mscm_ir: interrupt-parent not found\n"); + return -EINVAL; + } + + mscm_ir_data = kzalloc(sizeof(*mscm_ir_data), GFP_KERNEL); + if (!mscm_ir_data) + return -ENOMEM; + + mscm_ir_data->mscm_ir_base = of_io_request_and_map(node, 0, "mscm-ir"); + + if (!mscm_ir_data->mscm_ir_base) { + pr_err("vf610_mscm_ir: unable to map mscm register\n"); + ret = -ENOMEM; + goto out_free; + } + + mscm_cp_regmap = syscon_regmap_lookup_by_phandle(node, "fsl,cpucfg"); + if (IS_ERR(mscm_cp_regmap)) { + ret = PTR_ERR(mscm_cp_regmap); + pr_err("vf610_mscm_ir: regmap lookup for cpucfg failed\n"); + goto out_unmap; + } + + regmap_read(mscm_cp_regmap, MSCM_CPxNUM, &cpuid); + mscm_ir_data->cpu_mask = 0x1 << cpuid; + + domain = irq_domain_add_hierarchy(domain_parent, 0, + MSCM_IRSPRC_NUM, node, + &mscm_irq_domain_ops, mscm_ir_data); + if (!domain) { + ret = -ENOMEM; + goto out_unmap; + } + + cpu_pm_register_notifier(&mscm_ir_notifier_block); + + return 0; + +out_unmap: + iounmap(mscm_ir_data->mscm_ir_base); +out_free: + kfree(mscm_ir_data); + return ret; +} +IRQCHIP_DECLARE(vf610_mscm_ir, "fsl,vf610-mscm-ir", vf610_mscm_ir_of_init); |
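
A minimal stand-alone sketch of the bank/bit arithmetic used by tegra_ictlr_write_mask() and tegra_set_wake() in irq-tegra.c above: each legacy interrupt controller bank covers 32 interrupts, so a hardware IRQ maps to bank hwirq / 32 and bit hwirq % 32 within that bank's IER/FIR registers. This is an illustrative example compiled outside the kernel, not part of the patch; the IRQ number is an arbitrary placeholder.

/*
 * Sketch only: mirrors the bank/bit split done by the Tegra LIC code
 * above (32 interrupts per ictlr bank).  Builds as plain user-space C.
 */
#include <stdio.h>

#define IRQS_PER_ICTLR 32u

int main(void)
{
	unsigned int hwirq = 67;                    /* example SPI number (placeholder) */
	unsigned int bank  = hwirq / IRQS_PER_ICTLR; /* which ictlr instance */
	unsigned int bit   = hwirq % IRQS_PER_ICTLR; /* bit inside IER/FIR */

	printf("SPI %u -> ictlr bank %u, mask 0x%08x\n",
	       hwirq, bank, 1u << bit);
	return 0;
}

For hwirq 67 this prints bank 2 and mask 0x00000008, which is exactly the register instance and bit that tegra_mask()/tegra_unmask() would poke via ICTLR_CPU_IER_CLR/SET.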
