Diffstat (limited to 'drivers/irqchip/irq-gic-v3.c'):
 drivers/irqchip/irq-gic-v3.c | 234 ++++++++++++++++++++++++++++++++++++++---
 1 file changed, 224 insertions(+), 10 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 5a1490b046ac..9e96d81bc5cd 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -24,9 +24,12 @@
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/syscore_ops.h>
+#include <linux/irqchip/msm-mpm-irq.h>
#include <asm/cputype.h>
#include <asm/exception.h>
@@ -48,6 +51,14 @@ struct gic_chip_data {
u64 redist_stride;
u32 nr_redist_regions;
unsigned int irq_nr;
+#ifdef CONFIG_PM
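+ /* one bit per interrupt: wakeup sources (set via irq_set_wake) and the enable state saved across suspend */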
+ unsigned int wakeup_irqs[32];
+ unsigned int enabled_irqs[32];
+#endif
+#ifdef CONFIG_ARM_GIC_PANIC_HANDLER
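+ /* snapshots of the distributor registers and the GICD_IROUTER array, filled in by the panic notifier */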
+ u32 saved_dist_regs[0x400];
+ u32 saved_router_regs[0x800];
+#endif
};
static struct gic_chip_data gic_data __read_mostly;
@@ -85,7 +96,7 @@ static void gic_do_wait_for_rwp(void __iomem *base)
{
u32 count = 1000000; /* 1s! */
- while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+ while (readl_relaxed_no_log(base + GICD_CTLR) & GICD_CTLR_RWP) {
count--;
if (!count) {
pr_err_ratelimited("RWP timeout, gone fishing\n");
@@ -120,6 +131,7 @@ static u64 __maybe_unused gic_read_iar(void)
}
#endif
+#ifdef CONFIG_ARM_GIC_V3_NO_ACCESS_CONTROL
static void gic_enable_redist(bool enable)
{
void __iomem *rbase;
@@ -153,6 +165,9 @@ static void gic_enable_redist(bool enable)
pr_err_ratelimited("redistributor failed to %s...\n",
enable ? "wakeup" : "sleep");
}
+#else
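+/* Stub used when GIC access control is in effect and GICR_WAKER is not driven from the kernel. */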
+static void gic_enable_redist(bool enable) { }
+#endif
/*
* Routines to disable, enable, EOI and route interrupts
@@ -167,7 +182,7 @@ static int gic_peek_irq(struct irq_data *d, u32 offset)
else
base = gic_data.dist_base;
- return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+ return !!(readl_relaxed_no_log(base + offset + (gic_irq(d) / 32) * 4) & mask);
}
static void gic_poke_irq(struct irq_data *d, u32 offset)
@@ -184,12 +199,15 @@ static void gic_poke_irq(struct irq_data *d, u32 offset)
rwp_wait = gic_dist_wait_for_rwp;
}
- writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
+ writel_relaxed_no_log(mask, base + offset + (gic_irq(d) / 32) * 4);
rwp_wait();
}
static void gic_mask_irq(struct irq_data *d)
{
+ if (gic_arch_extn.irq_mask)
+ gic_arch_extn.irq_mask(d);
+
gic_poke_irq(d, GICD_ICENABLER);
}
@@ -210,6 +228,8 @@ static void gic_eoimode1_mask_irq(struct irq_data *d)
static void gic_unmask_irq(struct irq_data *d)
{
+ if (gic_arch_extn.irq_unmask)
+ gic_arch_extn.irq_unmask(d);
gic_poke_irq(d, GICD_ISENABLER);
}
@@ -267,9 +287,20 @@ static int gic_irq_get_irqchip_state(struct irq_data *d,
return 0;
}
+static void gic_disable_irq(struct irq_data *d)
+{
+ /* don't lazy-disable PPIs */
+ if (gic_irq(d) < 32)
+ gic_mask_irq(d);
+ if (gic_arch_extn.irq_disable)
+ gic_arch_extn.irq_disable(d);
+}
static void gic_eoi_irq(struct irq_data *d)
{
+ if (gic_arch_extn.irq_eoi)
+ gic_arch_extn.irq_eoi(d);
+
gic_write_eoir(gic_irq(d));
}
@@ -307,6 +338,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
rwp_wait = gic_dist_wait_for_rwp;
}
+ if (gic_arch_extn.irq_set_type)
+ gic_arch_extn.irq_set_type(d, type);
+
return gic_configure_irq(irq, type, base, rwp_wait);
}
@@ -319,6 +353,134 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
return 0;
}
+static int gic_retrigger(struct irq_data *d)
+{
+ if (gic_arch_extn.irq_retrigger)
+ return gic_arch_extn.irq_retrigger(d);
+
+ /* the genirq layer expects 0 if we can't retrigger in hardware */
+ return 0;
+}
+
+static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+{
+ return data->dist_base;
+}
+
+#ifdef CONFIG_ARM_GIC_PANIC_HANDLER
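+/* Panic notifier: snapshot the distributor register space and the GICD_IROUTER array into gic_data for post-mortem inspection. */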
+static int gic_panic_handler(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ int i;
+ void __iomem *base;
+
+ base = gic_data.dist_base;
+ for (i = 0; i < 0x400; i += 1)
+ gic_data.saved_dist_regs[i] = readl_relaxed(base + 4 * i);
+
+ base = gic_data.dist_base + GICD_IROUTER;
+ for (i = 0; i < 0x800; i += 1)
+ gic_data.saved_router_regs[i] = readl_relaxed(base + 4 * i);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block gic_panic_blk = {
+ .notifier_call = gic_panic_handler,
+};
+#endif
+
+#ifdef CONFIG_PM
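+/* Syscore suspend: save the current enable bits, mask everything, then enable only the interrupts marked as wakeup sources. */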
+static int gic_suspend_one(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ for (i = 0; i * 32 < gic->irq_nr; i++) {
+ gic->enabled_irqs[i]
+ = readl_relaxed(base + GICD_ISENABLER + i * 4);
+ /* disable all of them */
+ writel_relaxed(0xffffffff, base + GICD_ICENABLER + i * 4);
+ /* enable the wakeup set */
+ writel_relaxed(gic->wakeup_irqs[i],
+ base + GICD_ISENABLER + i * 4);
+ }
+ return 0;
+}
+
+static int gic_suspend(void)
+{
+ gic_suspend_one(&gic_data);
+ return 0;
+}
+
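+/* Log which enabled interrupts are pending at resume; gated by msm_show_resume_irq_mask. */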
+static void gic_show_resume_irq(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ u32 enabled;
+ u32 pending[32];
+ void __iomem *base = gic_data_dist_base(gic);
+
+ if (!msm_show_resume_irq_mask)
+ return;
+
+ for (i = 0; i * 32 < gic->irq_nr; i++) {
+ enabled = readl_relaxed(base + GICD_ICENABLER + i * 4);
+ pending[i] = readl_relaxed(base + GICD_ISPENDR + i * 4);
+ pending[i] &= enabled;
+ }
+
+ for (i = find_first_bit((unsigned long *)pending, gic->irq_nr);
+ i < gic->irq_nr;
+ i = find_next_bit((unsigned long *)pending, gic->irq_nr, i+1)) {
+ unsigned int irq = irq_find_mapping(gic->domain, i);
+ struct irq_desc *desc = irq_to_desc(irq);
+ const char *name = "null";
+
+ if (desc == NULL)
+ name = "stray irq";
+ else if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ pr_warn("%s: %d triggered %s\n", __func__, irq, name);
+ }
+}
+
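+/* Syscore resume: log any pending wakeup interrupts, then restore the enable state saved at suspend. */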
+static void gic_resume_one(struct gic_chip_data *gic)
+{
+ unsigned int i;
+ void __iomem *base = gic_data_dist_base(gic);
+
+ gic_show_resume_irq(gic);
+
+ for (i = 0; i * 32 < gic->irq_nr; i++) {
+ /* disable all of them */
+ writel_relaxed(0xffffffff, base + GICD_ICENABLER + i * 4);
+ /* enable the enabled set */
+ writel_relaxed(gic->enabled_irqs[i],
+ base + GICD_ISENABLER + i * 4);
+ }
+}
+
+static void gic_resume(void)
+{
+ gic_resume_one(&gic_data);
+}
+
+static struct syscore_ops gic_syscore_ops = {
+ .suspend = gic_suspend,
+ .resume = gic_resume,
+};
+
+static int __init gic_init_sys(void)
+{
+ register_syscore_ops(&gic_syscore_ops);
+ return 0;
+}
+arch_initcall(gic_init_sys);
+
+#endif
+
static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
u64 aff;
@@ -340,7 +502,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
int err;
-
+ uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
if (static_key_true(&supports_deactivate))
gic_write_eoir(irqnr);
@@ -357,6 +519,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
continue;
}
if (irqnr < 16) {
+ uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
gic_write_eoir(irqnr);
if (static_key_true(&supports_deactivate))
gic_write_dir(irqnr);
@@ -444,9 +607,6 @@ static int gic_populate_rdist(void)
u64 offset = ptr - gic_data.redist_regions[i].redist_base;
gic_data_rdist_rd_base() = ptr;
gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset;
- pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
- smp_processor_id(), mpidr, i,
- &gic_data_rdist()->phys_base);
return 0;
}
@@ -516,7 +676,8 @@ static void gic_cpu_init(void)
gic_cpu_config(rbase, gic_redist_wait_for_rwp);
/* Give LPIs a spin */
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+ !IS_ENABLED(CONFIG_ARM_GIC_V3_ACL))
its_cpu_init();
/* initialise system registers */
@@ -649,6 +810,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
gic_write_irouter(val, reg);
/*
+ * It is possible that the irq is disabled from the SW perspective only,
+ * because the kernel takes a lazy-disable approach. Therefore check the
+ * irq descriptor to decide whether it should be kept disabled.
+ */
+ if (irqd_irq_disabled(d))
+ enabled = 0;
+
+ /*
* If the interrupt was enabled, enable it again. Otherwise,
* just wait for the distributor to have digested our changes.
*/
@@ -664,10 +833,44 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
#define gic_smp_init() do { } while(0)
#endif
+#ifdef CONFIG_PM
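+/* Track @d in the wakeup bitmap consulted at suspend time and forward the request to the platform extension (MPM). */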
+int gic_set_wake(struct irq_data *d, unsigned int on)
+{
+ int ret = -ENXIO;
+ unsigned int reg_offset, bit_offset;
+ unsigned int gicirq = gic_irq(d);
+ struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
+
+ /* per-cpu interrupts cannot be wakeup interrupts */
+ WARN_ON(gicirq < 32);
+
+ reg_offset = gicirq / 32;
+ bit_offset = gicirq % 32;
+
+ if (on)
+ gic_data->wakeup_irqs[reg_offset] |= 1 << bit_offset;
+ else
+ gic_data->wakeup_irqs[reg_offset] &= ~(1 << bit_offset);
+
+ if (gic_arch_extn.irq_set_wake)
+ ret = gic_arch_extn.irq_set_wake(d, on);
+ else
+ pr_err("mpm: set wake is null\n");
+
+ return ret;
+}
+
+#else
+#define gic_set_wake NULL
+#endif
+
#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd, void *v)
{
+ if (from_suspend)
+ return NOTIFY_OK;
+
if (cmd == CPU_PM_EXIT) {
gic_enable_redist(true);
gic_cpu_sys_reg_init();
@@ -691,13 +894,16 @@ static void gic_cpu_pm_init(void)
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */
-static struct irq_chip gic_chip = {
+struct irq_chip gic_chip = {
.name = "GICv3",
.irq_mask = gic_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
.irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
.irq_set_affinity = gic_set_affinity,
+ .irq_disable = gic_disable_irq,
+ .irq_set_wake = gic_set_wake,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.flags = IRQCHIP_SET_TYPE_MASKED,
@@ -714,6 +920,7 @@ static struct irq_chip gic_eoimode1_chip = {
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
.flags = IRQCHIP_SET_TYPE_MASKED,
+ .irq_set_wake = gic_set_wake,
};
#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
@@ -923,13 +1130,20 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
set_handle_irq(gic_handle_irq);
- if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+ !IS_ENABLED(CONFIG_ARM_GIC_V3_ACL))
its_init(node, &gic_data.rdists, gic_data.domain);
+ gic_chip.flags |= gic_arch_extn.flags;
gic_smp_init();
gic_dist_init();
gic_cpu_init();
gic_cpu_pm_init();
+ of_mpm_init();
+
+#ifdef CONFIG_ARM_GIC_PANIC_HANDLER
+ atomic_notifier_chain_register(&panic_notifier_list, &gic_panic_blk);
+#endif
return 0;