Diffstat (limited to 'kernel/irq')

 -rw-r--r--  kernel/irq/chip.c        |  53
 -rw-r--r--  kernel/irq/cpuhotplug.c  |  38
 -rw-r--r--  kernel/irq/handle.c      |   3
 -rw-r--r--  kernel/irq/internals.h   |   2
 -rw-r--r--  kernel/irq/irqdesc.c     |  16
 -rw-r--r--  kernel/irq/manage.c      |  21
 -rw-r--r--  kernel/irq/msi.c         |   2
 -rw-r--r--  kernel/irq/pm.c          |   2
 -rw-r--r--  kernel/irq/proc.c        |   7
 -rw-r--r--  kernel/irq/settings.h    |  12

10 files changed, 134 insertions, 22 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 67aafc2b249c..f4c99c41fee2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -33,6 +33,7 @@ static irqreturn_t bad_chained_irq(int irq, void *dev_id)
*/
struct irqaction chained_action = {
.handler = bad_chained_irq,
+ .name = "chained-irq",
};
/**
@@ -327,11 +328,12 @@ void unmask_threaded_irq(struct irq_desc *desc)
* handler. The handler function is called inside the calling
* thread's context.
*/
-void handle_nested_irq(unsigned int irq)
+bool handle_nested_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
irqreturn_t action_ret;
+ bool handled = false;
might_sleep();
@@ -356,8 +358,11 @@ void handle_nested_irq(unsigned int irq)
raw_spin_lock_irq(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+ handled = true;
+
out_unlock:
raw_spin_unlock_irq(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
@@ -404,8 +409,10 @@ static bool irq_may_run(struct irq_desc *desc)
* Note: The caller is expected to handle the ack, clear, mask and
* unmask issues if necessary.
*/
-void handle_simple_irq(struct irq_desc *desc)
+bool handle_simple_irq(struct irq_desc *desc)
{
+ bool handled = false;
+
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
@@ -421,8 +428,11 @@ void handle_simple_irq(struct irq_desc *desc)
kstat_incr_irqs_this_cpu(desc);
handle_irq_event(desc);
+ handled = true;
+
out_unlock:
raw_spin_unlock(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
@@ -453,8 +463,10 @@ static void cond_unmask_irq(struct irq_desc *desc)
* it after the associated handler has acknowledged the device, so the
* interrupt line is back to inactive.
*/
-void handle_level_irq(struct irq_desc *desc)
+bool handle_level_irq(struct irq_desc *desc)
{
+ bool handled = false;
+
raw_spin_lock(&desc->lock);
mask_ack_irq(desc);
@@ -477,8 +489,11 @@ void handle_level_irq(struct irq_desc *desc)
cond_unmask_irq(desc);
+ handled = true;
+
out_unlock:
raw_spin_unlock(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL_GPL(handle_level_irq);
@@ -522,9 +537,10 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
* for modern forms of interrupt handlers, which handle the flow
* details in hardware, transparently.
*/
-void handle_fasteoi_irq(struct irq_desc *desc)
+bool handle_fasteoi_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
+ bool handled = false;
raw_spin_lock(&desc->lock);
@@ -552,12 +568,15 @@ void handle_fasteoi_irq(struct irq_desc *desc)
cond_unmask_eoi_irq(desc, chip);
+ handled = true;
+
raw_spin_unlock(&desc->lock);
- return;
+ return handled;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
@@ -576,8 +595,10 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
* the handler was running. If all pending interrupts are handled, the
* loop is left.
*/
-void handle_edge_irq(struct irq_desc *desc)
+bool handle_edge_irq(struct irq_desc *desc)
{
+ bool handled = false;
+
raw_spin_lock(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -621,12 +642,14 @@ void handle_edge_irq(struct irq_desc *desc)
}
handle_irq_event(desc);
+ handled = true;
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
out_unlock:
raw_spin_unlock(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL(handle_edge_irq);
@@ -638,8 +661,9 @@ EXPORT_SYMBOL(handle_edge_irq);
* Similar to handle_edge_irq() above, but using eoi and without the
* mask/unmask logic.
*/
-void handle_edge_eoi_irq(struct irq_desc *desc)
+bool handle_edge_eoi_irq(struct irq_desc *desc)
{
+ bool handled = false;
struct irq_chip *chip = irq_desc_get_chip(desc);
raw_spin_lock(&desc->lock);
@@ -667,6 +691,7 @@ void handle_edge_eoi_irq(struct irq_desc *desc)
goto out_eoi;
handle_irq_event(desc);
+ handled = true;
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
@@ -674,6 +699,7 @@ void handle_edge_eoi_irq(struct irq_desc *desc)
out_eoi:
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
+ return handled;
}
#endif
@@ -683,7 +709,7 @@ out_eoi:
*
* Per CPU interrupts on SMP machines without locking requirements
*/
-void handle_percpu_irq(struct irq_desc *desc)
+bool handle_percpu_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -696,6 +722,8 @@ void handle_percpu_irq(struct irq_desc *desc)
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
+
+ return true;
}
/**
@@ -709,7 +737,7 @@ void handle_percpu_irq(struct irq_desc *desc)
* contain the real device id for the cpu on which this handler is
* called
*/
-void handle_percpu_devid_irq(struct irq_desc *desc)
+bool handle_percpu_devid_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irqaction *action = desc->action;
@@ -728,6 +756,8 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
+
+ return true;
}
void
@@ -836,7 +866,8 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
irq_settings_clr_and_set(desc, clr, set);
irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
- IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
+ IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT |
+ IRQD_AFFINITY_MANAGED);
if (irq_settings_has_no_balance_set(desc))
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
if (irq_settings_is_per_cpu(desc))
@@ -845,6 +876,8 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
if (irq_settings_is_level(desc))
irqd_set(&desc->irq_data, IRQD_LEVEL);
+ if (irq_settings_has_affinity_managed_set(desc))
+ irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
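Every chip.c hunk above follows the same pattern: the flow handler gains a local handled flag, sets it only once handle_irq_event() (or the per-CPU action) has actually run, and returns it from every exit path. A minimal standalone C model of that pattern; the fake_* names are illustrative stand-ins, not kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's irq_desc state; not kernel API. */
struct fake_desc {
	bool disabled;		/* models irqd_irq_disabled() */
	bool in_progress;	/* models IRQD_IRQ_INPROGRESS */
};

static void fake_handle_irq_event(struct fake_desc *desc)
{
	(void)desc;
	printf("device handler ran\n");
}

/*
 * Models the converted flow handlers: every early-exit path falls
 * through to the single unlock site with handled still false; only
 * the path that actually ran the event handler returns true.
 */
static bool fake_handle_simple_irq(struct fake_desc *desc)
{
	bool handled = false;

	if (desc->disabled || desc->in_progress)
		goto out_unlock;

	fake_handle_irq_event(desc);
	handled = true;

out_unlock:
	return handled;
}

int main(void)
{
	struct fake_desc live = { .disabled = false, .in_progress = false };
	struct fake_desc dead = { .disabled = true,  .in_progress = false };

	printf("live irq handled: %d\n", fake_handle_simple_irq(&live));
	printf("dead irq handled: %d\n", fake_handle_simple_irq(&dead));
	return 0;
}

Because the early exits never set the flag, a caller such as the patched generic_handle_irq() can distinguish "the device handler ran" from "the irq was masked, in progress, or otherwise skipped".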
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 011f8c4c63da..4684b7595e63 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
+#include <linux/cpumask.h>
#include "internals.h"
@@ -20,6 +21,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
const struct cpumask *affinity = d->common->affinity;
struct irq_chip *c;
bool ret = false;
+ struct cpumask available_cpus;
/*
* If this is a per-CPU interrupt, or the affinity does not
@@ -29,8 +31,37 @@ static bool migrate_one_irq(struct irq_desc *desc)
!cpumask_test_cpu(smp_processor_id(), affinity))
return false;
+ cpumask_copy(&available_cpus, affinity);
+ cpumask_andnot(&available_cpus, &available_cpus, cpu_isolated_mask);
+ affinity = &available_cpus;
+
if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
- affinity = cpu_online_mask;
+ /*
+ * The order of preference for selecting a fallback CPU is
+ *
+ * (1) online and un-isolated CPU from default affinity
+ * (2) online and un-isolated CPU
+ * (3) online CPU
+ */
+ cpumask_andnot(&available_cpus, cpu_online_mask,
+ cpu_isolated_mask);
+ if (cpumask_intersects(&available_cpus, irq_default_affinity))
+ cpumask_and(&available_cpus, &available_cpus,
+ irq_default_affinity);
+ else if (cpumask_empty(&available_cpus))
+ affinity = cpu_online_mask;
+
+ /*
+ * We are overriding the affinity with all online and
+ * un-isolated cpus. The irq_set_affinity_locked() call
+ * below notifies the PM QoS affinity listener of this
+ * mask, which results in applying the CPU_DMA_LATENCY
+ * QoS to all the CPUs specified in the mask. But the
+ * low-level irqchip driver sets the affinity of an irq
+ * to only one CPU. So pick only one CPU from the
+ * prepared mask while overriding the user affinity.
+ */
+ affinity = cpumask_of(cpumask_any(affinity));
ret = true;
}
@@ -38,7 +69,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
if (!c->irq_set_affinity) {
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
} else {
- int r = irq_do_set_affinity(d, affinity, false);
+ int r = irq_set_affinity_locked(d, affinity, false);
if (r)
pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",
d->irq, r);
@@ -69,6 +100,9 @@ void irq_migrate_all_off_this_cpu(void)
bool affinity_broken;
desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
+
raw_spin_lock(&desc->lock);
affinity_broken = migrate_one_irq(desc);
raw_spin_unlock(&desc->lock);
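The fallback logic in migrate_one_irq() prefers (1) an online, un-isolated CPU from irq_default_affinity, then (2) any online, un-isolated CPU, then (3) any online CPU, and finally narrows the winner to a single CPU because the low-level irqchip programs exactly one target. A toy userspace model with unsigned long standing in for struct cpumask (all names illustrative):

#include <stdio.h>

/*
 * Userspace model of the fallback selection in migrate_one_irq().
 * Each unsigned long is a toy cpumask: bit n set means CPU n is in
 * the set. None of these names are kernel API.
 */
static unsigned long pick_fallback_mask(unsigned long online,
					unsigned long isolated,
					unsigned long def_affinity)
{
	/* steps (1) and (2): online CPUs that are not isolated */
	unsigned long available = online & ~isolated;

	if (available & def_affinity)	/* (1) prefer the default affinity */
		available &= def_affinity;
	else if (!available)		/* (3) last resort: any online CPU */
		available = online;

	/* the irqchip programs one target, so pick a single CPU */
	return available & -available;	/* lowest set bit */
}

int main(void)
{
	/* CPUs 0-3 online, CPUs 2-3 isolated, default affinity on CPU 1 */
	printf("fallback: %#lx\n", pick_fallback_mask(0xful, 0xcul, 0x2ul));
	/* every online CPU isolated: fall back to any online CPU */
	printf("fallback: %#lx\n", pick_fallback_mask(0x3ul, 0x3ul, 0x1ul));
	return 0;
}

The available & -available trick isolates the lowest set bit, playing the role cpumask_any() plays in the patch.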
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 57bff7857e87..80e76ddbf804 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -26,13 +26,14 @@
*
* Handles spurious and unhandled IRQs. It also prints a debug message.
*/
-void handle_bad_irq(struct irq_desc *desc)
+bool handle_bad_irq(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
print_irq_desc(irq, desc);
kstat_incr_irqs_this_cpu(desc);
ack_bad_irq(irq);
+ return true;
}
EXPORT_SYMBOL_GPL(handle_bad_irq);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index fcab63c66905..56afc0be6289 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -105,6 +105,8 @@ static inline void unregister_handler_proc(unsigned int irq,
struct irqaction *action) { }
#endif
+extern bool irq_can_set_affinity_usr(unsigned int irq);
+
extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
extern void irq_set_thread_affinity(struct irq_desc *desc);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 239e2ae2c947..52fbf88cd2d8 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -15,6 +15,7 @@
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
+#include <linux/wakeup_reason.h>
#include "internals.h"
@@ -339,16 +340,25 @@ void irq_init_desc(unsigned int irq)
/**
* generic_handle_irq - Invoke the handler for a particular irq
* @irq: The irq number to handle
- *
+ * returns:
+ * negative on error
+ * 0 when the interrupt handler was not called
+ * 1 when the interrupt handler was called
*/
+
int generic_handle_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
if (!desc)
return -EINVAL;
- generic_handle_irq_desc(desc);
- return 0;
+
+ if (unlikely(logging_wakeup_reasons_nosync()))
+ return log_possible_wakeup_reason(irq,
+ desc,
+ generic_handle_irq_desc);
+
+ return generic_handle_irq_desc(desc);
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
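With this hunk generic_handle_irq() becomes tri-state: negative for an unknown irq, 0 when no handler was invoked, 1 when one was (the value propagated from the bool flow handlers). A sketch of how a caller might consume that contract, using a userspace stand-in rather than the kernel symbol:

#include <errno.h>
#include <stdio.h>

/*
 * Userspace stand-in for the patched generic_handle_irq():
 * negative on error, 0 when the handler was not called, 1 when it was.
 */
static int fake_generic_handle_irq(unsigned int irq)
{
	if (irq >= 32)
		return -EINVAL;	/* models the !desc case */
	if (irq & 1)
		return 0;	/* model only: treat odd irqs as disabled */
	return 1;		/* handler ran */
}

int main(void)
{
	unsigned int irqs[] = { 4, 5, 100 };

	for (int i = 0; i < 3; i++) {
		int ret = fake_generic_handle_irq(irqs[i]);

		if (ret < 0)
			printf("irq %u: error %d\n", irqs[i], ret);
		else
			printf("irq %u: handler %s called\n", irqs[i],
			       ret ? "was" : "was not");
	}
	return 0;
}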
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f5bb63cbb6b4..668141c23d8f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -115,12 +115,12 @@ EXPORT_SYMBOL(synchronize_irq);
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
-static int __irq_can_set_affinity(struct irq_desc *desc)
+static bool __irq_can_set_affinity(struct irq_desc *desc)
{
if (!desc || !irqd_can_balance(&desc->irq_data) ||
!desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
- return 0;
- return 1;
+ return false;
+ return true;
}
/**
@@ -134,6 +134,21 @@ int irq_can_set_affinity(unsigned int irq)
}
/**
+ * irq_can_set_affinity_usr - Check if affinity of an irq can be set from user space
+ * @irq: Interrupt to check
+ *
+ * Like irq_can_set_affinity() above, but additionally checks for the
+ * AFFINITY_MANAGED flag.
+ */
+bool irq_can_set_affinity_usr(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ return __irq_can_set_affinity(desc) &&
+ !irqd_affinity_is_managed(&desc->irq_data);
+}
+
+/**
* irq_set_thread_affinity - Notify irq threads to adjust affinity
* @desc: irq descriptor which has affinity changed
*
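Note that irq_can_set_affinity_usr() is NULL-safe only through && short-circuiting: __irq_can_set_affinity() rejects a NULL descriptor first, so irqd_affinity_is_managed() never dereferences desc for a bad irq number. A self-contained model of that guard (struct and function names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative descriptor; not the kernel's struct irq_desc. */
struct fake_desc {
	bool can_balance;	/* models the irqd_can_balance() checks */
	bool affinity_managed;	/* models IRQD_AFFINITY_MANAGED */
};

static bool can_set_affinity(const struct fake_desc *desc)
{
	/* NULL check first, as in __irq_can_set_affinity() */
	return desc && desc->can_balance;
}

static bool can_set_affinity_usr(const struct fake_desc *desc)
{
	/*
	 * Short-circuit: when can_set_affinity() fails (including the
	 * NULL case), desc is never dereferenced on the right side.
	 */
	return can_set_affinity(desc) && !desc->affinity_managed;
}

int main(void)
{
	struct fake_desc managed = { .can_balance = true, .affinity_managed = true };
	struct fake_desc user    = { .can_balance = true, .affinity_managed = false };

	printf("NULL desc:   %d\n", can_set_affinity_usr(NULL));
	printf("managed irq: %d\n", can_set_affinity_usr(&managed));
	printf("user irq:    %d\n", can_set_affinity_usr(&user));
	return 0;
}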
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index cd6009006510..41b40f310c28 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -268,7 +268,7 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
struct msi_domain_ops *ops = info->ops;
msi_alloc_info_t arg;
struct msi_desc *desc;
- int i, ret, virq;
+ int i, ret, virq = 0;
ret = ops->msi_check(domain, info, dev);
if (ret == 0)
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index cea1de0161f1..28e134310435 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -11,7 +11,7 @@
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
-
+#include <linux/wakeup_reason.h>
#include "internals.h"
bool irq_pm_check_wakeup(struct irq_desc *desc)
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a2c02fd5d6d0..b05509af0352 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -96,7 +96,7 @@ static ssize_t write_irq_affinity(int type, struct file *file,
cpumask_var_t new_value;
int err;
- if (!irq_can_set_affinity(irq) || no_irq_affinity)
+ if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
return -EIO;
if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
@@ -114,6 +114,11 @@ static ssize_t write_irq_affinity(int type, struct file *file,
goto free_cpumask;
}
+ if (cpumask_subset(new_value, cpu_isolated_mask)) {
+ err = -EINVAL;
+ goto free_cpumask;
+ }
+
/*
* Do not allow disabling IRQs completely - it's a too easy
* way to make the system unusable accidentally :-) At least
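The added check rejects a user-supplied mask only when it is entirely contained in cpu_isolated_mask; a mask mixing isolated and un-isolated CPUs still passes. A small userspace model of the cpumask_subset() test over toy bitmasks (names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Toy cpumasks: bit n set means CPU n is in the set. */
static bool mask_subset(unsigned long sub, unsigned long super)
{
	return (sub & ~super) == 0;	/* models cpumask_subset() */
}

/* Models the write_irq_affinity() check: refuse all-isolated masks. */
static int validate_user_mask(unsigned long new_value, unsigned long isolated)
{
	if (mask_subset(new_value, isolated))
		return -1;	/* stands in for -EINVAL */
	return 0;
}

int main(void)
{
	unsigned long isolated = 0xc;	/* CPUs 2 and 3 are isolated */

	printf("mask 0xc: %d (only isolated CPUs, rejected)\n",
	       validate_user_mask(0xc, isolated));
	printf("mask 0x6: %d (CPU 1 is un-isolated, accepted)\n",
	       validate_user_mask(0x6, isolated));
	return 0;
}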
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 320579d89091..f0964f058521 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -17,6 +17,7 @@ enum {
_IRQ_IS_POLLED = IRQ_IS_POLLED,
_IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
+ _IRQ_AFFINITY_MANAGED = IRQ_AFFINITY_MANAGED,
};
#define IRQ_PER_CPU GOT_YOU_MORON
@@ -32,6 +33,7 @@ enum {
#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
+#define IRQ_AFFINITY_MANAGED GOT_YOU_MORON
static inline void
irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
@@ -65,6 +67,16 @@ static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
return desc->status_use_accessors & _IRQ_NO_BALANCING;
}
+static inline void irq_settings_set_affinity_managed(struct irq_desc *desc)
+{
+ desc->status_use_accessors |= _IRQ_AFFINITY_MANAGED;
+}
+
+static inline bool irq_settings_has_affinity_managed_set(struct irq_desc *desc)
+{
+ return desc->status_use_accessors & _IRQ_AFFINITY_MANAGED;
+}
+
static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
{
return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
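The settings.h changes follow the file's poisoning convention: the raw IRQ_AFFINITY_MANAGED flag is copied into the internal _IRQ_AFFINITY_MANAGED enum once, then the public name is redefined to the undeclared identifier GOT_YOU_MORON, so any core code that bypasses the irq_settings_* accessors fails to compile. The same trick in a standalone C file (the MY_FLAG name is illustrative):

#include <stdio.h>

#define MY_FLAG 0x1		/* public flag, legal to use only here */

enum {
	_MY_FLAG = MY_FLAG,	/* internal copy taken before poisoning */
};

/* Poison the public name: any later use of MY_FLAG fails to compile. */
#undef MY_FLAG
#define MY_FLAG GOT_YOU_MORON

static int has_my_flag(unsigned int status)
{
	return !!(status & _MY_FLAG);	/* accessors use the internal copy */
}

int main(void)
{
	/* return status & MY_FLAG;  <- would error: GOT_YOU_MORON undeclared */
	printf("flag set: %d\n", has_my_flag(0x3));
	return 0;
}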