Diffstat (limited to 'kernel/irq/chip.c')
-rw-r--r--  kernel/irq/chip.c | 48
1 file changed, 39 insertions(+), 9 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 5a2932713d0c..f4c99c41fee2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -33,6 +33,7 @@ static irqreturn_t bad_chained_irq(int irq, void *dev_id)
*/
struct irqaction chained_action = {
.handler = bad_chained_irq,
+ .name = "chained-irq",
};
/**
@@ -327,11 +328,12 @@ void unmask_threaded_irq(struct irq_desc *desc)
* handler. The handler function is called inside the calling
* thread's context.
*/
-void handle_nested_irq(unsigned int irq)
+bool handle_nested_irq(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
irqreturn_t action_ret;
+ bool handled = false;
might_sleep();
@@ -356,8 +358,11 @@ void handle_nested_irq(unsigned int irq)
raw_spin_lock_irq(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+ handled = true;
+
out_unlock:
raw_spin_unlock_irq(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL_GPL(handle_nested_irq);
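
(Aside: with handle_nested_irq() now reporting whether an action ran, a demultiplexing irq thread can forward that result to the spurious-interrupt accounting. A minimal sketch, assuming a hypothetical I2C expander driver; struct expander, expander_read_pending() and the field names are placeholders, not a real driver API.)

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct expander {
	struct irq_domain *domain;
	unsigned int ngpio;
};

static unsigned long expander_read_pending(struct expander *chip)
{
	return 0;	/* hypothetical: read the expander's status register */
}

static irqreturn_t expander_irq_thread(int irq, void *dev_id)
{
	struct expander *chip = dev_id;
	unsigned long pending = expander_read_pending(chip);
	bool handled = false;
	unsigned int bit;

	for_each_set_bit(bit, &pending, chip->ngpio)
		if (handle_nested_irq(irq_find_mapping(chip->domain, bit)))
			handled = true;

	/* IRQ_NONE lets the core's spurious detector see unhandled parents */
	return handled ? IRQ_HANDLED : IRQ_NONE;
}
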
@@ -404,8 +409,10 @@ static bool irq_may_run(struct irq_desc *desc)
* Note: The caller is expected to handle the ack, clear, mask and
* unmask issues if necessary.
*/
-void handle_simple_irq(struct irq_desc *desc)
+bool handle_simple_irq(struct irq_desc *desc)
{
+ bool handled = false;
+
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
@@ -421,8 +428,11 @@ void handle_simple_irq(struct irq_desc *desc)
kstat_incr_irqs_this_cpu(desc);
handle_irq_event(desc);
+ handled = true;
+
out_unlock:
raw_spin_unlock(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL_GPL(handle_simple_irq);
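
(Aside: handle_simple_irq() is the flow a demux driver installs when the parent handler already does all the ack/mask work. A sketch of an irqdomain .map callback wiring this up; demux_irq_map and the ops name are illustrative, while dummy_irq_chip and irq_domain_xlate_onecell are existing kernel helpers.)

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int demux_irq_map(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	/* the parent handler acks/masks; child lines need no chip ops */
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
	return 0;
}

static const struct irq_domain_ops demux_domain_ops = {
	.map	= demux_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};
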
@@ -453,8 +463,10 @@ static void cond_unmask_irq(struct irq_desc *desc)
* it after the associated handler has acknowledged the device, so the
* interrupt line is back to inactive.
*/
-void handle_level_irq(struct irq_desc *desc)
+bool handle_level_irq(struct irq_desc *desc)
{
+ bool handled = false;
+
raw_spin_lock(&desc->lock);
mask_ack_irq(desc);
@@ -477,8 +489,11 @@ void handle_level_irq(struct irq_desc *desc)
cond_unmask_irq(desc);
+ handled = true;
+
out_unlock:
raw_spin_unlock(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL_GPL(handle_level_irq);
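
(Aside: chip drivers usually pick between the level and edge flows from their .irq_set_type callback so the handler matches the programmed trigger. A hedged sketch assuming irq_set_handler_locked(), the helper for swapping the flow handler from chip callbacks; mychip_set_type is a placeholder, and a real callback would also program the trigger into the hardware.)

#include <linux/irq.h>

static int mychip_set_type(struct irq_data *d, unsigned int type)
{
	/* keep the flow handler in sync with the requested trigger */
	if (type & IRQ_TYPE_LEVEL_MASK)
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return 0;
}
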
@@ -522,9 +537,10 @@ static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
* for modern forms of interrupt handlers, which handle the flow
* details in hardware, transparently.
*/
-void handle_fasteoi_irq(struct irq_desc *desc)
+bool handle_fasteoi_irq(struct irq_desc *desc)
{
struct irq_chip *chip = desc->irq_data.chip;
+ bool handled = false;
raw_spin_lock(&desc->lock);
@@ -552,12 +568,15 @@ void handle_fasteoi_irq(struct irq_desc *desc)
cond_unmask_eoi_irq(desc, chip);
+ handled = true;
+
raw_spin_unlock(&desc->lock);
- return;
+ return handled;
out:
if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
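
(Aside: the out: label above is the not-handled path: handled stays false, and chips that set IRQCHIP_EOI_IF_HANDLED skip the EOI for interrupts that never ran a handler. A sketch of such a chip; the names and the empty callback are placeholders.)

#include <linux/irq.h>

static void mychip_eoi(struct irq_data *d)
{
	/* placeholder: write the controller's end-of-interrupt register */
}

static struct irq_chip my_fasteoi_chip = {
	.name		= "my-eoi",
	.irq_eoi	= mychip_eoi,
	.flags		= IRQCHIP_EOI_IF_HANDLED,	/* no EOI on unhandled irqs */
};
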
@@ -576,8 +595,10 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
* the handler was running. If all pending interrupts are handled, the
* loop is left.
*/
-void handle_edge_irq(struct irq_desc *desc)
+bool handle_edge_irq(struct irq_desc *desc)
{
+ bool handled = false;
+
raw_spin_lock(&desc->lock);
desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
@@ -621,12 +642,14 @@ void handle_edge_irq(struct irq_desc *desc)
}
handle_irq_event(desc);
+ handled = true;
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
out_unlock:
raw_spin_unlock(&desc->lock);
+ return handled;
}
EXPORT_SYMBOL(handle_edge_irq);
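
(Aside: because the do/while loop above replays edges latched in IRQS_PENDING while the handler runs, a driver on an edge line only needs an ordinary request. A sketch; my_device, my_handler and the trigger choice are placeholders.)

#include <linux/interrupt.h>

struct my_device {
	int pending;	/* placeholder driver state */
};

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/*
	 * Drain the device completely: an edge that fires while we are
	 * here is replayed by the core, but data latched before that
	 * edge will not generate a new one.
	 */
	return IRQ_HANDLED;
}

static int my_request(struct my_device *dev, unsigned int irq)
{
	return request_irq(irq, my_handler, IRQF_TRIGGER_RISING,
			   "my-device", dev);
}
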
@@ -638,8 +661,9 @@ EXPORT_SYMBOL(handle_edge_irq);
* Similar to handle_edge_irq() above, but using eoi and without
* the mask/unmask logic.
*/
-void handle_edge_eoi_irq(struct irq_desc *desc)
+bool handle_edge_eoi_irq(struct irq_desc *desc)
{
+ bool handled = false;
struct irq_chip *chip = irq_desc_get_chip(desc);
raw_spin_lock(&desc->lock);
@@ -667,6 +691,7 @@ void handle_edge_eoi_irq(struct irq_desc *desc)
goto out_eoi;
handle_irq_event(desc);
+ handled = true;
} while ((desc->istate & IRQS_PENDING) &&
!irqd_irq_disabled(&desc->irq_data));
@@ -674,6 +699,7 @@ void handle_edge_eoi_irq(struct irq_desc *desc)
out_eoi:
chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
+ return handled;
}
#endif
@@ -683,7 +709,7 @@ out_eoi:
*
* Per CPU interrupts on SMP machines without locking requirements
*/
-void handle_percpu_irq(struct irq_desc *desc)
+bool handle_percpu_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -696,6 +722,8 @@ void handle_percpu_irq(struct irq_desc *desc)
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
+
+ return true;
}
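
(Aside: handle_percpu_irq() is installed by the irqchip for lines that are private to each CPU, which is why no desc->lock is taken. A sketch of a .map callback choosing it; my_chip and the hwirq split are placeholders.)

#include <linux/irq.h>
#include <linux/irqdomain.h>

#define MY_NR_PERCPU_IRQS	16	/* placeholder: first bank is per-CPU */

static struct irq_chip my_chip = {
	.name	= "my-chip",	/* placeholder chip */
};

static int my_irq_map(struct irq_domain *d, unsigned int virq,
		      irq_hw_number_t hw)
{
	if (hw < MY_NR_PERCPU_IRQS)
		irq_set_chip_and_handler(virq, &my_chip, handle_percpu_irq);
	else
		irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
	return 0;
}
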
/**
@@ -709,7 +737,7 @@ void handle_percpu_irq(struct irq_desc *desc)
* contain the real device id for the cpu on which this handler is
* called
*/
-void handle_percpu_devid_irq(struct irq_desc *desc)
+bool handle_percpu_devid_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irqaction *action = desc->action;
@@ -728,6 +756,8 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
+
+ return true;
}
void
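
(Aside: the devid variant pairs with request_percpu_irq(), where dev_id is a percpu pointer and each CPU's handler invocation sees its own instance. A usage sketch under assumed placeholder names, my_pcpu_dev and my_timer_handler; request_percpu_irq() and enable_percpu_irq() are the real core APIs.)

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

struct my_pcpu_dev {
	u64 last_event;	/* placeholder per-CPU state */
};

static DEFINE_PER_CPU(struct my_pcpu_dev, my_pcpu_dev);

static irqreturn_t my_timer_handler(int irq, void *dev_id)
{
	struct my_pcpu_dev *dev = dev_id;	/* this CPU's instance */

	dev->last_event++;
	return IRQ_HANDLED;
}

static int my_timer_init(unsigned int irq)
{
	int err;

	err = request_percpu_irq(irq, my_timer_handler, "my-timer",
				 &my_pcpu_dev);
	if (err)
		return err;

	/* every CPU must enable its own copy, e.g. from a hotplug callback */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}
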