| author | Runmin Wang <runminw@codeaurora.org> | 2017-01-23 12:30:07 -0800 |
|---|---|---|
| committer | Runmin Wang <runminw@codeaurora.org> | 2017-01-23 16:01:01 -0800 |
| commit | 778031ccb58be6dccfb46314965bc8ff52c55f3d | |
| tree | ca2b29ff6ddf75cf6e1b6db8ff114535f1a52be0 /include/linux/irq.h | |
| parent | 1cc869442a204358ddb29d4c58cc4fd84847fe85 | |
genirq: Add IRQ_AFFINITY_MANAGED flag
Add the IRQ_AFFINITY_MANAGED flag and related kernel APIs so that
a kernel driver can mark an irq's status such that user-space
affinity changes are ignored, while kernel-space affinity settings
still take effect.
Change-Id: Ib2d5ea651263bff4317562af69079ad950c9e71e
Signed-off-by: Runmin Wang <runminw@codeaurora.org>
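
The commit message does not show how a driver would apply the flag. Below is a minimal sketch of one possible way, assuming the flag is set through the existing irq_set_status_flags() path (which this patch enables by adding the flag to IRQF_MODIFY_MASK); the handler, function name, and mask are illustrative only and not part of this patch:

```c
/*
 * Hypothetical driver usage of IRQ_AFFINITY_MANAGED (not part of this
 * patch).  The IRQ is requested normally, flagged so that user-space
 * affinity writes are ignored, and then pinned from kernel space.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request_managed_irq(unsigned int irq,
				       const struct cpumask *mask)
{
	int ret;

	ret = request_irq(irq, example_handler, 0, "example", NULL);
	if (ret)
		return ret;

	/* Ignore future user-space affinity changes for this IRQ. */
	irq_set_status_flags(irq, IRQ_AFFINITY_MANAGED);

	/* Kernel-space affinity setting still takes effect. */
	return irq_set_affinity(irq, mask);
}
```

Because the flag is included in IRQF_MODIFY_MASK by this patch, it can also be cleared again with irq_clear_status_flags(irq, IRQ_AFFINITY_MANAGED) once the driver no longer needs the restriction.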
Diffstat (limited to 'include/linux/irq.h')
| -rw-r--r-- | include/linux/irq.h | 4 |
1 file changed, 3 insertions(+), 1 deletion(-)
```diff
diff --git a/include/linux/irq.h b/include/linux/irq.h
index de7f8d306feb..b0bcc1561d3d 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -73,6 +73,7 @@ enum irqchip_irq_state;
  *				  it from the spurious interrupt detection
  *				  mechanism and from core side polling.
  * IRQ_DISABLE_UNLAZY		- Disable lazy irq disable
+ * IRQ_AFFINITY_MANAGED	- Affinity is auto-managed by the kernel
  */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@@ -99,13 +100,14 @@ enum {
 	IRQ_PER_CPU_DEVID	= (1 << 17),
 	IRQ_IS_POLLED		= (1 << 18),
 	IRQ_DISABLE_UNLAZY	= (1 << 19),
+	IRQ_AFFINITY_MANAGED	= (1 << 21),
 };
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
 	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY)
+	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_AFFINITY_MANAGED)
 
 #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
```
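
The diff shown here only defines the flag and makes it modifiable via IRQF_MODIFY_MASK; the behavior described in the commit message (ignoring user-space affinity changes) implies a check somewhere in the affinity-write path that is not part of this file. A hedged sketch of what such a consumer could look like, with the helper name, call site, and error code all assumed for illustration:

```c
/*
 * Hypothetical core-side consumer of IRQ_AFFINITY_MANAGED (NOT part of
 * this patch): a helper in the style of kernel/irq/settings.h plus a
 * check that a /proc/irq/<n>/smp_affinity write handler could perform.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

static inline bool irq_settings_affinity_is_managed(struct irq_desc *desc)
{
	return desc->status_use_accessors & IRQ_AFFINITY_MANAGED;
}

static int example_user_affinity_write(unsigned int irq,
				       const struct cpumask *new_mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	/* Reject user-space affinity changes for managed IRQs. */
	if (!desc || irq_settings_affinity_is_managed(desc))
		return -EPERM;

	/* Kernel-initiated affinity changes go through unaffected. */
	return irq_set_affinity(irq, new_mask);
}
```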
