Subject: [patch 4/8] genirq: Introduce IRQ_EDGE_EOI flag
powerpc/cell has a slightly modified version of the edge handler. The
main difference is that it does not ack and mask, but uses eoi
instead. handle_edge_irq is an oddball handler anyway, which should be
avoided wherever possible, so it's not worth having a slightly
different copy of it around.

Add a special flag to reuse handle_edge_irq for this odd case.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Jeremy Kerr <jk@ozlabs.org>
Cc: Arnd Bergmann <arnd@arndb.de>
---
include/linux/irq.h | 3 ++-
kernel/irq/chip.c | 11 ++++++++---
2 files changed, 10 insertions(+), 4 deletions(-)
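
For illustration only, a rough sketch of how a platform could opt into
this flow. This is not the powerpc/cell conversion from the rest of the
series; my_eoi_chip, my_chip_eoi and my_init_irq are made-up names, and
it assumes irq_set_status_flags() is available in this tree:

	/*
	 * Hypothetical irq_chip whose hardware wants an EOI rather
	 * than an ack/mask cycle on edge interrupts.
	 */
	static void my_chip_eoi(struct irq_data *d)
	{
		/* hardware specific end-of-interrupt write goes here */
	}

	static struct irq_chip my_eoi_chip = {
		.name		= "MY-EOI",
		.irq_eoi	= my_chip_eoi,
		/*
		 * No irq_ack/irq_mask needed: handle_edge_irq skips
		 * them when IRQ_EDGE_EOI is set.
		 */
	};

	static void __init my_init_irq(unsigned int virq)
	{
		/* Use the generic edge handler ... */
		set_irq_chip_and_handler(virq, &my_eoi_chip, handle_edge_irq);
		/* ... but tell it to eoi instead of ack/mask */
		irq_set_status_flags(virq, IRQ_EDGE_EOI);
	}

With the flag set, the handler never acks or masks the line; it issues
chip->irq_eoi() once on the way out, including the early-out paths,
which matches what the cell specific copy did.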

Index: linux-2.6-tip/include/linux/irq.h
===================================================================
--- linux-2.6-tip.orig/include/linux/irq.h
+++ linux-2.6-tip/include/linux/irq.h
@@ -71,10 +71,11 @@ typedef void (*irq_flow_handler_t)(unsig
#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
+#define IRQ_EDGE_EOI 0x20000000 /* IRQ abuses edge handler with EOI */

#define IRQF_MODIFY_MASK \
(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
- IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL)
+ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_EDGE_EOI)

#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
Index: linux-2.6-tip/kernel/irq/chip.c
===================================================================
--- linux-2.6-tip.orig/kernel/irq/chip.c
+++ linux-2.6-tip/kernel/irq/chip.c
@@ -633,13 +633,15 @@ handle_edge_irq(unsigned int irq, struct
if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
!desc->action)) {
desc->status |= IRQ_PENDING;
- mask_ack_irq(desc);
+ if (!(desc->status & IRQ_EDGE_EOI))
+ mask_ack_irq(desc);
goto out_unlock;
}
kstat_incr_irqs_this_cpu(irq, desc);

/* Start handling the irq */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
+ if (!(desc->status & IRQ_EDGE_EOI))
+ desc->irq_data.chip->irq_ack(&desc->irq_data);

/* Mark the IRQ currently in progress.*/
desc->status |= IRQ_INPROGRESS;
@@ -649,7 +651,8 @@ handle_edge_irq(unsigned int irq, struct
irqreturn_t action_ret;

if (unlikely(!action)) {
- mask_irq(desc);
+ if (!(desc->status & IRQ_EDGE_EOI))
+ mask_irq(desc);
goto out_unlock;
}

@@ -675,6 +678,8 @@ handle_edge_irq(unsigned int irq, struct

desc->status &= ~IRQ_INPROGRESS;
out_unlock:
+ if (desc->status & IRQ_EDGE_EOI)
+ desc->irq_data.chip->irq_eoi(&desc->irq_data);
raw_spin_unlock(&desc->lock);
}



