genirq: Provide edge_eoi flow handler
author Thomas Gleixner <tglx@linutronix.de>
Mon, 28 Mar 2011 14:13:24 +0000 (16:13 +0200)
committer Thomas Gleixner <tglx@linutronix.de>
Mon, 28 Mar 2011 14:55:11 +0000 (16:55 +0200)
This is a replacement for the Cell flow handler, which is in the way of
cleanups. The handler must be explicitly selected (CONFIG_IRQ_EDGE_EOI_HANDLER)
to avoid general bloat.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
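
[Editor's usage sketch, not part of this patch: a platform interrupt
controller whose irq_chip provides ->irq_eoi() would wire an interrupt
line to the new flow handler roughly as below. The chip and callback
names are hypothetical; the sketch assumes irq_set_chip_and_handler()
as available in this kernel generation.]

	#include <linux/init.h>
	#include <linux/irq.h>

	/* Placeholder callback; a real chip would write its EOI register here. */
	static void my_pic_eoi(struct irq_data *d)
	{
	}

	static struct irq_chip my_pic_chip = {
		.name		= "MY-PIC",
		.irq_eoi	= my_pic_eoi,	/* the handler issues an eoi on every exit path */
	};

	/* Hook one interrupt line up to the edge/eoi flow handler. */
	static void __init my_pic_setup_irq(unsigned int irq)
	{
		irq_set_chip_and_handler(irq, &my_pic_chip, handle_edge_eoi_irq);
	}

[handle_edge_eoi_irq() never calls ->irq_mask()/->irq_unmask(), so for
this flow path the chip only has to supply ->irq_eoi().]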

include/linux/irq.h
kernel/irq/Kconfig
kernel/irq/chip.c

index 18aaccc..44ebca7 100644
@@ -423,6 +423,7 @@ extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action);
 extern void handle_level_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_edge_irq(unsigned int irq, struct irq_desc *desc);
+extern void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_simple_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc);
 extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc);
index 00f2c03..72606ba 100644
@@ -51,6 +51,10 @@ config HARDIRQS_SW_RESEND
 config IRQ_PREFLOW_FASTEOI
        bool
 
+# Edge style eoi based handler (cell)
+config IRQ_EDGE_EOI_HANDLER
+       bool
+
 # Support forced irq threading
 config IRQ_FORCED_THREADING
        bool
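
[Editor's note: IRQ_EDGE_EOI_HANDLER carries no prompt, so a platform
that wants the handler compiled in must select the symbol from its own
Kconfig (the Cell platform being the intended user). A minimal sketch
with a made-up platform symbol:]

	config MY_EDGE_EOI_PLATFORM
		bool "Platform with an edge/eoi style interrupt controller"
		select IRQ_EDGE_EOI_HANDLER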
index e00bdc5..451d1e8 100644
@@ -604,6 +604,51 @@ out_unlock:
        raw_spin_unlock(&desc->lock);
 }
 
+#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
+/**
+ *     handle_edge_eoi_irq - edge eoi type IRQ handler
+ *     @irq:   the interrupt number
+ *     @desc:  the interrupt description structure for this irq
+ *
+ * Similar to handle_edge_irq above, but using eoi and without the
+ * mask/unmask logic.
+ */
+void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
+{
+       struct irq_chip *chip = irq_desc_get_chip(desc);
+
+       raw_spin_lock(&desc->lock);
+
+       desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+       /*
+        * If we're currently running this IRQ, or it's disabled,
+        * we shouldn't process the IRQ. Mark it pending and bail
+        * out after issuing the final eoi.
+        */
+       if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
+                    irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
+               if (!irq_check_poll(desc)) {
+                       desc->istate |= IRQS_PENDING;
+                       goto out_eoi;
+               }
+       }
+       kstat_incr_irqs_this_cpu(irq, desc);
+
+       do {
+               if (unlikely(!desc->action))
+                       goto out_eoi;
+
+               handle_irq_event(desc);
+
+       } while ((desc->istate & IRQS_PENDING) &&
+                !irqd_irq_disabled(&desc->irq_data));
+
+out_eoi:
+       chip->irq_eoi(&desc->irq_data);
+       raw_spin_unlock(&desc->lock);
+}
+#endif
+
 /**
  *     handle_percpu_irq - Per CPU local irq handler
  *     @irq:   the interrupt number