/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>
/*
 * Controller mappings for all interrupt sources:
 */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.handler = &no_irq_type,
		.lock = SPIN_LOCK_UNLOCKED
	}
};
static void register_irq_proc (unsigned int irq);

/*
 * Special irq handlers.
 */
irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
{ return IRQ_NONE; }

/*
 * Generic no controller code
 */
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
	/*
	 * 'what should we do if we get a hw irq event on an illegal vector?'
	 * Each architecture has to answer this itself; it doesn't deserve
	 * a generic callback, I think.
	 */
	printk("unexpected interrupt %d\n", irq);
}
/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none	disable_none
#define end_none	enable_none
struct hw_interrupt_type no_irq_type = {
	"none",
	startup_none,
	shutdown_none,
	enable_none,
	disable_none,
	ack_none,
	end_none
};

atomic_t irq_err_count;
/*
 * Generic, controller-independent functions:
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i, j;
	struct irqaction * action;
	unsigned long flags;

	for (j=0; j<NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "CPU%d ",j);
	seq_putc(p, '\n');

	for (i = 0 ; i < NR_IRQS ; i++) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
		seq_printf(p, " %14s", irq_desc[i].handler->typename);
		seq_printf(p, " %s", action->name);

		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}
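
/*
 * For illustration only: on a hypothetical two-CPU system the seq_file
 * code above produces /proc/interrupts lines shaped roughly like the
 * following (the IRQ numbers, counters and device names are made up):
 *
 *        CPU0       CPU1
 *   3:  153254      98211      some-pic  serial
 *   7:    1203          0      some-pic  eth0
 * ERR:       0
 */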
inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}
/*
 * This should really return information about whether
 * we should do bottom half handling etc. Right now we
 * end up _always_ checking the bottom half, which is a
 * waste of time and is not what some drivers would
 * prefer.
 */
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
	int status = 1;	/* Force the "do bottom halves" bit */
	int retval = 0;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();

	return retval;
}
static void __report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared!\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");
	action = desc->action;
	do {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	} while (action);
}
static void report_bad_irq(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}
static int noirqdebug;

static int __init noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk("IRQ lockup detection disabled\n");
	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
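
/*
 * Usage note: booting with "noirqdebug" on the kernel command line runs the
 * setup routine above and so turns off the stuck-IRQ detection performed by
 * note_interrupt() below.
 */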
/*
 * If 99,900 of the previous 100,000 interrupts have not been handled then
 * assume that the IRQ is stuck in some manner. Drop a diagnostic and try to
 * turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly-functioning
 *  device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */
static void note_interrupt(int irq, irq_desc_t *desc, irqreturn_t action_ret)
{
	if (action_ret != IRQ_HANDLED) {
		desc->irqs_unhandled++;
		if (action_ret != IRQ_NONE)
			report_bad_irq(irq, desc, action_ret);
	}

	desc->irq_count++;
	if (desc->irq_count < 100000)
		return;

	desc->irq_count = 0;
	if (desc->irqs_unhandled > 99900) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	desc->irqs_unhandled = 0;
}
/*
 * Generic enable/disable code: this just calls
 * down into the PIC-specific version for the actual
 * hardware disable after having gotten the irq
 * controller lock.
 */

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables nest, so the line stays
 *	masked until a matching number of enables. Unlike disable_irq(), this
 *	function does not ensure existing instances of the IRQ handler have
 *	completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void inline disable_irq_nosync(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->handler->disable(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line. Disables nest: for two disables
 *	you need two enables. This function waits for any pending IRQ
 *	handlers for this interrupt to complete before returning. If you use
 *	this function while holding a resource the IRQ handler may need you
 *	will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	disable_irq_nosync(irq);
	synchronize_irq(irq);
}
/**
 *	enable_irq - enable interrupt handling on an irq
 *	@irq: Interrupt to enable
 *
 *	Re-enables the processing of interrupts on this IRQ line
 *	provided no disable_irq() calls are still in effect.
 *
 *	This function may be called from IRQ context.
 */
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	switch (desc->depth) {
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		desc->handler->enable(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced from %p\n", irq,
		       __builtin_return_address(0));
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
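
/*
 * Illustrative sketch (not compiled): because disables nest, every
 * disable_irq()/disable_irq_nosync() must be balanced by exactly one
 * enable_irq().  The IRQ number and the reprogramming step are hypothetical.
 */
#if 0
static void example_quiesce_and_reprogram(unsigned int irq)
{
	/* First (and only) disable: masks the line, bumps desc->depth to 1
	 * and waits for handlers already running on other CPUs to finish. */
	disable_irq(irq);

	/* ... reprogram the hardware; no handler can run concurrently ... */

	/* Matching enable: depth drops back to 0 and the line is unmasked. */
	enable_irq(irq);
}
#endif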
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
asmlinkage unsigned int do_IRQ(int irq, struct pt_regs *regs)
{
	/*
	 * We ack quickly, we don't want the irq controller
	 * thinking we're snobs just because some other CPU has
	 * disabled global interrupts (we have already done the
	 * INT_ACK cycles, it's too late to try to pretend to the
	 * controller that we aren't taking the interrupt).
	 *
	 * 0 return value means that this irq is already being
	 * handled by some other CPU. (or is disabled)
	 */
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;
	struct irqaction * action;
	unsigned int status;

	irq_enter();
	kstat_cpu(cpu).irqs[irq]++;
	spin_lock(&desc->lock);
	desc->handler->ack(irq);
	/*
	   REPLAY is when Linux resends an IRQ that was dropped earlier
	   WAITING is used by probe to mark irqs that are being tested
	   */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	status |= IRQ_PENDING; /* we _want_ to handle it */
	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		status &= ~IRQ_PENDING; /* we commit to handling */
		status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;
	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	 * Since we set PENDING, if another processor is handling
	 * a different instance of this same irq, the other processor
	 * will take care of it.
	 */
	if (unlikely(!action))
		goto out;
	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		irqreturn_t action_ret;

		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, regs, action);
		spin_lock(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret);
		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
out:
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	desc->handler->end(irq);
	spin_unlock(&desc->lock);
	irq_exit();

	return 1;
}
/**
 *	request_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	SA_SHIRQ		Interrupt is shared
 *
 *	SA_INTERRUPT		Disable local interrupts while processing
 *
 *	SA_SAMPLE_RANDOM	The interrupt can be used for entropy
 */
int request_irq(unsigned int irq,
		irqreturn_t (*handler)(int, void *, struct pt_regs *),
		unsigned long irqflags,
		const char * devname,
		void *dev_id)
{
	int retval;
	struct irqaction * action;

	/*
	 * Sanity-check: shared interrupts should REALLY pass in
	 * a real dev-ID, otherwise we'll have trouble later trying
	 * to figure out which interrupt is which (messes up the
	 * interrupt freeing logic etc).
	 */
	if (irqflags & SA_SHIRQ) {
		if (!dev_id)
			printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
	}

	action = (struct irqaction *)
			kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags;
	action->mask = 0;
	action->name = devname;
	action->next = NULL;
	action->dev_id = dev_id;

	retval = setup_irq(irq, action);
	if (retval)
		kfree(action);
	return retval;
}
/**
 *	free_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;

		if (action) {
			struct irqaction **pp = p;

			p = &action->next;
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				desc->handler->shutdown(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			kfree(action);
			return;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		return;
	}
}
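
/*
 * Illustrative sketch (not compiled): typical request_irq()/free_irq()
 * usage for a shared interrupt line.  The device structure, the register
 * helpers and the IRQ number are hypothetical; only the calls into this
 * file are real.
 */
#if 0
struct example_dev {
	void __iomem *regs;
	unsigned int irq;
};

static irqreturn_t example_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct example_dev *dev = dev_id;

	if (!example_irq_pending(dev))	/* hypothetical status check */
		return IRQ_NONE;	/* not ours - important on shared lines */

	example_ack_irq(dev);		/* clear the source on the board */
	return IRQ_HANDLED;
}

static int example_attach(struct example_dev *dev)
{
	/* dev doubles as the dev_id cookie so free_irq() can find this handler */
	return request_irq(dev->irq, example_interrupt,
			   SA_SHIRQ | SA_SAMPLE_RANDOM, "example", dev);
}

static void example_detach(struct example_dev *dev)
{
	free_irq(dev->irq, dev);	/* waits for running handlers to finish */
}
#endif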
/*
 * IRQ autodetection code..
 *
 * This depends on the fact that any interrupt that
 * comes in on to an unassigned handler will get stuck
 * with "IRQ_WAITING" cleared and the interrupt
 * disabled.
 */

static DECLARE_MUTEX(probe_sem);
/**
 *	probe_irq_on	- begin an interrupt autodetect
 *
 *	Commence probing for an interrupt. The interrupts are scanned
 *	and a mask of potential interrupt lines is returned.
 */
unsigned long probe_irq_on(void)
{
	unsigned int i;
	irq_desc_t *desc;
	unsigned long val;
	unsigned long delay;

	down(&probe_sem);
	/*
	 * something may have generated an irq long ago and we want to
	 * flush such a longstanding irq before considering it as spurious.
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!irq_desc[i].action)
			irq_desc[i].handler->startup(i);
		spin_unlock_irq(&desc->lock);
	}

	/* Wait for longstanding interrupts to trigger. */
	for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
		/* about 20ms delay */ barrier();

	/*
	 * enable any unassigned irqs
	 * (we must startup again here because if a longstanding irq
	 * happened in the previous stage, it may have masked itself)
	 */
	for (i = NR_IRQS-1; i > 0; i--) {
		desc = irq_desc + i;

		spin_lock_irq(&desc->lock);
		if (!desc->action) {
			desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
			if (desc->handler->startup(i))
				desc->status |= IRQ_PENDING;
		}
		spin_unlock_irq(&desc->lock);
	}

	/*
	 * Wait for spurious interrupts to trigger
	 */
	for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
		/* about 100ms delay */ barrier();

	/*
	 * Now filter out any obviously spurious interrupts
	 */
	val = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			/* It triggered already - consider it spurious. */
			if (!(status & IRQ_WAITING)) {
				desc->status = status & ~IRQ_AUTODETECT;
				desc->handler->shutdown(i);
			} else
				if (i < 32)
					val |= 1 << i;
		}
		spin_unlock_irq(&desc->lock);
	}

	return val;
}
/*
 * Return a mask of triggered interrupts (this
 * can handle only legacy ISA interrupts).
 */

/**
 *	probe_irq_mask - scan a bitmap of interrupt lines
 *	@val: mask of interrupts to consider
 *
 *	Scan the ISA bus interrupt lines and return a bitmap of
 *	active interrupts. The interrupt probe logic state is then
 *	returned to its previous value.
 *
 *	Note: we need to scan all the irq's even though we will
 *	only return ISA irq numbers - just so that we reset them
 *	all to a known state.
 */
unsigned int probe_irq_mask(unsigned long val)
{
	int i;
	unsigned int mask = 0;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (i < 16 && !(status & IRQ_WAITING))
				mask |= 1 << i;

			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	return mask & val;
}
/*
 * Return the one interrupt that triggered (this can
 * handle any interrupt source).
 */

/**
 *	probe_irq_off	- end an interrupt autodetect
 *	@val: mask of potential interrupts (unused)
 *
 *	Scans the unused interrupt lines and returns the line which
 *	appears to have triggered the interrupt. If no interrupt was
 *	found then zero is returned. If more than one interrupt is
 *	found then minus the first candidate is returned to indicate
 *	there is doubt.
 *
 *	The interrupt probe logic state is returned to its previous
 *	value.
 *
 *	BUGS: When used in a module (which arguably shouldn't happen)
 *	nothing prevents two IRQ probe callers from overlapping. The
 *	results of this are non-optimal.
 */
int probe_irq_off(unsigned long val)
{
	int i, irq_found, nr_irqs;

	nr_irqs = 0;
	irq_found = 0;
	for (i = 0; i < NR_IRQS; i++) {
		irq_desc_t *desc = irq_desc + i;
		unsigned int status;

		spin_lock_irq(&desc->lock);
		status = desc->status;

		if (status & IRQ_AUTODETECT) {
			if (!(status & IRQ_WAITING)) {
				if (!nr_irqs)
					irq_found = i;
				nr_irqs++;
			}
			desc->status = status & ~IRQ_AUTODETECT;
			desc->handler->shutdown(i);
		}
		spin_unlock_irq(&desc->lock);
	}
	up(&probe_sem);

	if (nr_irqs > 1)
		irq_found = -irq_found;
	return irq_found;
}
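
/*
 * Illustrative sketch (not compiled): the classic probe sequence using the
 * autodetect helpers above.  The board poke helper is hypothetical.
 */
#if 0
static int example_find_board_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unassigned lines */
	example_board_trigger_irq();	/* hypothetical: make the card raise its IRQ */
	mdelay(20);			/* give the interrupt time to arrive */
	irq = probe_irq_off(mask);	/* >0: the line, 0: none, <0: ambiguous */

	return (irq > 0) ? irq : -1;
}
#endif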
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}
	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}
		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}
	*p = new;
	if (!shared) {
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
		desc->handler->startup(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	register_irq_proc(irq);
	return 0;
}
void __init init_generic_irq(void)
{
	int i;

	for (i = 0; i < NR_IRQS; i++) {
		irq_desc[i].status  = IRQ_DISABLED;
		irq_desc[i].action  = NULL;
		irq_desc[i].depth   = 1;
		irq_desc[i].handler = &no_irq_type;
	}
}
EXPORT_SYMBOL(disable_irq_nosync);
EXPORT_SYMBOL(disable_irq);
EXPORT_SYMBOL(enable_irq);
EXPORT_SYMBOL(probe_irq_mask);

static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
#define HEX_DIGITS 8

static unsigned int parse_hex_value (const char *buffer,
		unsigned long count, unsigned long *ret)
{
	unsigned char hexnum [HEX_DIGITS];
	unsigned long value;
	int i;

	if (count > HEX_DIGITS)
		count = HEX_DIGITS;
	if (copy_from_user(hexnum, buffer, count))
		return -EFAULT;

	/*
	 * Parse the first 8 characters as a hex string, any non-hex char
	 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
	 */
	value = 0;
	for (i = 0; i < count; i++) {
		unsigned int c = hexnum[i];

		switch (c) {
			case '0' ... '9': c -= '0'; break;
			case 'a' ... 'f': c -= 'a'-10; break;
			case 'A' ... 'F': c -= 'A'-10; break;
		default:
			goto out;
		}
		value = (value << 4) | c;
	}
out:
	*ret = value;
	return 0;
}
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

static unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = ~0UL };
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", irq_affinity[(long)data]);
}
static int irq_affinity_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	int irq = (long) data, full_count = count, err;
	unsigned long new_value;

	if (!irq_desc[irq].handler->set_affinity)
		return -EIO;

	err = parse_hex_value(buffer, count, &new_value);

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!(new_value & cpu_online_map))
		return -EINVAL;

	irq_affinity[irq] = new_value;
	irq_desc[irq].handler->set_affinity(irq, new_value);

	return full_count;
}
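
/*
 * Usage note (illustrative): once register_irq_proc() below has created the
 * /proc entries, the affinity mask is driven from userspace with a hex CPU
 * mask, e.g. "echo 2 > /proc/irq/10/smp_affinity" would restrict a
 * hypothetical IRQ 10 to CPU1; a mask with no online CPUs is rejected above.
 */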
static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	unsigned long *mask = (unsigned long *) data;

	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf (page, "%08lx\n", *mask);
}
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}
#define MAX_NAMELEN 10

static void register_irq_proc (unsigned int irq)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
			irq_dir[irq])
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	irq_dir[irq] = proc_mkdir(name, root_irq_dir);
	{
		struct proc_dir_entry *entry;

		/* create /proc/irq/1234/smp_affinity */
		entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);

		if (entry) {
			entry->nlink = 1;
			entry->data = (void *)(long)irq;
			entry->read_proc = irq_affinity_read_proc;
			entry->write_proc = irq_affinity_write_proc;
		}

		smp_affinity_entry[irq] = entry;
	}
}
unsigned long prof_cpu_mask = -1;

void init_irq_proc (void)
{
	struct proc_dir_entry *entry;
	int i;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", 0);

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);

	if (!entry)
		return;

	entry->nlink = 1;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;

	/*
	 * Create entries for all existing IRQs.
	 */
	for (i = 0; i < NR_IRQS; i++)
		register_irq_proc(i);
}