2 * arch/ppc/kernel/irq.c
4 * Derived from arch/i386/kernel/irq.c
5 * Copyright (C) 1992 Linus Torvalds
6 * Adapted from arch/i386 by Gary Thomas
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
9 * Copyright (C) 1996 Cort Dougan
10 * Adapted for Power Macintosh by Paul Mackerras
11 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
12 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
19 * This file contains the code used by various IRQ handling routines:
20 * asking for different IRQ's should be done through these routines
21 * instead of just grabbing them. Thus setups with different IRQ numbers
22 * shouldn't result in any weird surprises, and installing new handlers
26 #include <linux/errno.h>
27 #include <linux/threads.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/signal.h>
30 #include <linux/sched.h>
31 #include <linux/ioport.h>
32 #include <linux/interrupt.h>
33 #include <linux/timex.h>
34 #include <linux/config.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/pci.h>
38 #include <linux/delay.h>
39 #include <linux/irq.h>
40 #include <linux/proc_fs.h>
41 #include <linux/random.h>
43 #include <asm/uaccess.h>
44 #include <asm/bitops.h>
45 #include <asm/system.h>
47 #include <asm/pgtable.h>
49 #include <asm/cache.h>
51 #include <asm/ptrace.h>
52 #include <asm/iSeries/LparData.h>
53 #include <asm/machdep.h>
/* Forward declarations for the generic enable/disable entry points defined
 * later in this file. */
56 void enable_irq(unsigned int irq_nr);
57 void disable_irq(unsigned int irq_nr);
/* iSeries SMP IPI receive hook, defined in the iSeries platform code. */
60 extern void iSeries_smp_message_recv( struct pt_regs * );
/* NOTE(review): presumably the special interrupt-ack address for CHRP
 * interrupt controllers; it is only declared here, set elsewhere — confirm. */
63 volatile unsigned char *chrp_int_ack_special;
64 static void register_irq_proc (unsigned int irq);
/* Per-IRQ descriptor table; each descriptor's spinlock starts unlocked. */
66 irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
68 .lock = SPIN_LOCK_UNLOCKED
/* Count of interrupts nobody claimed (reported in /proc/interrupts). */
72 int ppc_spurious_interrupts = 0;
/* Number of iSeries LP events processed by do_IRQ() below. */
73 unsigned long lpEvent_count = 0;
75 /* nasty hack for shared irq's since we need to do kmalloc calls but
76 * can't very early in the boot when we need to do a request irq.
77 * this needs to be removed.
/* Static bootstrap pool backing irq_kmalloc()/irq_kfree(): a bitmap of
 * in-use slots plus the slot storage itself. */
80 #define IRQ_KMALLOC_ENTRIES 16
81 static int cache_bitmask = 0;
82 static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
/* Set once the real allocator is usable; defined in init code elsewhere. */
83 extern int mem_init_done;
/*
 * irq_kmalloc - allocate an irqaction-sized chunk even before the slab
 * allocator is up.
 *
 * NOTE(review): interior lines are elided in this extract; presumably a
 * mem_init_done test guards the kmalloc() fast path — confirm against the
 * full source.  When kmalloc() is not yet available, hand out a free slot
 * from the static malloc_cache[] pool, marking it used in cache_bitmask.
 */
85 void *irq_kmalloc(size_t size, int pri)
89 return kmalloc(size,pri);
/* Slow path: linear scan for a free static slot. */
90 for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
91 if ( ! ( cache_bitmask & (1<<i) ) ) {
92 cache_bitmask |= (1<<i);
93 return (void *)(&malloc_cache[i]);
/*
 * irq_kfree - counterpart to irq_kmalloc(): if ptr points into the static
 * bootstrap pool, just clear its in-use bit; otherwise (elided here)
 * presumably fall through to kfree() — confirm against the full source.
 */
98 void irq_kfree(void *ptr)
101 for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
102 if ( ptr == &malloc_cache[i] ) {
103 cache_bitmask &= ~(1<<i);
/*
 * setup_irq - install an irqaction on an IRQ line.
 *
 * Seeds the entropy pool hook first (it may sleep), then, under the
 * descriptor lock, links the new action into the line's action list,
 * refusing to share unless both old and new agree via SA_SHIRQ.
 * Finally registers the /proc/irq entry for the line.
 */
110 setup_irq(unsigned int irq, struct irqaction * new)
114 struct irqaction *old, **p;
115 irq_desc_t *desc = irq_desc + irq;
118 * Some drivers like serial.c use request_irq() heavily,
119 * so we have to be careful not to interfere with a
122 if (new->flags & SA_SAMPLE_RANDOM) {
124 * This function might sleep, we want to call it first,
125 * outside of the atomic block.
126 * Yes, this might clear the entropy pool if the wrong
127 * driver is attempted to be loaded, without actually
128 * installing a new handler, but is this really a problem,
129 * only the sysadmin is able to do this.
131 rand_initialize_irq(irq);
135 * The following block of code has to be executed atomically
137 spin_lock_irqsave(&desc->lock,flags);
139 if ((old = *p) != NULL) {
140 /* Can't share interrupts unless both agree to */
141 if (!(old->flags & new->flags & SA_SHIRQ)) {
142 spin_unlock_irqrestore(&desc->lock,flags);
146 /* add new interrupt at end of irq queue */
/* Clear stale state so the line is live again after install. */
158 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
161 spin_unlock_irqrestore(&desc->lock,flags);
163 register_irq_proc(irq);
/*
 * synchronize_irq - spin until no handler instance for this IRQ is running
 * on any CPU (IRQ_INPROGRESS clears).  SMP-only; the #endif below closes a
 * CONFIG_SMP region whose opening #ifdef is elided from this extract.
 */
169 inline void synchronize_irq(unsigned int irq)
171 while (irq_desc[irq].status & IRQ_INPROGRESS)
175 #endif /* CONFIG_SMP */
177 /* XXX Make this into free_irq() - Anton */
179 /* This could be promoted to a real free_irq() ... */
/*
 * do_free_irq - unlink the action matching dev_id from the IRQ's action
 * list under the descriptor lock.  If the list becomes empty the line is
 * marked IRQ_DISABLED; after dropping the lock we wait for any in-flight
 * handler on another CPU via synchronize_irq().  Falls through to a
 * warning if no matching action was found.
 */
181 do_free_irq(int irq, void* dev_id)
184 struct irqaction **p;
187 desc = irq_desc + irq;
188 spin_lock_irqsave(&desc->lock,flags);
191 struct irqaction * action = *p;
193 struct irqaction **pp = p;
195 if (action->dev_id != dev_id)
198 /* Found it - now remove it from the list of entries */
201 desc->status |= IRQ_DISABLED;
204 spin_unlock_irqrestore(&desc->lock,flags);
206 /* Wait to make sure it's not being used on another CPU */
207 synchronize_irq(irq);
/* No action matched dev_id: the caller tried to free an unowned IRQ. */
211 printk("Trying to free free IRQ%d\n",irq);
212 spin_unlock_irqrestore(&desc->lock,flags);
/*
 * request_irq - allocate and install an interrupt handler.
 *
 * A NULL handler is (ab)used to mean "free" and is routed to
 * do_free_irq() (see elided check above the return).  Otherwise an
 * irqaction is allocated via the boot-safe irq_kmalloc(), filled in, and
 * handed to setup_irq().  Returns 0 on success or a negative errno.
 */
218 int request_irq(unsigned int irq,
219 irqreturn_t (*handler)(int, void *, struct pt_regs *),
220 unsigned long irqflags, const char * devname, void *dev_id)
222 struct irqaction *action;
228 /* We could implement really free_irq() instead of that... */
229 return do_free_irq(irq, dev_id);
231 action = (struct irqaction *)
232 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
234 printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
238 action->handler = handler;
239 action->flags = irqflags;
241 action->name = devname;
242 action->dev_id = dev_id;
245 retval = setup_irq(irq, action);
/*
 * free_irq - release an interrupt handler.  Implemented by calling
 * request_irq() with a NULL handler, which dispatches to do_free_irq().
 */
252 void free_irq(unsigned int irq, void *dev_id)
254 request_irq(irq, NULL, 0, NULL, dev_id);
258 * Generic enable/disable code: this just calls
259 * down into the PIC-specific version for the actual
260 * hardware disable after having gotten the irq
265 * disable_irq_nosync - disable an irq without waiting
266 * @irq: Interrupt to disable
268 * Disable the selected interrupt line. Disables of an interrupt
269 * stack. Unlike disable_irq(), this function does not ensure existing
270 * instances of the IRQ handler have completed before returning.
272 * This function may be called from IRQ context.
275 void disable_irq_nosync(unsigned int irq)
277 irq_desc_t *desc = irq_desc + irq;
280 spin_lock_irqsave(&desc->lock, flags);
/* Only the first disable in the nesting stack flips the status bit;
 * per-CPU IRQs are never marked globally disabled. */
281 if (!desc->depth++) {
282 if (!(desc->status & IRQ_PER_CPU))
283 desc->status |= IRQ_DISABLED;
286 spin_unlock_irqrestore(&desc->lock, flags);
290 * disable_irq - disable an irq and wait for completion
291 * @irq: Interrupt to disable
293 * Disable the selected interrupt line. Disables of an interrupt
294 * stack. That is for two disables you need two enables. This
295 * function waits for any pending IRQ handlers for this interrupt
296 * to complete before returning. If you use this function while
297 * holding a resource the IRQ handler may need you will deadlock.
299 * This function may be called - with care - from IRQ context.
/* Synchronous variant: nosync-disable, then spin until in-flight
 * handlers finish. */
302 void disable_irq(unsigned int irq)
304 disable_irq_nosync(irq);
305 synchronize_irq(irq);
309 * enable_irq - enable interrupt handling on an irq
310 * @irq: Interrupt to enable
312 * Re-enables the processing of interrupts on this IRQ line
313 * providing no disable_irq calls are now in effect.
315 * This function may be called from IRQ context.
318 void enable_irq(unsigned int irq)
320 irq_desc_t *desc = irq_desc + irq;
323 spin_lock_irqsave(&desc->lock, flags);
/* depth counts nested disables; the case arms are elided in this
 * extract.  On the final enable, clear IRQ_DISABLED and replay an IRQ
 * that arrived (IRQ_PENDING) while disabled but was not yet replayed. */
324 switch (desc->depth) {
326 unsigned int status = desc->status & ~IRQ_DISABLED;
327 desc->status = status;
328 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
329 desc->status = status | IRQ_REPLAY;
330 hw_resend_irq(desc->handler,irq);
/* depth == 0 here means enable without matching disable. */
339 printk("enable_irq(%u) unbalanced\n", irq);
341 spin_unlock_irqrestore(&desc->lock, flags);
/*
 * show_interrupts - seq_file backend for /proc/interrupts.
 *
 * Prints a CPU header row, then one line per installed IRQ: per-CPU (or
 * total, on UP) counts, the PIC type name, level/edge triggering, and the
 * names of every chained action.  Ends with the spurious-interrupt count.
 * Each descriptor is sampled under its own lock.
 */
344 int show_interrupts(struct seq_file *p, void *v)
347 struct irqaction * action;
351 for (j=0; j<NR_CPUS; j++) {
353 seq_printf(p, "CPU%d ",j);
357 for (i = 0 ; i < NR_IRQS ; i++) {
358 spin_lock_irqsave(&irq_desc[i].lock, flags);
359 action = irq_desc[i].action;
/* Skip lines with no installed handler (continues via elided code). */
360 if (!action || !action->handler)
362 seq_printf(p, "%3d: ", i);
364 for (j = 0; j < NR_CPUS; j++) {
366 seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
369 seq_printf(p, "%10u ", kstat_irqs(i));
370 #endif /* CONFIG_SMP */
371 if (irq_desc[i].handler)
372 seq_printf(p, " %s ", irq_desc[i].handler->typename );
374 seq_printf(p, " None ");
375 seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
376 seq_printf(p, " %s",action->name);
377 for (action=action->next; action; action = action->next)
378 seq_printf(p, ", %s", action->name);
381 spin_unlock_irqrestore(&irq_desc[i].lock, flags);
383 seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
/* Symbol resolver used below to print handler names on error. */
387 extern char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen);
/*
 * handle_irq_event - run every action chained on this IRQ, with the
 * descriptor lock dropped by the caller.
 *
 * Unless an action requested SA_INTERRUPT, interrupts are re-enabled
 * (elided here) before the chain runs.  Return values are OR-ed so a
 * bad mask or a fully-unhandled IRQ can be diagnosed afterwards, with a
 * rate-limited dump (static count) listing each handler's address/name.
 */
389 static inline void handle_irq_event(int irq, struct pt_regs *regs,
390 struct irqaction *action)
394 struct irqaction *first_action = action;
396 if (!(action->flags & SA_INTERRUPT))
400 status |= action->flags;
401 retval |= action->handler(irq, action->dev_id, regs);
402 action = action->next;
/* Feed the entropy pool if any action asked for it. */
404 if (status & SA_SAMPLE_RANDOM)
405 add_interrupt_randomness(irq);
408 static int count = 100;
413 printk("irq event %d: bogus retval mask %x\n",
416 printk("irq %d: nobody cared!\n", irq);
419 printk("handlers:\n");
420 action = first_action;
422 printk("[<%p>]", action->handler);
424 ppc_find_proc_name((unsigned *)action->handler, name_buf, 256));
425 action = action->next;
432 * Eventually, this should take an array of interrupts and an array size
433 * so it can dispatch multiple interrupts.
/*
 * ppc_irq_dispatch_handler - core dispatch for one decoded IRQ number.
 *
 * Bumps the per-CPU kstat counter, then under the descriptor lock decides
 * whether this CPU should run the handlers (not disabled, not already in
 * progress elsewhere).  IRQ_PENDING/IRQ_INPROGRESS implement the classic
 * genirq protocol: a second instance arriving mid-handler is flagged
 * PENDING and replayed by the owning CPU in the loop below.  On exit the
 * PIC's ->end() (or ->enable()) hook is invoked.
 */
435 void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
438 struct irqaction *action;
439 int cpu = smp_processor_id();
440 irq_desc_t *desc = irq_desc + irq;
442 kstat_cpu(cpu).irqs[irq]++;
443 spin_lock(&desc->lock);
446 REPLAY is when Linux resends an IRQ that was dropped earlier
447 WAITING is used by probe to mark irqs that are being tested
449 status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
450 if (!(status & IRQ_PER_CPU))
451 status |= IRQ_PENDING; /* we _want_ to handle it */
454 * If the IRQ is disabled for whatever reason, we cannot
455 * use the action we have.
458 if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
459 action = desc->action;
460 if (!action || !action->handler) {
461 ppc_spurious_interrupts++;
462 printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
463 /* We can't call disable_irq here, it would deadlock */
466 desc->status |= IRQ_DISABLED;
467 /* This is not a real spurious interrupt, we
468 * have to eoi it, so we jump to out
473 status &= ~IRQ_PENDING; /* we commit to handling */
474 if (!(status & IRQ_PER_CPU))
475 status |= IRQ_INPROGRESS; /* we are handling it */
477 desc->status = status;
480 * If there is no IRQ handler or it was disabled, exit early.
481 Since we set PENDING, if another processor is handling
482 a different instance of this same irq, the other processor
483 will take care of it.
485 if (unlikely(!action))
489 * Edge triggered interrupts need to remember
491 * This applies to any hw interrupts that allow a second
492 * instance of the same irq to arrive while we are in do_IRQ
493 * or in the handler. But the code here only handles the _second_
494 * instance of the irq, not the third or fourth. So it is mostly
495 * useful for irq hardware that does not mask cleanly in an
/* Run the handler chain unlocked; retake the lock to re-check PENDING. */
499 spin_unlock(&desc->lock);
500 handle_irq_event(irq, regs, action);
501 spin_lock(&desc->lock);
503 if (likely(!(desc->status & IRQ_PENDING)))
505 desc->status &= ~IRQ_PENDING;
508 desc->status &= ~IRQ_INPROGRESS;
510 * The ->end() handler has to deal with interrupts which got
511 * disabled while the handler was running.
513 if (irq_desc[irq].handler) {
514 if (irq_desc[irq].handler->end)
515 irq_desc[irq].handler->end(irq);
516 else if (irq_desc[irq].handler->enable)
517 irq_desc[irq].handler->enable(irq);
519 spin_unlock(&desc->lock);
/*
 * do_IRQ - top-level external-interrupt entry point called from assembly.
 *
 * On iSeries: first drain pending SMP IPI messages, then process the LP
 * event queue.  On all platforms: repeatedly ask the platform PIC
 * (ppc_md.get_irq) for a pending IRQ number and dispatch it; -1 means no
 * more pending, -2 flags buggy hardware whose IRQ was already handled.
 * On iSeries a latched decrementer bit is converted into a fake timer
 * interrupt before returning.  Returns 1 so ret_from_int runs its checks.
 */
522 int do_IRQ(struct pt_regs *regs)
525 #ifdef CONFIG_PPC_ISERIES
526 struct paca_struct *lpaca;
527 struct ItLpQueue *lpq;
532 #ifdef CONFIG_PPC_ISERIES
535 if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
536 lpaca->xLpPaca.xIntDword.xFields.xIpiCnt = 0;
537 iSeries_smp_message_recv(regs);
539 #endif /* CONFIG_SMP */
540 lpq = lpaca->lpQueuePtr;
541 if (lpq && ItLpQueue_isLpIntPending(lpq))
542 lpEvent_count += ItLpQueue_process(lpq, regs);
545 * Every arch is required to implement ppc_md.get_irq.
546 * This function will either return an irq number or -1 to
547 * indicate there are no more pending. But the first time
548 * through the loop this means there wasn't an IRQ pending.
549 * The value -2 is for buggy hardware and means that this IRQ
550 * has already been handled. -- Tom
552 while ((irq = ppc_md.get_irq(regs)) >= 0) {
553 ppc_irq_dispatch_handler(regs, irq);
/* No IRQ found on first poll and not the "already handled" marker:
 * count it as spurious. */
556 if (irq != -2 && first)
557 /* That's not SMP safe ... but who cares ? */
558 ppc_spurious_interrupts++;
563 #ifdef CONFIG_PPC_ISERIES
564 if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
565 lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
566 /* Signal a fake decrementer interrupt */
567 timer_interrupt(regs);
571 return 1; /* lets ret_from_int know we can do checks */
/* IRQ autoprobe entry points.  Bodies are elided in this extract;
 * NOTE(review): on this platform these are presumably no-op stubs
 * (autoprobing unsupported) — confirm against the full source. */
574 unsigned long probe_irq_on (void)
579 int probe_irq_off (unsigned long irqs)
584 unsigned int probe_irq_mask(unsigned long irqs)
/* Architecture IRQ init, called at boot; body elided in this extract. */
589 void __init init_IRQ(void)
/* /proc/irq directory handles: the root dir, one subdir per IRQ, and the
 * smp_affinity entry within each subdir. */
601 static struct proc_dir_entry * root_irq_dir;
602 static struct proc_dir_entry * irq_dir [NR_IRQS];
603 static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];
/* Default affinity mask per IRQ: all CPUs when CONFIG_IRQ_ALL_CPUS,
 * otherwise empty (filled in later). */
605 #ifdef CONFIG_IRQ_ALL_CPUS
606 unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = -1UL};
607 #else /* CONFIG_IRQ_ALL_CPUS */
608 unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x0};
609 #endif /* CONFIG_IRQ_ALL_CPUS */
/* 64-bit mask printed/parsed as 16 hex digits. */
611 #define HEX_DIGITS 16
/*
 * irq_affinity_read_proc - /proc/irq/N/smp_affinity read handler.
 * Requires room for all HEX_DIGITS plus the newline (error return for the
 * short-buffer case is elided here); data carries the IRQ number.
 */
613 static int irq_affinity_read_proc (char *page, char **start, off_t off,
614 int count, int *eof, void *data)
616 if (count < HEX_DIGITS+1)
618 return sprintf(page, "%16lx\n", irq_affinity[(long)data]);
/*
 * parse_hex_value - copy up to HEX_DIGITS chars from userspace and parse
 * them as a hex number into *ret.  Any non-hex character terminates the
 * parse (see comment below); returns 0 on success, negative errno paths
 * are elided in this extract.
 */
621 static unsigned int parse_hex_value (const char *buffer,
622 unsigned long count, unsigned long *ret)
624 unsigned char hexnum [HEX_DIGITS];
630 if (count > HEX_DIGITS)
632 if (copy_from_user(hexnum, buffer, count))
636 * Parse the first 16 characters as a hex string, any non-hex char
637 * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
641 for (i = 0; i < count; i++) {
642 unsigned int c = hexnum[i];
645 case '0' ... '9': c -= '0'; break;
646 case 'a' ... 'f': c -= 'a'-10; break;
647 case 'A' ... 'F': c -= 'A'-10; break;
/* Accumulate one nibble per accepted character. */
651 value = (value << 4) | c;
/*
 * irq_affinity_write_proc - /proc/irq/N/smp_affinity write handler.
 * Parses a hex CPU mask, rejects masks with no online CPU, then records
 * it and pushes it to the PIC via ->set_affinity.
 */
658 static int irq_affinity_write_proc (struct file *file, const char *buffer,
659 unsigned long count, void *data)
661 int irq = (long)data, full_count = count, err;
662 unsigned long new_value;
/* PICs that can't route interrupts have no set_affinity hook. */
664 if (!irq_desc[irq].handler->set_affinity)
667 err = parse_hex_value(buffer, count, &new_value);
670 * Do not allow disabling IRQs completely - it's a too easy
671 * way to make the system unusable accidentally :-) At least
672 * one online CPU still has to be targeted.
674 if (!(new_value & cpu_online_map))
677 irq_affinity[irq] = new_value;
678 irq_desc[irq].handler->set_affinity(irq, new_value);
/*
 * prof_cpu_mask_read_proc - /proc/irq/prof_cpu_mask read handler; prints
 * the mask pointed to by data.  NOTE(review): prints only 8 hex digits
 * while the affinity reader prints 16 — likely inconsistent on 64-bit,
 * but left untouched here.
 */
683 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
684 int count, int *eof, void *data)
686 unsigned long *mask = (unsigned long *) data;
687 if (count < HEX_DIGITS+1)
689 return sprintf (page, "%08lx\n", *mask);
/*
 * prof_cpu_mask_write_proc - /proc/irq/prof_cpu_mask write handler.
 * Parses the new mask; on iSeries additionally toggles per-CPU profiling
 * (paca[i].prof_enabled) according to each CPU's bit.  NOTE(review): the
 * loop presumably shifts new_value per iteration in elided code — the
 * visible `new_value & 1` alone would test only CPU 0's bit; confirm.
 */
692 static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
693 unsigned long count, void *data)
695 unsigned long *mask = (unsigned long *) data, full_count = count, err;
696 unsigned long new_value;
698 err = parse_hex_value(buffer, count, &new_value);
704 #ifdef CONFIG_PPC_ISERIES
707 for (i=0; i<NR_CPUS; ++i) {
708 if ( paca[i].prof_buffer && (new_value & 1) )
709 paca[i].prof_enabled = 1;
711 paca[i].prof_enabled = 0;
/* Enough for "NNNN" IRQ-number directory names plus the terminator. */
720 #define MAX_NAMELEN 10
/*
 * register_irq_proc - create /proc/irq/<irq>/ and its smp_affinity file.
 * Idempotent: bails out if the root dir is absent, the IRQ has no PIC
 * handler, or the directory already exists.
 */
722 static void register_irq_proc (unsigned int irq)
724 struct proc_dir_entry *entry;
725 char name [MAX_NAMELEN];
727 if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
730 memset(name, 0, MAX_NAMELEN);
731 sprintf(name, "%d", irq);
733 /* create /proc/irq/1234 */
734 irq_dir[irq] = proc_mkdir(name, root_irq_dir);
736 /* create /proc/irq/1234/smp_affinity */
737 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
/* Wire the read/write handlers, passing the IRQ number as entry data. */
740 entry->data = (void *)(long)irq;
741 entry->read_proc = irq_affinity_read_proc;
742 entry->write_proc = irq_affinity_write_proc;
744 smp_affinity_entry[irq] = entry;
/* Profiling CPU mask exposed via /proc/irq/prof_cpu_mask; all CPUs on. */
747 unsigned long prof_cpu_mask = -1;
/*
 * init_irq_proc - build the /proc/irq tree at boot: the root directory,
 * the prof_cpu_mask entry, and one subdirectory per already-registered
 * IRQ (lines with a NULL handler are skipped via elided continue).
 */
749 void init_irq_proc (void)
751 struct proc_dir_entry *entry;
754 /* create /proc/irq */
755 root_irq_dir = proc_mkdir("irq", 0);
757 /* create /proc/irq/prof_cpu_mask */
758 entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
761 entry->data = (void *)&prof_cpu_mask;
762 entry->read_proc = prof_cpu_mask_read_proc;
763 entry->write_proc = prof_cpu_mask_write_proc;
766 * Create entries for all existing IRQs.
768 for (i = 0; i < NR_IRQS; i++) {
769 if (irq_desc[i].handler == NULL)
771 register_irq_proc(i);
775 irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)