7f5b927882fe6bbd9c3f66b361eddc57e378ea9f
[linux-flexiantxendom0-3.2.10.git] / arch / ppc64 / kernel / irq.c
1 /*
2  *  arch/ppc/kernel/irq.c
3  *
4  *  Derived from arch/i386/kernel/irq.c
5  *    Copyright (C) 1992 Linus Torvalds
6  *  Adapted from arch/i386 by Gary Thomas
7  *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8  *  Updated and modified by Cort Dougan (cort@cs.nmt.edu)
9  *    Copyright (C) 1996 Cort Dougan
10  *  Adapted for Power Macintosh by Paul Mackerras
11  *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
12  *  Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
13  * 
14  * This program is free software; you can redistribute it and/or
15  * modify it under the terms of the GNU General Public License
16  * as published by the Free Software Foundation; either version
17  * 2 of the License, or (at your option) any later version.
18  *
19  * This file contains the code used by various IRQ handling routines:
20  * asking for different IRQ's should be done through these routines
21  * instead of just grabbing them. Thus setups with different IRQ numbers
22  * shouldn't result in any weird surprises, and installing new handlers
23  * should be easier.
24  */
25
26 #include <linux/errno.h>
27 #include <linux/threads.h>
28 #include <linux/kernel_stat.h>
29 #include <linux/signal.h>
30 #include <linux/sched.h>
31 #include <linux/ioport.h>
32 #include <linux/interrupt.h>
33 #include <linux/timex.h>
34 #include <linux/config.h>
35 #include <linux/init.h>
36 #include <linux/slab.h>
37 #include <linux/pci.h>
38 #include <linux/delay.h>
39 #include <linux/irq.h>
40 #include <linux/proc_fs.h>
41 #include <linux/random.h>
42
43 #include <asm/uaccess.h>
44 #include <asm/bitops.h>
45 #include <asm/system.h>
46 #include <asm/io.h>
47 #include <asm/pgtable.h>
48 #include <asm/irq.h>
49 #include <asm/cache.h>
50 #include <asm/prom.h>
51 #include <asm/ptrace.h>
52 #include <asm/iSeries/LparData.h>
53 #include <asm/machdep.h>
54 #include <asm/paca.h>
55
void enable_irq(unsigned int irq);
void disable_irq(unsigned int irq);

#ifdef CONFIG_SMP
extern void iSeries_smp_message_recv( struct pt_regs * );
#endif

/* Ack address used by CHRP-style PICs; initialized by platform code. */
volatile unsigned char *chrp_int_ack_special;
static void register_irq_proc (unsigned int irq);

/* One descriptor per interrupt line; only the spinlock needs explicit init. */
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
	[0 ... NR_IRQS-1] = {
		.lock = SPIN_LOCK_UNLOCKED
	}
};

/* Count of interrupts nobody claimed (reported in /proc/interrupts). */
int ppc_spurious_interrupts = 0;
/* Number of iSeries LP events processed by do_IRQ(). */
unsigned long lpEvent_count = 0;

/* nasty hack for shared irq's since we need to do kmalloc calls but
 * can't very early in the boot when we need to do a request irq.
 * this needs to be removed.
 * -- Cort
 */
#define IRQ_KMALLOC_ENTRIES 16
static int cache_bitmask = 0;	/* bit i set => malloc_cache[i] in use */
static struct irqaction malloc_cache[IRQ_KMALLOC_ENTRIES];
extern int mem_init_done;
84
85 void *irq_kmalloc(size_t size, int pri)
86 {
87         unsigned int i;
88         if ( mem_init_done )
89                 return kmalloc(size,pri);
90         for ( i = 0; i < IRQ_KMALLOC_ENTRIES ; i++ )
91                 if ( ! ( cache_bitmask & (1<<i) ) ) {
92                         cache_bitmask |= (1<<i);
93                         return (void *)(&malloc_cache[i]);
94                 }
95         return 0;
96 }
97
98 void irq_kfree(void *ptr)
99 {
100         unsigned int i;
101         for ( i = 0 ; i < IRQ_KMALLOC_ENTRIES ; i++ )
102                 if ( ptr == &malloc_cache[i] ) {
103                         cache_bitmask &= ~(1<<i);
104                         return;
105                 }
106         kfree(ptr);
107 }
108
/*
 * Attach @new to the action list of @irq.
 *
 * Sharing is only allowed when both the existing first handler and the
 * new one set SA_SHIRQ; otherwise -EBUSY.  The first handler installed
 * on a line re-arms the descriptor (depth reset, DISABLED/AUTODETECT/
 * WAITING cleared, line unmasked).  Returns 0 on success.
 */
int
setup_irq(unsigned int irq, struct irqaction * new)
{
	int shared = 0;
	unsigned long flags;
	struct irqaction *old, **p;
	irq_desc_t *desc = irq_desc + irq;

	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & SA_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem,
		 * only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	if ((old = *p) != NULL) {
		/* Can't share interrupts unless both agree to */
		if (!(old->flags & new->flags & SA_SHIRQ)) {
			spin_unlock_irqrestore(&desc->lock,flags);
			return -EBUSY;
		}

		/* add new interrupt at end of irq queue */
		do {
			p = &old->next;
			old = *p;
		} while (old);
		shared = 1;
	}

	*p = new;

	if (!shared) {
		/* First handler on the line: enable it from scratch. */
		desc->depth = 0;
		desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING);
		unmask_irq(irq);
	}
	spin_unlock_irqrestore(&desc->lock,flags);

	/* Expose /proc/irq/<n> once a handler exists (outside the lock). */
	register_irq_proc(irq);
	return 0;
}
166
#ifdef CONFIG_SMP

/*
 * Busy-wait until no CPU is executing a handler for @irq, i.e. until
 * IRQ_INPROGRESS clears.  Status is read without the descriptor lock;
 * callers only need the flag to be observed clear eventually.
 */
inline void synchronize_irq(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		cpu_relax();
}

#endif /* CONFIG_SMP */
176
177 /* XXX Make this into free_irq() - Anton */
178
179 /* This could be promoted to a real free_irq() ... */
/*
 * Unlink and free the irqaction registered with @dev_id on @irq.
 * When the last handler goes away the line is marked disabled and
 * masked.  The lock is dropped before synchronize_irq() so a handler
 * still running on another CPU can finish before the action is freed.
 * Returns 0 on success, -ENOENT if no matching handler was found.
 */
static int
do_free_irq(int irq, void* dev_id)
{
	irq_desc_t *desc;
	struct irqaction **p;
	unsigned long flags;

	desc = irq_desc + irq;
	spin_lock_irqsave(&desc->lock,flags);
	p = &desc->action;
	for (;;) {
		struct irqaction * action = *p;
		if (action) {
			struct irqaction **pp = p;
			p = &action->next;
			/* Only the entry whose dev_id matches is removed. */
			if (action->dev_id != dev_id)
				continue;

			/* Found it - now remove it from the list of entries */
			*pp = action->next;
			if (!desc->action) {
				desc->status |= IRQ_DISABLED;
				mask_irq(irq);
			}
			spin_unlock_irqrestore(&desc->lock,flags);

			/* Wait to make sure it's not being used on another CPU */
			synchronize_irq(irq);
			irq_kfree(action);
			return 0;
		}
		printk("Trying to free free IRQ%d\n",irq);
		spin_unlock_irqrestore(&desc->lock,flags);
		break;
	}
	return -ENOENT;
}
217
218 int request_irq(unsigned int irq,
219         irqreturn_t (*handler)(int, void *, struct pt_regs *),
220         unsigned long irqflags, const char * devname, void *dev_id)
221 {
222         struct irqaction *action;
223         int retval;
224
225         if (irq >= NR_IRQS)
226                 return -EINVAL;
227         if (!handler)
228                 /* We could implement really free_irq() instead of that... */
229                 return do_free_irq(irq, dev_id);
230         
231         action = (struct irqaction *)
232                 irq_kmalloc(sizeof(struct irqaction), GFP_KERNEL);
233         if (!action) {
234                 printk(KERN_ERR "irq_kmalloc() failed for irq %d !\n", irq);
235                 return -ENOMEM;
236         }
237         
238         action->handler = handler;
239         action->flags = irqflags;                                       
240         action->mask = 0;
241         action->name = devname;
242         action->dev_id = dev_id;
243         action->next = NULL;
244         
245         retval = setup_irq(irq, action);
246         if (retval)
247                 kfree(action);
248                 
249         return 0;
250 }
251
/*
 * Remove the handler registered with (irq, dev_id).  Implemented via
 * the NULL-handler convention of request_irq(), which dispatches to
 * do_free_irq().
 */
void free_irq(unsigned int irq, void *dev_id)
{
	request_irq(irq, NULL, 0, NULL, dev_id);
}
256
257 /*
258  * Generic enable/disable code: this just calls
259  * down into the PIC-specific version for the actual
260  * hardware disable after having gotten the irq
261  * controller lock. 
262  */
263  
264 /**
265  *      disable_irq_nosync - disable an irq without waiting
266  *      @irq: Interrupt to disable
267  *
268  *      Disable the selected interrupt line. Disables of an interrupt
269  *      stack. Unlike disable_irq(), this function does not ensure existing
270  *      instances of the IRQ handler have completed before returning.
271  *
272  *      This function may be called from IRQ context.
273  */
274  
275  void disable_irq_nosync(unsigned int irq)
276 {
277         irq_desc_t *desc = irq_desc + irq;
278         unsigned long flags;
279
280         spin_lock_irqsave(&desc->lock, flags);
281         if (!desc->depth++) {
282                 if (!(desc->status & IRQ_PER_CPU))
283                         desc->status |= IRQ_DISABLED;
284                 mask_irq(irq);
285         }
286         spin_unlock_irqrestore(&desc->lock, flags);
287 }
288
289 /**
290  *      disable_irq - disable an irq and wait for completion
291  *      @irq: Interrupt to disable
292  *
293  *      Disable the selected interrupt line. Disables of an interrupt
294  *      stack. That is for two disables you need two enables. This
295  *      function waits for any pending IRQ handlers for this interrupt
296  *      to complete before returning. If you use this function while
297  *      holding a resource the IRQ handler may need you will deadlock.
298  *
299  *      This function may be called - with care - from IRQ context.
300  */
301  
void disable_irq(unsigned int irq)
{
	/* Bump the disable depth, then wait out any in-flight handlers. */
	disable_irq_nosync(irq);
	synchronize_irq(irq);
}
307
308 /**
309  *      enable_irq - enable interrupt handling on an irq
310  *      @irq: Interrupt to enable
311  *
312  *      Re-enables the processing of interrupts on this IRQ line
313  *      providing no disable_irq calls are now in effect.
314  *
315  *      This function may be called from IRQ context.
316  */
317  
void enable_irq(unsigned int irq)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	/* Note the deliberate case ordering: 1 falls through to the
	 * depth decrement in default; 0 means an unbalanced enable. */
	switch (desc->depth) {
	case 1: {
		/* Last disable level: clear DISABLED and unmask. */
		unsigned int status = desc->status & ~IRQ_DISABLED;
		desc->status = status;
		/* Replay an edge that arrived while we were disabled. */
		if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
			desc->status = status | IRQ_REPLAY;
			hw_resend_irq(desc->handler,irq);
		}
		unmask_irq(irq);
		/* fall-through */
	}
	default:
		desc->depth--;
		break;
	case 0:
		printk("enable_irq(%u) unbalanced\n", irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
343
/*
 * seq_file backend for /proc/interrupts: a header row of online CPUs,
 * one row per irq with a registered handler (per-CPU counts, PIC type
 * name, Level/Edge, chained handler names), and a trailing line with
 * the spurious-interrupt count.
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i, j;
	struct irqaction * action;
	unsigned long flags;

	seq_printf(p, "           ");
	for (j=0; j<NR_CPUS; j++) {
		if (cpu_online(j))
			seq_printf(p, "CPU%d       ",j);
	}
	seq_putc(p, '\n');

	for (i = 0 ; i < NR_IRQS ; i++) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		/* Skip lines without a registered handler. */
		if (!action || !action->handler)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for (j = 0; j < NR_CPUS; j++) {
			if (cpu_online(j))
				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
		}
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
		if (irq_desc[i].handler)
			seq_printf(p, " %s ", irq_desc[i].handler->typename );
		else
			seq_printf(p, "  None      ");
		seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge  ");
		seq_printf(p, "    %s",action->name);
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);
		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	}
	seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
	return 0;
}
386
387 extern char *ppc_find_proc_name(unsigned *p, char *buf, unsigned buflen);
388
/*
 * Invoke every action chained on @irq once, with interrupts enabled
 * unless the first action set SA_INTERRUPT.  Handler return values are
 * OR-ed together; anything other than a single IRQ_HANDLED triggers a
 * rate-limited (100 per boot) diagnostic listing all handlers.
 */
static inline void handle_irq_event(int irq, struct pt_regs *regs,
				    struct irqaction *action)
{
	int status = 0;
	int retval = 0;
	struct irqaction *first_action = action;

	if (!(action->flags & SA_INTERRUPT))
		local_irq_enable();

	do {
		status |= action->flags;
		retval |= action->handler(irq, action->dev_id, regs);
		action = action->next;
	} while (action);
	if (status & SA_SAMPLE_RANDOM)
		add_interrupt_randomness(irq);
	local_irq_disable();
	/* retval == 0: nobody cared; other values: bogus return mask. */
	if (retval != 1) {
		static int count = 100;
		char name_buf[256];
		if (count) {
			count--;
			if (retval) {
				printk("irq event %d: bogus retval mask %x\n",
					irq, retval);
			} else {
				printk("irq %d: nobody cared!\n", irq);
			}
			dump_stack();
			printk("handlers:\n");
			action = first_action;
			do {
				printk("[<%p>]", action->handler);
				printk(" (%s)\n",
				       ppc_find_proc_name((unsigned *)action->handler, name_buf, 256));
				action = action->next;
			} while (action);
		}
	}
}
430
431 /*
432  * Eventually, this should take an array of interrupts and an array size
433  * so it can dispatch multiple interrupts.
434  */
/*
 * Dispatch one decoded interrupt: bump the per-CPU count, ack the PIC,
 * decide whether this CPU may run the handlers (line not disabled and
 * not already in progress on another CPU), loop while further
 * instances arrive (IRQ_PENDING), then let the PIC end/re-enable the
 * line.  Called with interrupts off; takes desc->lock, dropping it
 * around the actual handler invocation.
 */
void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
{
	int status;
	struct irqaction *action;
	int cpu = smp_processor_id();
	irq_desc_t *desc = irq_desc + irq;

	kstat_cpu(cpu).irqs[irq]++;
	spin_lock(&desc->lock);
	ack_irq(irq);
	/*
	   REPLAY is when Linux resends an IRQ that was dropped earlier
	   WAITING is used by probe to mark irqs that are being tested
	   */
	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
	if (!(status & IRQ_PER_CPU))
		status |= IRQ_PENDING; /* we _want_ to handle it */

	/*
	 * If the IRQ is disabled for whatever reason, we cannot
	 * use the action we have.
	 */
	action = NULL;
	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
		action = desc->action;
		if (!action || !action->handler) {
			ppc_spurious_interrupts++;
			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
			/* We can't call disable_irq here, it would deadlock */
			if (!desc->depth)
				desc->depth = 1;
			desc->status |= IRQ_DISABLED;
			/* This is not a real spurrious interrupt, we
			 * have to eoi it, so we jump to out
			 */
			mask_irq(irq);
			goto out;
		}
		status &= ~IRQ_PENDING; /* we commit to handling */
		if (!(status & IRQ_PER_CPU))
			status |= IRQ_INPROGRESS; /* we are handling it */
	}
	desc->status = status;

	/*
	 * If there is no IRQ handler or it was disabled, exit early.
	   Since we set PENDING, if another processor is handling
	   a different instance of this same irq, the other processor
	   will take care of it.
	 */
	if (unlikely(!action))
		goto out;

	/*
	 * Edge triggered interrupts need to remember
	 * pending events.
	 * This applies to any hw interrupts that allow a second
	 * instance of the same irq to arrive while we are in do_IRQ
	 * or in the handler. But the code here only handles the _second_
	 * instance of the irq, not the third or fourth. So it is mostly
	 * useful for irq hardware that does not mask cleanly in an
	 * SMP environment.
	 */
	for (;;) {
		/* Run handlers unlocked; re-check PENDING afterwards. */
		spin_unlock(&desc->lock);
		handle_irq_event(irq, regs, action);
		spin_lock(&desc->lock);

		if (likely(!(desc->status & IRQ_PENDING)))
			break;
		desc->status &= ~IRQ_PENDING;
	}
out:
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * The ->end() handler has to deal with interrupts which got
	 * disabled while the handler was running.
	 */
	if (irq_desc[irq].handler) {
		if (irq_desc[irq].handler->end)
			irq_desc[irq].handler->end(irq);
		else if (irq_desc[irq].handler->enable)
			irq_desc[irq].handler->enable(irq);
	}
	spin_unlock(&desc->lock);
}
521
/*
 * Architecture entry point for external interrupts.  On iSeries the
 * hypervisor LP event queue (and pending SMP IPIs) are processed
 * directly; on other platforms ppc_md.get_irq() is polled until no irq
 * is pending, dispatching each one.  On iSeries a pending fake
 * decrementer is replayed after irq_exit().  Returns 1 so that
 * ret_from_int knows it can run its checks.
 */
int do_IRQ(struct pt_regs *regs)
{
	int irq, first = 1;
#ifdef CONFIG_PPC_ISERIES
	struct paca_struct *lpaca;
	struct ItLpQueue *lpq;
#endif

	irq_enter();

#ifdef CONFIG_PPC_ISERIES
	lpaca = get_paca();
#ifdef CONFIG_SMP
	if (lpaca->xLpPaca.xIntDword.xFields.xIpiCnt) {
		/* Consume any pending inter-processor message. */
		lpaca->xLpPaca.xIntDword.xFields.xIpiCnt = 0;
		iSeries_smp_message_recv(regs);
	}
#endif /* CONFIG_SMP */
	lpq = lpaca->lpQueuePtr;
	if (lpq && ItLpQueue_isLpIntPending(lpq))
		lpEvent_count += ItLpQueue_process(lpq, regs);
#else
	/*
	 * Every arch is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or -1 to
	 * indicate there are no more pending.  But the first time
	 * through the loop this means there wasn't an IRQ pending.
	 * The value -2 is for buggy hardware and means that this IRQ
	 * has already been handled. -- Tom
	 */
	while ((irq = ppc_md.get_irq(regs)) >= 0) {
		ppc_irq_dispatch_handler(regs, irq);
		first = 0;
	}
	if (irq != -2 && first)
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;
#endif

	irq_exit();

#ifdef CONFIG_PPC_ISERIES
	if (lpaca->xLpPaca.xIntDword.xFields.xDecrInt) {
		lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	return 1; /* lets ret_from_int know we can do checks */
}
573
/* IRQ autoprobing is not supported on this platform: report no irqs. */
unsigned long probe_irq_on (void)
{
	return 0UL;
}
578
/* Autoprobe stub: whatever mask probing produced, no irq is reported. */
int probe_irq_off (unsigned long irqs)
{
	(void)irqs;
	return 0;
}
583
/* Autoprobe stub: the probed-irq mask is always empty here. */
unsigned int probe_irq_mask(unsigned long irqs)
{
	(void)irqs;
	return 0U;
}
588
589 void __init init_IRQ(void)
590 {
591         static int once = 0;
592
593         if ( once )
594                 return;
595         else
596                 once++;
597         
598         ppc_md.init_IRQ();
599 }
600
/* /proc/irq tree: root directory, per-irq dirs, and their smp_affinity files. */
static struct proc_dir_entry * root_irq_dir;
static struct proc_dir_entry * irq_dir [NR_IRQS];
static struct proc_dir_entry * smp_affinity_entry [NR_IRQS];

/* Default affinity: all CPUs when irqs are balanced across CPUs, else none. */
#ifdef CONFIG_IRQ_ALL_CPUS
unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = -1UL};
#else  /* CONFIG_IRQ_ALL_CPUS */
unsigned long irq_affinity [NR_IRQS] = { [0 ... NR_IRQS-1] = 0x0};
#endif /* CONFIG_IRQ_ALL_CPUS */

/* Width of a 64-bit mask rendered in hex (used by the /proc handlers). */
#define HEX_DIGITS 16
612
/*
 * /proc read handler for /proc/irq/<n>/smp_affinity: print the
 * affinity mask of the irq encoded in @data as 16 hex digits.  The
 * caller's buffer must hold HEX_DIGITS characters plus the newline.
 */
static int irq_affinity_read_proc (char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	if (count < HEX_DIGITS+1)
		return -EINVAL;
	return sprintf(page, "%16lx\n", irq_affinity[(long)data]);
}
620
621 static unsigned int parse_hex_value (const char *buffer,
622                 unsigned long count, unsigned long *ret)
623 {
624         unsigned char hexnum [HEX_DIGITS];
625         unsigned long value;
626         int i;
627
628         if (!count)
629                 return -EINVAL;
630         if (count > HEX_DIGITS)
631                 count = HEX_DIGITS;
632         if (copy_from_user(hexnum, buffer, count))
633                 return -EFAULT;
634
635         /*
636          * Parse the first 16 characters as a hex string, any non-hex char
637          * is end-of-string. '00e1', 'e1', '00E1', 'E1' are all the same.
638          */
639         value = 0;
640
641         for (i = 0; i < count; i++) {
642                 unsigned int c = hexnum[i];
643
644                 switch (c) {
645                         case '0' ... '9': c -= '0'; break;
646                         case 'a' ... 'f': c -= 'a'-10; break;
647                         case 'A' ... 'F': c -= 'A'-10; break;
648                 default:
649                         goto out;
650                 }
651                 value = (value << 4) | c;
652         }
653 out:
654         *ret = value;
655         return 0;
656 }
657
658 static int irq_affinity_write_proc (struct file *file, const char *buffer,
659                                         unsigned long count, void *data)
660 {
661         int irq = (long)data, full_count = count, err;
662         unsigned long new_value;
663
664         if (!irq_desc[irq].handler->set_affinity)
665                 return -EIO;
666
667         err = parse_hex_value(buffer, count, &new_value);
668
669         /*
670          * Do not allow disabling IRQs completely - it's a too easy
671          * way to make the system unusable accidentally :-) At least
672          * one online CPU still has to be targeted.
673          */
674         if (!(new_value & cpu_online_map))
675                 return -EINVAL;
676
677         irq_affinity[irq] = new_value;
678         irq_desc[irq].handler->set_affinity(irq, new_value);
679
680         return full_count;
681 }
682
683 static int prof_cpu_mask_read_proc (char *page, char **start, off_t off,
684                         int count, int *eof, void *data)
685 {
686         unsigned long *mask = (unsigned long *) data;
687         if (count < HEX_DIGITS+1)
688                 return -EINVAL;
689         return sprintf (page, "%08lx\n", *mask);
690 }
691
/*
 * /proc write handler for prof_cpu_mask: parse a hex CPU mask into
 * *@data and, on iSeries, enable/disable per-CPU profiling to match.
 * Returns the byte count consumed or a negative errno.
 * NOTE(review): 'err' is declared unsigned long; negative errnos from
 * parse_hex_value() still test non-zero, so the check below works.
 */
static int prof_cpu_mask_write_proc (struct file *file, const char *buffer,
					unsigned long count, void *data)
{
	unsigned long *mask = (unsigned long *) data, full_count = count, err;
	unsigned long new_value;

	err = parse_hex_value(buffer, count, &new_value);
	if (err)
		return err;

	*mask = new_value;

#ifdef CONFIG_PPC_ISERIES
	{
		unsigned i;
		for (i=0; i<NR_CPUS; ++i) {
			/* Bit i of the mask gates profiling on CPU i. */
			if ( paca[i].prof_buffer && (new_value & 1) )
				paca[i].prof_enabled = 1;
			else
				paca[i].prof_enabled = 0;
			new_value >>= 1;
		}
	}
#endif

	return full_count;
}
719
720 #define MAX_NAMELEN 10
721
722 static void register_irq_proc (unsigned int irq)
723 {
724         struct proc_dir_entry *entry;
725         char name [MAX_NAMELEN];
726
727         if (!root_irq_dir || (irq_desc[irq].handler == NULL) || irq_dir[irq])
728                 return;
729
730         memset(name, 0, MAX_NAMELEN);
731         sprintf(name, "%d", irq);
732
733         /* create /proc/irq/1234 */
734         irq_dir[irq] = proc_mkdir(name, root_irq_dir);
735
736         /* create /proc/irq/1234/smp_affinity */
737         entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
738
739         entry->nlink = 1;
740         entry->data = (void *)(long)irq;
741         entry->read_proc = irq_affinity_read_proc;
742         entry->write_proc = irq_affinity_write_proc;
743
744         smp_affinity_entry[irq] = entry;
745 }
746
/* CPUs with profiling enabled (all by default); settable via /proc/irq/prof_cpu_mask. */
unsigned long prof_cpu_mask = -1;
748
749 void init_irq_proc (void)
750 {
751         struct proc_dir_entry *entry;
752         int i;
753
754         /* create /proc/irq */
755         root_irq_dir = proc_mkdir("irq", 0);
756
757         /* create /proc/irq/prof_cpu_mask */
758         entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
759
760         entry->nlink = 1;
761         entry->data = (void *)&prof_cpu_mask;
762         entry->read_proc = prof_cpu_mask_read_proc;
763         entry->write_proc = prof_cpu_mask_write_proc;
764
765         /*
766          * Create entries for all existing IRQs.
767          */
768         for (i = 0; i < NR_IRQS; i++) {
769                 if (irq_desc[i].handler == NULL)
770                         continue;
771                 register_irq_proc(i);
772         }
773 }
774
775 irqreturn_t no_action(int irq, void *dev, struct pt_regs *regs)
776 {
777         return IRQ_NONE;
778 }