genirq: Provide compat handling for chip->mask()
1 /*
2  * linux/kernel/irq/chip.c
3  *
4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5  * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6  *
7  * This file contains the core interrupt handling code, for irq-chip
8  * based architectures.
9  *
10  * Detailed information is available in Documentation/DocBook/genericirq
11  */
12
13 #include <linux/irq.h>
14 #include <linux/msi.h>
15 #include <linux/module.h>
16 #include <linux/interrupt.h>
17 #include <linux/kernel_stat.h>
18
19 #include "internals.h"
20
21 static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
22 {
23         struct irq_desc *desc;
24         unsigned long flags;
25
26         desc = irq_to_desc(irq);
27         if (!desc) {
28                 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
29                 return;
30         }
31
32         /* Ensure we don't have left over values from a previous use of this irq */
33         raw_spin_lock_irqsave(&desc->lock, flags);
34         desc->status = IRQ_DISABLED;
35         desc->irq_data.chip = &no_irq_chip;
36         desc->handle_irq = handle_bad_irq;
37         desc->depth = 1;
38         desc->irq_data.msi_desc = NULL;
39         desc->irq_data.handler_data = NULL;
40         if (!keep_chip_data)
41                 desc->irq_data.chip_data = NULL;
42         desc->action = NULL;
43         desc->irq_count = 0;
44         desc->irqs_unhandled = 0;
45 #ifdef CONFIG_SMP
46         cpumask_setall(desc->irq_data.affinity);
47 #ifdef CONFIG_GENERIC_PENDING_IRQ
48         cpumask_clear(desc->pending_mask);
49 #endif
50 #endif
51         raw_spin_unlock_irqrestore(&desc->lock, flags);
52 }
53
54 /**
55  *      dynamic_irq_init - initialize a dynamically allocated irq
56  *      @irq:   irq number to initialize
57  */
58 void dynamic_irq_init(unsigned int irq)
59 {
60         dynamic_irq_init_x(irq, false);
61 }
62
63 /**
64  *      dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
65  *      @irq:   irq number to initialize
66  *
67  *      does not set irq_to_desc(irq)->irq_data.chip_data to NULL
68  */
69 void dynamic_irq_init_keep_chip_data(unsigned int irq)
70 {
71         dynamic_irq_init_x(irq, true);
72 }
73
74 static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
75 {
76         struct irq_desc *desc = irq_to_desc(irq);
77         unsigned long flags;
78
79         if (!desc) {
80                 WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
81                 return;
82         }
83
84         raw_spin_lock_irqsave(&desc->lock, flags);
85         if (desc->action) {
86                 raw_spin_unlock_irqrestore(&desc->lock, flags);
87                 WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
88                         irq);
89                 return;
90         }
91         desc->irq_data.msi_desc = NULL;
92         desc->irq_data.handler_data = NULL;
93         if (!keep_chip_data)
94                 desc->irq_data.chip_data = NULL;
95         desc->handle_irq = handle_bad_irq;
96         desc->irq_data.chip = &no_irq_chip;
97         desc->name = NULL;
98         clear_kstat_irqs(desc);
99         raw_spin_unlock_irqrestore(&desc->lock, flags);
100 }
101
102 /**
103  *      dynamic_irq_cleanup - cleanup a dynamically allocated irq
104  *      @irq:   irq number to clean up
105  */
106 void dynamic_irq_cleanup(unsigned int irq)
107 {
108         dynamic_irq_cleanup_x(irq, false);
109 }
110
111 /**
112  *      dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
113  *      @irq:   irq number to clean up
114  *
115  *      does not set irq_to_desc(irq)->irq_data.chip_data to NULL
116  */
117 void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
118 {
119         dynamic_irq_cleanup_x(irq, true);
120 }
121
122
123 /**
124  *      set_irq_chip - set the irq chip for an irq
125  *      @irq:   irq number
126  *      @chip:  pointer to irq chip description structure
127  */
128 int set_irq_chip(unsigned int irq, struct irq_chip *chip)
129 {
130         struct irq_desc *desc = irq_to_desc(irq);
131         unsigned long flags;
132
133         if (!desc) {
134                 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
135                 return -EINVAL;
136         }
137
138         if (!chip)
139                 chip = &no_irq_chip;
140
141         raw_spin_lock_irqsave(&desc->lock, flags);
142         irq_chip_set_defaults(chip);
143         desc->irq_data.chip = chip;
144         raw_spin_unlock_irqrestore(&desc->lock, flags);
145
146         return 0;
147 }
148 EXPORT_SYMBOL(set_irq_chip);
149
150 /**
151  *      set_irq_type - set the irq trigger type for an irq
152  *      @irq:   irq number
153  *      @type:  IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
154  */
155 int set_irq_type(unsigned int irq, unsigned int type)
156 {
157         struct irq_desc *desc = irq_to_desc(irq);
158         unsigned long flags;
159         int ret = -ENXIO;
160
161         if (!desc) {
162                 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
163                 return -ENODEV;
164         }
165
166         type &= IRQ_TYPE_SENSE_MASK;
167         if (type == IRQ_TYPE_NONE)
168                 return 0;
169
170         raw_spin_lock_irqsave(&desc->lock, flags);
171         ret = __irq_set_trigger(desc, irq, type);
172         raw_spin_unlock_irqrestore(&desc->lock, flags);
173         return ret;
174 }
175 EXPORT_SYMBOL(set_irq_type);
176
177 /**
178  *      set_irq_data - set irq type data for an irq
179  *      @irq:   Interrupt number
180  *      @data:  Pointer to interrupt specific data
181  *
182  *      Set the hardware irq controller data for an irq
183  */
184 int set_irq_data(unsigned int irq, void *data)
185 {
186         struct irq_desc *desc = irq_to_desc(irq);
187         unsigned long flags;
188
189         if (!desc) {
190                 printk(KERN_ERR
191                        "Trying to install controller data for IRQ%d\n", irq);
192                 return -EINVAL;
193         }
194
195         raw_spin_lock_irqsave(&desc->lock, flags);
196         desc->irq_data.handler_data = data;
197         raw_spin_unlock_irqrestore(&desc->lock, flags);
198         return 0;
199 }
200 EXPORT_SYMBOL(set_irq_data);
201
202 /**
203  *      set_irq_msi - set MSI descriptor data for an irq
204  *      @irq:   Interrupt number
205  *      @entry: Pointer to MSI descriptor data
206  *
207  *      Set the MSI descriptor entry for an irq
208  */
209 int set_irq_msi(unsigned int irq, struct msi_desc *entry)
210 {
211         struct irq_desc *desc = irq_to_desc(irq);
212         unsigned long flags;
213
214         if (!desc) {
215                 printk(KERN_ERR
216                        "Trying to install msi data for IRQ%d\n", irq);
217                 return -EINVAL;
218         }
219
220         raw_spin_lock_irqsave(&desc->lock, flags);
221         desc->irq_data.msi_desc = entry;
222         if (entry)
223                 entry->irq = irq;
224         raw_spin_unlock_irqrestore(&desc->lock, flags);
225         return 0;
226 }
227
228 /**
229  *      set_irq_chip_data - set irq chip data for an irq
230  *      @irq:   Interrupt number
231  *      @data:  Pointer to chip specific data
232  *
233  *      Set the hardware irq chip data for an irq
234  */
235 int set_irq_chip_data(unsigned int irq, void *data)
236 {
237         struct irq_desc *desc = irq_to_desc(irq);
238         unsigned long flags;
239
240         if (!desc) {
241                 printk(KERN_ERR
242                        "Trying to install chip data for IRQ%d\n", irq);
243                 return -EINVAL;
244         }
245
246         if (!desc->irq_data.chip) {
247                 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
248                 return -EINVAL;
249         }
250
251         raw_spin_lock_irqsave(&desc->lock, flags);
252         desc->irq_data.chip_data = data;
253         raw_spin_unlock_irqrestore(&desc->lock, flags);
254
255         return 0;
256 }
257 EXPORT_SYMBOL(set_irq_chip_data);
258
259 /**
260  *      set_irq_nested_thread - Set/Reset the IRQ_NESTED_THREAD flag of an irq
261  *
262  *      @irq:   Interrupt number
263  *      @nest:  0 to clear / 1 to set the IRQ_NESTED_THREAD flag
264  *
265  *      The IRQ_NESTED_THREAD flag indicates that on
266  *      request_threaded_irq() no separate interrupt thread should be
267  *      created for the irq as the handler is called nested in the
268  *      context of a demultiplexing interrupt handler thread.
269  */
270 void set_irq_nested_thread(unsigned int irq, int nest)
271 {
272         struct irq_desc *desc = irq_to_desc(irq);
273         unsigned long flags;
274
275         if (!desc)
276                 return;
277
278         raw_spin_lock_irqsave(&desc->lock, flags);
279         if (nest)
280                 desc->status |= IRQ_NESTED_THREAD;
281         else
282                 desc->status &= ~IRQ_NESTED_THREAD;
283         raw_spin_unlock_irqrestore(&desc->lock, flags);
284 }
285 EXPORT_SYMBOL_GPL(set_irq_nested_thread);
286
287 /*
288  * default enable function
289  */
290 static void default_enable(unsigned int irq)
291 {
292         struct irq_desc *desc = irq_to_desc(irq);
293
294         desc->irq_data.chip->unmask(irq);
295         desc->status &= ~IRQ_MASKED;
296 }
297
298 /*
299  * default disable function
300  */
301 static void default_disable(unsigned int irq)
302 {
303 }
304
305 /*
306  * default startup function
307  */
308 static unsigned int default_startup(unsigned int irq)
309 {
310         struct irq_desc *desc = irq_to_desc(irq);
311
312         desc->irq_data.chip->enable(irq);
313         return 0;
314 }
315
316 /*
317  * default shutdown function
318  */
319 static void default_shutdown(unsigned int irq)
320 {
321         struct irq_desc *desc = irq_to_desc(irq);
322
323         desc->irq_data.chip->irq_mask(&desc->irq_data);
324         desc->status |= IRQ_MASKED;
325 }
326
327 /* Temporary migration helpers */
328 static void compat_irq_mask(struct irq_data *data)
329 {
330         data->chip->mask(data->irq);
331 }
332
333 static void compat_bus_lock(struct irq_data *data)
334 {
335         data->chip->bus_lock(data->irq);
336 }
337
338 static void compat_bus_sync_unlock(struct irq_data *data)
339 {
340         data->chip->bus_sync_unlock(data->irq);
341 }
342
343 /*
344  * Fixup enable/disable function pointers
345  */
346 void irq_chip_set_defaults(struct irq_chip *chip)
347 {
348         if (!chip->enable)
349                 chip->enable = default_enable;
350         if (!chip->disable)
351                 chip->disable = default_disable;
352         if (!chip->startup)
353                 chip->startup = default_startup;
354         /*
355          * Use chip->disable as the shutdown function when the user
356          * provided their own. When chip->disable is still set to
357          * default_disable, fall back to default_shutdown, otherwise
358          * the irq line is not disabled on free_irq():
359          */
360         if (!chip->shutdown)
361                 chip->shutdown = chip->disable != default_disable ?
362                         chip->disable : default_shutdown;
363         if (!chip->end)
364                 chip->end = dummy_irq_chip.end;
365
366         if (chip->bus_lock)
367                 chip->irq_bus_lock = compat_bus_lock;
368         if (chip->bus_sync_unlock)
369                 chip->irq_bus_sync_unlock = compat_bus_sync_unlock;
370
371         if (chip->mask)
372                 chip->irq_mask = compat_irq_mask;
373 }
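/*
 * A minimal sketch of what the compat path above buys a not-yet-converted
 * driver.  Everything named my_* below is hypothetical and only for
 * illustration; it is not part of this file.  A chip that still provides
 * the old unsigned-int based ->mask()/->unmask() callbacks keeps working:
 * irq_chip_set_defaults() sees ->mask and installs compat_irq_mask() as
 * ->irq_mask, so core code can call chip->irq_mask(&desc->irq_data)
 * uniformly while the wrapper forwards to the old callback.
 */
static void my_chip_mask(unsigned int irq)
{
        /* write the (hypothetical) controller's mask register here */
}

static void my_chip_unmask(unsigned int irq)
{
        /* clear the mask bit again */
}

static struct irq_chip my_chip = {
        .name   = "my-chip",
        .mask   = my_chip_mask,         /* old style, takes an irq number */
        .unmask = my_chip_unmask,
};

static void my_chip_setup(unsigned int irq)
{
        /*
         * set_irq_chip_and_handler() calls irq_chip_set_defaults(), which
         * wraps ->mask via compat_irq_mask before the flow handler is set.
         */
        set_irq_chip_and_handler(irq, &my_chip, handle_level_irq);
}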
374
375 static inline void mask_ack_irq(struct irq_desc *desc, int irq)
376 {
377         if (desc->irq_data.chip->mask_ack)
378                 desc->irq_data.chip->mask_ack(irq);
379         else {
380                 desc->irq_data.chip->irq_mask(&desc->irq_data);
381                 if (desc->irq_data.chip->ack)
382                         desc->irq_data.chip->ack(irq);
383         }
384         desc->status |= IRQ_MASKED;
385 }
386
387 static inline void mask_irq(struct irq_desc *desc)
388 {
389         if (desc->irq_data.chip->irq_mask) {
390                 desc->irq_data.chip->irq_mask(&desc->irq_data);
391                 desc->status |= IRQ_MASKED;
392         }
393 }
394
395 static inline void unmask_irq(struct irq_desc *desc, int irq)
396 {
397         if (desc->irq_data.chip->unmask) {
398                 desc->irq_data.chip->unmask(irq);
399                 desc->status &= ~IRQ_MASKED;
400         }
401 }
402
403 /*
404  *      handle_nested_irq - Handle a nested irq from an irq thread
405  *      @irq:   the interrupt number
406  *
407  *      Handle interrupts which are nested into a threaded interrupt
408  *      handler. The handler function is called inside the calling
409  *      thread's context.
410  */
411 void handle_nested_irq(unsigned int irq)
412 {
413         struct irq_desc *desc = irq_to_desc(irq);
414         struct irqaction *action;
415         irqreturn_t action_ret;
416
417         might_sleep();
418
419         raw_spin_lock_irq(&desc->lock);
420
421         kstat_incr_irqs_this_cpu(irq, desc);
422
423         action = desc->action;
424         if (unlikely(!action || (desc->status & IRQ_DISABLED)))
425                 goto out_unlock;
426
427         desc->status |= IRQ_INPROGRESS;
428         raw_spin_unlock_irq(&desc->lock);
429
430         action_ret = action->thread_fn(action->irq, action->dev_id);
431         if (!noirqdebug)
432                 note_interrupt(irq, desc, action_ret);
433
434         raw_spin_lock_irq(&desc->lock);
435         desc->status &= ~IRQ_INPROGRESS;
436
437 out_unlock:
438         raw_spin_unlock_irq(&desc->lock);
439 }
440 EXPORT_SYMBOL_GPL(handle_nested_irq);
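/*
 * A sketch of the nested-irq pattern described above, with hypothetical
 * names (my_*, MY_CHILD_IRQ_BASE and MY_NR_CHILD_IRQS are illustrative and
 * not part of this file): the parent interrupt is requested as a threaded
 * interrupt, the child irqs are marked with set_irq_nested_thread(), and
 * the parent thread calls handle_nested_irq() for every pending child so
 * the child handlers run nested in the parent thread's context.
 */
#define MY_CHILD_IRQ_BASE       100     /* hypothetical child irq range */
#define MY_NR_CHILD_IRQS        8

static unsigned long my_read_pending(void)
{
        /* read the (hypothetical) device's interrupt status register */
        return 0;
}

static irqreturn_t my_demux_thread_fn(int irq, void *dev_id)
{
        unsigned long pending = my_read_pending();
        unsigned int bit;

        for (bit = 0; bit < MY_NR_CHILD_IRQS; bit++)
                if (pending & (1UL << bit))
                        handle_nested_irq(MY_CHILD_IRQ_BASE + bit);

        return IRQ_HANDLED;
}

static int my_demux_setup(unsigned int parent_irq)
{
        unsigned int i;

        for (i = 0; i < MY_NR_CHILD_IRQS; i++) {
                set_irq_chip_and_handler(MY_CHILD_IRQ_BASE + i,
                                         &dummy_irq_chip, handle_simple_irq);
                set_irq_nested_thread(MY_CHILD_IRQ_BASE + i, 1);
        }

        return request_threaded_irq(parent_irq, NULL, my_demux_thread_fn,
                                    IRQF_ONESHOT, "my-demux", NULL);
}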
441
442 /**
443  *      handle_simple_irq - Simple and software-decoded IRQs.
444  *      @irq:   the interrupt number
445  *      @desc:  the interrupt description structure for this irq
446  *
447  *      Simple interrupts are either sent from a demultiplexing interrupt
448  *      handler or come from hardware, where no interrupt hardware control
449  *      is necessary.
450  *
451  *      Note: The caller is expected to handle the ack, clear, mask and
452  *      unmask issues if necessary.
453  */
454 void
455 handle_simple_irq(unsigned int irq, struct irq_desc *desc)
456 {
457         struct irqaction *action;
458         irqreturn_t action_ret;
459
460         raw_spin_lock(&desc->lock);
461
462         if (unlikely(desc->status & IRQ_INPROGRESS))
463                 goto out_unlock;
464         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
465         kstat_incr_irqs_this_cpu(irq, desc);
466
467         action = desc->action;
468         if (unlikely(!action || (desc->status & IRQ_DISABLED)))
469                 goto out_unlock;
470
471         desc->status |= IRQ_INPROGRESS;
472         raw_spin_unlock(&desc->lock);
473
474         action_ret = handle_IRQ_event(irq, action);
475         if (!noirqdebug)
476                 note_interrupt(irq, desc, action_ret);
477
478         raw_spin_lock(&desc->lock);
479         desc->status &= ~IRQ_INPROGRESS;
480 out_unlock:
481         raw_spin_unlock(&desc->lock);
482 }
483
484 /**
485  *      handle_level_irq - Level type irq handler
486  *      @irq:   the interrupt number
487  *      @desc:  the interrupt description structure for this irq
488  *
489  *      Level type interrupts are active as long as the hardware line has
490  *      the active level. This may require masking the interrupt and
491  *      unmasking it after the associated handler has acknowledged the
492  *      device, so that the interrupt line is back to inactive.
493  */
494 void
495 handle_level_irq(unsigned int irq, struct irq_desc *desc)
496 {
497         struct irqaction *action;
498         irqreturn_t action_ret;
499
500         raw_spin_lock(&desc->lock);
501         mask_ack_irq(desc, irq);
502
503         if (unlikely(desc->status & IRQ_INPROGRESS))
504                 goto out_unlock;
505         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
506         kstat_incr_irqs_this_cpu(irq, desc);
507
508         /*
509          * If it's disabled or no action is available,
510          * keep it masked and get out of here
511          */
512         action = desc->action;
513         if (unlikely(!action || (desc->status & IRQ_DISABLED)))
514                 goto out_unlock;
515
516         desc->status |= IRQ_INPROGRESS;
517         raw_spin_unlock(&desc->lock);
518
519         action_ret = handle_IRQ_event(irq, action);
520         if (!noirqdebug)
521                 note_interrupt(irq, desc, action_ret);
522
523         raw_spin_lock(&desc->lock);
524         desc->status &= ~IRQ_INPROGRESS;
525
526         if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
527                 unmask_irq(desc, irq);
528 out_unlock:
529         raw_spin_unlock(&desc->lock);
530 }
531 EXPORT_SYMBOL_GPL(handle_level_irq);
532
533 /**
534  *      handle_fasteoi_irq - irq handler for transparent controllers
535  *      @irq:   the interrupt number
536  *      @desc:  the interrupt description structure for this irq
537  *
538  *      Only a single callback will be issued to the chip: an ->eoi()
539  *      call when the interrupt has been serviced. This enables support
540  *      for modern forms of interrupt handlers, which handle the flow
541  *      details in hardware, transparently.
542  */
543 void
544 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
545 {
546         struct irqaction *action;
547         irqreturn_t action_ret;
548
549         raw_spin_lock(&desc->lock);
550
551         if (unlikely(desc->status & IRQ_INPROGRESS))
552                 goto out;
553
554         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
555         kstat_incr_irqs_this_cpu(irq, desc);
556
557         /*
558          * If it's disabled or no action is available,
559          * then mask it and get out of here:
560          */
561         action = desc->action;
562         if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
563                 desc->status |= IRQ_PENDING;
564                 mask_irq(desc);
565                 goto out;
566         }
567
568         desc->status |= IRQ_INPROGRESS;
569         desc->status &= ~IRQ_PENDING;
570         raw_spin_unlock(&desc->lock);
571
572         action_ret = handle_IRQ_event(irq, action);
573         if (!noirqdebug)
574                 note_interrupt(irq, desc, action_ret);
575
576         raw_spin_lock(&desc->lock);
577         desc->status &= ~IRQ_INPROGRESS;
578 out:
579         desc->irq_data.chip->eoi(irq);
580
581         raw_spin_unlock(&desc->lock);
582 }
583
584 /**
585  *      handle_edge_irq - edge type IRQ handler
586  *      @irq:   the interrupt number
587  *      @desc:  the interrupt description structure for this irq
588  *
589  *      Interrupts occur on the falling and/or rising edge of a hardware
590  *      signal. The occurrence is latched into the irq controller hardware
591  *      and must be acked in order to be re-enabled. After the ack another
592  *      interrupt can happen on the same source even before the first one
593  *      is handled by the associated event handler. If this happens it
594  *      might be necessary to disable (mask) the interrupt depending on the
595  *      controller hardware. This requires re-enabling the interrupt inside
596  *      the loop which handles the interrupts that have arrived while
597  *      the handler was running. Once all pending interrupts are handled,
598  *      the loop is left.
599  */
600 void
601 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
602 {
603         raw_spin_lock(&desc->lock);
604
605         desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
606
607         /*
608          * If we're currently running this IRQ, or it's disabled,
609          * we shouldn't process the IRQ. Mark it pending, handle
610          * the necessary masking and go out
611          */
612         if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
613                     !desc->action)) {
614                 desc->status |= (IRQ_PENDING | IRQ_MASKED);
615                 mask_ack_irq(desc, irq);
616                 goto out_unlock;
617         }
618         kstat_incr_irqs_this_cpu(irq, desc);
619
620         /* Start handling the irq */
621         if (desc->irq_data.chip->ack)
622                 desc->irq_data.chip->ack(irq);
623
624         /* Mark the IRQ currently in progress. */
625         desc->status |= IRQ_INPROGRESS;
626
627         do {
628                 struct irqaction *action = desc->action;
629                 irqreturn_t action_ret;
630
631                 if (unlikely(!action)) {
632                         mask_irq(desc);
633                         goto out_unlock;
634                 }
635
636                 /*
637                  * When another irq arrived while we were handling
638                  * one, we could have masked the irq.
639                  * Re-enable it, if it was not disabled in the meantime.
640                  */
641                 if (unlikely((desc->status &
642                                (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
643                               (IRQ_PENDING | IRQ_MASKED))) {
644                         unmask_irq(desc, irq);
645                 }
646
647                 desc->status &= ~IRQ_PENDING;
648                 raw_spin_unlock(&desc->lock);
649                 action_ret = handle_IRQ_event(irq, action);
650                 if (!noirqdebug)
651                         note_interrupt(irq, desc, action_ret);
652                 raw_spin_lock(&desc->lock);
653
654         } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
655
656         desc->status &= ~IRQ_INPROGRESS;
657 out_unlock:
658         raw_spin_unlock(&desc->lock);
659 }
660
661 /**
662  *      handle_percpu_irq - Per CPU local irq handler
663  *      @irq:   the interrupt number
664  *      @desc:  the interrupt description structure for this irq
665  *
666  *      Per CPU interrupts on SMP machines without locking requirements
667  */
668 void
669 handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
670 {
671         irqreturn_t action_ret;
672
673         kstat_incr_irqs_this_cpu(irq, desc);
674
675         if (desc->irq_data.chip->ack)
676                 desc->irq_data.chip->ack(irq);
677
678         action_ret = handle_IRQ_event(irq, desc->action);
679         if (!noirqdebug)
680                 note_interrupt(irq, desc, action_ret);
681
682         if (desc->irq_data.chip->eoi)
683                 desc->irq_data.chip->eoi(irq);
684 }
685
686 void
687 __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
688                   const char *name)
689 {
690         struct irq_desc *desc = irq_to_desc(irq);
691         unsigned long flags;
692
693         if (!desc) {
694                 printk(KERN_ERR
695                        "Trying to install type control for IRQ%d\n", irq);
696                 return;
697         }
698
699         if (!handle)
700                 handle = handle_bad_irq;
701         else if (desc->irq_data.chip == &no_irq_chip) {
702                 printk(KERN_WARNING "Trying to install %sinterrupt handler "
703                        "for IRQ%d\n", is_chained ? "chained " : "", irq);
704                 /*
705                  * Some ARM implementations install a handler for really dumb
706                  * interrupt hardware without setting an irq_chip. This worked
707                  * with the ARM no_irq_chip but the check in setup_irq would
708                  * prevent us from setting up the interrupt at all. Switch it
709                  * to dummy_irq_chip for easy transition.
710                  */
711                 desc->irq_data.chip = &dummy_irq_chip;
712         }
713
714         chip_bus_lock(desc);
715         raw_spin_lock_irqsave(&desc->lock, flags);
716
717         /* Uninstall? */
718         if (handle == handle_bad_irq) {
719                 if (desc->irq_data.chip != &no_irq_chip)
720                         mask_ack_irq(desc, irq);
721                 desc->status |= IRQ_DISABLED;
722                 desc->depth = 1;
723         }
724         desc->handle_irq = handle;
725         desc->name = name;
726
727         if (handle != handle_bad_irq && is_chained) {
728                 desc->status &= ~IRQ_DISABLED;
729                 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
730                 desc->depth = 0;
731                 desc->irq_data.chip->startup(irq);
732         }
733         raw_spin_unlock_irqrestore(&desc->lock, flags);
734         chip_bus_sync_unlock(desc);
735 }
736 EXPORT_SYMBOL_GPL(__set_irq_handler);
737
738 void
739 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
740                          irq_flow_handler_t handle)
741 {
742         set_irq_chip(irq, chip);
743         __set_irq_handler(irq, handle, 0, NULL);
744 }
745
746 void
747 set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
748                               irq_flow_handler_t handle, const char *name)
749 {
750         set_irq_chip(irq, chip);
751         __set_irq_handler(irq, handle, 0, name);
752 }
753
754 void set_irq_noprobe(unsigned int irq)
755 {
756         struct irq_desc *desc = irq_to_desc(irq);
757         unsigned long flags;
758
759         if (!desc) {
760                 printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
761                 return;
762         }
763
764         raw_spin_lock_irqsave(&desc->lock, flags);
765         desc->status |= IRQ_NOPROBE;
766         raw_spin_unlock_irqrestore(&desc->lock, flags);
767 }
768
769 void set_irq_probe(unsigned int irq)
770 {
771         struct irq_desc *desc = irq_to_desc(irq);
772         unsigned long flags;
773
774         if (!desc) {
775                 printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
776                 return;
777         }
778
779         raw_spin_lock_irqsave(&desc->lock, flags);
780         desc->status &= ~IRQ_NOPROBE;
781         raw_spin_unlock_irqrestore(&desc->lock, flags);
782 }
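/*
 * A sketch of the chained (hard-irq context) demultiplexing variant set up
 * through __set_irq_handler(..., is_chained = 1), here via the
 * set_irq_chained_handler() helper from <linux/irq.h>.  All my_* and
 * MY_GPIO_* names are hypothetical, for illustration only: the chained
 * flow handler runs in hard interrupt context and dispatches each pending
 * child interrupt with generic_handle_irq().
 */
#define MY_GPIO_IRQ_BASE        200     /* hypothetical child irq range */
#define MY_NR_GPIO_IRQS         16

static unsigned long my_gpio_read_pending(void)
{
        /* read the (hypothetical) bank's pending register */
        return 0;
}

static void my_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = desc->irq_data.chip;
        unsigned long pending = my_gpio_read_pending();
        unsigned int bit;

        /* ack the parent before dispatching, if the chip needs it */
        if (chip->ack)
                chip->ack(irq);

        for (bit = 0; bit < MY_NR_GPIO_IRQS; bit++)
                if (pending & (1UL << bit))
                        generic_handle_irq(MY_GPIO_IRQ_BASE + bit);
}

static void my_gpio_irq_setup(unsigned int parent_irq)
{
        unsigned int i;

        for (i = 0; i < MY_NR_GPIO_IRQS; i++)
                set_irq_chip_and_handler(MY_GPIO_IRQ_BASE + i,
                                         &dummy_irq_chip, handle_simple_irq);

        /* marks the parent IRQ_NOREQUEST | IRQ_NOPROBE and starts it up */
        set_irq_chained_handler(parent_irq, my_gpio_demux_handler);
}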