1 /******************************************************************************
2  * evtchn.c
3  * 
4  * Communication via Xen event channels.
5  * 
6  * Copyright (c) 2002-2005, K A Fraser
7  * 
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version 2
10  * as published by the Free Software Foundation; or, when distributed
11  * separately from the Linux kernel or incorporated into other
12  * software packages, subject to the following license:
13  * 
14  * Permission is hereby granted, free of charge, to any person obtaining a copy
15  * of this source file (the "Software"), to deal in the Software without
16  * restriction, including without limitation the rights to use, copy, modify,
17  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18  * and to permit persons to whom the Software is furnished to do so, subject to
19  * the following conditions:
20  * 
21  * The above copyright notice and this permission notice shall be included in
22  * all copies or substantial portions of the Software.
23  * 
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30  * IN THE SOFTWARE.
31  */
32
33 #include <linux/module.h>
34 #include <linux/slab.h>
35 #include <linux/irq.h>
36 #include <linux/interrupt.h>
37 #include <linux/sched.h>
38 #include <linux/kernel_stat.h>
39 #include <linux/ftrace.h>
40 #include <linux/atomic.h>
41 #include <asm/system.h>
42 #include <asm/ptrace.h>
43 #include <asm/sync_bitops.h>
44 #include <xen/evtchn.h>
45 #include <xen/interface/event_channel.h>
46 #include <xen/interface/physdev.h>
47 #include <asm/hypervisor.h>
48 #include <linux/mc146818rtc.h> /* RTC_IRQ */
49 #include "../../../kernel/irq/internals.h" /* IRQS_AUTODETECT, IRQS_PENDING */
50
51 /*
52  * This lock protects updates to the following mapping and reference-count
53  * arrays. The lock does not need to be acquired to read the mapping tables.
54  */
55 static DEFINE_SPINLOCK(irq_mapping_update_lock);
56
57 /* IRQ <-> event-channel mappings. */
58 static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
59         [0 ...  NR_EVENT_CHANNELS-1] = -1 };
60
61 #if defined(CONFIG_SMP) && defined(CONFIG_X86)
62 static struct percpu_irqaction {
63         struct irqaction action; /* must be first */
64         struct percpu_irqaction *next;
65         cpumask_var_t cpus;
66 } *virq_actions[NR_VIRQS];
67 /* IRQ <-> VIRQ mapping. */
68 static DECLARE_BITMAP(virq_per_cpu, NR_VIRQS) __read_mostly;
69 static DEFINE_PER_CPU_READ_MOSTLY(int[NR_VIRQS], virq_to_evtchn);
70 #define BUG_IF_VIRQ_PER_CPU(irq_cfg) \
71         BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_VIRQ \
72                && test_bit(index_from_irq_cfg(irq_cfg), virq_per_cpu))
73 #else
74 #define BUG_IF_VIRQ_PER_CPU(irq_cfg) ((void)0)
75 #define PER_CPU_VIRQ_IRQ
76 #endif
77
78 /* IRQ <-> IPI mapping. */
79 #if defined(CONFIG_SMP) && defined(CONFIG_X86)
80 static int __read_mostly ipi_irq = -1;
81 DEFINE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending);
82 static DEFINE_PER_CPU_READ_MOSTLY(evtchn_port_t, ipi_evtchn);
83 #else
84 #define PER_CPU_IPI_IRQ
85 #endif
86 #if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
87 #define BUG_IF_IPI(irq_cfg) BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_IPI)
88 #else
89 #define BUG_IF_IPI(irq_cfg) ((void)0)
90 #endif
91
92 /* Binding types. */
93 enum {
94         IRQT_UNBOUND,
95         IRQT_PIRQ,
96         IRQT_VIRQ,
97         IRQT_IPI,
98         IRQT_LOCAL_PORT,
99         IRQT_CALLER_PORT,
100         _IRQT_COUNT
101 };
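/*
 * PIRQ: physical (hardware) interrupt; VIRQ: virtual interrupt raised by
 * Xen; IPI: inter-processor interrupt; LOCAL_PORT: an event channel this
 * domain allocated and owns (closed again on unbind); CALLER_PORT: an
 * event channel supplied by the caller (left open on unbind).
 */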
102
103 #define _IRQT_BITS 4
104 #define _EVTCHN_BITS 12
105 #define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)
106
107 /* Convenient shorthand for packed representation of an unbound IRQ. */
108 #define IRQ_UNBOUND     (IRQT_UNBOUND << (32 - _IRQT_BITS))
109
110 static struct irq_cfg _irq_cfg[] = {
111         [0 ...
112 #ifdef CONFIG_SPARSE_IRQ
113                BUILD_BUG_ON_ZERO(PIRQ_BASE) + NR_IRQS_LEGACY
114 #else
115                NR_IRQS
116 #endif
117                        - 1].info = IRQ_UNBOUND
118 };
119
120 static inline struct irq_cfg *__pure irq_cfg(unsigned int irq)
121 {
122 #ifdef CONFIG_SPARSE_IRQ
123         return irq_get_chip_data(irq);
124 #else
125         return irq < NR_IRQS ? _irq_cfg + irq : NULL;
126 #endif
127 }
128
129 static inline struct irq_cfg *__pure irq_data_cfg(struct irq_data *data)
130 {
131         return irq_data_get_irq_chip_data(data);
132 }
133
134 /* Constructor for packed IRQ information. */
135 static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
136 {
137         BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));
138
139         BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
140         BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
141 #if defined(PER_CPU_IPI_IRQ) && defined(NR_IPIS)
142         BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
143 #endif
144         BUG_ON(index >> _INDEX_BITS);
145
146         BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));
147
148         return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
149 }
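/*
 * Layout sketch (editorial, derived from the constants above): with
 * _IRQT_BITS = 4, _INDEX_BITS = 16 and _EVTCHN_BITS = 12, the info word is
 * [type:31..28][index:27..12][evtchn:11..0]. For example,
 * mk_irq_info(IRQT_VIRQ, 3, 42) == (2 << 28) | (3 << 12) | 42 == 0x2000302a.
 */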
150
151 /*
152  * Accessors for packed IRQ information.
153  */
154
155 static inline unsigned int index_from_irq_cfg(const struct irq_cfg *cfg)
156 {
157         return (cfg->info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
158 }
159
160 static inline unsigned int index_from_irq(int irq)
161 {
162         const struct irq_cfg *cfg = irq_cfg(irq);
163
164         return cfg ? index_from_irq_cfg(cfg) : 0;
165 }
166
167 static inline unsigned int type_from_irq_cfg(const struct irq_cfg *cfg)
168 {
169         return cfg->info >> (32 - _IRQT_BITS);
170 }
171
172 static inline unsigned int type_from_irq(int irq)
173 {
174         const struct irq_cfg *cfg = irq_cfg(irq);
175
176         return cfg ? type_from_irq_cfg(cfg) : IRQT_UNBOUND;
177 }
178
179 static inline unsigned int evtchn_from_per_cpu_irq(const struct irq_cfg *cfg,
180                                                    unsigned int cpu)
181 {
182         switch (type_from_irq_cfg(cfg)) {
183 #ifndef PER_CPU_VIRQ_IRQ
184         case IRQT_VIRQ:
185                 return per_cpu(virq_to_evtchn, cpu)[index_from_irq_cfg(cfg)];
186 #endif
187 #ifndef PER_CPU_IPI_IRQ
188         case IRQT_IPI:
189                 return per_cpu(ipi_evtchn, cpu);
190 #endif
191         }
192         BUG();
193         return 0;
194 }
195
196 static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg)
197 {
198         switch (type_from_irq_cfg(cfg)) {
199 #ifndef PER_CPU_VIRQ_IRQ
200         case IRQT_VIRQ:
201 #endif
202 #ifndef PER_CPU_IPI_IRQ
203         case IRQT_IPI:
204 #endif
205                 return evtchn_from_per_cpu_irq(cfg, smp_processor_id());
206         }
207         return cfg->info & ((1U << _EVTCHN_BITS) - 1);
208 }
209
210 static inline unsigned int evtchn_from_irq_data(struct irq_data *data)
211 {
212         const struct irq_cfg *cfg = irq_data_cfg(data);
213
214         return cfg ? evtchn_from_irq_cfg(cfg) : 0;
215 }
216
217 static inline unsigned int evtchn_from_irq(int irq)
218 {
219         struct irq_data *data = irq_get_irq_data(irq);
220
221         return data ? evtchn_from_irq_data(data) : 0;
222 }
223
224 unsigned int irq_from_evtchn(unsigned int port)
225 {
226         return evtchn_to_irq[port];
227 }
228 EXPORT_SYMBOL_GPL(irq_from_evtchn);
229
230 /* IRQ <-> VIRQ mapping. */
231 DEFINE_PER_CPU(int[NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
232
233 #if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
234 /* IRQ <-> IPI mapping. */
235 #ifndef NR_IPIS
236 #define NR_IPIS 1
237 #endif
238 DEFINE_PER_CPU(int[NR_IPIS], ipi_to_irq) = {[0 ... NR_IPIS-1] = -1};
239 #endif
240
241 #ifdef CONFIG_SMP
242
243 #if CONFIG_NR_CPUS <= 256
244 static u8 cpu_evtchn[NR_EVENT_CHANNELS];
245 #else
246 static u16 cpu_evtchn[NR_EVENT_CHANNELS];
247 #endif
248 static DEFINE_PER_CPU(unsigned long[BITS_TO_LONGS(NR_EVENT_CHANNELS)],
249                       cpu_evtchn_mask);
250
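/*
 * A channel is "active" on this CPU when it is pending in the shared info
 * page, routed here via cpu_evtchn_mask, and not masked; active_evtchns()
 * returns one BITS_PER_LONG-wide slice of that intersection.
 */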
251 static inline unsigned long active_evtchns(unsigned int idx)
252 {
253         shared_info_t *sh = HYPERVISOR_shared_info;
254
255         return (sh->evtchn_pending[idx] &
256                 percpu_read(cpu_evtchn_mask[idx]) &
257                 ~sh->evtchn_mask[idx]);
258 }
259
260 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
261 {
262         shared_info_t *s = HYPERVISOR_shared_info;
263         int irq = evtchn_to_irq[chn];
264
265         BUG_ON(!test_bit(chn, s->evtchn_mask));
266
267         if (irq != -1) {
268                 struct irq_data *data = irq_get_irq_data(irq);
269
270                 if (!irqd_is_per_cpu(data))
271                         cpumask_copy(data->affinity, cpumask_of(cpu));
272                 else
273                         cpumask_set_cpu(cpu, data->affinity);
274         }
275
276         clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_evtchn[chn]));
277         set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
278         cpu_evtchn[chn] = cpu;
279 }
280
281 static void init_evtchn_cpu_bindings(void)
282 {
283         int i;
284
285         /* By default all event channels notify CPU#0. */
286         for (i = 0; i < nr_irqs; i++) {
287                 struct irq_data *data = irq_get_irq_data(i);
288
289                 if (data)
290                         cpumask_copy(data->affinity, cpumask_of(0));
291         }
292
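        /*
         * memset() with -!i fills CPU 0's mask with 0xff bytes (so every
         * channel is initially routed there) and all other CPUs' masks
         * with zero.
         */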
293         memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
294         for_each_possible_cpu(i)
295                 memset(per_cpu(cpu_evtchn_mask, i), -!i,
296                        sizeof(per_cpu(cpu_evtchn_mask, i)));
297 }
298
299 static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
300 {
301         return cpu_evtchn[evtchn];
302 }
303
304 #else
305
306 static inline unsigned long active_evtchns(unsigned int idx)
307 {
308         shared_info_t *sh = HYPERVISOR_shared_info;
309
310         return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
311 }
312
313 static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
314 {
315 }
316
317 static void init_evtchn_cpu_bindings(void)
318 {
319 }
320
321 static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
322 {
323         return 0;
324 }
325
326 #endif
327
328 #ifdef CONFIG_X86
329 void __init xen_init_IRQ(void);
330 void __init init_IRQ(void)
331 {
332         irq_ctx_init(0);
333         xen_init_IRQ();
334 }
335 #include <asm/idle.h>
336 #endif
337
338 /* Xen will never allocate port zero for any purpose. */
339 #define VALID_EVTCHN(chn)       ((chn) != 0)
340
341 /*
342  * Force a proper event-channel callback from Xen after clearing the
343  * callback mask. We do this in a very simple manner, by making a call
344  * down into Xen. The pending flag will be checked by Xen on return.
345  */
346 void force_evtchn_callback(void)
347 {
348         VOID(HYPERVISOR_xen_version(0, NULL));
349 }
350 /* Not a GPL-only export: used in ubiquitous macros, where that would be too restrictive. */
351 EXPORT_SYMBOL(force_evtchn_callback);
352
353 static DEFINE_PER_CPU(unsigned int, upcall_count);
354 static DEFINE_PER_CPU(unsigned int, current_l1i);
355 static DEFINE_PER_CPU(unsigned int, current_l2i);
356
357 #ifndef vcpu_info_xchg
358 #define vcpu_info_xchg(fld, val) xchg(&current_vcpu_info()->fld, val)
359 #endif
360
361 /* NB. Interrupts are disabled on entry. */
362 asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs)
363 {
364         unsigned long       l1, l2;
365         unsigned long       masked_l1, masked_l2;
366         unsigned int        l1i, l2i, start_l1i, start_l2i, port, i;
367         int                 irq;
368         struct pt_regs     *old_regs;
369
370         /* Nested invocations bail immediately. */
371         if (unlikely(this_cpu_inc_return(upcall_count) != 1))
372                 return;
373
374         old_regs = set_irq_regs(regs);
375         xen_spin_irq_enter();
376         irq_enter();
377         exit_idle();
378
379         do {
380                 /* Avoid a callback storm when we reenable delivery. */
381                 vcpu_info_write(evtchn_upcall_pending, 0);
382
383 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
384                 /* Clear master flag /before/ clearing selector flag. */
385                 wmb();
386 #else
387                 barrier();
388 #endif
389
390 #ifndef CONFIG_NO_HZ
391                 /*
392                  * Handle timer interrupts before all others, so that all
393                  * hardirq handlers see an up-to-date system time even if we
394                  * have just woken from a long idle period.
395                  */
396 #ifdef PER_CPU_VIRQ_IRQ
397                 if ((irq = percpu_read(virq_to_irq[VIRQ_TIMER])) != -1) {
398                         port = evtchn_from_irq(irq);
399 #else
400                 port = percpu_read(virq_to_evtchn[VIRQ_TIMER]);
401                 if (VALID_EVTCHN(port)) {
402 #endif
403                         l1i = port / BITS_PER_LONG;
404                         l2i = port % BITS_PER_LONG;
405                         if (active_evtchns(l1i) & (1ul<<l2i)) {
406                                 mask_evtchn(port);
407                                 clear_evtchn(port);
408 #ifndef PER_CPU_VIRQ_IRQ
409                                 irq = evtchn_to_irq[port];
410                                 BUG_ON(irq == -1);
411 #endif
412                                 if (!handle_irq(irq, regs))
413                                         BUG();
414                         }
415                 }
416 #endif /* CONFIG_NO_HZ */
417
418                 l1 = vcpu_info_xchg(evtchn_pending_sel, 0);
419
420                 start_l1i = l1i = percpu_read(current_l1i);
421                 start_l2i = percpu_read(current_l2i);
422
423                 for (i = 0; l1 != 0; i++) {
424                         masked_l1 = l1 & ((~0UL) << l1i);
425                         /* If we masked out all events, wrap to beginning. */
426                         if (masked_l1 == 0) {
427                                 l1i = l2i = 0;
428                                 continue;
429                         }
430                         l1i = __ffs(masked_l1);
431
432                         l2 = active_evtchns(l1i);
433                         l2i = 0; /* usually scan entire word from start */
434                         if (l1i == start_l1i) {
435                                 /* We scan the starting word in two parts. */
436                                 if (i == 0)
437                                         /* 1st time: start in the middle */
438                                         l2i = start_l2i;
439                                 else
440                                         /* 2nd time: mask bits done already */
441                                         l2 &= (1ul << start_l2i) - 1;
442                         }
443
444                         do {
445                                 bool handled = false;
446
447                                 masked_l2 = l2 & ((~0UL) << l2i);
448                                 if (masked_l2 == 0)
449                                         break;
450                                 l2i = __ffs(masked_l2);
451
452                                 /* process port */
453                                 port = (l1i * BITS_PER_LONG) + l2i;
454                                 mask_evtchn(port);
455                                 if ((irq = evtchn_to_irq[port]) != -1) {
456 #ifndef PER_CPU_IPI_IRQ
457                                         if (port != percpu_read(ipi_evtchn))
458 #endif
459                                                 clear_evtchn(port);
460                                         handled = handle_irq(irq, regs);
461                                 }
462                                 if (!handled && printk_ratelimit())
463                                         pr_emerg("No handler for irq %d"
464                                                  " (port %u)\n",
465                                                  irq, port);
466
467                                 l2i = (l2i + 1) % BITS_PER_LONG;
468
469                                 /* Next caller starts at last processed + 1 */
470                                 percpu_write(current_l1i,
471                                         l2i ? l1i : (l1i + 1) % BITS_PER_LONG);
472                                 percpu_write(current_l2i, l2i);
473
474                         } while (l2i != 0);
475
476                         /* Scan start_l1i twice; all others once. */
477                         if ((l1i != start_l1i) || (i != 0))
478                                 l1 &= ~(1UL << l1i);
479
480                         l1i = (l1i + 1) % BITS_PER_LONG;
481                 }
482
483                 /* If there were nested callbacks then we have more to do. */
484         } while (unlikely(this_cpu_xchg(upcall_count, 1) != 1));
485
486         this_cpu_write(upcall_count, 0);
487         irq_exit();
488         xen_spin_irq_exit();
489         set_irq_regs(old_regs);
490 }
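/*
 * Editorial summary of the scan above: the selector word (l1) names which
 * BITS_PER_LONG-wide groups of ports have pending events, and
 * active_evtchns() (l2) yields the pending ports within a group. The scan
 * resumes at current_l1i/current_l2i, one past the port handled last time,
 * so low-numbered ports cannot starve higher ones; only the starting word
 * is visited twice, to pick up the bits skipped on the first pass.
 */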
491
492 static int find_unbound_irq(unsigned int node, struct irq_cfg **pcfg,
493                             struct irq_chip *chip, bool percpu)
494 {
495         static int warned;
496         int irq;
497
498         for (irq = DYNIRQ_BASE; irq < nr_irqs; irq++) {
499                 struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
500                 struct irq_data *data = irq_get_irq_data(irq);
501
502                 if (unlikely(!cfg))
503                         return -ENOMEM;
504                 if (data->chip != &no_irq_chip &&
505                     data->chip != chip)
506                         continue;
507
508                 if (!cfg->bindcount) {
509                         irq_flow_handler_t handle;
510                         const char *name;
511
512                         *pcfg = cfg;
513                         irq_set_noprobe(irq);
514                         if (!percpu) {
515                                 handle = handle_fasteoi_irq;
516                                 name = "fasteoi";
517                         } else {
518                                 handle = handle_percpu_irq;
519                                 name = "percpu";
520                         }
521                         irq_set_chip_and_handler_name(irq, chip,
522                                                       handle, name);
523                         return irq;
524                 }
525         }
526
527         if (!warned) {
528                 warned = 1;
529                 pr_warning("No available IRQ to bind to: "
530                            "increase NR_DYNIRQS.\n");
531         }
532
533         return -ENOSPC;
534 }
535
536 static struct irq_chip dynirq_chip;
537
538 static int bind_caller_port_to_irq(unsigned int caller_port)
539 {
540         struct irq_cfg *cfg;
541         int irq;
542
543         spin_lock(&irq_mapping_update_lock);
544
545         if ((irq = evtchn_to_irq[caller_port]) == -1) {
546                 if ((irq = find_unbound_irq(numa_node_id(), &cfg,
547                                             &dynirq_chip, false)) < 0)
548                         goto out;
549
550                 evtchn_to_irq[caller_port] = irq;
551                 cfg->info = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
552         } else
553                 cfg = irq_cfg(irq);
554
555         cfg->bindcount++;
556
557  out:
558         spin_unlock(&irq_mapping_update_lock);
559         return irq;
560 }
561
562 static int bind_local_port_to_irq(unsigned int local_port)
563 {
564         struct irq_cfg *cfg;
565         int irq;
566
567         spin_lock(&irq_mapping_update_lock);
568
569         BUG_ON(evtchn_to_irq[local_port] != -1);
570
571         if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip,
572                                     false)) < 0) {
573                 if (close_evtchn(local_port))
574                         BUG();
575                 goto out;
576         }
577
578         evtchn_to_irq[local_port] = irq;
579         cfg->info = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
580         cfg->bindcount++;
581
582  out:
583         spin_unlock(&irq_mapping_update_lock);
584         return irq;
585 }
586
587 static int bind_listening_port_to_irq(unsigned int remote_domain)
588 {
589         struct evtchn_alloc_unbound alloc_unbound;
590         int err;
591
592         alloc_unbound.dom        = DOMID_SELF;
593         alloc_unbound.remote_dom = remote_domain;
594
595         err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
596                                           &alloc_unbound);
597
598         return err ? : bind_local_port_to_irq(alloc_unbound.port);
599 }
600
601 static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
602                                           unsigned int remote_port)
603 {
604         struct evtchn_bind_interdomain bind_interdomain;
605         int err;
606
607         bind_interdomain.remote_dom  = remote_domain;
608         bind_interdomain.remote_port = remote_port;
609
610         err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
611                                           &bind_interdomain);
612
613         return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
614 }
615
616 static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
617 {
618         struct evtchn_bind_virq bind_virq;
619         struct irq_cfg *cfg;
620         int evtchn, irq;
621
622         spin_lock(&irq_mapping_update_lock);
623
624         if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
625                 if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
626                                             &dynirq_chip, false)) < 0)
627                         goto out;
628
629                 bind_virq.virq = virq;
630                 bind_virq.vcpu = cpu;
631                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
632                                                 &bind_virq) != 0)
633                         BUG();
634                 evtchn = bind_virq.port;
635
636                 evtchn_to_irq[evtchn] = irq;
637 #ifndef PER_CPU_VIRQ_IRQ
638                 {
639                         unsigned int cpu;
640
641                         for_each_possible_cpu(cpu)
642                                 per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
643                 }
644 #endif
645                 cfg->info = mk_irq_info(IRQT_VIRQ, virq, evtchn);
646
647                 per_cpu(virq_to_irq, cpu)[virq] = irq;
648
649                 bind_evtchn_to_cpu(evtchn, cpu);
650         } else
651                 cfg = irq_cfg(irq);
652
653         cfg->bindcount++;
654
655  out:
656         spin_unlock(&irq_mapping_update_lock);
657         return irq;
658 }
659
660 #if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
661 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
662 {
663         struct evtchn_bind_ipi bind_ipi;
664         struct irq_cfg *cfg;
665         int evtchn, irq;
666
667         spin_lock(&irq_mapping_update_lock);
668
669         if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
670                 if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
671                                             &dynirq_chip, false)) < 0)
672                         goto out;
673
674                 bind_ipi.vcpu = cpu;
675                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
676                                                 &bind_ipi) != 0)
677                         BUG();
678                 evtchn = bind_ipi.port;
679
680                 evtchn_to_irq[evtchn] = irq;
681                 cfg->info = mk_irq_info(IRQT_IPI, ipi, evtchn);
682
683                 per_cpu(ipi_to_irq, cpu)[ipi] = irq;
684
685                 bind_evtchn_to_cpu(evtchn, cpu);
686         } else
687                 cfg = irq_cfg(irq);
688
689         cfg->bindcount++;
690
691  out:
692         spin_unlock(&irq_mapping_update_lock);
693         return irq;
694 }
695 #endif
696
697 static void unbind_from_irq(unsigned int irq)
698 {
699         struct irq_cfg *cfg = irq_cfg(irq);
700         unsigned int evtchn = evtchn_from_irq_cfg(cfg);
701
702         BUG_IF_VIRQ_PER_CPU(cfg);
703         BUG_IF_IPI(cfg);
704
705         spin_lock(&irq_mapping_update_lock);
706
707         if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) {
708                 if ((type_from_irq_cfg(cfg) != IRQT_CALLER_PORT) &&
709                     close_evtchn(evtchn))
710                         BUG();
711
712                 switch (type_from_irq_cfg(cfg)) {
713                 case IRQT_VIRQ:
714                         per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
715                                 [index_from_irq_cfg(cfg)] = -1;
716 #ifndef PER_CPU_VIRQ_IRQ
717                         {
718                                 unsigned int cpu;
719
720                                 for_each_possible_cpu(cpu)
721                                         per_cpu(virq_to_evtchn, cpu)
722                                                 [index_from_irq_cfg(cfg)] = 0;
723                         }
724 #endif
725                         break;
726 #if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
727                 case IRQT_IPI:
728                         per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
729                                 [index_from_irq_cfg(cfg)] = -1;
730                         break;
731 #endif
732                 default:
733                         break;
734                 }
735
736                 /* Closed ports are implicitly re-bound to VCPU0. */
737                 bind_evtchn_to_cpu(evtchn, 0);
738
739                 evtchn_to_irq[evtchn] = -1;
740                 cfg->info = IRQ_UNBOUND;
741
742                 dynamic_irq_cleanup(irq);
743         }
744
745         spin_unlock(&irq_mapping_update_lock);
746 }
747
748 #if !defined(PER_CPU_IPI_IRQ) || !defined(PER_CPU_VIRQ_IRQ)
749 static inline struct percpu_irqaction *alloc_percpu_irqaction(gfp_t gfp)
750 {
751         struct percpu_irqaction *new = kzalloc(sizeof(*new), GFP_ATOMIC);
752
753         if (new && !zalloc_cpumask_var(&new->cpus, gfp)) {
754                 kfree(new);
755                 new = NULL;
756         }
757         return new;
758 }
759
760 static inline void free_percpu_irqaction(struct percpu_irqaction *action)
761 {
762         if (!action)
763                 return;
764         free_cpumask_var(action->cpus);
765         kfree(action);
766 }
767
768 void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu,
769                              struct irqaction *action)
770 {
771         struct evtchn_close close;
772         struct irq_data *data = irq_get_irq_data(irq);
773         struct irq_cfg *cfg = irq_data_cfg(data);
774         unsigned int evtchn = evtchn_from_per_cpu_irq(cfg, cpu);
775         struct percpu_irqaction *free_action = NULL;
776
777         spin_lock(&irq_mapping_update_lock);
778
779         if (VALID_EVTCHN(evtchn)) {
780                 mask_evtchn(evtchn);
781
782                 BUG_ON(cfg->bindcount <= 1);
783                 cfg->bindcount--;
784
785 #ifndef PER_CPU_VIRQ_IRQ
786                 if (type_from_irq_cfg(cfg) == IRQT_VIRQ) {
787                         unsigned int virq = index_from_irq_cfg(cfg);
788                         struct percpu_irqaction *cur, *prev = NULL;
789
790                         cur = virq_actions[virq];
791                         while (cur) {
792                                 if (cur->action.dev_id == action) {
793                                         cpumask_clear_cpu(cpu, cur->cpus);
794                                         if (cpumask_empty(cur->cpus)) {
795                                                 WARN_ON(free_action);
796                                                 if (prev)
797                                                         prev->next = cur->next;
798                                                 else
799                                                         virq_actions[virq]
800                                                                 = cur->next;
801                                                 free_action = cur;
802                                         }
803                                 } else if (cpumask_test_cpu(cpu, cur->cpus))
804                                         evtchn = 0;
805                                 cur = (prev = cur)->next;
806                         }
807                         if (!VALID_EVTCHN(evtchn))
808                                 goto done;
809                 }
810 #endif
811
812                 cpumask_clear_cpu(cpu, data->affinity);
813
814                 close.port = evtchn;
815                 if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
816                         BUG();
817
818                 switch (type_from_irq_cfg(cfg)) {
819 #ifndef PER_CPU_VIRQ_IRQ
820                 case IRQT_VIRQ:
821                         per_cpu(virq_to_evtchn, cpu)
822                                 [index_from_irq_cfg(cfg)] = 0;
823                         break;
824 #endif
825 #ifndef PER_CPU_IPI_IRQ
826                 case IRQT_IPI:
827                         per_cpu(ipi_evtchn, cpu) = 0;
828                         break;
829 #endif
830                 default:
831                         BUG();
832                         break;
833                 }
834
835                 /* Closed ports are implicitly re-bound to VCPU0. */
836                 bind_evtchn_to_cpu(evtchn, 0);
837
838                 evtchn_to_irq[evtchn] = -1;
839         }
840
841 #ifndef PER_CPU_VIRQ_IRQ
842 done:
843 #endif
844         spin_unlock(&irq_mapping_update_lock);
845
846         if (free_action) {
847                 cpumask_t *cpus = free_action->cpus;
848
849                 free_irq(irq, free_action->action.dev_id);
850                 free_cpumask_var(cpus);
851         }
852 }
853 EXPORT_SYMBOL_GPL(unbind_from_per_cpu_irq);
854 #endif /* !PER_CPU_IPI_IRQ || !PER_CPU_VIRQ_IRQ */
855
856 int bind_caller_port_to_irqhandler(
857         unsigned int caller_port,
858         irq_handler_t handler,
859         unsigned long irqflags,
860         const char *devname,
861         void *dev_id)
862 {
863         int irq, retval;
864
865         irq = bind_caller_port_to_irq(caller_port);
866         if (irq < 0)
867                 return irq;
868
869         retval = request_irq(irq, handler, irqflags, devname, dev_id);
870         if (retval != 0) {
871                 unbind_from_irq(irq);
872                 return retval;
873         }
874
875         return irq;
876 }
877 EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);
878
879 int bind_listening_port_to_irqhandler(
880         unsigned int remote_domain,
881         irq_handler_t handler,
882         unsigned long irqflags,
883         const char *devname,
884         void *dev_id)
885 {
886         int irq, retval;
887
888         irq = bind_listening_port_to_irq(remote_domain);
889         if (irq < 0)
890                 return irq;
891
892         retval = request_irq(irq, handler, irqflags, devname, dev_id);
893         if (retval != 0) {
894                 unbind_from_irq(irq);
895                 return retval;
896         }
897
898         return irq;
899 }
900 EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);
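/*
 * Usage sketch (illustrative only; demo_interrupt, remote_domid and dev
 * are assumed names, not part of this file):
 *
 *	static irqreturn_t demo_interrupt(int irq, void *dev_id)
 *	{
 *		return IRQ_HANDLED;
 *	}
 *
 *	int irq = bind_listening_port_to_irqhandler(remote_domid,
 *						    demo_interrupt, 0,
 *						    "demo", dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, dev);
 */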
901
902 int bind_interdomain_evtchn_to_irqhandler(
903         unsigned int remote_domain,
904         unsigned int remote_port,
905         irq_handler_t handler,
906         unsigned long irqflags,
907         const char *devname,
908         void *dev_id)
909 {
910         int irq, retval;
911
912         irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
913         if (irq < 0)
914                 return irq;
915
916         retval = request_irq(irq, handler, irqflags, devname, dev_id);
917         if (retval != 0) {
918                 unbind_from_irq(irq);
919                 return retval;
920         }
921
922         return irq;
923 }
924 EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
925
926 int bind_virq_to_irqhandler(
927         unsigned int virq,
928         unsigned int cpu,
929         irq_handler_t handler,
930         unsigned long irqflags,
931         const char *devname,
932         void *dev_id)
933 {
934         int irq, retval;
935
936 #ifndef PER_CPU_VIRQ_IRQ
937         BUG_ON(test_bit(virq, virq_per_cpu));
938 #endif
939
940         irq = bind_virq_to_irq(virq, cpu);
941         if (irq < 0)
942                 return irq;
943
944         retval = request_irq(irq, handler, irqflags, devname, dev_id);
945         if (retval != 0) {
946                 unbind_from_irq(irq);
947                 return retval;
948         }
949
950         return irq;
951 }
952 EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
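/*
 * Usage sketch for VIRQs (illustrative; debug_interrupt is an assumed
 * name): bind_virq_to_irqhandler(VIRQ_DEBUG, 0, debug_interrupt, 0,
 * "debug", NULL) delivers VCPU0's VIRQ_DEBUG notifications to
 * debug_interrupt(); tear the binding down with unbind_from_irqhandler().
 */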
953
954 #ifdef CONFIG_SMP
955 #ifndef PER_CPU_VIRQ_IRQ
956 int bind_virq_to_irqaction(
957         unsigned int virq,
958         unsigned int cpu,
959         struct irqaction *action)
960 {
961         struct evtchn_bind_virq bind_virq;
962         struct irq_cfg *cfg;
963         unsigned int evtchn;
964         int irq, retval = 0;
965         struct percpu_irqaction *cur = NULL, *new;
966
967         BUG_ON(!test_bit(virq, virq_per_cpu));
968
969         if (action->dev_id)
970                 return -EINVAL;
971
972         new = alloc_percpu_irqaction(GFP_ATOMIC);
973         if (new) {
974                 new->action = *action;
975                 new->action.dev_id = action;
976         }
977
978         spin_lock(&irq_mapping_update_lock);
979
980         for (cur = virq_actions[virq]; cur; cur = cur->next)
981                 if (cur->action.dev_id == action)
982                         break;
983         if (!cur) {
984                 if (!new) {
985                         spin_unlock(&irq_mapping_update_lock);
986                         return -ENOMEM;
987                 }
988                 new->next = virq_actions[virq];
989                 virq_actions[virq] = cur = new;
990                 new = NULL;
991                 retval = 1;
992         }
993         cpumask_set_cpu(cpu, cur->cpus);
994         action = &cur->action;
995
996         if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
997                 unsigned int nr;
998
999                 BUG_ON(!retval);
1000
1001                 if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
1002                                             &dynirq_chip, true)) < 0) {
1003                         virq_actions[virq] = cur->next;
1004                         spin_unlock(&irq_mapping_update_lock);
1005                         free_percpu_irqaction(new);
1006                         return irq;
1007                 }
1008
1009                 /* Extra reference so count will never drop to zero. */
1010                 cfg->bindcount++;
1011
1012                 for_each_possible_cpu(nr)
1013                         per_cpu(virq_to_irq, nr)[virq] = irq;
1014                 cfg->info = mk_irq_info(IRQT_VIRQ, virq, 0);
1015         } else
1016                 cfg = irq_cfg(irq);
1017
1018         evtchn = per_cpu(virq_to_evtchn, cpu)[virq];
1019         if (!VALID_EVTCHN(evtchn)) {
1020                 bind_virq.virq = virq;
1021                 bind_virq.vcpu = cpu;
1022                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1023                                                 &bind_virq) != 0)
1024                         BUG();
1025                 evtchn = bind_virq.port;
1026                 evtchn_to_irq[evtchn] = irq;
1027                 per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
1028
1029                 bind_evtchn_to_cpu(evtchn, cpu);
1030         }
1031
1032         cfg->bindcount++;
1033
1034         spin_unlock(&irq_mapping_update_lock);
1035
1036         free_percpu_irqaction(new);
1037
1038         if (retval == 0) {
1039                 unsigned long flags;
1040
1041                 local_irq_save(flags);
1042                 unmask_evtchn(evtchn);
1043                 local_irq_restore(flags);
1044         } else {
1045                 action->flags |= IRQF_PERCPU;
1046                 retval = setup_irq(irq, action);
1047                 if (retval) {
1048                         unbind_from_per_cpu_irq(irq, cpu, action);
1049                         BUG_ON(retval > 0);
1050                         irq = retval;
1051                 }
1052         }
1053
1054         return irq;
1055 }
1056 EXPORT_SYMBOL_GPL(bind_virq_to_irqaction);
1057 #endif
1058
1059 #ifdef PER_CPU_IPI_IRQ
1060 int bind_ipi_to_irqhandler(
1061         unsigned int ipi,
1062         unsigned int cpu,
1063         irq_handler_t handler,
1064         unsigned long irqflags,
1065         const char *devname,
1066         void *dev_id)
1067 {
1068         int irq, retval;
1069
1070         irq = bind_ipi_to_irq(ipi, cpu);
1071         if (irq < 0)
1072                 return irq;
1073
1074         retval = request_irq(irq, handler, irqflags | IRQF_NO_SUSPEND,
1075                              devname, dev_id);
1076         if (retval != 0) {
1077                 unbind_from_irq(irq);
1078                 return retval;
1079         }
1080
1081         return irq;
1082 }
1083 #else
1084 int __cpuinit bind_ipi_to_irqaction(
1085         unsigned int cpu,
1086         struct irqaction *action)
1087 {
1088         struct evtchn_bind_ipi bind_ipi;
1089         struct irq_cfg *cfg;
1090         unsigned int evtchn;
1091         int retval = 0;
1092
1093         spin_lock(&irq_mapping_update_lock);
1094
1095         if (VALID_EVTCHN(per_cpu(ipi_evtchn, cpu))) {
1096                 spin_unlock(&irq_mapping_update_lock);
1097                 return -EBUSY;
1098         }
1099
1100         if (ipi_irq < 0) {
1101                 if ((ipi_irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
1102                                                 &dynirq_chip, true)) < 0) {
1103                         spin_unlock(&irq_mapping_update_lock);
1104                         return ipi_irq;
1105                 }
1106
1107                 /* Extra reference so count will never drop to zero. */
1108                 cfg->bindcount++;
1109
1110                 cfg->info = mk_irq_info(IRQT_IPI, 0, 0);
1111                 retval = 1;
1112         } else
1113                 cfg = irq_cfg(ipi_irq);
1114
1115         bind_ipi.vcpu = cpu;
1116         if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi))
1117                 BUG();
1118
1119         evtchn = bind_ipi.port;
1120         evtchn_to_irq[evtchn] = ipi_irq;
1121         per_cpu(ipi_evtchn, cpu) = evtchn;
1122
1123         bind_evtchn_to_cpu(evtchn, cpu);
1124
1125         cfg->bindcount++;
1126
1127         spin_unlock(&irq_mapping_update_lock);
1128
1129         if (retval == 0) {
1130                 unsigned long flags;
1131
1132                 local_irq_save(flags);
1133                 unmask_evtchn(evtchn);
1134                 local_irq_restore(flags);
1135         } else {
1136                 action->flags |= IRQF_PERCPU | IRQF_NO_SUSPEND;
1137                 retval = setup_irq(ipi_irq, action);
1138                 if (retval) {
1139                         unbind_from_per_cpu_irq(ipi_irq, cpu, NULL);
1140                         BUG_ON(retval > 0);
1141                         ipi_irq = retval;
1142                 }
1143         }
1144
1145         return ipi_irq;
1146 }
1147 #endif /* PER_CPU_IPI_IRQ */
1148 #endif /* CONFIG_SMP */
1149
1150 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
1151 {
1152         free_irq(irq, dev_id);
1153         unbind_from_irq(irq);
1154 }
1155 EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
1156
1157 #ifdef CONFIG_SMP
1158 static int set_affinity_irq(struct irq_data *data,
1159                             const struct cpumask *dest, bool force)
1160 {
1161         const struct irq_cfg *cfg = irq_data_cfg(data);
1162         unsigned int port = evtchn_from_irq_cfg(cfg);
1163         unsigned int cpu = cpumask_any(dest);
1164         struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
1165         bool masked;
1166         int rc;
1167
1168         BUG_IF_VIRQ_PER_CPU(cfg);
1169         BUG_IF_IPI(cfg);
1170
1171         if (!VALID_EVTCHN(port))
1172                 return -ENXIO;
1173
1174         masked = test_and_set_evtchn_mask(port);
1175         rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv);
1176         if (rc == 0) {
1177                 bind_evtchn_to_cpu(port, cpu);
1178                 rc = evtchn_to_irq[port] != -1 ? IRQ_SET_MASK_OK_NOCOPY
1179                                                : IRQ_SET_MASK_OK;
1180         }
1181         if (!masked)
1182                 unmask_evtchn(port);
1183
1184         return rc;
1185 }
1186 #endif
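/*
 * Note on set_affinity_irq() above: the port is masked across the
 * EVTCHNOP_bind_vcpu hypercall, so an event arriving while the binding
 * changes is merely latched as pending; the mask is lifted again only if
 * the port was unmasked on entry.
 */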
1187
1188 int resend_irq_on_evtchn(struct irq_data *data)
1189 {
1190         unsigned int evtchn = evtchn_from_irq_data(data);
1191         bool masked;
1192
1193         if (!VALID_EVTCHN(evtchn))
1194                 return 1;
1195
1196         masked = test_and_set_evtchn_mask(evtchn);
1197         set_evtchn(evtchn);
1198         if (!masked)
1199                 unmask_evtchn(evtchn);
1200
1201         return 1;
1202 }
1203
1204 /*
1205  * Interface to generic handling in irq.c
1206  */
1207
1208 static void unmask_dynirq(struct irq_data *data)
1209 {
1210         unsigned int evtchn = evtchn_from_irq_data(data);
1211
1212         if (VALID_EVTCHN(evtchn))
1213                 unmask_evtchn(evtchn);
1214 }
1215
1216 static void mask_dynirq(struct irq_data *data)
1217 {
1218         unsigned int evtchn = evtchn_from_irq_data(data);
1219
1220         if (VALID_EVTCHN(evtchn))
1221                 mask_evtchn(evtchn);
1222 }
1223
1224 static unsigned int startup_dynirq(struct irq_data *data)
1225 {
1226         unmask_dynirq(data);
1227         return 0;
1228 }
1229
1230 #define shutdown_dynirq mask_dynirq
1231
1232 static void end_dynirq(struct irq_data *data)
1233 {
1234         if (!irqd_irq_disabled(data)) {
1235                 irq_move_masked_irq(data);
1236                 unmask_dynirq(data);
1237         }
1238 }
1239
1240 static struct irq_chip dynirq_chip = {
1241         .name             = "Dynamic",
1242         .irq_startup      = startup_dynirq,
1243         .irq_shutdown     = shutdown_dynirq,
1244         .irq_enable       = unmask_dynirq,
1245         .irq_disable      = mask_dynirq,
1246         .irq_mask         = mask_dynirq,
1247         .irq_unmask       = unmask_dynirq,
1248         .irq_eoi          = end_dynirq,
1249 #ifdef CONFIG_SMP
1250         .irq_set_affinity = set_affinity_irq,
1251 #endif
1252         .irq_retrigger    = resend_irq_on_evtchn,
1253 };
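/*
 * Flow note: dynamic IRQs normally use handle_fasteoi_irq (per-CPU
 * bindings use handle_percpu_irq; see find_unbound_irq()), so a port
 * masked in evtchn_do_upcall() stays masked until end_dynirq() runs,
 * which also performs any deferred affinity move before unmasking.
 */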
1254
1255 /* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
1256 static bool pirq_eoi_does_unmask;
1257 static unsigned long *pirq_needs_eoi;
1258 static DECLARE_BITMAP(probing_pirq, NR_PIRQS);
1259
1260 static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
1261 {
1262         struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
1263
1264         if (pirq_eoi_does_unmask) {
1265                 if (test_bit(eoi.irq, pirq_needs_eoi))
1266                         VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
1267                 else
1268                         unmask_evtchn(evtchn);
1269         } else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
1270                 if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
1271                         struct evtchn_unmask unmask = { .port = evtchn };
1272                         struct multicall_entry mcl[2];
1273
1274                         mcl[0].op = __HYPERVISOR_event_channel_op;
1275                         mcl[0].args[0] = EVTCHNOP_unmask;
1276                         mcl[0].args[1] = (unsigned long)&unmask;
1277                         mcl[1].op = __HYPERVISOR_physdev_op;
1278                         mcl[1].args[0] = PHYSDEVOP_eoi;
1279                         mcl[1].args[1] = (unsigned long)&eoi;
1280
1281                         if (HYPERVISOR_multicall(mcl, 2))
1282                                 BUG();
1283                 } else {
1284                         unmask_evtchn(evtchn);
1285                         VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
1286                 }
1287         } else
1288                 unmask_evtchn(evtchn);
1289 }
1290
1291 static inline void pirq_query_unmask(int irq)
1292 {
1293         struct physdev_irq_status_query irq_status;
1294
1295         if (pirq_eoi_does_unmask)
1296                 return;
1297         irq_status.irq = evtchn_get_xen_pirq(irq);
1298         if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
1299                 irq_status.flags = 0;
1300         clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
1301         if (irq_status.flags & XENIRQSTAT_needs_eoi)
1302                 set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
1303 }
1304
1305 static int set_type_pirq(struct irq_data *data, unsigned int type)
1306 {
1307         if (type != IRQ_TYPE_PROBE)
1308                 return -EINVAL;
1309         set_bit(data->irq - PIRQ_BASE, probing_pirq);
1310         return 0;
1311 }
1312
1313 static void enable_pirq(struct irq_data *data)
1314 {
1315         struct evtchn_bind_pirq bind_pirq;
1316         struct irq_cfg *cfg = irq_data_cfg(data);
1317         unsigned int evtchn = evtchn_from_irq_cfg(cfg);
1318         unsigned int irq = data->irq, pirq = irq - PIRQ_BASE;
1319
1320         if (VALID_EVTCHN(evtchn)) {
1321                 if (pirq < nr_pirqs)
1322                         clear_bit(pirq, probing_pirq);
1323                 goto out;
1324         }
1325
1326         bind_pirq.pirq = evtchn_get_xen_pirq(irq);
1327         /* NB. We are happy to share unless we are probing. */
1328         bind_pirq.flags = (pirq < nr_pirqs
1329                            && test_and_clear_bit(pirq, probing_pirq))
1330                           || (irq_to_desc(irq)->istate & IRQS_AUTODETECT)
1331                           ? 0 : BIND_PIRQ__WILL_SHARE;
1332         if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
1333                 if (bind_pirq.flags)
1334                         pr_info("Failed to obtain physical IRQ %d\n", irq);
1335                 return;
1336         }
1337         evtchn = bind_pirq.port;
1338
1339         pirq_query_unmask(irq);
1340
1341         evtchn_to_irq[evtchn] = irq;
1342         bind_evtchn_to_cpu(evtchn, 0);
1343         cfg->info = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
1344
1345  out:
1346         pirq_unmask_and_notify(evtchn, irq);
1347 }
1348
1349 #define disable_pirq mask_pirq
1350
1351 static unsigned int startup_pirq(struct irq_data *data)
1352 {
1353         enable_pirq(data);
1354         return 0;
1355 }
1356
1357 static void shutdown_pirq(struct irq_data *data)
1358 {
1359         struct irq_cfg *cfg = irq_data_cfg(data);
1360         unsigned int evtchn = evtchn_from_irq_cfg(cfg);
1361
1362         if (!VALID_EVTCHN(evtchn))
1363                 return;
1364
1365         mask_evtchn(evtchn);
1366
1367         if (close_evtchn(evtchn))
1368                 BUG();
1369
1370         bind_evtchn_to_cpu(evtchn, 0);
1371         evtchn_to_irq[evtchn] = -1;
1372         cfg->info = mk_irq_info(IRQT_PIRQ, index_from_irq_cfg(cfg), 0);
1373 }
1374
1375 static void unmask_pirq(struct irq_data *data)
1376 {
1377         unsigned int evtchn = evtchn_from_irq_data(data);
1378
1379         if (VALID_EVTCHN(evtchn))
1380                 pirq_unmask_and_notify(evtchn, data->irq);
1381 }
1382
1383 #define mask_pirq mask_dynirq
1384
1385 static void end_pirq(struct irq_data *data)
1386 {
1387         bool disabled = irqd_irq_disabled(data);
1388
1389         if (disabled && (irq_to_desc(data->irq)->istate & IRQS_PENDING))
1390                 shutdown_pirq(data);
1391         else {
1392                 if (!disabled)
1393                         irq_move_masked_irq(data);
1394                 unmask_pirq(data);
1395         }
1396 }
1397
1398 static struct irq_chip pirq_chip = {
1399         .name             = "Phys",
1400         .irq_startup      = startup_pirq,
1401         .irq_shutdown     = shutdown_pirq,
1402         .irq_enable       = enable_pirq,
1403         .irq_disable      = disable_pirq,
1404         .irq_mask         = mask_pirq,
1405         .irq_unmask       = unmask_pirq,
1406         .irq_eoi          = end_pirq,
1407         .irq_set_type     = set_type_pirq,
1408 #ifdef CONFIG_SMP
1409         .irq_set_affinity = set_affinity_irq,
1410 #endif
1411         .irq_retrigger    = resend_irq_on_evtchn,
1412 };
1413
1414 int irq_ignore_unhandled(unsigned int irq)
1415 {
1416         struct physdev_irq_status_query irq_status = { .irq = irq };
1417
1418         if (!is_running_on_xen() || irq >= nr_pirqs)
1419                 return 0;
1420
1421         if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
1422                 return 0;
1423         return !!(irq_status.flags & XENIRQSTAT_shared);
1424 }
1425
1426 #if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
1427 void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
1428 {
1429         unsigned int evtchn = per_cpu(ipi_evtchn, cpu);
1430
1431 #ifdef NMI_VECTOR
1432         if (ipi == NMI_VECTOR) {
1433                 int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
1434
1435                 if (rc)
1436                         pr_warn_once("Unable (%d) to send NMI to CPU#%u\n",
1437                                      rc, cpu);
1438                 return;
1439         }
1440 #endif
1441
1442         if (VALID_EVTCHN(evtchn)
1443             && !test_and_set_bit(ipi, per_cpu(ipi_pending, cpu))
1444             && !test_evtchn(evtchn))
1445                 notify_remote_via_evtchn(evtchn);
1446 }
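/*
 * With a single shared per-CPU IPI event channel, ipi_pending records
 * which IPI kind(s) were requested so the receiving CPU can demultiplex;
 * the notification hypercall is skipped when that kind is already
 * recorded or the channel is already pending.
 */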
1447
1448 void clear_ipi_evtchn(void)
1449 {
1450         unsigned int evtchn = percpu_read(ipi_evtchn);
1451
1452         BUG_ON(!VALID_EVTCHN(evtchn));
1453         clear_evtchn(evtchn);
1454 }
1455 #endif
1456
1457 void notify_remote_via_irq(int irq)
1458 {
1459         const struct irq_cfg *cfg = irq_cfg(irq);
1460         unsigned int evtchn;
1461
1462         if (WARN_ON_ONCE(!cfg))
1463                 return;
1464         BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ);
1465         BUG_IF_IPI(cfg);
1466
1467         evtchn = evtchn_from_irq_cfg(cfg);
1468         if (VALID_EVTCHN(evtchn))
1469                 notify_remote_via_evtchn(evtchn);
1470 }
1471 EXPORT_SYMBOL_GPL(notify_remote_via_irq);
1472
1473 #if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
1474 int multi_notify_remote_via_irq(multicall_entry_t *mcl, int irq)
1475 {
1476         const struct irq_cfg *cfg = irq_cfg(irq);
1477         unsigned int evtchn;
1478
1479         if (WARN_ON_ONCE(!cfg))
1480                 return -EINVAL;
1481         BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ);
1482         BUG_IF_IPI(cfg);
1483
1484         evtchn = evtchn_from_irq_cfg(cfg);
1485         if (!VALID_EVTCHN(evtchn))
1486                 return -EINVAL;
1487
1488         multi_notify_remote_via_evtchn(mcl, evtchn);
1489         return 0;
1490 }
1491 EXPORT_SYMBOL_GPL(multi_notify_remote_via_irq);
1492 #endif
1493
1494 int irq_to_evtchn_port(int irq)
1495 {
1496         const struct irq_cfg *cfg = irq_cfg(irq);
1497
1498         if (!cfg)
1499                 return 0;
1500         BUG_IF_VIRQ_PER_CPU(cfg);
1501         BUG_IF_IPI(cfg);
1502         return evtchn_from_irq_cfg(cfg);
1503 }
1504 EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
1505
1506 void mask_evtchn(int port)
1507 {
1508         shared_info_t *s = HYPERVISOR_shared_info;
1509         sync_set_bit(port, s->evtchn_mask);
1510 }
1511 EXPORT_SYMBOL_GPL(mask_evtchn);
1512
1513 void unmask_evtchn(int port)
1514 {
1515         shared_info_t *s = HYPERVISOR_shared_info;
1516         unsigned int cpu = smp_processor_id();
1517
1518         BUG_ON(!irqs_disabled());
1519
1520         /* Slow path (hypercall) if this is a non-local port. */
1521         if (unlikely(cpu != cpu_from_evtchn(port))) {
1522                 struct evtchn_unmask unmask = { .port = port };
1523                 VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
1524                 return;
1525         }
1526
1527         sync_clear_bit(port, s->evtchn_mask);
1528
1529         /* Did we miss an interrupt 'edge'? Re-fire if so. */
1530         if (sync_test_bit(port, s->evtchn_pending)) {
1531                 vcpu_info_t *v = current_vcpu_info();
1532
1533                 if (!sync_test_and_set_bit(port / BITS_PER_LONG,
1534                                            &v->evtchn_pending_sel))
1535                         v->evtchn_upcall_pending = 1;
1536         }
1537 }
1538 EXPORT_SYMBOL_GPL(unmask_evtchn);
1539
1540 void disable_all_local_evtchn(void)
1541 {
1542         unsigned i, cpu = smp_processor_id();
1543         shared_info_t *s = HYPERVISOR_shared_info;
1544
1545         for (i = 0; i < NR_EVENT_CHANNELS; ++i)
1546                 if (cpu_from_evtchn(i) == cpu)
1547                         sync_set_bit(i, &s->evtchn_mask[0]);
1548 }
1549
1550 /* Test an irq's pending state. */
1551 int xen_test_irq_pending(int irq)
1552 {
1553         unsigned int evtchn = evtchn_from_irq(irq);
1554
1555         return VALID_EVTCHN(evtchn) && test_evtchn(evtchn);
1556 }
1557
1558 #ifdef CONFIG_PM_SLEEP
1559 #include <linux/syscore_ops.h>
1560
1561 static void restore_cpu_virqs(unsigned int cpu)
1562 {
1563         struct evtchn_bind_virq bind_virq;
1564         int virq, irq, evtchn;
1565
1566         for (virq = 0; virq < NR_VIRQS; virq++) {
1567                 if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
1568                         continue;
1569
1570 #ifndef PER_CPU_VIRQ_IRQ
1571                 if (test_bit(virq, virq_per_cpu)
1572                     && !VALID_EVTCHN(per_cpu(virq_to_evtchn, cpu)[virq]))
1573                         continue;
1574 #endif
1575
1576                 BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_VIRQ, virq, 0));
1577
1578                 /* Get a new binding from Xen. */
1579                 bind_virq.virq = virq;
1580                 bind_virq.vcpu = cpu;
1581                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
1582                                                 &bind_virq) != 0)
1583                         BUG();
1584                 evtchn = bind_virq.port;
1585
1586                 /* Record the new mapping. */
1587                 evtchn_to_irq[evtchn] = irq;
1588 #ifdef PER_CPU_VIRQ_IRQ
1589                 irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq, evtchn);
1590 #else
1591                 if (test_bit(virq, virq_per_cpu))
1592                         per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
1593                 else {
1594                         unsigned int cpu;
1595
1596                         irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq,
1597                                                          evtchn);
1598                         for_each_possible_cpu(cpu)
1599                                 per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
1600                 }
1601 #endif
1602                 bind_evtchn_to_cpu(evtchn, cpu);
1603
1604                 /* Ready for use. */
1605                 unmask_evtchn(evtchn);
1606         }
1607 }
1608
1609 static void restore_cpu_ipis(unsigned int cpu)
1610 {
1611 #ifdef CONFIG_SMP
1612         struct evtchn_bind_ipi bind_ipi;
1613         struct irq_data *data;
1614         unsigned int evtchn;
1615 #ifdef PER_CPU_IPI_IRQ
1616         int ipi, irq;
1617
1618         for (ipi = 0; ipi < NR_IPIS; ipi++) {
1619                 if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
1620                         continue;
1621 #else
1622 #define ipi 0
1623 #define irq ipi_irq
1624                 if (irq == -1
1625                     || !VALID_EVTCHN(per_cpu(ipi_evtchn, cpu)))
1626                         return;
1627 #endif
1628
1629                 data = irq_get_irq_data(irq);
1630                 BUG_ON(irq_data_cfg(data)->info != mk_irq_info(IRQT_IPI, ipi, 0));
1631
1632                 /* Get a new binding from Xen. */
1633                 bind_ipi.vcpu = cpu;
1634                 if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
1635                                                 &bind_ipi) != 0)
1636                         BUG();
1637                 evtchn = bind_ipi.port;
1638
1639                 /* Record the new mapping. */
1640                 evtchn_to_irq[evtchn] = irq;
1641 #ifdef PER_CPU_IPI_IRQ
1642                 irq_data_cfg(data)->info = mk_irq_info(IRQT_IPI, ipi, evtchn);
1643 #else
1644                 per_cpu(ipi_evtchn, cpu) = evtchn;
1645 #endif
1646                 bind_evtchn_to_cpu(evtchn, cpu);
1647
1648                 /* Ready for use. */
1649                 if (!irqd_irq_disabled(data))
1650                         unmask_evtchn(evtchn);
1651 #ifdef PER_CPU_IPI_IRQ
1652         }
1653 #else
1654 #undef irq
1655 #undef ipi
1656 #endif
1657 #endif /* CONFIG_SMP */
1658 }
1659
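     /*
      * Resume handler (wired up as a syscore op below).  A cancelled
      * suspend is detected by querying the status of the channel we had
      * bound to VIRQ_TIMER: if it is still bound as before, nothing was
      * torn down and there is nothing to redo.  Otherwise reset the CPU
      * bindings, re-register the pirq_needs_eoi frame if applicable, mask
      * all channels, wipe the IRQ <-> event-channel mappings (no PIRQs may
      * still be bound at this point), and rebuild the VIRQ and IPI
      * bindings on every CPU.
      */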
1660 static void evtchn_resume(void)
1661 {
1662         unsigned int cpu, irq, evtchn;
1663         struct evtchn_status status;
1664
1665         /* Avoid doing anything in the 'suspend cancelled' case. */
1666         status.dom = DOMID_SELF;
1667 #ifdef PER_CPU_VIRQ_IRQ
1668         status.port = evtchn_from_irq(percpu_read(virq_to_irq[VIRQ_TIMER]));
1669 #else
1670         status.port = percpu_read(virq_to_evtchn[VIRQ_TIMER]);
1671 #endif
1672         if (HYPERVISOR_event_channel_op(EVTCHNOP_status, &status))
1673                 BUG();
1674         if (status.status == EVTCHNSTAT_virq
1675             && status.vcpu == smp_processor_id()
1676             && status.u.virq == VIRQ_TIMER)
1677                 return;
1678
1679         init_evtchn_cpu_bindings();
1680
1681         if (pirq_eoi_does_unmask) {
1682                 struct physdev_pirq_eoi_gmfn eoi_gmfn;
1683
1684                 eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;
1685                 if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn))
1686                         BUG();
1687         }
1688
1689         /* New event-channel space is not 'live' yet. */
1690         for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1691                 mask_evtchn(evtchn);
1692
1693         /* No IRQ <-> event-channel mappings. */
1694         for (irq = 0; irq < nr_irqs; irq++) {
1695                 struct irq_cfg *cfg = irq_cfg(irq);
1696
1697                 if (!cfg)
1698                         continue;
1699
1700                 /* Check that no PIRQs are still bound. */
1701 #ifdef CONFIG_SPARSE_IRQ
1702                 if (irq < PIRQ_BASE || irq >= PIRQ_BASE + nr_pirqs)
1703                         BUG_ON(type_from_irq_cfg(cfg) == IRQT_PIRQ);
1704                 else
1705 #endif
1706                         BUG_ON(cfg->info != IRQ_UNBOUND);
1707
1708                 cfg->info &= ~((1U << _EVTCHN_BITS) - 1);
1709         }
1710         for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
1711                 evtchn_to_irq[evtchn] = -1;
1712
1713         for_each_possible_cpu(cpu) {
1714                 restore_cpu_virqs(cpu);
1715                 restore_cpu_ipis(cpu);
1716         }
1717 }
1718
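     /*
      * Only a resume hook is needed: bindings are rebuilt from scratch
      * rather than saved.  It is registered below for non-privileged
      * domains only, these being the ones that go through save/restore.
      */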
1719 static struct syscore_ops evtchn_syscore_ops = {
1720         .resume = evtchn_resume,
1721 };
1722
1723 static int __init evtchn_register(void)
1724 {
1725         if (!is_initial_xendomain())
1726                 register_syscore_ops(&evtchn_syscore_ops);
1727         return 0;
1728 }
1729 core_initcall(evtchn_register);
1730 #endif
1731
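     /*
      * Attach the statically allocated _irq_cfg entries as chip data of
      * the first ARRAY_SIZE(_irq_cfg) IRQs, so irq_cfg() lookups work
      * before any dynamic descriptor allocation has taken place.
      */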
1732 int __init arch_early_irq_init(void)
1733 {
1734         unsigned int i;
1735
1736         for (i = 0; i < ARRAY_SIZE(_irq_cfg); i++)
1737                 irq_set_chip_data(i, _irq_cfg + i);
1738
1739         return 0;
1740 }
1741
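     /*
      * Allocate (or reuse) the IRQ descriptor at a fixed number and return
      * its irq_cfg.  With CONFIG_SPARSE_IRQ a new cfg is allocated and
      * installed as chip data and the descriptor's affinity defaults to
      * CPU#0; otherwise the statically sized irq_cfg() entry is returned.
      */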
1742 struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
1743 {
1744         int res = irq_alloc_desc_at(at, node);
1745         struct irq_cfg *cfg = NULL;
1746
1747         if (res < 0) {
1748                 if (res != -EEXIST)
1749                         return NULL;
1750                 cfg = irq_get_chip_data(at);
1751                 if (cfg)
1752                         return cfg;
1753         }
1754
1755 #ifdef CONFIG_SPARSE_IRQ
1756 #ifdef CONFIG_SMP
1757         /* By default all event channels notify CPU#0. */
1758         cpumask_copy(irq_get_irq_data(at)->affinity, cpumask_of(0));
1759 #endif
1760
1761         cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1762         if (cfg)
1763                 irq_set_chip_data(at, cfg);
1764         else
1765                 irq_free_desc(at);
1766
1767         return cfg;
1768 #else
1769         return irq_cfg(at);
1770 #endif
1771 }
1772
1773 #ifdef CONFIG_SPARSE_IRQ
1774 #ifdef CONFIG_X86_IO_APIC
1775 #include <asm/io_apic.h>
1776 #endif
1777
1778 int nr_pirqs = NR_PIRQS;
1779 EXPORT_SYMBOL_GPL(nr_pirqs);
1780
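     /*
      * Size the IRQ space: nr_pirqs is clamped to the number of GSIs
      * (legacy plus IO-APIC pins for dom0, NR_VECTORS otherwise), dynamic
      * IRQs are bounded by NR_DYNIRQS and NR_EVENT_CHANNELS, and the total
      * is capped at PAGE_SIZE * 8, presumably so the pirq_needs_eoi bitmap
      * set up in xen_init_IRQ() stays within a single page.  The return
      * value is the number of pre-allocated (legacy) descriptors.
      */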
1781 int __init arch_probe_nr_irqs(void)
1782 {
1783         int nr = 64 + CONFIG_XEN_NR_GUEST_DEVICES, nr_irqs_gsi;
1784
1785         if (is_initial_xendomain()) {
1786                 nr_irqs_gsi = NR_IRQS_LEGACY;
1787 #ifdef CONFIG_X86_IO_APIC
1788                 nr_irqs_gsi += gsi_top;
1789 #endif
1790 #ifdef CONFIG_PCI_MSI
1791                 nr += max(nr_irqs_gsi * 16, nr_cpu_ids * 8);
1792 #endif
1793         } else {
1794                 nr_irqs_gsi = NR_VECTORS;
1795 #ifdef CONFIG_PCI_MSI
1796                 nr += max(NR_IRQS_LEGACY * 16, nr_cpu_ids * 8);
1797 #endif
1798         }
1799
1800         if (nr_pirqs > nr_irqs_gsi)
1801                 nr_pirqs = nr_irqs_gsi;
1802         if (nr > min_t(int, NR_DYNIRQS, NR_EVENT_CHANNELS))
1803                 nr = min_t(int, NR_DYNIRQS, NR_EVENT_CHANNELS);
1804         nr_irqs = min_t(int, nr_pirqs + nr, PAGE_SIZE * 8);
1805
1806         printk(KERN_DEBUG "nr_pirqs: %d\n", nr_pirqs);
1807
1808         return ARRAY_SIZE(_irq_cfg);
1809 }
1810 #endif
1811
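     /*
      * With an IO-APIC, vectors for physical IRQs are obtained from Xen
      * via PHYSDEVOP_alloc_irq_vector (the cpumask argument is unused
      * here).  identity_mapped_irq() tells whether a PIRQ keeps its 1:1
      * IRQ number -- roughly, the legacy ISA range -- or needs remapping.
      */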
1812 #if defined(CONFIG_X86_IO_APIC)
1813 int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1814 {
1815         struct physdev_irq irq_op;
1816
1817         if (irq < PIRQ_BASE || irq - PIRQ_BASE >= nr_pirqs)
1818                 return -EINVAL;
1819
1820         if (cfg->vector)
1821                 return 0;
1822
1823         irq_op.irq = irq;
1824         if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
1825                 return -ENOSPC;
1826
1827         cfg->vector = irq_op.vector;
1828
1829         return 0;
1830 }
1831 #define identity_mapped_irq(irq) (!IO_APIC_IRQ((irq) - PIRQ_BASE))
1832 #elif defined(CONFIG_X86)
1833 #define identity_mapped_irq(irq) (((irq) - PIRQ_BASE) < NR_IRQS_LEGACY)
1834 #else
1835 #define identity_mapped_irq(irq) (1)
1836 #endif
1837
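     /*
      * Hand a physical IRQ over to Xen-style handling: record it as a PIRQ
      * and switch it to pirq_chip with the fasteoi flow, unless it is
      * identity mapped or already bound.
      */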
1838 void evtchn_register_pirq(int irq)
1839 {
1840         struct irq_cfg *cfg = irq_cfg(irq);
1841
1842         BUG_ON(irq < PIRQ_BASE || irq - PIRQ_BASE >= nr_pirqs);
1843         if (identity_mapped_irq(irq) || type_from_irq_cfg(cfg) != IRQT_UNBOUND)
1844                 return;
1845         cfg->info = mk_irq_info(IRQT_PIRQ, irq, 0);
1846         irq_set_chip_and_handler_name(irq, &pirq_chip, handle_fasteoi_irq,
1847                                       "fasteoi");
1848 }
1849
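     /*
      * evtchn_map_pirq() manages the IRQ <-> Xen PIRQ association for
      * MSI(-X): a negative @irq allocates a fresh IRQ for @xen_pirq, a
      * zero @xen_pirq tears an existing mapping down again (deliberately
      * without dynamic_irq_cleanup(), see below), and anything else merely
      * verifies that the requested mapping matches what is recorded.
      */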
1850 #ifdef CONFIG_PCI_MSI
1851 int evtchn_map_pirq(int irq, int xen_pirq)
1852 {
1853         if (irq < 0) {
1854 #ifdef CONFIG_SPARSE_IRQ
1855                 struct irq_cfg *cfg;
1856
1857                 spin_lock(&irq_mapping_update_lock);
1858                 irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip,
1859                                        false);
1860                 if (irq >= 0) {
1861                         BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND);
1862                         cfg->bindcount++;
1863                         cfg->info = mk_irq_info(IRQT_PIRQ, xen_pirq, 0);
1864                 }
1865                 spin_unlock(&irq_mapping_update_lock);
1866                 if (irq < 0)
1867                         return irq;
1868         } else if (irq >= PIRQ_BASE && irq < PIRQ_BASE + nr_pirqs) {
1869                 WARN_ONCE(1, "Non-MSI IRQ#%d (Xen %d)\n", irq, xen_pirq);
1870                 return -EINVAL;
1871 #else
1872                 static DEFINE_SPINLOCK(irq_alloc_lock);
1873
1874                 irq = PIRQ_BASE + nr_pirqs - 1;
1875                 spin_lock(&irq_alloc_lock);
1876                 do {
1877                         struct irq_cfg *cfg;
1878
1879                         if (identity_mapped_irq(irq))
1880                                 continue;
1881                         cfg = alloc_irq_and_cfg_at(irq, numa_node_id());
1882                         if (unlikely(!cfg)) {
1883                                 spin_unlock(&irq_alloc_lock);
1884                                 return -ENOMEM;
1885                         }
1886                         if (!index_from_irq_cfg(cfg)) {
1887                                 BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND);
1888                                 cfg->info = mk_irq_info(IRQT_PIRQ,
1889                                                         xen_pirq, 0);
1890                                 break;
1891                         }
1892                 } while (--irq >= PIRQ_BASE);
1893                 spin_unlock(&irq_alloc_lock);
1894                 if (irq < PIRQ_BASE)
1895                         return -ENOSPC;
1896                 irq_set_chip_and_handler_name(irq, &pirq_chip,
1897                                               handle_fasteoi_irq, "fasteoi");
1898 #endif
1899         } else if (!xen_pirq) {
1900                 struct irq_cfg *cfg = irq_cfg(irq);
1901
1902                 if (!cfg || unlikely(type_from_irq_cfg(cfg) != IRQT_PIRQ))
1903                         return -EINVAL;
1904                 /*
1905                  * dynamic_irq_cleanup(irq) would seem to be the right call
1906                  * here, but cannot be used: we also get here during shutdown
1907                  * when a driver didn't free_irq() its MSI(-X) IRQ(s), and
1908                  * that would trigger a warning in dynamic_irq_cleanup().
1909                  */
1910                 irq_set_chip_and_handler(irq, NULL, NULL);
1911                 cfg->info = IRQ_UNBOUND;
1912 #ifdef CONFIG_SPARSE_IRQ
1913                 cfg->bindcount--;
1914 #endif
1915                 return 0;
1916         } else if (type_from_irq(irq) != IRQT_PIRQ
1917                    || index_from_irq(irq) != xen_pirq) {
1918                 pr_err("IRQ#%d is already mapped to %d:%u - "
1919                        "cannot map to PIRQ#%u\n",
1920                        irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
1921                 return -EINVAL;
1922         }
1923         return index_from_irq(irq) ? irq : -EINVAL;
1924 }
1925 #endif
1926
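     /*
      * Translate a Linux IRQ back to the Xen PIRQ it is bound to;
      * identity-mapped IRQs translate to themselves.
      */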
1927 int evtchn_get_xen_pirq(int irq)
1928 {
1929         struct irq_cfg *cfg = irq_cfg(irq);
1930
1931         if (identity_mapped_irq(irq))
1932                 return irq;
1933         BUG_ON(type_from_irq_cfg(cfg) != IRQT_PIRQ);
1934         return index_from_irq_cfg(cfg);
1935 }
1936
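     /*
      * Boot-time setup: mark the VIRQs that need a channel per CPU,
      * initialise the CPU bindings, allocate the pirq_needs_eoi bitmap and
      * offer it to Xen via PHYSDEVOP_pirq_eoi_gmfn, mask every event
      * channel, and install the dynirq/pirq chips with fasteoi handling on
      * the static IRQ ranges (skipping the RTC IRQ outside dom0 so the RTC
      * driver's probe fails there).
      */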
1937 void __init xen_init_IRQ(void)
1938 {
1939         unsigned int i;
1940         struct physdev_pirq_eoi_gmfn eoi_gmfn;
1941
1942 #ifndef PER_CPU_VIRQ_IRQ
1943         __set_bit(VIRQ_TIMER, virq_per_cpu);
1944         __set_bit(VIRQ_DEBUG, virq_per_cpu);
1945         __set_bit(VIRQ_XENOPROF, virq_per_cpu);
1946 #ifdef CONFIG_IA64
1947         __set_bit(VIRQ_ITC, virq_per_cpu);
1948 #endif
1949 #endif
1950
1951         init_evtchn_cpu_bindings();
1952
1953 #ifdef CONFIG_SPARSE_IRQ
1954         i = nr_irqs;
1955 #else
1956         i = nr_pirqs;
1957 #endif
1958         i = get_order(sizeof(unsigned long) * BITS_TO_LONGS(i));
1959         pirq_needs_eoi = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, i);
1960         BUILD_BUG_ON(NR_PIRQS > PAGE_SIZE * 8);
1961         eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;
1962         if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0)
1963                 pirq_eoi_does_unmask = true;
1964
1965         /* No event channels are 'live' right now. */
1966         for (i = 0; i < NR_EVENT_CHANNELS; i++)
1967                 mask_evtchn(i);
1968
1969 #ifndef CONFIG_SPARSE_IRQ
1970         for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) {
1971                 irq_set_noprobe(i);
1972                 irq_set_chip_and_handler_name(i, &dynirq_chip,
1973                                               handle_fasteoi_irq, "fasteoi");
1974         }
1975
1976         for (i = PIRQ_BASE; i < (PIRQ_BASE + nr_pirqs); i++) {
1977 #else
1978         for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_IRQS_LEGACY); i++) {
1979 #endif
1980                 if (!identity_mapped_irq(i))
1981                         continue;
1982
1983 #ifdef RTC_IRQ
1984                 /* If not domain 0, force our RTC driver to fail its probe. */
1985                 if (i - PIRQ_BASE == RTC_IRQ && !is_initial_xendomain())
1986                         continue;
1987 #endif
1988
1989                 irq_set_chip_and_handler_name(i, &pirq_chip,
1990                                               handle_fasteoi_irq, "fasteoi");
1991         }
1992 }