/******************************************************************************
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/ftrace.h>
#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/sync_bitops.h>
#include <xen/evtchn.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */
#include "../../../kernel/irq/internals.h" /* IRQS_AUTODETECT, IRQS_PENDING */

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ... NR_EVENT_CHANNELS-1] = -1 };

#if defined(CONFIG_SMP) && defined(CONFIG_X86)
static struct percpu_irqaction {
	struct irqaction action; /* must be first */
	struct percpu_irqaction *next;
	cpumask_var_t cpus;
} *virq_actions[NR_VIRQS];
/* IRQ <-> VIRQ mapping. */
static DECLARE_BITMAP(virq_per_cpu, NR_VIRQS) __read_mostly;
static DEFINE_PER_CPU_READ_MOSTLY(int[NR_VIRQS], virq_to_evtchn);
#define BUG_IF_VIRQ_PER_CPU(irq_cfg) \
	BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_VIRQ \
	       && test_bit(index_from_irq_cfg(irq_cfg), virq_per_cpu))
#else
#define BUG_IF_VIRQ_PER_CPU(irq_cfg) ((void)0)
#define PER_CPU_VIRQ_IRQ
#endif

/* IRQ <-> IPI mapping. */
#if defined(CONFIG_SMP) && defined(CONFIG_X86)
static int __read_mostly ipi_irq = -1;
DEFINE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending);
static DEFINE_PER_CPU_READ_MOSTLY(evtchn_port_t, ipi_evtchn);
#else
#define PER_CPU_IPI_IRQ
#endif
#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
#define BUG_IF_IPI(irq_cfg) BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_IPI)
#else
#define BUG_IF_IPI(irq_cfg) ((void)0)
#endif

#define _EVTCHN_BITS 12
#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND (IRQT_UNBOUND << (32 - _IRQT_BITS))

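/*
 * Each irq_cfg's info word packs, from most to least significant bits,
 * the binding type (_IRQT_BITS wide), a type-specific index such as the
 * VIRQ, IPI or PIRQ number (_INDEX_BITS wide), and the bound event
 * channel (_EVTCHN_BITS wide); see mk_irq_info() and the accessors below.
 */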
static struct irq_cfg _irq_cfg[] = {
	[0 ...
#ifdef CONFIG_SPARSE_IRQ
	       BUILD_BUG_ON_ZERO(PIRQ_BASE) + NR_IRQS_LEGACY
#else
	       NR_IRQS
#endif
	       - 1].info = IRQ_UNBOUND
};

static inline struct irq_cfg *__pure irq_cfg(unsigned int irq)
{
#ifdef CONFIG_SPARSE_IRQ
	return irq_get_chip_data(irq);
#else
	return irq < NR_IRQS ? _irq_cfg + irq : NULL;
#endif
}

static inline struct irq_cfg *__pure irq_data_cfg(struct irq_data *data)
{
	return irq_data_get_irq_chip_data(data);
}

/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));

	BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
	BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
#if defined(PER_CPU_IPI_IRQ) && defined(NR_IPIS)
	BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
#endif
	BUG_ON(index >> _INDEX_BITS);

	BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));

	return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}

/*
 * Accessors for packed IRQ information.
 */

static inline unsigned int index_from_irq_cfg(const struct irq_cfg *cfg)
{
	return (cfg->info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
}

static inline unsigned int index_from_irq(int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);

	return cfg ? index_from_irq_cfg(cfg) : 0;
}

static inline unsigned int type_from_irq_cfg(const struct irq_cfg *cfg)
{
	return cfg->info >> (32 - _IRQT_BITS);
}

static inline unsigned int type_from_irq(int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);

	return cfg ? type_from_irq_cfg(cfg) : IRQT_UNBOUND;
}

static inline unsigned int evtchn_from_per_cpu_irq(const struct irq_cfg *cfg,
						   unsigned int cpu)
{
	switch (type_from_irq_cfg(cfg)) {
#ifndef PER_CPU_VIRQ_IRQ
	case IRQT_VIRQ:
		return per_cpu(virq_to_evtchn, cpu)[index_from_irq_cfg(cfg)];
#endif
#ifndef PER_CPU_IPI_IRQ
	case IRQT_IPI:
		return per_cpu(ipi_evtchn, cpu);
#endif
	}
	BUG();
	return 0;
}

static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg)
{
	switch (type_from_irq_cfg(cfg)) {
#ifndef PER_CPU_VIRQ_IRQ
	case IRQT_VIRQ:
#endif
#ifndef PER_CPU_IPI_IRQ
	case IRQT_IPI:
#endif
		return evtchn_from_per_cpu_irq(cfg, smp_processor_id());
	}
	return cfg->info & ((1U << _EVTCHN_BITS) - 1);
}

static inline unsigned int evtchn_from_irq_data(struct irq_data *data)
{
	const struct irq_cfg *cfg = irq_data_cfg(data);

	return cfg ? evtchn_from_irq_cfg(cfg) : 0;
}

static inline unsigned int evtchn_from_irq(int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);

	return data ? evtchn_from_irq_data(data) : 0;
}

unsigned int irq_from_evtchn(unsigned int port)
{
	return evtchn_to_irq[port];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int[NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
/* IRQ <-> IPI mapping. */
DEFINE_PER_CPU(int[NR_IPIS], ipi_to_irq) = {[0 ... NR_IPIS-1] = -1};
#endif

#ifdef CONFIG_SMP

#if CONFIG_NR_CPUS <= 256
static u8 cpu_evtchn[NR_EVENT_CHANNELS];
#else
static u16 cpu_evtchn[NR_EVENT_CHANNELS];
#endif
static DEFINE_PER_CPU(unsigned long[BITS_TO_LONGS(NR_EVENT_CHANNELS)],
		      cpu_evtchn_mask);

static inline unsigned long active_evtchns(unsigned int idx)
{
	shared_info_t *sh = HYPERVISOR_shared_info;

	return (sh->evtchn_pending[idx] &
		percpu_read(cpu_evtchn_mask[idx]) &
		~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	int irq = evtchn_to_irq[chn];

	BUG_ON(!test_bit(chn, s->evtchn_mask));

	if (irq != -1) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (!irqd_is_per_cpu(data))
			cpumask_copy(data->affinity, cpumask_of(cpu));
		else
			cpumask_set_cpu(cpu, data->affinity);
	}

	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_evtchn[chn]));
	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
	cpu_evtchn[chn] = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
	int i;

	/* By default all event channels notify CPU#0. */
	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_get_irq_data(i);

		if (data)
			cpumask_copy(data->affinity, cpumask_of(0));
	}

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	for_each_possible_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i), -!i,
		       sizeof(per_cpu(cpu_evtchn_mask, i)));
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}

#else

static inline unsigned long active_evtchns(unsigned int idx)
{
	shared_info_t *sh = HYPERVISOR_shared_info;

	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return 0;
}

#endif /* CONFIG_SMP */

void __init xen_init_IRQ(void);
void __init init_IRQ(void)
	__attribute__((weak, alias("xen_init_IRQ")));

#include <asm/idle.h>

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	VOID(HYPERVISOR_xen_version(0, NULL));
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);

static DEFINE_PER_CPU(unsigned int, upcall_count);
static DEFINE_PER_CPU(unsigned int, current_l1i);
static DEFINE_PER_CPU(unsigned int, current_l2i);

#ifndef vcpu_info_xchg
#define vcpu_info_xchg(fld, val) xchg(&current_vcpu_info()->fld, val)
#endif

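/*
 * The upcall handler scans the two-level pending bitmap: the per-VCPU
 * selector word (evtchn_pending_sel) indexes words of evtchn_pending in
 * the shared info page. Scanning resumes at current_l1i/current_l2i,
 * one past the last port processed, so that no port can starve its
 * neighbours under sustained load.
 */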
/* NB. Interrupts are disabled on entry. */
asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long l1, l2;
	unsigned long masked_l1, masked_l2;
	unsigned int l1i, l2i, start_l1i, start_l2i, port, i;
	int irq;
	struct pt_regs *old_regs;

	/* Nested invocations bail immediately. */
	if (unlikely(this_cpu_inc_return(upcall_count) != 1))
		return;

	old_regs = set_irq_regs(regs);
	xen_spin_irq_enter();
	irq_enter();
	exit_idle();

	do {
		/* Avoid a callback storm when we reenable delivery. */
		vcpu_info_write(evtchn_upcall_pending, 0);

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif

#ifdef CONFIG_NO_HZ
		/*
		 * Handle timer interrupts before all others, so that all
		 * hardirq handlers see an up-to-date system time even if we
		 * have just woken from a long idle period.
		 */
#ifdef PER_CPU_VIRQ_IRQ
		if ((irq = percpu_read(virq_to_irq[VIRQ_TIMER])) != -1) {
			port = evtchn_from_irq(irq);
#else
		port = percpu_read(virq_to_evtchn[VIRQ_TIMER]);
		if (VALID_EVTCHN(port)) {
#endif
			l1i = port / BITS_PER_LONG;
			l2i = port % BITS_PER_LONG;
			if (active_evtchns(l1i) & (1ul<<l2i)) {
				mask_evtchn(port);
				clear_evtchn(port);
#ifndef PER_CPU_VIRQ_IRQ
				irq = evtchn_to_irq[port];
				BUG_ON(irq == -1);
#endif
				if (!handle_irq(irq, regs))
					BUG();
			}
		}
#endif /* CONFIG_NO_HZ */

		l1 = vcpu_info_xchg(evtchn_pending_sel, 0);

		start_l1i = l1i = percpu_read(current_l1i);
		start_l2i = percpu_read(current_l2i);

		for (i = 0; l1 != 0; i++) {
			masked_l1 = l1 & ((~0UL) << l1i);
			/* If we masked out all events, wrap to beginning. */
			if (masked_l1 == 0) {
				l1i = l2i = 0;
				continue;
			}
			l1i = __ffs(masked_l1);

			l2 = active_evtchns(l1i);
			l2i = 0; /* usually scan entire word from start */
			if (l1i == start_l1i) {
				/* We scan the starting word in two parts. */
				if (i == 0)
					/* 1st time: start in the middle */
					l2i = start_l2i;
				else
					/* 2nd time: mask bits done already */
					l2 &= (1ul << start_l2i) - 1;
			}

			do {
				bool handled = false;

				masked_l2 = l2 & ((~0UL) << l2i);
				if (masked_l2 == 0)
					break;
				l2i = __ffs(masked_l2);

				/* process port */
				port = (l1i * BITS_PER_LONG) + l2i;
				if ((irq = evtchn_to_irq[port]) != -1) {
#ifndef PER_CPU_IPI_IRQ
					if (port != percpu_read(ipi_evtchn))
#endif
						clear_evtchn(port);
					handled = handle_irq(irq, regs);
				}
				if (!handled && printk_ratelimit())
					pr_emerg("No handler for irq %d"
						 " (port %u)\n",
						 irq, port);

				l2i = (l2i + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				percpu_write(current_l1i,
					     l2i ? l1i : (l1i + 1) % BITS_PER_LONG);
				percpu_write(current_l2i, l2i);

			} while (l2i != 0);

			/* Scan start_l1i twice; all others once. */
			if ((l1i != start_l1i) || (i != 0))
				l1 &= ~(1UL << l1i);

			l1i = (l1i + 1) % BITS_PER_LONG;
		}

		/* If there were nested callbacks then we have more to do. */
	} while (unlikely(this_cpu_xchg(upcall_count, 1) != 1));

	this_cpu_write(upcall_count, 0);
	irq_exit();
	xen_spin_irq_exit();
	set_irq_regs(old_regs);
}

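/*
 * Allocate a dynamic IRQ in [DYNIRQ_BASE, nr_irqs): either a brand new
 * descriptor or one already owned by the requested chip but currently
 * unbound (bindcount zero). The fasteoi or per-CPU flow handler is
 * attached before the IRQ number is returned.
 */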
static int find_unbound_irq(unsigned int node, struct irq_cfg **pcfg,
			    struct irq_chip *chip, bool percpu)
{
	static int warned;
	int irq;

	for (irq = DYNIRQ_BASE; irq < nr_irqs; irq++) {
		struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
		struct irq_data *data = irq_get_irq_data(irq);

		if (unlikely(!cfg))
			return -ENOMEM;
		if (data->chip != &no_irq_chip &&
		    data->chip != chip)
			continue;

		if (!cfg->bindcount) {
			irq_flow_handler_t handle;
			const char *name;

			*pcfg = cfg;
			irq_set_noprobe(irq);
			if (!percpu) {
				handle = handle_fasteoi_irq;
				name = "fasteoi";
			} else {
				handle = handle_percpu_irq;
				name = "percpu";
			}
			irq_set_chip_and_handler_name(irq, chip,
						      handle, name);
			return irq;
		}
	}

	if (!warned) {
		warned = 1;
		pr_warning("No available IRQ to bind to: "
			   "increase NR_DYNIRQS.\n");
	}

	return -ENOSPC;
}

static struct irq_chip dynirq_chip;

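/*
 * The bind_*_to_irq() helpers below look up (or allocate) the dynamic
 * IRQ for a binding, record the packed type/index/evtchn info in its
 * irq_cfg, and take a reference in bindcount; unbind_from_irq() drops
 * that reference and closes the event channel once it reaches zero.
 */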
static int bind_caller_port_to_irq(unsigned int caller_port)
{
	struct irq_cfg *cfg;
	int irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = evtchn_to_irq[caller_port]) == -1) {
		if ((irq = find_unbound_irq(numa_node_id(), &cfg,
					    &dynirq_chip, false)) < 0)
			goto out;

		evtchn_to_irq[caller_port] = irq;
		cfg->info = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
	} else
		cfg = irq_cfg(irq);

	cfg->bindcount++;

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_local_port_to_irq(unsigned int local_port)
{
	struct irq_cfg *cfg;
	int irq;

	spin_lock(&irq_mapping_update_lock);

	BUG_ON(evtchn_to_irq[local_port] != -1);

	if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip,
				    false)) < 0) {
		if (close_evtchn(local_port))
			BUG();
		goto out;
	}

	evtchn_to_irq[local_port] = irq;
	cfg->info = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
	cfg->bindcount++;

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

static int bind_listening_port_to_irq(unsigned int remote_domain)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);

	return err ? : bind_local_port_to_irq(alloc_unbound.port);
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}

static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	struct irq_cfg *cfg;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
					    &dynirq_chip, false)) < 0)
			goto out;

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
#ifndef PER_CPU_VIRQ_IRQ
		{
			unsigned int cpu;

			for_each_possible_cpu(cpu)
				per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
		}
#endif
		cfg->info = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	} else
		cfg = irq_cfg(irq);

	cfg->bindcount++;

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}

#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	struct irq_cfg *cfg;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
					    &dynirq_chip, false)) < 0)
			goto out;

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		cfg->info = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	} else
		cfg = irq_cfg(irq);

	cfg->bindcount++;

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
#endif

static void unbind_from_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int evtchn = evtchn_from_irq_cfg(cfg);

	BUG_IF_VIRQ_PER_CPU(cfg);
	BUG_IF_IPI(cfg);

	spin_lock(&irq_mapping_update_lock);

	if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) {
		if ((type_from_irq_cfg(cfg) != IRQT_CALLER_PORT) &&
		    close_evtchn(evtchn))
			BUG();

		switch (type_from_irq_cfg(cfg)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq_cfg(cfg)] = -1;
#ifndef PER_CPU_VIRQ_IRQ
			{
				unsigned int cpu;

				for_each_possible_cpu(cpu)
					per_cpu(virq_to_evtchn, cpu)
						[index_from_irq_cfg(cfg)] = 0;
			}
#endif
			break;
#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq_cfg(cfg)] = -1;
			break;
#endif
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		cfg->info = IRQ_UNBOUND;

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}

#if !defined(PER_CPU_IPI_IRQ) || !defined(PER_CPU_VIRQ_IRQ)
static inline struct percpu_irqaction *alloc_percpu_irqaction(gfp_t gfp)
{
	struct percpu_irqaction *new = kzalloc(sizeof(*new), GFP_ATOMIC);

	if (new && !zalloc_cpumask_var(&new->cpus, gfp)) {
		kfree(new);
		new = NULL;
	}
	return new;
}

static inline void free_percpu_irqaction(struct percpu_irqaction *action)
{
	if (!action)
		return;
	free_cpumask_var(action->cpus);
	kfree(action);
}

void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu,
			     struct irqaction *action)
{
	struct evtchn_close close;
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_cfg *cfg = irq_data_cfg(data);
	unsigned int evtchn = evtchn_from_per_cpu_irq(cfg, cpu);
	struct percpu_irqaction *free_action = NULL;

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);

		BUG_ON(cfg->bindcount <= 1);
		cfg->bindcount--;

#ifndef PER_CPU_VIRQ_IRQ
		if (type_from_irq_cfg(cfg) == IRQT_VIRQ) {
			unsigned int virq = index_from_irq_cfg(cfg);
			struct percpu_irqaction *cur, *prev = NULL;

			cur = virq_actions[virq];
			while (cur) {
				if (cur->action.dev_id == action) {
					cpumask_clear_cpu(cpu, cur->cpus);
					if (cpumask_empty(cur->cpus)) {
						WARN_ON(free_action);
						if (prev)
							prev->next = cur->next;
						else
							virq_actions[virq]
								= cur->next;
						free_action = cur;
					}
				} else if (cpumask_test_cpu(cpu, cur->cpus))
					evtchn = 0;
				cur = (prev = cur)->next;
			}
			if (!VALID_EVTCHN(evtchn))
				goto done;
		}
#endif

		cpumask_clear_cpu(cpu, data->affinity);

		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			BUG();

		switch (type_from_irq_cfg(cfg)) {
#ifndef PER_CPU_VIRQ_IRQ
		case IRQT_VIRQ:
			per_cpu(virq_to_evtchn, cpu)
				[index_from_irq_cfg(cfg)] = 0;
			break;
#endif
#ifndef PER_CPU_IPI_IRQ
		case IRQT_IPI:
			per_cpu(ipi_evtchn, cpu) = 0;
			break;
#endif
		default:
			BUG();
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

#ifndef PER_CPU_VIRQ_IRQ
 done:
#endif
	spin_unlock(&irq_mapping_update_lock);

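	/*
	 * free_irq() kfrees the irqaction it removes; because the irqaction
	 * is the first member of struct percpu_irqaction ("must be first"),
	 * that releases the whole wrapper, so the cpumask pointer has to be
	 * fetched and freed here rather than through free_action afterwards.
	 */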
	if (free_action) {
		cpumask_t *cpus = free_action->cpus;

		free_irq(irq, free_action->action.dev_id);
		free_cpumask_var(cpus);
	}
}
EXPORT_SYMBOL_GPL(unbind_from_per_cpu_irq);
#endif /* !PER_CPU_IPI_IRQ || !PER_CPU_VIRQ_IRQ */

int bind_caller_port_to_irqhandler(
	unsigned int caller_port,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_caller_port_to_irq(caller_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);

int bind_listening_port_to_irqhandler(
	unsigned int remote_domain,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_listening_port_to_irq(remote_domain);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);

int bind_interdomain_evtchn_to_irqhandler(
	unsigned int remote_domain,
	unsigned int remote_port,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

#ifndef PER_CPU_VIRQ_IRQ
	BUG_ON(test_bit(virq, virq_per_cpu));
#endif

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

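/*
 * Without PER_CPU_VIRQ_IRQ (or PER_CPU_IPI_IRQ) a single IRQ is shared
 * by all CPUs, each bound to its own event channel. The caller's
 * irqaction is wrapped in a percpu_irqaction that tracks, per CPU,
 * whether the action is installed; setup_irq() runs only for the first
 * CPU to register it.
 */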
#ifdef CONFIG_SMP
#ifndef PER_CPU_VIRQ_IRQ
int bind_virq_to_irqaction(
	unsigned int virq,
	unsigned int cpu,
	struct irqaction *action)
{
	struct evtchn_bind_virq bind_virq;
	struct irq_cfg *cfg;
	int evtchn, irq, retval = 0;
	struct percpu_irqaction *cur = NULL, *new;

	BUG_ON(!test_bit(virq, virq_per_cpu));

	if (action->dev_id)
		return -EINVAL;

	new = alloc_percpu_irqaction(GFP_ATOMIC);
	if (new) {
		new->action = *action;
		new->action.dev_id = action;
	}

	spin_lock(&irq_mapping_update_lock);

	for (cur = virq_actions[virq]; cur; cur = cur->next)
		if (cur->action.dev_id == action)
			break;
	if (!cur) {
		if (!new) {
			spin_unlock(&irq_mapping_update_lock);
			return -ENOMEM;
		}
		new->next = virq_actions[virq];
		virq_actions[virq] = cur = new;
		new = NULL;
		retval = 1;
	}
	cpumask_set_cpu(cpu, cur->cpus);
	action = &cur->action;

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		unsigned int nr;

		BUG_ON(!retval);

		if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
					    &dynirq_chip, true)) < 0) {
			virq_actions[virq] = cur->next;
			spin_unlock(&irq_mapping_update_lock);
			free_percpu_irqaction(new);
			return irq;
		}

		/* Extra reference so count will never drop to zero. */
		cfg->bindcount++;

		for_each_possible_cpu(nr)
			per_cpu(virq_to_irq, nr)[virq] = irq;
		cfg->info = mk_irq_info(IRQT_VIRQ, virq, 0);
	} else
		cfg = irq_cfg(irq);

	evtchn = per_cpu(virq_to_evtchn, cpu)[virq];
	if (!VALID_EVTCHN(evtchn)) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;
		evtchn_to_irq[evtchn] = irq;
		per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	cfg->bindcount++;

	spin_unlock(&irq_mapping_update_lock);

	free_percpu_irqaction(new);

	if (retval == 0) {
		unsigned long flags;

		local_irq_save(flags);
		unmask_evtchn(evtchn);
		local_irq_restore(flags);
	} else {
		action->flags |= IRQF_PERCPU;
		retval = setup_irq(irq, action);
		if (retval) {
			unbind_from_per_cpu_irq(irq, cpu, action);
			BUG_ON(retval > 0);
			irq = retval;
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqaction);
#endif /* !PER_CPU_VIRQ_IRQ */

#ifdef PER_CPU_IPI_IRQ
int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags | IRQF_NO_SUSPEND,
			     devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
#else

int __cpuinit bind_ipi_to_irqaction(
	unsigned int cpu,
	struct irqaction *action)
{
	struct evtchn_bind_ipi bind_ipi;
	struct irq_cfg *cfg;
	unsigned int evtchn;
	int retval = 0;

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(per_cpu(ipi_evtchn, cpu))) {
		spin_unlock(&irq_mapping_update_lock);
		return -EBUSY;
	}

	if (ipi_irq < 0) {
		if ((ipi_irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
						&dynirq_chip, true)) < 0) {
			spin_unlock(&irq_mapping_update_lock);
			return ipi_irq;
		}

		/* Extra reference so count will never drop to zero. */
		cfg->bindcount++;

		cfg->info = mk_irq_info(IRQT_IPI, 0, 0);
		retval = 1;
	} else
		cfg = irq_cfg(ipi_irq);

	bind_ipi.vcpu = cpu;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi))
		BUG();

	evtchn = bind_ipi.port;
	evtchn_to_irq[evtchn] = ipi_irq;
	per_cpu(ipi_evtchn, cpu) = evtchn;

	bind_evtchn_to_cpu(evtchn, cpu);

	cfg->bindcount++;

	spin_unlock(&irq_mapping_update_lock);

	if (!retval) {
		unsigned long flags;

		local_irq_save(flags);
		unmask_evtchn(evtchn);
		local_irq_restore(flags);
	} else {
		action->flags |= IRQF_PERCPU | IRQF_NO_SUSPEND;
		retval = setup_irq(ipi_irq, action);
		if (retval) {
			unbind_from_per_cpu_irq(ipi_irq, cpu, NULL);
			BUG_ON(retval > 0);
			ipi_irq = retval;
		}
	}

	return ipi_irq;
}
#endif /* PER_CPU_IPI_IRQ */
#endif /* CONFIG_SMP */

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

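/*
 * Rebinding a port to another VCPU is done with the port temporarily
 * masked: an event arriving mid-move is then left pending rather than
 * delivered to the old CPU, and is picked up when the port is unmasked.
 */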
#ifdef CONFIG_SMP
static int set_affinity_irq(struct irq_data *data,
			    const struct cpumask *dest, bool force)
{
	const struct irq_cfg *cfg = irq_data_cfg(data);
	unsigned int port = evtchn_from_irq_cfg(cfg);
	unsigned int cpu = cpumask_any(dest);
	struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
	bool masked;
	int rc;

	BUG_IF_VIRQ_PER_CPU(cfg);
	BUG_IF_IPI(cfg);

	if (!VALID_EVTCHN(port))
		return -1;

	masked = test_and_set_evtchn_mask(port);
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv);
	if (rc == 0) {
		bind_evtchn_to_cpu(port, cpu);
		rc = evtchn_to_irq[port] != -1 ? IRQ_SET_MASK_OK_NOCOPY
					       : IRQ_SET_MASK_OK;
	}
	if (!masked)
		unmask_evtchn(port);

	return rc;
}
#endif

int resend_irq_on_evtchn(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq_data(data);
	bool masked;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = test_and_set_evtchn_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}

/*
 * Interface to generic handling in irq.c
 */

static void unmask_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq_data(data);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void mask_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq_data(data);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static unsigned int startup_dynirq(struct irq_data *data)
{
	unmask_dynirq(data);
	return 0;
}

#define shutdown_dynirq mask_dynirq

static void end_dynirq(struct irq_data *data)
{
	if (!irqd_irq_disabled(data)) {
		irq_move_masked_irq(data);
		unmask_dynirq(data);
	}
}

static struct irq_chip dynirq_chip = {
	.irq_startup      = startup_dynirq,
	.irq_shutdown     = shutdown_dynirq,
	.irq_enable       = unmask_dynirq,
	.irq_disable      = mask_dynirq,
	.irq_mask         = mask_dynirq,
	.irq_unmask       = unmask_dynirq,
	.irq_eoi          = end_dynirq,
#ifdef CONFIG_SMP
	.irq_set_affinity = set_affinity_irq,
#endif
	.irq_retrigger    = resend_irq_on_evtchn,
};

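/*
 * Some physical IRQs need an explicit EOI notification to Xen after
 * handling; which ones is tracked in pirq_needs_eoi. When the hypervisor
 * supports PHYSDEVOP_pirq_eoi_gmfn it reads that bitmap directly and the
 * EOI hypercall also unmasks the event channel (pirq_eoi_does_unmask).
 */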
/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static bool pirq_eoi_does_unmask;
static unsigned long *pirq_needs_eoi;
static DECLARE_BITMAP(probing_pirq, NR_PIRQS);

static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
{
	struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };

	if (pirq_eoi_does_unmask) {
		if (test_bit(eoi.irq, pirq_needs_eoi))
			VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
		else
			unmask_evtchn(evtchn);
	} else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
		if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
			struct evtchn_unmask unmask = { .port = evtchn };
			struct multicall_entry mcl[2];

			mcl[0].op = __HYPERVISOR_event_channel_op;
			mcl[0].args[0] = EVTCHNOP_unmask;
			mcl[0].args[1] = (unsigned long)&unmask;
			mcl[1].op = __HYPERVISOR_physdev_op;
			mcl[1].args[0] = PHYSDEVOP_eoi;
			mcl[1].args[1] = (unsigned long)&eoi;

			if (HYPERVISOR_multicall(mcl, 2))
				BUG();
		} else {
			unmask_evtchn(evtchn);
			VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
		}
	} else
		unmask_evtchn(evtchn);
}

static inline void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;

	if (pirq_eoi_does_unmask)
		return;
	irq_status.irq = evtchn_get_xen_pirq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;
	clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
}

static int set_type_pirq(struct irq_data *data, unsigned int type)
{
	if (type != IRQ_TYPE_PROBE)
		return -EINVAL;
	set_bit(data->irq - PIRQ_BASE, probing_pirq);
	return 0;
}

static void enable_pirq(struct irq_data *data)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_cfg *cfg = irq_data_cfg(data);
	unsigned int evtchn = evtchn_from_irq_cfg(cfg);
	unsigned int irq = data->irq, pirq = irq - PIRQ_BASE;

	if (VALID_EVTCHN(evtchn)) {
		if (pirq < nr_pirqs)
			clear_bit(pirq, probing_pirq);
		goto out;
	}

	bind_pirq.pirq = evtchn_get_xen_pirq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = (pirq < nr_pirqs
			   && test_and_clear_bit(pirq, probing_pirq))
			  || (irq_to_desc(irq)->istate & IRQS_AUTODETECT)
			  ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (bind_pirq.flags)
			pr_info("Failed to obtain physical IRQ %d\n", irq);
		return;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	cfg->info = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);

 out:
	pirq_unmask_and_notify(evtchn, irq);
}

#define disable_pirq mask_pirq

static unsigned int startup_pirq(struct irq_data *data)
{
	enable_pirq(data);
	return 0;
}

static void shutdown_pirq(struct irq_data *data)
{
	struct irq_cfg *cfg = irq_data_cfg(data);
	unsigned int evtchn = evtchn_from_irq_cfg(cfg);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	if (close_evtchn(evtchn))
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	cfg->info = mk_irq_info(IRQT_PIRQ, index_from_irq_cfg(cfg), 0);
}

static void unmask_pirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq_data(data);

	if (VALID_EVTCHN(evtchn))
		pirq_unmask_and_notify(evtchn, data->irq);
}

#define mask_pirq mask_dynirq

static void end_pirq(struct irq_data *data)
{
	bool disabled = irqd_irq_disabled(data);

	if (disabled && (irq_to_desc(data->irq)->istate & IRQS_PENDING))
		shutdown_pirq(data);
	else {
		if (!disabled)
			irq_move_masked_irq(data);
		unmask_pirq(data);
	}
}

static struct irq_chip pirq_chip = {
	.irq_startup      = startup_pirq,
	.irq_shutdown     = shutdown_pirq,
	.irq_enable       = enable_pirq,
	.irq_disable      = disable_pirq,
	.irq_mask         = mask_pirq,
	.irq_unmask       = unmask_pirq,
	.irq_eoi          = end_pirq,
	.irq_set_type     = set_type_pirq,
#ifdef CONFIG_SMP
	.irq_set_affinity = set_affinity_irq,
#endif
	.irq_retrigger    = resend_irq_on_evtchn,
};

int irq_ignore_unhandled(unsigned int irq)
{
	struct physdev_irq_status_query irq_status = { .irq = irq };

	if (!is_running_on_xen() || irq >= nr_pirqs)
		return 0;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !!(irq_status.flags & XENIRQSTAT_shared);
}

#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
{
	unsigned int evtchn = per_cpu(ipi_evtchn, cpu);

	if (ipi == NMI_VECTOR) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);

		if (rc)
			pr_warn_once("Unable (%d) to send NMI to CPU#%u\n",
				     rc, cpu);
		return;
	}

	if (VALID_EVTCHN(evtchn)
	    && !test_and_set_bit(ipi, per_cpu(ipi_pending, cpu))
	    && !test_evtchn(evtchn))
		notify_remote_via_evtchn(evtchn);
}

void clear_ipi_evtchn(void)
{
	unsigned int evtchn = percpu_read(ipi_evtchn);

	BUG_ON(!VALID_EVTCHN(evtchn));
	clear_evtchn(evtchn);
}
#endif

void notify_remote_via_irq(int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int evtchn;

	if (WARN_ON_ONCE(!cfg))
		return;
	BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ);
	BUG_IF_IPI(cfg);

	evtchn = evtchn_from_irq_cfg(cfg);
	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
int multi_notify_remote_via_irq(multicall_entry_t *mcl, int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int evtchn;

	if (WARN_ON_ONCE(!cfg))
		return -EINVAL;
	BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ);
	BUG_IF_IPI(cfg);

	evtchn = evtchn_from_irq_cfg(cfg);
	if (!VALID_EVTCHN(evtchn))
		return -EINVAL;

	multi_notify_remote_via_evtchn(mcl, evtchn);
	return 0;
}
EXPORT_SYMBOL_GPL(multi_notify_remote_via_irq);
#endif

int irq_to_evtchn_port(int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);

	if (!cfg)
		return 0;
	BUG_IF_VIRQ_PER_CPU(cfg);
	BUG_IF_IPI(cfg);
	return evtchn_from_irq_cfg(cfg);
}
EXPORT_SYMBOL_GPL(irq_to_evtchn_port);

void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	sync_set_bit(port, s->evtchn_mask);
}
EXPORT_SYMBOL_GPL(mask_evtchn);

void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };

		VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
		return;
	}

	sync_clear_bit(port, s->evtchn_mask);

	/* Did we miss an interrupt 'edge'? Re-fire if so. */
	if (sync_test_bit(port, s->evtchn_pending)) {
		vcpu_info_t *v = current_vcpu_info();

		if (!sync_test_and_set_bit(port / BITS_PER_LONG,
					   &v->evtchn_pending_sel))
			v->evtchn_upcall_pending = 1;
	}
}
EXPORT_SYMBOL_GPL(unmask_evtchn);

void disable_all_local_evtchn(void)
{
	unsigned i, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;

	for (i = 0; i < NR_EVENT_CHANNELS; ++i)
		if (cpu_from_evtchn(i) == cpu)
			sync_set_bit(i, &s->evtchn_mask[0]);
}

/* Test an irq's pending state. */
int xen_test_irq_pending(int irq)
{
	unsigned int evtchn = evtchn_from_irq(irq);

	return VALID_EVTCHN(evtchn) && test_evtchn(evtchn);
}

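/*
 * Event-channel bindings do not survive suspend/resume (or migration):
 * on resume every port is masked, the IRQ <-> evtchn tables are wiped,
 * and VIRQ/IPI bindings are re-established from the recorded per-CPU
 * mappings before being unmasked again.
 */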
#ifdef CONFIG_PM_SLEEP
#include <linux/syscore_ops.h>

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

#ifndef PER_CPU_VIRQ_IRQ
		if (test_bit(virq, virq_per_cpu)
		    && !VALID_EVTCHN(per_cpu(virq_to_evtchn, cpu)[virq]))
			continue;
#endif

		BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
#ifdef PER_CPU_VIRQ_IRQ
		irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq, evtchn);
#else
		if (test_bit(virq, virq_per_cpu))
			per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
		else {
			unsigned int cpu;

			irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq,
							 evtchn);
			for_each_possible_cpu(cpu)
				per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
		}
#endif
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
#ifdef CONFIG_SMP
	struct evtchn_bind_ipi bind_ipi;
	struct irq_data *data;
	unsigned int evtchn;
#ifdef PER_CPU_IPI_IRQ
	int ipi, irq;

	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;
#else
	int ipi = 0, irq = ipi_irq;

	if (irq == -1
	    || !VALID_EVTCHN(per_cpu(ipi_evtchn, cpu)))
		return;
#endif

	data = irq_get_irq_data(irq);
	BUG_ON(irq_data_cfg(data)->info != mk_irq_info(IRQT_IPI, ipi, 0));

	/* Get a new binding from Xen. */
	bind_ipi.vcpu = cpu;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
					&bind_ipi) != 0)
		BUG();
	evtchn = bind_ipi.port;

	/* Record the new mapping. */
	evtchn_to_irq[evtchn] = irq;
#ifdef PER_CPU_IPI_IRQ
	irq_data_cfg(data)->info = mk_irq_info(IRQT_IPI, ipi, evtchn);
#else
	per_cpu(ipi_evtchn, cpu) = evtchn;
#endif
	bind_evtchn_to_cpu(evtchn, cpu);

	/* Ready for use. */
	if (!irqd_irq_disabled(data))
		unmask_evtchn(evtchn);
#ifdef PER_CPU_IPI_IRQ
	}
#endif
#endif /* CONFIG_SMP */
}

static void evtchn_resume(void)
{
	unsigned int cpu, irq, evtchn;
	struct evtchn_status status;

	/* Avoid doing anything in the 'suspend cancelled' case. */
	status.dom = DOMID_SELF;
#ifdef PER_CPU_VIRQ_IRQ
	status.port = evtchn_from_irq(percpu_read(virq_to_irq[VIRQ_TIMER]));
#else
	status.port = percpu_read(virq_to_evtchn[VIRQ_TIMER]);
#endif
	if (HYPERVISOR_event_channel_op(EVTCHNOP_status, &status))
		BUG();
	if (status.status == EVTCHNSTAT_virq
	    && status.vcpu == smp_processor_id()
	    && status.u.virq == VIRQ_TIMER)
		return;

	init_evtchn_cpu_bindings();

	if (pirq_eoi_does_unmask) {
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;
		if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn))
			BUG();
	}

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_cfg *cfg = irq_cfg(irq);

		if (!cfg)
			continue;

		/* Check that no PIRQs are still bound. */
#ifdef CONFIG_SPARSE_IRQ
		if (irq < PIRQ_BASE || irq >= PIRQ_BASE + nr_pirqs)
			BUG_ON(type_from_irq_cfg(cfg) == IRQT_PIRQ);
		else
#endif
			BUG_ON(cfg->info != IRQ_UNBOUND);

		cfg->info &= ~((1U << _EVTCHN_BITS) - 1);
	}
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}

static struct syscore_ops evtchn_syscore_ops = {
	.resume	= evtchn_resume,
};

static int __init evtchn_register(void)
{
	if (!is_initial_xendomain())
		register_syscore_ops(&evtchn_syscore_ops);
	return 0;
}
core_initcall(evtchn_register);
#endif /* CONFIG_PM_SLEEP */

int __init arch_early_irq_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(_irq_cfg); i++)
		irq_set_chip_data(i, _irq_cfg + i);

	return 0;
}

struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg = NULL;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

#ifdef CONFIG_SPARSE_IRQ
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_irq_data(at)->affinity, cpumask_of(0));
#endif

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);

	return cfg;
}

#ifdef CONFIG_SPARSE_IRQ
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif

int nr_pirqs = NR_PIRQS;
EXPORT_SYMBOL_GPL(nr_pirqs);

int __init arch_probe_nr_irqs(void)
{
	int nr = 64 + CONFIG_XEN_NR_GUEST_DEVICES, nr_irqs_gsi;

	if (is_initial_xendomain()) {
		nr_irqs_gsi = NR_IRQS_LEGACY;
#ifdef CONFIG_X86_IO_APIC
		nr_irqs_gsi += gsi_top;
#endif
#ifdef CONFIG_PCI_MSI
		nr += max(nr_irqs_gsi * 16, nr_cpu_ids * 8);
#endif
	} else {
		nr_irqs_gsi = NR_VECTORS;
#ifdef CONFIG_PCI_MSI
		nr += max(NR_IRQS_LEGACY * 16, nr_cpu_ids * 8);
#endif
	}

	if (nr_pirqs > nr_irqs_gsi)
		nr_pirqs = nr_irqs_gsi;
	if (nr > min_t(int, NR_DYNIRQS, NR_EVENT_CHANNELS))
		nr = min_t(int, NR_DYNIRQS, NR_EVENT_CHANNELS);
	nr_irqs = min_t(int, nr_pirqs + nr, PAGE_SIZE * 8);

	printk(KERN_DEBUG "nr_pirqs: %d\n", nr_pirqs);

	return ARRAY_SIZE(_irq_cfg);
}
#endif /* CONFIG_SPARSE_IRQ */

#if defined(CONFIG_X86_IO_APIC)
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	struct physdev_irq irq_op;

	if (irq < PIRQ_BASE || irq - PIRQ_BASE >= nr_pirqs)
		return -EINVAL;

	if (cfg->vector)
		return 0;

	irq_op.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
		return -ENOSPC;

	cfg->vector = irq_op.vector;

	return 0;
}
#define identity_mapped_irq(irq) (!IO_APIC_IRQ((irq) - PIRQ_BASE))
#elif defined(CONFIG_X86)
#define identity_mapped_irq(irq) (((irq) - PIRQ_BASE) < NR_IRQS_LEGACY)
#else
#define identity_mapped_irq(irq) (1)
#endif

void evtchn_register_pirq(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	BUG_ON(irq < PIRQ_BASE || irq - PIRQ_BASE >= nr_pirqs);
	if (identity_mapped_irq(irq) || type_from_irq_cfg(cfg) != IRQT_UNBOUND)
		return;
	cfg->info = mk_irq_info(IRQT_PIRQ, irq, 0);
	irq_set_chip_and_handler_name(irq, &pirq_chip, handle_fasteoi_irq,
				      "fasteoi");
}

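/*
 * evtchn_map_pirq() below serves the MSI path: called with irq < 0 it
 * allocates a fresh IRQ for the given Xen pirq, with xen_pirq == 0 it
 * tears an existing mapping down, and otherwise it verifies that the
 * IRQ is already mapped to exactly that pirq.
 */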
#ifdef CONFIG_PCI_MSI
int evtchn_map_pirq(int irq, int xen_pirq)
{
	if (irq < 0) {
#ifdef CONFIG_SPARSE_IRQ
		struct irq_cfg *cfg;

		spin_lock(&irq_mapping_update_lock);
		irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip,
				       false);
		if (irq >= 0) {
			BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND);
			cfg->bindcount++;
			cfg->info = mk_irq_info(IRQT_PIRQ, xen_pirq, 0);
		}
		spin_unlock(&irq_mapping_update_lock);
		if (irq < 0)
			return irq;
	} else if (irq >= PIRQ_BASE && irq < PIRQ_BASE + nr_pirqs) {
		WARN_ONCE(1, "Non-MSI IRQ#%d (Xen %d)\n", irq, xen_pirq);
		return -EINVAL;
#else
		static DEFINE_SPINLOCK(irq_alloc_lock);

		irq = PIRQ_BASE + nr_pirqs - 1;
		spin_lock(&irq_alloc_lock);
		do {
			struct irq_cfg *cfg;

			if (identity_mapped_irq(irq))
				continue;
			cfg = alloc_irq_and_cfg_at(irq, numa_node_id());
			if (unlikely(!cfg)) {
				spin_unlock(&irq_alloc_lock);
				return -ENOMEM;
			}
			if (!index_from_irq_cfg(cfg)) {
				BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND);
				cfg->info = mk_irq_info(IRQT_PIRQ,
							xen_pirq, 0);
				break;
			}
		} while (--irq >= PIRQ_BASE);
		spin_unlock(&irq_alloc_lock);
		if (irq < PIRQ_BASE)
			return -ENOSPC;
		irq_set_chip_and_handler_name(irq, &pirq_chip,
					      handle_fasteoi_irq, "fasteoi");
#endif
	} else if (!xen_pirq) {
		struct irq_cfg *cfg = irq_cfg(irq);

		if (!cfg || unlikely(type_from_irq_cfg(cfg) != IRQT_PIRQ))
			return -EINVAL;
		/*
		 * dynamic_irq_cleanup(irq) would seem to be the correct thing
		 * here, but cannot be used as we get here also during shutdown
		 * when a driver didn't free_irq() its MSI(-X) IRQ(s), which
		 * then causes a warning in dynamic_irq_cleanup().
		 */
		irq_set_chip_and_handler(irq, NULL, NULL);
		cfg->info = IRQ_UNBOUND;
#ifdef CONFIG_SPARSE_IRQ
		cfg->bindcount--;
#endif
		return 0;
	} else if (type_from_irq(irq) != IRQT_PIRQ
		   || index_from_irq(irq) != xen_pirq) {
		pr_err("IRQ#%d is already mapped to %d:%u - "
		       "cannot map to PIRQ#%u\n",
		       irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
		return -EINVAL;
	}
	return index_from_irq(irq) ? irq : -EINVAL;
}
#endif /* CONFIG_PCI_MSI */

int evtchn_get_xen_pirq(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (identity_mapped_irq(irq))
		return irq;
	BUG_ON(type_from_irq_cfg(cfg) != IRQT_PIRQ);
	return index_from_irq_cfg(cfg);
}

void __init xen_init_IRQ(void)
{
	unsigned int i;
	struct physdev_pirq_eoi_gmfn eoi_gmfn;

#ifndef PER_CPU_VIRQ_IRQ
	__set_bit(VIRQ_TIMER, virq_per_cpu);
	__set_bit(VIRQ_DEBUG, virq_per_cpu);
	__set_bit(VIRQ_XENOPROF, virq_per_cpu);
#ifdef CONFIG_IA64
	__set_bit(VIRQ_ITC, virq_per_cpu);
#endif
#endif

	init_evtchn_cpu_bindings();

#ifdef CONFIG_SPARSE_IRQ
	i = nr_irqs;
#else
	i = nr_pirqs;
#endif
	i = get_order(sizeof(unsigned long) * BITS_TO_LONGS(i));
	pirq_needs_eoi = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, i);
	BUILD_BUG_ON(NR_PIRQS > PAGE_SIZE * 8);
	eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0)
		pirq_eoi_does_unmask = true;

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

#ifndef CONFIG_SPARSE_IRQ
	for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) {
		irq_set_noprobe(i);
		irq_set_chip_and_handler_name(i, &dynirq_chip,
					      handle_fasteoi_irq, "fasteoi");
	}

	for (i = PIRQ_BASE; i < (PIRQ_BASE + nr_pirqs); i++) {
#else
	for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_IRQS_LEGACY); i++) {
#endif
		if (!identity_mapped_irq(i))
			continue;

		/* If not domain 0, force our RTC driver to fail its probe. */
		if (i - PIRQ_BASE == RTC_IRQ && !is_initial_xendomain())
			continue;

		irq_set_chip_and_handler_name(i, &pirq_chip,
					      handle_fasteoi_irq, "fasteoi");
	}
}