/******************************************************************************
 *
 * Communication via Xen event channels.
 *
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/ftrace.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/ptrace.h>
#include <xen/evtchn.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/physdev.h>
#include <asm/hypervisor.h>
#include <linux/mc146818rtc.h> /* RTC_IRQ */
#include "../../../kernel/irq/internals.h" /* IRQS_AUTODETECT, IRQS_PENDING */
/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);
/* IRQ <-> event-channel mappings. */
static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
	[0 ...  NR_EVENT_CHANNELS-1] = -1 };
#if defined(CONFIG_SMP) && defined(CONFIG_X86)
static struct percpu_irqaction {
	struct irqaction action; /* must be first */
	struct percpu_irqaction *next;
	cpumask_var_t cpus;      /* CPUs this action is still bound on */
} *virq_actions[NR_VIRQS];
/* IRQ <-> VIRQ mapping. */
static DECLARE_BITMAP(virq_per_cpu, NR_VIRQS) __read_mostly;
static DEFINE_PER_CPU_READ_MOSTLY(int[NR_VIRQS], virq_to_evtchn);
#define BUG_IF_VIRQ_PER_CPU(irq_cfg) \
	BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_VIRQ \
	       && test_bit(index_from_irq_cfg(irq_cfg), virq_per_cpu))
#else
#define BUG_IF_VIRQ_PER_CPU(irq_cfg) ((void)0)
#define PER_CPU_VIRQ_IRQ
#endif
/* IRQ <-> IPI mapping. */
#if defined(CONFIG_SMP) && defined(CONFIG_X86)
static int __read_mostly ipi_irq = -1;
DEFINE_PER_CPU(DECLARE_BITMAP(, NR_IPIS), ipi_pending);
static DEFINE_PER_CPU_READ_MOSTLY(evtchn_port_t, ipi_evtchn);
#else
#define PER_CPU_IPI_IRQ
#endif
#if !defined(CONFIG_SMP) || !defined(PER_CPU_IPI_IRQ)
#define BUG_IF_IPI(irq_cfg) BUG_ON(type_from_irq_cfg(irq_cfg) == IRQT_IPI)
#else
#define BUG_IF_IPI(irq_cfg) ((void)0)
#endif
#define _EVTCHN_BITS 12
#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)

/* Convenient shorthand for packed representation of an unbound IRQ. */
#define IRQ_UNBOUND (IRQT_UNBOUND << (32 - _IRQT_BITS))
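
/*
 * The irq_cfg->info word packs the binding type, a per-type index, and the
 * event channel into 32 bits (layout inferred from mk_irq_info() and the
 * accessors below):
 *
 *   bits 31..(32 - _IRQT_BITS)                         IRQ type (IRQT_*)
 *   bits (_EVTCHN_BITS + _INDEX_BITS - 1).._EVTCHN_BITS
 *                               type-specific index (PIRQ/VIRQ/IPI number)
 *   bits (_EVTCHN_BITS - 1)..0  bound event-channel port (0 = none)
 */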
static struct irq_cfg _irq_cfg[] = {
	[0 ...
#ifdef CONFIG_SPARSE_IRQ
	       BUILD_BUG_ON_ZERO(PIRQ_BASE) + NR_IRQS_LEGACY
#else
	       NR_IRQS
#endif
	       - 1].info = IRQ_UNBOUND
};
static inline struct irq_cfg *__pure irq_cfg(unsigned int irq)
{
#ifdef CONFIG_SPARSE_IRQ
	return irq_get_chip_data(irq);
#else
	return irq < NR_IRQS ? _irq_cfg + irq : NULL;
#endif
}

static inline struct irq_cfg *__pure irq_data_cfg(struct irq_data *data)
{
	return irq_data_get_irq_chip_data(data);
}
/* Constructor for packed IRQ information. */
static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
{
	BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));

	BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
	BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
#if defined(PER_CPU_IPI_IRQ) && defined(NR_IPIS)
	BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
#endif
	BUG_ON(index >> _INDEX_BITS);

	BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));

	return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
}
/*
 * Accessors for packed IRQ information.
 */
static inline unsigned int index_from_irq_cfg(const struct irq_cfg *cfg)
{
	return (cfg->info >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
}

static inline unsigned int index_from_irq(int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);

	return cfg ? index_from_irq_cfg(cfg) : 0;
}

static inline unsigned int type_from_irq_cfg(const struct irq_cfg *cfg)
{
	return cfg->info >> (32 - _IRQT_BITS);
}

static inline unsigned int type_from_irq(int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);

	return cfg ? type_from_irq_cfg(cfg) : IRQT_UNBOUND;
}
static inline unsigned int evtchn_from_per_cpu_irq(const struct irq_cfg *cfg,
						   unsigned int cpu)
{
	switch (type_from_irq_cfg(cfg)) {
#ifndef PER_CPU_VIRQ_IRQ
	case IRQT_VIRQ:
		return per_cpu(virq_to_evtchn, cpu)[index_from_irq_cfg(cfg)];
#endif
#ifndef PER_CPU_IPI_IRQ
	case IRQT_IPI:
		return per_cpu(ipi_evtchn, cpu);
#endif
	}
	BUG();
	return 0;
}
static inline unsigned int evtchn_from_irq_cfg(const struct irq_cfg *cfg)
{
	switch (type_from_irq_cfg(cfg)) {
#ifndef PER_CPU_VIRQ_IRQ
	case IRQT_VIRQ:
#endif
#ifndef PER_CPU_IPI_IRQ
	case IRQT_IPI:
#endif
		return evtchn_from_per_cpu_irq(cfg, smp_processor_id());
	}
	return cfg->info & ((1U << _EVTCHN_BITS) - 1);
}
static inline unsigned int evtchn_from_irq_data(struct irq_data *data)
{
	const struct irq_cfg *cfg = irq_data_cfg(data);

	return cfg ? evtchn_from_irq_cfg(cfg) : 0;
}

static inline unsigned int evtchn_from_irq(int irq)
{
	struct irq_data *data = irq_get_irq_data(irq);

	return data ? evtchn_from_irq_data(data) : 0;
}

unsigned int irq_from_evtchn(unsigned int port)
{
	return evtchn_to_irq[port];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
/* IRQ <-> VIRQ mapping. */
DEFINE_PER_CPU(int[NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
/* IRQ <-> IPI mapping. */
DEFINE_PER_CPU(int[NR_IPIS], ipi_to_irq) = {[0 ... NR_IPIS-1] = -1};
#endif

#ifdef CONFIG_SMP

#if CONFIG_NR_CPUS <= 256
static u8 cpu_evtchn[NR_EVENT_CHANNELS];
#else
static u16 cpu_evtchn[NR_EVENT_CHANNELS];
#endif
static DEFINE_PER_CPU(unsigned long[BITS_TO_LONGS(NR_EVENT_CHANNELS)],
		      cpu_evtchn_mask);
static inline unsigned long active_evtchns(unsigned int idx)
{
	shared_info_t *sh = HYPERVISOR_shared_info;

	return (sh->evtchn_pending[idx] &
		percpu_read(cpu_evtchn_mask[idx]) &
		~sh->evtchn_mask[idx]);
}
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	int irq = evtchn_to_irq[chn];

	BUG_ON(!test_bit(chn, s->evtchn_mask));

	if (irq != -1) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (!irqd_is_per_cpu(data))
			cpumask_copy(data->affinity, cpumask_of(cpu));
		else
			cpumask_set_cpu(cpu, data->affinity);
	}

	clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_evtchn[chn]));
	set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));
	cpu_evtchn[chn] = cpu;
}
static void init_evtchn_cpu_bindings(void)
{
	int i;

	/* By default all event channels notify CPU#0. */
	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *data = irq_get_irq_data(i);

		if (data)
			cpumask_copy(data->affinity, cpumask_of(0));
	}

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	for_each_possible_cpu(i)
		/* -!i: all ones for CPU#0's mask, all zeroes for the rest. */
		memset(per_cpu(cpu_evtchn_mask, i), -!i,
		       sizeof(per_cpu(cpu_evtchn_mask, i)));
}
static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return cpu_evtchn[evtchn];
}
#else /* !CONFIG_SMP */

static inline unsigned long active_evtchns(unsigned int idx)
{
	shared_info_t *sh = HYPERVISOR_shared_info;

	return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
}

static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
}

static void init_evtchn_cpu_bindings(void)
{
}

static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
{
	return 0;
}

#endif /* CONFIG_SMP */
void __init xen_init_IRQ(void);
void __init init_IRQ(void)
{
	xen_init_IRQ();
}

#include <asm/idle.h>
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn) ((chn) != 0)

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void force_evtchn_callback(void)
{
	VOID(HYPERVISOR_xen_version(0, NULL));
}
/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
EXPORT_SYMBOL(force_evtchn_callback);
#define UPC_INACTIVE 0
#define UPC_ACTIVE 1
#define UPC_NESTED_LATCH 2
#define UPC_RESTART (UPC_ACTIVE|UPC_NESTED_LATCH)
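
/*
 * Per-CPU upcall re-entrancy state driving evtchn_do_upcall() below:
 * INACTIVE -> ACTIVE on entry; a nested invocation merely latches
 * UPC_NESTED_LATCH and returns, and the outer instance keeps rescanning
 * for as long as it observes UPC_RESTART (== ACTIVE|NESTED_LATCH).
 */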
static DEFINE_PER_CPU(unsigned int, upcall_state);
static DEFINE_PER_CPU(unsigned int, current_l1i);
static DEFINE_PER_CPU(unsigned int, current_l2i);

#ifndef vcpu_info_xchg
#define vcpu_info_xchg(fld, val) xchg(&current_vcpu_info()->fld, val)
#endif
/* NB. Interrupts are disabled on entry. */
asmlinkage void __irq_entry evtchn_do_upcall(struct pt_regs *regs)
{
	unsigned long l1, l2;
	unsigned long masked_l1, masked_l2;
	unsigned int l1i, l2i, start_l1i, start_l2i, port, i;
	int irq;
	struct pt_regs *old_regs;

	/* Nested invocations bail immediately. */
	if (unlikely(__this_cpu_cmpxchg(upcall_state, UPC_INACTIVE,
					UPC_ACTIVE) != UPC_INACTIVE)) {
		__this_cpu_or(upcall_state, UPC_NESTED_LATCH);
		/* Avoid a callback storm when we reenable delivery. */
		vcpu_info_write(evtchn_upcall_pending, 0);
		return;
	}

	old_regs = set_irq_regs(regs);
	xen_spin_irq_enter();
	irq_enter();
	exit_idle();

	do {
		vcpu_info_write(evtchn_upcall_pending, 0);

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
		/* Clear master flag /before/ clearing selector flag. */
		wmb();
#endif

#ifdef CONFIG_NO_HZ
		/*
		 * Handle timer interrupts before all others, so that all
		 * hardirq handlers see an up-to-date system time even if we
		 * have just woken from a long idle period.
		 */
#ifdef PER_CPU_VIRQ_IRQ
		if ((irq = percpu_read(virq_to_irq[VIRQ_TIMER])) != -1) {
			port = evtchn_from_irq(irq);
#else
		port = __this_cpu_read(virq_to_evtchn[VIRQ_TIMER]);
		if (VALID_EVTCHN(port)) {
#endif
			l1i = port / BITS_PER_LONG;
			l2i = port % BITS_PER_LONG;
			if (active_evtchns(l1i) & (1ul<<l2i)) {
				mask_evtchn(port);
				clear_evtchn(port);
#ifndef PER_CPU_VIRQ_IRQ
				irq = evtchn_to_irq[port];
				BUG_ON(irq == -1);
#endif
				if (!handle_irq(irq, regs))
					BUG();
			}
		}
#endif /* CONFIG_NO_HZ */
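
		/*
		 * Scan the two-level pending bitmap: l1 is this VCPU's
		 * selector word (one bit per shared-info pending word), l2
		 * the pending-and-unmasked ports within one such word. The
		 * scan resumes at current_l1i/current_l2i, one past the port
		 * handled last time, so low-numbered ports cannot starve
		 * the rest.
		 */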
		l1 = vcpu_info_xchg(evtchn_pending_sel, 0);

		start_l1i = l1i = percpu_read(current_l1i);
		start_l2i = percpu_read(current_l2i);

		for (i = 0; l1 != 0; i++) {
			masked_l1 = l1 & ((~0UL) << l1i);
			/* If we masked out all events, wrap to beginning. */
			if (masked_l1 == 0) {
				l1i = 0;
				continue;
			}
			l1i = __ffs(masked_l1);

			l2 = active_evtchns(l1i);
			l2i = 0; /* usually scan entire word from start */
			if (l1i == start_l1i) {
				/* We scan the starting word in two parts. */
				if (i == 0)
					/* 1st time: start in the middle */
					l2i = start_l2i;
				else
					/* 2nd time: mask bits done already */
					l2 &= (1ul << start_l2i) - 1;
			}

			do {
				bool handled = false;

				masked_l2 = l2 & ((~0UL) << l2i);
				if (masked_l2 == 0)
					break;
				l2i = __ffs(masked_l2);

				/* Process the port. */
				port = (l1i * BITS_PER_LONG) + l2i;
				mask_evtchn(port);
				if ((irq = evtchn_to_irq[port]) != -1) {
#ifndef PER_CPU_IPI_IRQ
					if (port != __this_cpu_read(ipi_evtchn))
#endif
						clear_evtchn(port);
					handled = handle_irq(irq, regs);
				}
				if (!handled && printk_ratelimit())
					pr_emerg("No handler for irq %d\n",
						 irq);

				l2i = (l2i + 1) % BITS_PER_LONG;

				/* Next caller starts at last processed + 1 */
				percpu_write(current_l1i,
					     l2i ? l1i
						 : (l1i + 1) % BITS_PER_LONG);
				percpu_write(current_l2i, l2i);
			} while (l2i != 0);

			/* Scan start_l1i twice; all others once. */
			if ((l1i != start_l1i) || (i != 0))
				l1 &= ~(1UL << l1i);

			l1i = (l1i + 1) % BITS_PER_LONG;
		}

		/* If there were nested callbacks then we have more to do. */
	} while (unlikely(__this_cpu_cmpxchg(upcall_state, UPC_RESTART,
					     UPC_ACTIVE) == UPC_RESTART));

	__this_cpu_write(upcall_state, UPC_INACTIVE);

	irq_exit();
	xen_spin_irq_exit();
	set_irq_regs(old_regs);
}
static int find_unbound_irq(unsigned int node, struct irq_cfg **pcfg,
			    struct irq_chip *chip, bool percpu)
{
	int irq;

	for (irq = DYNIRQ_BASE; irq < nr_irqs; irq++) {
		struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
		struct irq_data *data = irq_get_irq_data(irq);

		if (unlikely(!cfg))
			return -ENOMEM;
		if (data->chip != &no_irq_chip &&
		    data->chip != chip)
			continue;

		if (!cfg->bindcount) {
			irq_flow_handler_t handle;
			const char *name;

			*pcfg = cfg;
			irq_set_noprobe(irq);
			if (!percpu) {
				handle = handle_fasteoi_irq;
				name = "fasteoi";
			} else {
				handle = handle_percpu_irq;
				name = "percpu";
			}
			irq_set_chip_and_handler_name(irq, chip,
						      handle, name);
			return irq;
		}
	}

	pr_warning("No available IRQ to bind to: "
		   "increase NR_DYNIRQS.\n");

	return -ENOSPC;
}
static struct irq_chip dynirq_chip;
static int bind_caller_port_to_irq(unsigned int caller_port)
{
	struct irq_cfg *cfg;
	int irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = evtchn_to_irq[caller_port]) == -1) {
		if ((irq = find_unbound_irq(numa_node_id(), &cfg,
					    &dynirq_chip, false)) < 0)
			goto out;

		evtchn_to_irq[caller_port] = irq;
		cfg->info = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
	} else
		cfg = irq_cfg(irq);

	cfg->bindcount++;

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static int bind_local_port_to_irq(unsigned int local_port)
{
	struct irq_cfg *cfg;
	int irq;

	spin_lock(&irq_mapping_update_lock);

	BUG_ON(evtchn_to_irq[local_port] != -1);

	if ((irq = find_unbound_irq(numa_node_id(), &cfg, &dynirq_chip,
				    false)) < 0) {
		if (close_evtchn(local_port))
			BUG();
		goto out;
	}

	evtchn_to_irq[local_port] = irq;
	cfg->info = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
	cfg->bindcount++;

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
static int bind_listening_port_to_irq(unsigned int remote_domain)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom        = DOMID_SELF;
	alloc_unbound.remote_dom = remote_domain;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);

	return err ? : bind_local_port_to_irq(alloc_unbound.port);
}
static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
					  unsigned int remote_port)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = remote_domain;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
}
static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	struct irq_cfg *cfg;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
					    &dynirq_chip, false)) < 0)
			goto out;

		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		evtchn_to_irq[evtchn] = irq;
#ifndef PER_CPU_VIRQ_IRQ
		{
			unsigned int cpu;

			for_each_possible_cpu(cpu)
				per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
		}
#endif
		cfg->info = mk_irq_info(IRQT_VIRQ, virq, evtchn);

		per_cpu(virq_to_irq, cpu)[virq] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	} else
		cfg = irq_cfg(irq);

	cfg->bindcount++;

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	struct irq_cfg *cfg;
	int evtchn, irq;

	spin_lock(&irq_mapping_update_lock);

	if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
		if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
					    &dynirq_chip, false)) < 0)
			goto out;

		bind_ipi.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		evtchn_to_irq[evtchn] = irq;
		cfg->info = mk_irq_info(IRQT_IPI, ipi, evtchn);

		per_cpu(ipi_to_irq, cpu)[ipi] = irq;

		bind_evtchn_to_cpu(evtchn, cpu);
	} else
		cfg = irq_cfg(irq);

	cfg->bindcount++;

 out:
	spin_unlock(&irq_mapping_update_lock);

	return irq;
}
#endif
static void unbind_from_irq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int evtchn = evtchn_from_irq_cfg(cfg);

	BUG_IF_VIRQ_PER_CPU(cfg);
	BUG_IF_IPI(cfg);

	spin_lock(&irq_mapping_update_lock);

	if (!--cfg->bindcount && VALID_EVTCHN(evtchn)) {
		if ((type_from_irq_cfg(cfg) != IRQT_CALLER_PORT) &&
		    close_evtchn(evtchn))
			BUG();

		switch (type_from_irq_cfg(cfg)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq_cfg(cfg)] = -1;
#ifndef PER_CPU_VIRQ_IRQ
			{
				unsigned int cpu;

				for_each_possible_cpu(cpu)
					per_cpu(virq_to_evtchn, cpu)
						[index_from_irq_cfg(cfg)] = 0;
			}
#endif
			break;
#if defined(CONFIG_SMP) && defined(PER_CPU_IPI_IRQ)
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
				[index_from_irq_cfg(cfg)] = -1;
			break;
#endif
		default:
			break;
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
		cfg->info = IRQ_UNBOUND;

		dynamic_irq_cleanup(irq);
	}

	spin_unlock(&irq_mapping_update_lock);
}
#if !defined(PER_CPU_IPI_IRQ) || !defined(PER_CPU_VIRQ_IRQ)
static inline struct percpu_irqaction *alloc_percpu_irqaction(gfp_t gfp)
{
	struct percpu_irqaction *new = kzalloc(sizeof(*new), GFP_ATOMIC);

	if (new && !zalloc_cpumask_var(&new->cpus, gfp)) {
		kfree(new);
		new = NULL;
	}
	return new;
}

static inline void free_percpu_irqaction(struct percpu_irqaction *action)
{
	if (!action)
		return;
	free_cpumask_var(action->cpus);
	kfree(action);
}
void unbind_from_per_cpu_irq(unsigned int irq, unsigned int cpu,
			     struct irqaction *action)
{
	struct evtchn_close close;
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_cfg *cfg = irq_data_cfg(data);
	unsigned int evtchn = evtchn_from_per_cpu_irq(cfg, cpu);
	struct percpu_irqaction *free_action = NULL;

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(evtchn)) {
		mask_evtchn(evtchn);

		BUG_ON(cfg->bindcount <= 1);
		cfg->bindcount--;

#ifndef PER_CPU_VIRQ_IRQ
		if (type_from_irq_cfg(cfg) == IRQT_VIRQ) {
			unsigned int virq = index_from_irq_cfg(cfg);
			struct percpu_irqaction *cur, *prev = NULL;

			cur = virq_actions[virq];
			while (cur) {
				if (cur->action.dev_id == action) {
					cpumask_clear_cpu(cpu, cur->cpus);
					if (cpumask_empty(cur->cpus)) {
						WARN_ON(free_action);
						if (prev)
							prev->next = cur->next;
						else
							virq_actions[virq]
								= cur->next;
						free_action = cur;
					}
				} else if (cpumask_test_cpu(cpu, cur->cpus))
					evtchn = 0;
				cur = (prev = cur)->next;
			}
			if (!VALID_EVTCHN(evtchn))
				goto done;
		}
#endif

		cpumask_clear_cpu(cpu, data->affinity);

		close.port = evtchn;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
			BUG();

		switch (type_from_irq_cfg(cfg)) {
#ifndef PER_CPU_VIRQ_IRQ
		case IRQT_VIRQ:
			per_cpu(virq_to_evtchn, cpu)
				[index_from_irq_cfg(cfg)] = 0;
			break;
#endif
#ifndef PER_CPU_IPI_IRQ
		case IRQT_IPI:
			per_cpu(ipi_evtchn, cpu) = 0;
			break;
#endif
		default:
			BUG();
		}

		/* Closed ports are implicitly re-bound to VCPU0. */
		bind_evtchn_to_cpu(evtchn, 0);

		evtchn_to_irq[evtchn] = -1;
	}

#ifndef PER_CPU_VIRQ_IRQ
 done:
#endif
	spin_unlock(&irq_mapping_update_lock);

	if (free_action) {
		cpumask_t *cpus = free_action->cpus;

		free_irq(irq, free_action->action.dev_id);
		free_cpumask_var(cpus);
	}
}
EXPORT_SYMBOL_GPL(unbind_from_per_cpu_irq);
#endif /* !PER_CPU_IPI_IRQ || !PER_CPU_VIRQ_IRQ */
int bind_caller_port_to_irqhandler(
	unsigned int caller_port,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_caller_port_to_irq(caller_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);
int bind_listening_port_to_irqhandler(
	unsigned int remote_domain,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_listening_port_to_irq(remote_domain);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);
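
/*
 * Typical use (hypothetical backend-style caller; the handler, device name
 * and dev_id below are illustrative only):
 *
 *	irq = bind_listening_port_to_irqhandler(otherend_id, my_handler,
 *						0, "my-dev", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */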
int bind_interdomain_evtchn_to_irqhandler(
	unsigned int remote_domain,
	unsigned int remote_port,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
int bind_virq_to_irqhandler(
	unsigned int virq,
	unsigned int cpu,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

#ifndef PER_CPU_VIRQ_IRQ
	BUG_ON(test_bit(virq, virq_per_cpu));
#endif

	irq = bind_virq_to_irq(virq, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
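
/*
 * Typical use (hypothetical caller; the handler name is illustrative only):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0, my_debug_interrupt,
 *				      0, "debug", NULL);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, NULL);
 */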
#ifdef CONFIG_SMP
#ifndef PER_CPU_VIRQ_IRQ
int bind_virq_to_irqaction(
	unsigned int virq,
	unsigned int cpu,
	struct irqaction *action)
{
	struct evtchn_bind_virq bind_virq;
	struct irq_cfg *cfg;
	int evtchn, irq, retval = 0;
	struct percpu_irqaction *cur = NULL, *new;

	BUG_ON(!test_bit(virq, virq_per_cpu));

	if (action->dev_id)
		return -EINVAL;

	new = alloc_percpu_irqaction(GFP_ATOMIC);
	if (new) {
		new->action = *action;
		new->action.dev_id = action;
	}

	spin_lock(&irq_mapping_update_lock);

	for (cur = virq_actions[virq]; cur; cur = cur->next)
		if (cur->action.dev_id == action)
			break;
	if (!cur) {
		if (!new) {
			spin_unlock(&irq_mapping_update_lock);
			return -ENOMEM;
		}
		new->next = virq_actions[virq];
		virq_actions[virq] = cur = new;
		new = NULL;
		retval = 1;
	}
	cpumask_set_cpu(cpu, cur->cpus);
	action = &cur->action;

	if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
		unsigned int nr;

		BUG_ON(!retval);

		if ((irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
					    &dynirq_chip, true)) < 0) {
			virq_actions[virq] = cur->next;
			spin_unlock(&irq_mapping_update_lock);
			free_percpu_irqaction(new);
			return irq;
		}

		/* Extra reference so count will never drop to zero. */
		cfg->bindcount++;

		for_each_possible_cpu(nr)
			per_cpu(virq_to_irq, nr)[virq] = irq;
		cfg->info = mk_irq_info(IRQT_VIRQ, virq, 0);
	} else
		cfg = irq_cfg(irq);

	evtchn = per_cpu(virq_to_evtchn, cpu)[virq];
	if (!VALID_EVTCHN(evtchn)) {
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;
		evtchn_to_irq[evtchn] = irq;
		per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;

		bind_evtchn_to_cpu(evtchn, cpu);
	}

	cfg->bindcount++;

	spin_unlock(&irq_mapping_update_lock);

	free_percpu_irqaction(new);

	if (!retval) {
		unsigned long flags;

		local_irq_save(flags);
		unmask_evtchn(evtchn);
		local_irq_restore(flags);
	} else {
		action->flags |= IRQF_PERCPU;
		retval = setup_irq(irq, action);
		if (retval) {
			unbind_from_per_cpu_irq(irq, cpu, action);
			BUG_ON(retval > 0);
			irq = retval;
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqaction);
#endif /* !PER_CPU_VIRQ_IRQ */
#ifdef PER_CPU_IPI_IRQ
int bind_ipi_to_irqhandler(
	unsigned int ipi,
	unsigned int cpu,
	irq_handler_t handler,
	unsigned long irqflags,
	const char *devname,
	void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags | IRQF_NO_SUSPEND,
			     devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
#else
int __cpuinit bind_ipi_to_irqaction(
	unsigned int cpu,
	struct irqaction *action)
{
	struct evtchn_bind_ipi bind_ipi;
	struct irq_cfg *cfg;
	unsigned int evtchn;
	int retval = 0;

	spin_lock(&irq_mapping_update_lock);

	if (VALID_EVTCHN(per_cpu(ipi_evtchn, cpu))) {
		spin_unlock(&irq_mapping_update_lock);
		return -EBUSY;
	}

	if (ipi_irq < 0) {
		if ((ipi_irq = find_unbound_irq(cpu_to_node(cpu), &cfg,
						&dynirq_chip, true)) < 0) {
			spin_unlock(&irq_mapping_update_lock);
			return ipi_irq;
		}

		/* Extra reference so count will never drop to zero. */
		cfg->bindcount++;

		cfg->info = mk_irq_info(IRQT_IPI, 0, 0);
		retval = 1;
	} else
		cfg = irq_cfg(ipi_irq);

	bind_ipi.vcpu = cpu;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi))
		BUG();

	evtchn = bind_ipi.port;
	evtchn_to_irq[evtchn] = ipi_irq;
	per_cpu(ipi_evtchn, cpu) = evtchn;

	bind_evtchn_to_cpu(evtchn, cpu);

	cfg->bindcount++;

	spin_unlock(&irq_mapping_update_lock);

	if (!retval) {
		unsigned long flags;

		local_irq_save(flags);
		unmask_evtchn(evtchn);
		local_irq_restore(flags);
	} else {
		action->flags |= IRQF_PERCPU | IRQF_NO_SUSPEND;
		retval = setup_irq(ipi_irq, action);
		if (retval) {
			unbind_from_per_cpu_irq(ipi_irq, cpu, NULL);
			BUG_ON(retval > 0);
			ipi_irq = retval;
		}
	}

	return ipi_irq;
}
#endif /* PER_CPU_IPI_IRQ */
#endif /* CONFIG_SMP */
void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
#ifdef CONFIG_SMP
static int set_affinity_irq(struct irq_data *data,
			    const struct cpumask *dest, bool force)
{
	const struct irq_cfg *cfg = irq_data_cfg(data);
	unsigned int port = evtchn_from_irq_cfg(cfg);
	unsigned int cpu = cpumask_any(dest);
	struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
	bool masked;
	int rc;

	BUG_IF_VIRQ_PER_CPU(cfg);
	BUG_IF_IPI(cfg);

	if (!VALID_EVTCHN(port))
		return -ENXIO;

	masked = test_and_set_evtchn_mask(port);
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv);
	if (rc == 0) {
		bind_evtchn_to_cpu(port, cpu);
		rc = evtchn_to_irq[port] != -1 ? IRQ_SET_MASK_OK_NOCOPY
					       : IRQ_SET_MASK_OK;
	}
	if (!masked)
		unmask_evtchn(port);

	return rc;
}
#endif
int resend_irq_on_evtchn(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq_data(data);
	bool masked;

	if (!VALID_EVTCHN(evtchn))
		return 1;

	masked = test_and_set_evtchn_mask(evtchn);
	set_evtchn(evtchn);
	if (!masked)
		unmask_evtchn(evtchn);

	return 1;
}
/*
 * Interface to generic handling in irq.c
 */
static void unmask_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq_data(data);

	if (VALID_EVTCHN(evtchn))
		unmask_evtchn(evtchn);
}

static void mask_dynirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq_data(data);

	if (VALID_EVTCHN(evtchn))
		mask_evtchn(evtchn);
}

static unsigned int startup_dynirq(struct irq_data *data)
{
	unmask_dynirq(data);
	return 0;
}

#define shutdown_dynirq mask_dynirq
static void end_dynirq(struct irq_data *data)
{
	if (!irqd_irq_disabled(data)) {
		irq_move_masked_irq(data);
		unmask_dynirq(data);
	}
}

static struct irq_chip dynirq_chip = {
	.name             = "Dynamic",
	.irq_startup      = startup_dynirq,
	.irq_shutdown     = shutdown_dynirq,
	.irq_enable       = unmask_dynirq,
	.irq_disable      = mask_dynirq,
	.irq_mask         = mask_dynirq,
	.irq_unmask       = unmask_dynirq,
	.irq_eoi          = end_dynirq,
#ifdef CONFIG_SMP
	.irq_set_affinity = set_affinity_irq,
#endif
	.irq_retrigger    = resend_irq_on_evtchn,
};
/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
static bool pirq_eoi_does_unmask;
static unsigned long *pirq_needs_eoi;
static DECLARE_BITMAP(probing_pirq, NR_PIRQS);
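
/*
 * Two PIRQ EOI schemes are used (see pirq_unmask_and_notify() below): if the
 * hypervisor supports PHYSDEVOP_pirq_eoi_gmfn, pirq_needs_eoi points at a
 * page shared with Xen and PHYSDEVOP_eoi also performs the unmask; otherwise
 * the bitmap is maintained via PHYSDEVOP_irq_status_query and the unmask and
 * the EOI notification are issued separately.
 */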
static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
{
	struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };

	if (pirq_eoi_does_unmask) {
		if (test_bit(eoi.irq, pirq_needs_eoi))
			VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
		else
			unmask_evtchn(evtchn);
	} else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
		if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
			struct evtchn_unmask unmask = { .port = evtchn };
			struct multicall_entry mcl[2];

			mcl[0].op = __HYPERVISOR_event_channel_op;
			mcl[0].args[0] = EVTCHNOP_unmask;
			mcl[0].args[1] = (unsigned long)&unmask;
			mcl[1].op = __HYPERVISOR_physdev_op;
			mcl[1].args[0] = PHYSDEVOP_eoi;
			mcl[1].args[1] = (unsigned long)&eoi;

			if (HYPERVISOR_multicall(mcl, 2))
				BUG();
		} else {
			unmask_evtchn(evtchn);
			VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
		}
	} else
		unmask_evtchn(evtchn);
}
static inline void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;

	if (pirq_eoi_does_unmask)
		return;
	irq_status.irq = evtchn_get_xen_pirq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;
	clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
}
static int set_type_pirq(struct irq_data *data, unsigned int type)
{
	if (type != IRQ_TYPE_PROBE)
		return -EINVAL;
	set_bit(data->irq - PIRQ_BASE, probing_pirq);
	return 0;
}
static void enable_pirq(struct irq_data *data)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_cfg *cfg = irq_data_cfg(data);
	unsigned int evtchn = evtchn_from_irq_cfg(cfg);
	unsigned int irq = data->irq, pirq = irq - PIRQ_BASE;

	if (VALID_EVTCHN(evtchn)) {
		if (pirq < nr_pirqs)
			clear_bit(pirq, probing_pirq);
		goto out;
	}

	bind_pirq.pirq = evtchn_get_xen_pirq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = (pirq < nr_pirqs
			   && test_and_clear_bit(pirq, probing_pirq))
			  || (irq_to_desc(irq)->istate & IRQS_AUTODETECT)
			  ? 0 : BIND_PIRQ__WILL_SHARE;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
		if (bind_pirq.flags)
			pr_info("Failed to obtain physical IRQ %d\n", irq);
		return;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	evtchn_to_irq[evtchn] = irq;
	bind_evtchn_to_cpu(evtchn, 0);
	cfg->info = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);

 out:
	pirq_unmask_and_notify(evtchn, irq);
}
#define disable_pirq mask_pirq

static unsigned int startup_pirq(struct irq_data *data)
{
	enable_pirq(data);
	return 0;
}

static void shutdown_pirq(struct irq_data *data)
{
	struct irq_cfg *cfg = irq_data_cfg(data);
	unsigned int evtchn = evtchn_from_irq_cfg(cfg);

	if (!VALID_EVTCHN(evtchn))
		return;

	mask_evtchn(evtchn);

	if (close_evtchn(evtchn))
		BUG();

	bind_evtchn_to_cpu(evtchn, 0);
	evtchn_to_irq[evtchn] = -1;
	cfg->info = mk_irq_info(IRQT_PIRQ, index_from_irq_cfg(cfg), 0);
}
static void unmask_pirq(struct irq_data *data)
{
	unsigned int evtchn = evtchn_from_irq_data(data);

	if (VALID_EVTCHN(evtchn))
		pirq_unmask_and_notify(evtchn, data->irq);
}

#define mask_pirq mask_dynirq
static void end_pirq(struct irq_data *data)
{
	bool disabled = irqd_irq_disabled(data);

	if (disabled && (irq_to_desc(data->irq)->istate & IRQS_PENDING))
		shutdown_pirq(data);
	else if (!disabled) {
		irq_move_masked_irq(data);
		unmask_pirq(data);
	}
}
static struct irq_chip pirq_chip = {
	.name             = "Phys",
	.irq_startup      = startup_pirq,
	.irq_shutdown     = shutdown_pirq,
	.irq_enable       = enable_pirq,
	.irq_disable      = disable_pirq,
	.irq_mask         = mask_pirq,
	.irq_unmask       = unmask_pirq,
	.irq_eoi          = end_pirq,
	.irq_set_type     = set_type_pirq,
#ifdef CONFIG_SMP
	.irq_set_affinity = set_affinity_irq,
#endif
	.irq_retrigger    = resend_irq_on_evtchn,
};
int irq_ignore_unhandled(unsigned int irq)
{
	struct physdev_irq_status_query irq_status = { .irq = irq };

	if (!is_running_on_xen() || irq >= nr_pirqs)
		return 0;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !!(irq_status.flags & XENIRQSTAT_shared);
}
#if defined(CONFIG_SMP) && !defined(PER_CPU_IPI_IRQ)
void notify_remote_via_ipi(unsigned int ipi, unsigned int cpu)
{
	unsigned int evtchn = per_cpu(ipi_evtchn, cpu);

#ifdef NMI_VECTOR
	if (ipi == NMI_VECTOR) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);

		if (rc)
			pr_warn_once("Unable (%d) to send NMI to CPU#%u\n",
				     rc, cpu);
		return;
	}
#endif

	if (VALID_EVTCHN(evtchn)
	    && !test_and_set_bit(ipi, per_cpu(ipi_pending, cpu))
	    && !test_evtchn(evtchn))
		notify_remote_via_evtchn(evtchn);
}

void clear_ipi_evtchn(void)
{
	unsigned int evtchn = this_cpu_read(ipi_evtchn);

	BUG_ON(!VALID_EVTCHN(evtchn));
	clear_evtchn(evtchn);
}
#endif
void notify_remote_via_irq(int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int evtchn;

	if (WARN_ON_ONCE(!cfg))
		return;
	BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ);
	BUG_IF_IPI(cfg);

	evtchn = evtchn_from_irq_cfg(cfg);
	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
#if defined(CONFIG_XEN_BACKEND) || defined(CONFIG_XEN_BACKEND_MODULE)
int multi_notify_remote_via_irq(multicall_entry_t *mcl, int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);
	unsigned int evtchn;

	if (WARN_ON_ONCE(!cfg))
		return -EINVAL;
	BUG_ON(type_from_irq_cfg(cfg) == IRQT_VIRQ);
	BUG_IF_IPI(cfg);

	evtchn = evtchn_from_irq_cfg(cfg);
	if (!VALID_EVTCHN(evtchn))
		return -EINVAL;

	multi_notify_remote_via_evtchn(mcl, evtchn);
	return 0;
}
EXPORT_SYMBOL_GPL(multi_notify_remote_via_irq);
#endif
int irq_to_evtchn_port(int irq)
{
	const struct irq_cfg *cfg = irq_cfg(irq);

	if (!cfg)
		return 0;
	BUG_IF_VIRQ_PER_CPU(cfg);
	BUG_IF_IPI(cfg);
	return evtchn_from_irq_cfg(cfg);
}
EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
void mask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;

	sync_set_bit(port, s->evtchn_mask);
}
EXPORT_SYMBOL_GPL(mask_evtchn);
void unmask_evtchn(int port)
{
	shared_info_t *s = HYPERVISOR_shared_info;
	unsigned int cpu = smp_processor_id();

	BUG_ON(!irqs_disabled());

	/* Slow path (hypercall) if this is a non-local port. */
	if (unlikely(cpu != cpu_from_evtchn(port))) {
		struct evtchn_unmask unmask = { .port = port };

		VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
		return;
	}

	sync_clear_bit(port, s->evtchn_mask);

	/* Did we miss an interrupt 'edge'? Re-fire if so. */
	if (sync_test_bit(port, s->evtchn_pending)) {
		vcpu_info_t *v = current_vcpu_info();

		if (!sync_test_and_set_bit(port / BITS_PER_LONG,
					   &v->evtchn_pending_sel))
			v->evtchn_upcall_pending = 1;
	}
}
EXPORT_SYMBOL_GPL(unmask_evtchn);
void disable_all_local_evtchn(void)
{
	unsigned i, cpu = smp_processor_id();
	shared_info_t *s = HYPERVISOR_shared_info;

	for (i = 0; i < NR_EVENT_CHANNELS; ++i)
		if (cpu_from_evtchn(i) == cpu)
			sync_set_bit(i, &s->evtchn_mask[0]);
}
/* Test an irq's pending state. */
int xen_test_irq_pending(int irq)
{
	unsigned int evtchn = evtchn_from_irq(irq);

	return VALID_EVTCHN(evtchn) && test_evtchn(evtchn);
}
#ifdef CONFIG_PM_SLEEP
#include <linux/syscore_ops.h>
static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	int virq, irq, evtchn;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

#ifndef PER_CPU_VIRQ_IRQ
		if (test_bit(virq, virq_per_cpu)
		    && !VALID_EVTCHN(per_cpu(virq_to_evtchn, cpu)[virq]))
			continue;
#endif

		BUG_ON(irq_cfg(irq)->info != mk_irq_info(IRQT_VIRQ, virq, 0));

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = cpu;
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		evtchn_to_irq[evtchn] = irq;
#ifdef PER_CPU_VIRQ_IRQ
		irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq, evtchn);
#else
		if (test_bit(virq, virq_per_cpu))
			per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
		else {
			unsigned int cpu;

			irq_cfg(irq)->info = mk_irq_info(IRQT_VIRQ, virq,
							 evtchn);
			for_each_possible_cpu(cpu)
				per_cpu(virq_to_evtchn, cpu)[virq] = evtchn;
		}
#endif
		bind_evtchn_to_cpu(evtchn, cpu);

		/* Ready for use. */
		unmask_evtchn(evtchn);
	}
}
static void restore_cpu_ipis(unsigned int cpu)
{
#ifdef CONFIG_SMP
	struct evtchn_bind_ipi bind_ipi;
	struct irq_data *data;
	unsigned int evtchn;
#ifdef PER_CPU_IPI_IRQ
	int ipi, irq;

	for (ipi = 0; ipi < NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;
#else
	int ipi = 0, irq = ipi_irq;

	if (irq == -1
	    || !VALID_EVTCHN(per_cpu(ipi_evtchn, cpu)))
		return;
#endif

	data = irq_get_irq_data(irq);
	BUG_ON(irq_data_cfg(data)->info != mk_irq_info(IRQT_IPI, ipi, 0));

	/* Get a new binding from Xen. */
	bind_ipi.vcpu = cpu;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
					&bind_ipi) != 0)
		BUG();
	evtchn = bind_ipi.port;

	/* Record the new mapping. */
	evtchn_to_irq[evtchn] = irq;
#ifdef PER_CPU_IPI_IRQ
	irq_data_cfg(data)->info = mk_irq_info(IRQT_IPI, ipi, evtchn);
#else
	per_cpu(ipi_evtchn, cpu) = evtchn;
#endif
	bind_evtchn_to_cpu(evtchn, cpu);

	/* Ready for use. */
	if (!irqd_irq_disabled(data))
		unmask_evtchn(evtchn);
#ifdef PER_CPU_IPI_IRQ
	}
#endif
#endif /* CONFIG_SMP */
}
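
/*
 * Resume path: event-channel ports are not preserved across save/restore or
 * migration, so every binding recorded in the packed info words above has to
 * be re-established with the hypervisor before the channels are usable again.
 */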
static void evtchn_resume(void)
{
	unsigned int cpu, irq, evtchn;
	struct evtchn_status status;

	/* Avoid doing anything in the 'suspend cancelled' case. */
	status.dom = DOMID_SELF;
#ifdef PER_CPU_VIRQ_IRQ
	status.port = evtchn_from_irq(__this_cpu_read(virq_to_irq[VIRQ_TIMER]));
#else
	status.port = __this_cpu_read(virq_to_evtchn[VIRQ_TIMER]);
#endif
	if (HYPERVISOR_event_channel_op(EVTCHNOP_status, &status))
		BUG();
	if (status.status == EVTCHNSTAT_virq
	    && status.vcpu == smp_processor_id()
	    && status.u.virq == VIRQ_TIMER)
		return;

	init_evtchn_cpu_bindings();

	if (pirq_eoi_does_unmask) {
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;
		if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn))
			BUG();
	}

	/* New event-channel space is not 'live' yet. */
	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		mask_evtchn(evtchn);

	/* No IRQ <-> event-channel mappings. */
	for (irq = 0; irq < nr_irqs; irq++) {
		struct irq_cfg *cfg = irq_cfg(irq);

		if (!cfg)
			continue;

		/* Check that no PIRQs are still bound. */
#ifdef CONFIG_SPARSE_IRQ
		if (irq < PIRQ_BASE || irq >= PIRQ_BASE + nr_pirqs)
			BUG_ON(type_from_irq_cfg(cfg) == IRQT_PIRQ);
		else
#endif
			BUG_ON(cfg->info != IRQ_UNBOUND);

		cfg->info &= ~((1U << _EVTCHN_BITS) - 1);
	}

	for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
		evtchn_to_irq[evtchn] = -1;

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}
}
static struct syscore_ops evtchn_syscore_ops = {
	.resume = evtchn_resume,
};

static int __init evtchn_register(void)
{
	/* Only domains that can be saved/migrated need the resume hook. */
	if (!is_initial_xendomain())
		register_syscore_ops(&evtchn_syscore_ops);
	return 0;
}
core_initcall(evtchn_register);
#endif /* CONFIG_PM_SLEEP */
int __init arch_early_irq_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(_irq_cfg); i++)
		irq_set_chip_data(i, _irq_cfg + i);
	return 0;
}
struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg = NULL;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_get_chip_data(at);
		if (cfg)
			return cfg;
	}

#ifdef CONFIG_SPARSE_IRQ
	/* By default all event channels notify CPU#0. */
	cpumask_copy(irq_get_irq_data(at)->affinity, cpumask_of(0));
#endif

	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);

	return cfg;
}
#ifdef CONFIG_SPARSE_IRQ
#ifdef CONFIG_X86_IO_APIC
#include <asm/io_apic.h>
#endif

int nr_pirqs = NR_PIRQS;
EXPORT_SYMBOL_GPL(nr_pirqs);
int __init arch_probe_nr_irqs(void)
{
	int nr = 64 + CONFIG_XEN_NR_GUEST_DEVICES, nr_irqs_gsi;

	if (is_initial_xendomain()) {
		nr_irqs_gsi = NR_IRQS_LEGACY;
#ifdef CONFIG_X86_IO_APIC
		nr_irqs_gsi += gsi_top;
#endif
#ifdef CONFIG_PCI_MSI
		nr += max(nr_irqs_gsi * 16, nr_cpu_ids * 8);
#endif
	} else {
		nr_irqs_gsi = NR_VECTORS;
#ifdef CONFIG_PCI_MSI
		nr += max(NR_IRQS_LEGACY * 16, nr_cpu_ids * 8);
#endif
	}

	if (nr_pirqs > nr_irqs_gsi)
		nr_pirqs = nr_irqs_gsi;
	if (nr > min_t(int, NR_DYNIRQS, NR_EVENT_CHANNELS))
		nr = min_t(int, NR_DYNIRQS, NR_EVENT_CHANNELS);
	nr_irqs = min_t(int, nr_pirqs + nr, PAGE_SIZE * 8);

	printk(KERN_DEBUG "nr_pirqs: %d\n", nr_pirqs);

	return ARRAY_SIZE(_irq_cfg);
}
#endif /* CONFIG_SPARSE_IRQ */
#if defined(CONFIG_X86_IO_APIC)
int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	struct physdev_irq irq_op;

	if (irq < PIRQ_BASE || irq - PIRQ_BASE >= nr_pirqs)
		return -EINVAL;

	if (cfg->vector)
		return 0;

	irq_op.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
		return -ENOSPC;

	cfg->vector = irq_op.vector;

	return 0;
}

#define identity_mapped_irq(irq) (!IO_APIC_IRQ((irq) - PIRQ_BASE))
#elif defined(CONFIG_X86)
#define identity_mapped_irq(irq) (((irq) - PIRQ_BASE) < NR_IRQS_LEGACY)
#else
#define identity_mapped_irq(irq) (1)
#endif
void evtchn_register_pirq(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	BUG_ON(irq < PIRQ_BASE || irq - PIRQ_BASE >= nr_pirqs);
	if (identity_mapped_irq(irq) || type_from_irq_cfg(cfg) != IRQT_UNBOUND)
		return;
	cfg->info = mk_irq_info(IRQT_PIRQ, irq, 0);
	irq_set_chip_and_handler_name(irq, &pirq_chip, handle_fasteoi_irq,
				      "fasteoi");
}
#ifdef CONFIG_PCI_MSI
int evtchn_map_pirq(int irq, int xen_pirq)
{
	if (irq < 0) {
#ifdef CONFIG_SPARSE_IRQ
		struct irq_cfg *cfg;

		spin_lock(&irq_mapping_update_lock);
		irq = find_unbound_irq(numa_node_id(), &cfg, &pirq_chip,
				       false);
		if (irq >= 0) {
			BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND);
			cfg->bindcount++;
			cfg->info = mk_irq_info(IRQT_PIRQ, xen_pirq, 0);
		}
		spin_unlock(&irq_mapping_update_lock);
		if (irq < 0)
			return irq;
	} else if (irq >= PIRQ_BASE && irq < PIRQ_BASE + nr_pirqs) {
		WARN_ONCE(1, "Non-MSI IRQ#%d (Xen %d)\n", irq, xen_pirq);
		return -EINVAL;
#else
		static DEFINE_SPINLOCK(irq_alloc_lock);

		irq = PIRQ_BASE + nr_pirqs - 1;
		spin_lock(&irq_alloc_lock);
		do {
			struct irq_cfg *cfg;

			if (identity_mapped_irq(irq))
				continue;
			cfg = alloc_irq_and_cfg_at(irq, numa_node_id());
			if (unlikely(!cfg)) {
				spin_unlock(&irq_alloc_lock);
				return -ENOMEM;
			}
			if (!index_from_irq_cfg(cfg)) {
				BUG_ON(type_from_irq_cfg(cfg) != IRQT_UNBOUND);
				cfg->info = mk_irq_info(IRQT_PIRQ,
							xen_pirq, 0);
				break;
			}
		} while (--irq >= PIRQ_BASE);
		spin_unlock(&irq_alloc_lock);
		if (irq < PIRQ_BASE)
			return -ENOSPC;
		irq_set_chip_and_handler_name(irq, &pirq_chip,
					      handle_fasteoi_irq, "fasteoi");
#endif
	} else if (!xen_pirq) {
		struct irq_cfg *cfg = irq_cfg(irq);

		if (!cfg || unlikely(type_from_irq_cfg(cfg) != IRQT_PIRQ))
			return -EINVAL;
		/*
		 * dynamic_irq_cleanup(irq) would seem to be the correct thing
		 * here, but cannot be used as we get here also during shutdown
		 * when a driver didn't free_irq() its MSI(-X) IRQ(s), which
		 * then causes a warning in dynamic_irq_cleanup().
		 */
		irq_set_chip_and_handler(irq, NULL, NULL);
		cfg->info = IRQ_UNBOUND;
#ifdef CONFIG_SPARSE_IRQ
		cfg->bindcount--;
#endif
		return 0;
	} else if (type_from_irq(irq) != IRQT_PIRQ
		   || index_from_irq(irq) != xen_pirq) {
		pr_err("IRQ#%d is already mapped to %d:%u - "
		       "cannot map to PIRQ#%u\n",
		       irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
		return -EINVAL;
	}
	return index_from_irq(irq) ? irq : -EINVAL;
}
#endif /* CONFIG_PCI_MSI */
int evtchn_get_xen_pirq(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (identity_mapped_irq(irq))
		return irq;
	BUG_ON(type_from_irq_cfg(cfg) != IRQT_PIRQ);
	return index_from_irq_cfg(cfg);
}
void __init xen_init_IRQ(void)
{
	unsigned int i;
	struct physdev_pirq_eoi_gmfn eoi_gmfn;

#ifndef PER_CPU_VIRQ_IRQ
	__set_bit(VIRQ_TIMER, virq_per_cpu);
	__set_bit(VIRQ_DEBUG, virq_per_cpu);
	__set_bit(VIRQ_XENOPROF, virq_per_cpu);
#ifdef CONFIG_IA64
	__set_bit(VIRQ_ITC, virq_per_cpu);
#endif
#endif

	init_evtchn_cpu_bindings();

#ifdef CONFIG_SPARSE_IRQ
	i = nr_irqs;
#else
	i = nr_pirqs;
#endif
	i = get_order(sizeof(unsigned long) * BITS_TO_LONGS(i));
	pirq_needs_eoi = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, i);
	BUILD_BUG_ON(NR_PIRQS > PAGE_SIZE * 8);
	eoi_gmfn.gmfn = virt_to_machine(pirq_needs_eoi) >> PAGE_SHIFT;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn, &eoi_gmfn) == 0)
		pirq_eoi_does_unmask = true;

	/* No event channels are 'live' right now. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++)
		mask_evtchn(i);

#ifndef CONFIG_SPARSE_IRQ
	for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) {
		irq_set_noprobe(i);
		irq_set_chip_and_handler_name(i, &dynirq_chip,
					      handle_fasteoi_irq, "fasteoi");
	}

	for (i = PIRQ_BASE; i < (PIRQ_BASE + nr_pirqs); i++) {
#else
	for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_IRQS_LEGACY); i++) {
#endif
		if (!identity_mapped_irq(i))
			continue;

#ifdef RTC_IRQ
		/* If not domain 0, force our RTC driver to fail its probe. */
		if (i - PIRQ_BASE == RTC_IRQ && !is_initial_xendomain())
			continue;
#endif

		irq_set_chip_and_handler_name(i, &pirq_chip,
					      handle_fasteoi_irq, "fasteoi");
	}
}