- Update Xen patches to 3.3-rc5 and c/s 1157.
arch/x86/kernel/irq-xen.c
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

#ifndef CONFIG_XEN
atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
#endif

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

#ifndef CONFIG_XEN
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
#endif
}

#define irq_stats(x)            (&per_cpu(irq_stat, x))
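
/*
 * Note: irq_stats(cpu) yields a pointer to that CPU's irq_stat
 * (irq_cpustat_t) instance, so e.g. irq_stats(j)->__nmi_count below
 * reads CPU j's NMI counter. The counters are only incremented locally
 * (via inc_irq_stat()), so readers may see slightly stale values but
 * need no locking.
 */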
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
#ifndef CONFIG_XEN
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");
#endif
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_printf(p, "  IRQ work interrupts\n");
#ifndef CONFIG_XEN
        seq_printf(p, "%*s: ", prec, "RTR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
        seq_printf(p, "  APIC ICR read retries\n");
#endif
#endif
#ifndef CONFIG_XEN
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_printf(p, "  Platform interrupts\n");
        }
#endif
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_printf(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_printf(p, "  Function call interrupts\n");
#ifndef CONFIG_XEN
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_printf(p, "  TLB shootdowns\n");
#else
        seq_printf(p, "%*s: ", prec, "LCK");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_lock_count);
        seq_printf(p, "  Spinlock wakeups\n");
#endif
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_printf(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_printf(p, "  Machine check polls\n");
#endif
#ifndef CONFIG_XEN
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#endif
        return 0;
}
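
/*
 * For reference, a hypothetical /proc/interrupts excerpt as produced by
 * the rows above on a two-CPU Xen guest (values illustrative only; one
 * column per online CPU):
 *
 *             CPU0       CPU1
 *    NMI:        0          0   Non-maskable interrupts
 *    RES:    14312      11930   Rescheduling interrupts
 *    CAL:      522        771   Function call interrupts
 *    LCK:     1045        983   Spinlock wakeups
 */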

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
        sum += irq_stats(cpu)->icr_read_retry_count;
#endif
#ifndef CONFIG_XEN
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
#ifndef CONFIG_XEN
        sum += irq_stats(cpu)->irq_tlb_count;
#else
        sum += irq_stats(cpu)->irq_lock_count;
#endif
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
#ifndef CONFIG_XEN
        u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
        sum += atomic_read(&irq_mis_count);
#endif
        return sum;
#else
        return 0;
#endif
}
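
/*
 * Sketch of how these helpers are consumed (assuming the upstream
 * fs/proc/stat.c behaviour is unchanged in this tree): the per-CPU sums
 * from arch_irq_stat_cpu() and the global arch_irq_stat() value are
 * folded into the first field of the "intr" line of /proc/stat, e.g.
 * (illustratively) "intr 4183940 ...".
 */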

#ifndef CONFIG_XEN
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        /*
         * The entry stubs store the negated vector number in orig_ax, so
         * its high bit is set and the ret_from_ code can tell interrupt
         * frames from syscalls; negate again to recover the vector.
         */
        unsigned vector = ~regs->orig_ax;
        unsigned irq;

        irq_enter();
        exit_idle();

        irq = __this_cpu_read(vector_irq[vector]);

        if (!handle_irq(irq, regs)) {
                ack_APIC_irq();

                if (printk_ratelimit())
                        pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
                                __func__, smp_processor_id(), vector, irq);
        }

        irq_exit();

        set_irq_regs(old_regs);
        return 1;
}
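
/*
 * vector_irq is the per-CPU vector -> Linux irq number table consulted
 * above. A sketch of the declaration it relies on (per asm/hw_irq.h of
 * this era; unassigned slots hold -1):
 *
 *      typedef int vector_irq_t[NR_VECTORS];
 *      DECLARE_PER_CPU(vector_irq_t, vector_irq);
 */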

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        ack_APIC_irq();

        irq_enter();

        exit_idle();

        inc_irq_stat(x86_platform_ipis);

        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();

        irq_exit();

        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
#include <xen/evtchn.h>
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
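/*
 * Summary of the two-pass scheme below (fixup_irqs() runs with
 * interrupts off on the CPU going down): pass one walks every irq
 * descriptor and re-targets affinities away from this CPU, recording in
 * irqs_used the irqs that could have been delivered here; after a short
 * settle delay, pass two retriggers any of those still pending as Xen
 * event channels, so no interrupt is lost across the offline.
 */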
void fixup_irqs(void)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;
        static DECLARE_BITMAP(irqs_used, NR_IRQS);

        for_each_irq_desc(irq, desc) {
                int break_affinity = 0;
                int set_affinity = 1;
                const struct cpumask *affinity;

                if (!desc)
                        continue;
                if (irq == 2)
                        continue;

                /* interrupts are disabled at this point */
                raw_spin_lock(&desc->lock);

                data = irq_desc_get_irq_data(desc);
                affinity = data->affinity;
                if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
                    cpumask_subset(affinity, cpu_online_mask)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                if (cpumask_test_cpu(smp_processor_id(), affinity))
                        __set_bit(irq, irqs_used);

                if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                        break_affinity = 1;
                        affinity = cpu_all_mask;
                }

                chip = irq_data_get_irq_chip(data);
                if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
                        chip->irq_mask(data);

                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, affinity, true);
                else if (data->chip != &no_irq_chip && !(warned++))
                        set_affinity = 0;

                if (!irqd_can_move_in_process_context(data) &&
                    !irqd_irq_disabled(data) && chip->irq_unmask)
                        chip->irq_unmask(data);

                raw_spin_unlock(&desc->lock);

                if (break_affinity && set_affinity)
                        /*printk("Broke affinity for irq %i\n", irq)*/;
                else if (!set_affinity)
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        /*
         * We could drop the mdelay() and immediately send spurious
         * interrupts to the new cpu targets for all the irqs that were
         * handled previously by this cpu. While it works, I have seen
         * spurious interrupt messages (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send those
         * interrupts to new targets as this cpu is already offlined...
         */
        mdelay(1);

        for_each_irq_desc(irq, desc) {
                if (!__test_and_clear_bit(irq, irqs_used))
                        continue;

                if (xen_test_irq_pending(irq)) {
                        desc = irq_to_desc(irq);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        raw_spin_lock(&desc->lock);
                        if (chip->irq_retrigger)
                                chip->irq_retrigger(data);
                        raw_spin_unlock(&desc->lock);
                }
        }
}
#endif