- Update Xen patches to 3.3-rc5 and c/s 1157.
[linux-flexiantxendom0-3.2.10.git] / arch / x86 / kernel / apic / hw_nmi.c
1 /*
2  *  HW NMI watchdog support
3  *
4  *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
5  *
6  *  Arch specific calls to support NMI watchdog
7  *
8  *  Bits copied from original nmi.c file
9  *
10  */
11 #include <asm/apic.h>
12
13 #include <linux/cpumask.h>
14 #include <linux/kdebug.h>
15 #include <linux/notifier.h>
16 #include <linux/kprobes.h>
17 #include <linux/nmi.h>
18 #include <linux/module.h>
19 #include <linux/delay.h>
20
21 #ifdef CONFIG_HARDLOCKUP_DETECTOR
22 u64 hw_nmi_get_sample_period(int watchdog_thresh)
23 {
24         return (u64)(cpu_khz) * 1000 * watchdog_thresh;
25 }
26 #endif
27
28 #ifdef arch_trigger_all_cpu_backtrace
29 #ifdef CONFIG_XEN
30 #include <asm/ipi.h>
31 #endif
32
33 /* For reliability, we're prepared to waste bits here. */
34 static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
35
36 /* "in progress" flag of arch_trigger_all_cpu_backtrace */
37 static unsigned long backtrace_flag;
38
/*
 * Ask every online CPU to dump its stack by sending an NMI to all of
 * them, then busy-wait until each CPU's NMI handler has cleared its
 * bit in backtrace_mask (or the 10 s timeout expires).  backtrace_flag
 * serializes concurrent callers so two triggers don't interleave
 * their dump output.
 */
void arch_trigger_all_cpu_backtrace(void)
{
        int i;

        if (test_and_set_bit(0, &backtrace_flag))
                /*
                 * If there is already a trigger_all_cpu_backtrace() in progress
                 * (backtrace_flag == 1), don't output double cpu dump infos.
                 */
                return;

        /* Snapshot the set of CPUs we expect a backtrace from. */
        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);

        printk(KERN_INFO "sending NMI to all CPUs:\n");
#ifndef CONFIG_XEN
        apic->send_IPI_all(NMI_VECTOR);
#else /* this works even without CONFIG_X86_LOCAL_APIC */
        xen_send_IPI_all(NMI_VECTOR);
#endif

        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
        }

        /* Drop the "in progress" flag; barrier orders it after the dumps. */
        clear_bit(0, &backtrace_flag);
        smp_mb__after_clear_bit();
}
69
/*
 * Per-CPU NMI handler: if this CPU was asked for a backtrace (its bit
 * is set in backtrace_mask), print its registers/stack and clear the
 * bit so the waiting trigger CPU stops spinning on us.
 *
 * Runs in NMI context, hence __kprobes and the raw arch_spinlock_t
 * (taken only to keep several CPUs' dumps from interleaving; a normal
 * spinlock is not NMI-safe here).  The mask bit is cleared only after
 * the dump completes, so the trigger side waits for the full output.
 */
static int __kprobes
arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs)
{
        int cpu;

        cpu = smp_processor_id();

        if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
                static arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;

                arch_spin_lock(&lock);
                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                show_regs(regs);
                arch_spin_unlock(&lock);
                cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
                return NMI_HANDLED;
        }

        /* Not addressed to us - let other NMI handlers have a look. */
        return NMI_DONE;
}
90
91 static int __init register_trigger_all_cpu_backtrace(void)
92 {
93         register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler,
94                                 0, "arch_bt");
95         return 0;
96 }
97 early_initcall(register_trigger_all_cpu_backtrace);
98 #endif