/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * this code detects hard lockups: incidents where, on a CPU,
 * the kernel does not respond to anything except NMI.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

int watchdog_enabled;
int __read_mostly softlockup_thresh = 60;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

static int __initdata no_watchdog;

/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic;

static int __init hardlockup_panic_setup(char *str)
{
	if (!strncmp(str, "panic", 5))
		hardlockup_panic = 1;
	return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
			CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
	softlockup_panic = simple_strtoul(str, NULL, 0);

	return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
	no_watchdog = 1;
	return 1;
}
__setup("nowatchdog", nowatchdog_setup);

static int __init nosoftlockup_setup(char *str)
{
	no_watchdog = 1;
	return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

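/*
 * Example boot-command usage of the parameters registered above
 * (behaviour as implemented in this file):
 *   nmi_watchdog=panic   - panic instead of warning on a hard lockup
 *   softlockup_panic=1   - panic when a soft lockup is detected
 *   nowatchdog           - do not start the lockup detector at all
 *   nosoftlockup         - also sets no_watchdog, so it disables the
 *                          whole detector here
 */
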
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

static unsigned long get_sample_period(void)
{
	/*
	 * convert softlockup_thresh from seconds to ns
	 * the divide by 5 is to give hrtimer 5 chances to
	 * increment before the hardlockup detector generates
	 * a warning
	 */
	return softlockup_thresh / 5 * NSEC_PER_SEC;
}

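/*
 * With the default softlockup_thresh of 60, the sample period is
 * 60 / 5 * NSEC_PER_SEC = 12 seconds, i.e. the hrtimer fires five
 * times per soft-lockup window.  Note the integer division: a
 * threshold below 5 would truncate the period to zero.
 */
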
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
	int this_cpu = smp_processor_id();

	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
}

void touch_softlockup_watchdog(void)
{
	__get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/*
	 * this is done lockless
	 * do we care if a 0 races with a timestamp?
	 * all it means is the softlockup check starts one cycle later
	 */
	for_each_online_cpu(cpu)
		per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
	__get_cpu_var(watchdog_nmi_touch) = true;
	touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
#endif

void touch_softlockup_watchdog_sync(void)
{
	__raw_get_cpu_var(softlockup_touch_sync) = true;
	__raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);

	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
		return 1;

	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
	return 0;
}
#endif

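/*
 * is_hardlockup() relies on watchdog_timer_fn() bumping
 * hrtimer_interrupts once per sample period: if the perf NMI sees the
 * same count twice in a row, the hrtimer has not run for a full
 * period, which is a strong sign the CPU is stuck with everything
 * except NMI blocked.
 */
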
static int is_softlockup(unsigned long touch_ts)
{
	unsigned long now = get_timestamp(smp_processor_id());

	/* Warn about unreasonable delays: */
	if (time_after(now, touch_ts + softlockup_thresh))
		return now - touch_ts;

	return 0;
}

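/*
 * is_softlockup() returns how long (in the approximate seconds of
 * get_timestamp()) the watchdog thread has failed to refresh its
 * timestamp, or 0 while the delay is still within softlockup_thresh.
 */
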
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static struct perf_event_attr wd_hw_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,
	.disabled	= 1,
};

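/*
 * The NMI source is a pinned, CPU-cycle counting perf event.  Its
 * sample_period is filled in by watchdog_nmi_enable() from
 * hw_nmi_get_sample_period(), so the overflow NMI keeps arriving even
 * when normal interrupts are disabled on the CPU.
 */
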
/* Callback function for perf event subsystem */
void watchdog_overflow_callback(struct perf_event *event, int nmi,
		 struct perf_sample_data *data,
		 struct pt_regs *regs)
{
	/* Ensure the watchdog never gets throttled */
	event->hw.interrupts = 0;

	if (__get_cpu_var(watchdog_nmi_touch) == true) {
		__get_cpu_var(watchdog_nmi_touch) = false;
		return;
	}

	/* check for a hardlockup
	 * This is done by making sure our timer interrupt
	 * is incrementing.  The timer interrupt should have
	 * fired multiple times before we overflow'd.  If it hasn't
	 * then this is a good indication the cpu is stuck
	 */
	if (is_hardlockup()) {
		int this_cpu = smp_processor_id();

		/* only print hardlockups once */
		if (__get_cpu_var(hard_watchdog_warn) == true)
			return;

		if (hardlockup_panic)
			panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
		else
			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

		__get_cpu_var(hard_watchdog_warn) = true;
		return;
	}

	__get_cpu_var(hard_watchdog_warn) = false;
	return;
}

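/*
 * hard_watchdog_warn limits the output to one report per lockup: it is
 * set after the first WARN()/panic() and cleared again as soon as
 * hrtimer_interrupts is seen advancing.
 */
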
static void watchdog_interrupt_count(void)
{
	__get_cpu_var(hrtimer_interrupts)++;
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
	struct pt_regs *regs = get_irq_regs();
	int duration;

	/* kick the hardlockup detector */
	watchdog_interrupt_count();

	/* kick the softlockup detector */
	wake_up_process(__get_cpu_var(softlockup_watchdog));

	/* .. and repeat */
	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

	if (touch_ts == 0) {
		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
			/*
			 * If the time stamp was touched atomically
			 * make sure the scheduler tick is up to date.
			 */
			__get_cpu_var(softlockup_touch_sync) = false;
			sched_clock_tick();
		}
		__touch_watchdog();
		return HRTIMER_RESTART;
	}

	/* check for a softlockup
	 * This is done by making sure a high priority task is
	 * being scheduled.  The task touches the watchdog to
	 * indicate it is getting cpu time.  If it hasn't then
	 * this is a good indication some task is hogging the cpu
	 */
	duration = is_softlockup(touch_ts);
	if (unlikely(duration)) {
		/* only warn once */
		if (__get_cpu_var(soft_watchdog_warn) == true)
			return HRTIMER_RESTART;

		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
			smp_processor_id(), duration,
			current->comm, task_pid_nr(current));
		print_modules();
		print_irqtrace_events(current);
		if (regs)
			show_regs(regs);
		else
			dump_stack();

		if (softlockup_panic)
			panic("softlockup: hung tasks");
		__get_cpu_var(soft_watchdog_warn) = true;
	} else
		__get_cpu_var(soft_watchdog_warn) = false;

	return HRTIMER_RESTART;
}

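/*
 * Summary of one timer tick: bump hrtimer_interrupts (the evidence the
 * NMI-side hard-lockup check looks for), wake the per-cpu watchdog
 * thread so it can refresh watchdog_touch_ts, re-arm the timer, and
 * finally warn if that timestamp is older than softlockup_thresh.
 */
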
/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *unused)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* initialize timestamp */
	__touch_watchdog();

	/* kick off the timer for the hardlockup detector */
	/* done here because hrtimer_start can only pin to smp_processor_id() */
	hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
		      HRTIMER_MODE_REL_PINNED);

	set_current_state(TASK_INTERRUPTIBLE);
	/*
	 * Run briefly once per second to reset the softlockup timestamp.
	 * If this gets delayed for more than 60 seconds then the
	 * debug-printout triggers in watchdog_timer_fn().
	 */
	while (!kthread_should_stop()) {
		__touch_watchdog();
		schedule();

		if (kthread_should_stop())
			break;

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

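/*
 * The watchdog thread runs as SCHED_FIFO at MAX_RT_PRIO-1, so if it
 * cannot run for softlockup_thresh seconds the CPU is typically being
 * monopolized by something that never schedules, e.g. a loop with
 * preemption disabled.
 */
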
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int watchdog_nmi_enable(int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	/* Try to register using hardware perf events */
	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period();
	event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
	if (!IS_ERR(event)) {
		printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
		goto out_save;
	}

	printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
	return PTR_ERR(event);

	/* success path */
out_save:
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}

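/*
 * Note: if no hardware PMU counter can be reserved, the error is
 * propagated to watchdog_enable(), which then gives up on this CPU
 * entirely (the soft-lockup thread is not started either).
 */
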
static void watchdog_nmi_disable(int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	return;
}
#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
static int watchdog_prepare_cpu(int cpu)
{
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	WARN_ON(per_cpu(softlockup_watchdog, cpu));
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = watchdog_timer_fn;

	return 0;
}

static int watchdog_enable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	int err;

	/* enable the perf event */
	err = watchdog_nmi_enable(cpu);
	if (err)
		return err;

	/* create the watchdog thread */
	if (!p) {
		p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
			return PTR_ERR(p);
		}
		kthread_bind(p, cpu);
		per_cpu(watchdog_touch_ts, cpu) = 0;
		per_cpu(softlockup_watchdog, cpu) = p;
		wake_up_process(p);
	}

	return 0;
}

static void watchdog_disable(int cpu)
{
	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

	/*
	 * cancel the timer first to stop incrementing the stats
	 * and waking up the kthread
	 */
	hrtimer_cancel(hrtimer);

	/* disable the perf event */
	watchdog_nmi_disable(cpu);

	/* stop the watchdog thread */
	if (p) {
		per_cpu(softlockup_watchdog, cpu) = NULL;
		kthread_stop(p);
	}

	/* if any cpu succeeds, watchdog is considered enabled for the system */
	watchdog_enabled = 1;
}

static void watchdog_enable_all_cpus(void)
{
	int cpu;
	int result = 0;

	for_each_online_cpu(cpu)
		result += watchdog_enable(cpu);

	if (result)
		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		watchdog_disable(cpu);

	/* if all watchdogs are disabled, then they are disabled for the system */
	watchdog_enabled = 0;
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */

int proc_dowatchdog_enabled(struct ctl_table *table, int write,
		     void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);

	if (watchdog_enabled)
		watchdog_enable_all_cpus();
	else
		watchdog_disable_all_cpus();
	return 0;
}

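/*
 * Example use of the sysctl named in the comment above:
 *   echo 0 > /proc/sys/kernel/nmi_watchdog   - stop the watchdog on all CPUs
 *   echo 1 > /proc/sys/kernel/nmi_watchdog   - start it again
 */
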
int proc_dowatchdog_thresh(struct ctl_table *table, int write,
			     void __user *buffer,
			     size_t *lenp, loff_t *ppos)
{
	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
#endif /* CONFIG_SYSCTL */

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = watchdog_prepare_cpu(hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		err = watchdog_enable(hotcpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		watchdog_disable(hotcpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		watchdog_disable(hotcpu);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static int __init spawn_watchdog_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err;

	if (no_watchdog)
		return 0;

	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
	WARN_ON(notifier_to_errno(err));

	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	return 0;
}
early_initcall(spawn_watchdog_task);