/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * this code detects hard lockups: incidents where, on a CPU,
 * the kernel does not respond to anything except NMI.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>

#include <asm/irq_regs.h>
#include <linux/perf_event.h>

int watchdog_enabled;
int __read_mostly softlockup_thresh = 60;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_PERF_EVENTS_NMI
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

static int __read_mostly did_panic;
static int __initdata no_watchdog;

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_PERF_EVENTS_NMI
static int hardlockup_panic;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        no_watchdog = 1;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        no_watchdog = 1;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}

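/*
 * Worked example of the shift above: 2^30 is ~1.074 * 10^9, so a
 * cpu_clock() value of 120 * 10^9 ns (120s of uptime) becomes
 * 120000000000 >> 30 == 111.  Timestamps therefore run ~7% slow,
 * which is harmless since only differences between them are ever
 * compared against softlockup_thresh.
 */
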
static unsigned long get_sample_period(void)
{
        /*
         * convert softlockup_thresh from seconds to ns
         * the divide by 5 is to give hrtimer 5 chances to
         * increment before the hardlockup detector generates
         * a warning
         */
        return softlockup_thresh / 5 * NSEC_PER_SEC;
}

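/*
 * Example: with the default softlockup_thresh of 60, this returns
 * 60 / 5 * NSEC_PER_SEC == 12 * 10^9 ns, so the per-cpu hrtimer fires
 * every 12 seconds.  The integer divide runs first, so a thresh below
 * 5 would yield a period of 0; the default value keeps this safe.
 */
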
/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        int this_cpu = raw_smp_processor_id();

        __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
}

void touch_softlockup_watchdog(void)
{
        __get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlockup check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

void touch_nmi_watchdog(void)
{
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

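/*
 * Typical use: a kernel loop that legitimately monopolizes a CPU
 * (e.g. a long polling loop in a driver) calls touch_nmi_watchdog()
 * each iteration so that neither detector fires.  Sketch, with
 * device_ready() standing in for whatever condition is being polled:
 *
 *      while (!device_ready(dev)) {
 *              cpu_relax();
 *              touch_nmi_watchdog();
 *      }
 */
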
void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_PERF_EVENTS_NMI
/* watchdog detector functions */
static int is_hardlockup(int cpu)
{
        unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);

        if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
                return 1;

        per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
        return 0;
}
#endif

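/*
 * How this catches a lockup: the NMI period is long enough for several
 * hrtimer ticks, so on a healthy CPU hrtimer_interrupts has always
 * advanced between two NMIs and the saved value never matches.  If the
 * CPU spins with interrupts disabled, the hrtimer cannot fire, two
 * consecutive NMIs observe the same count, and this returns 1.
 */
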
static int is_softlockup(unsigned long touch_ts, int cpu)
{
        unsigned long now = get_timestamp(cpu);

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + softlockup_thresh))
                return now - touch_ts;

        return 0;
}

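/*
 * Example: with softlockup_thresh = 60 and a timestamp last refreshed
 * at touch_ts = 100, every sample up to now == 160 returns 0; the
 * first sample with now > 160 returns now - touch_ts (61 or more),
 * which the caller prints as the number of seconds the CPU was stuck.
 */
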
static int
watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
        did_panic = 1;

        return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
        .notifier_call = watchdog_panic,
};

#ifdef CONFIG_PERF_EVENTS_NMI
static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};

/* Callback function for perf event subsystem */
void watchdog_overflow_callback(struct perf_event *event, int nmi,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        int this_cpu = smp_processor_id();
        unsigned long touch_ts = per_cpu(watchdog_touch_ts, this_cpu);

        if (touch_ts == 0) {
                __touch_watchdog();
                return;
        }

        /* check for a hardlockup
         * This is done by making sure our timer interrupt
         * is incrementing.  The timer interrupt should have
         * fired multiple times before we overflow'd.  If it hasn't
         * then this is a good indication the cpu is stuck
         */
        if (is_hardlockup(this_cpu)) {
                /* only print hardlockups once */
                if (__get_cpu_var(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __get_cpu_var(hard_watchdog_warn) = true;
                return;
        }

        __get_cpu_var(hard_watchdog_warn) = false;
        return;
}

static void watchdog_interrupt_count(void)
{
        __get_cpu_var(hrtimer_interrupts)++;
}
#else
static inline void watchdog_interrupt_count(void) { return; }
#endif /* CONFIG_PERF_EVENTS_NMI */

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        int this_cpu = smp_processor_id();
        unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__get_cpu_var(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

        if (touch_ts == 0) {
                if (unlikely(per_cpu(softlockup_touch_sync, this_cpu))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        per_cpu(softlockup_touch_sync, this_cpu) = false;
                        sched_clock_tick();
                }
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled.  The task touches the watchdog to
         * indicate it is getting cpu time.  If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts, this_cpu);
        if (unlikely(duration)) {
                /* only warn once */
                if (__get_cpu_var(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        this_cpu, duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __get_cpu_var(soft_watchdog_warn) = true;
        } else
                __get_cpu_var(soft_watchdog_warn) = false;

        return HRTIMER_RESTART;
}

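/*
 * Illustrative trigger: a code path that spins without ever releasing
 * the CPU, e.g.
 *
 *      preempt_disable();
 *      while (!flag)
 *              cpu_relax();
 *
 * keeps the MAX_RT_PRIO-1 watchdog thread from running, so
 * watchdog_touch_ts goes stale and the printout above fires after
 * softlockup_thresh seconds.  If the loop also ran with interrupts
 * disabled, the NMI path would report a hard lockup instead.
 */
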
/*
 * The watchdog thread - touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, (unsigned long)__bind_cpu);

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* initialize timestamp */
        __touch_watchdog();

        /* kick off the timer for the hardlockup detector */
        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
                      HRTIMER_MODE_REL_PINNED);

        set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly each time the hrtimer wakes us to reset the
         * softlockup timestamp.  If this gets delayed for more than
         * softlockup_thresh seconds then the debug-printout triggers
         * in watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
                schedule();

                if (kthread_should_stop())
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}

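/*
 * The three mechanisms above fit together per CPU as follows:
 *   - this SCHED_FIFO thread refreshes watchdog_touch_ts whenever it
 *     runs (evidence the scheduler still works);
 *   - the hrtimer wakes it every get_sample_period() ns and bumps
 *     hrtimer_interrupts (evidence interrupts still fire);
 *   - the perf NMI verifies hrtimer_interrupts moved, catching
 *     interrupts-off spins the other two cannot observe.
 */
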
#ifdef CONFIG_PERF_EVENTS_NMI
static int watchdog_nmi_enable(int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        /* Try to register using hardware perf events */
        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period();
        event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
        if (!IS_ERR(event)) {
                printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
                goto out_save;
        }

        printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
        return -1;

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

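/*
 * On the sample period: the NMI fires once per wd_attr->sample_period
 * CPU cycles.  The arch helper is expected to size this to roughly a
 * full threshold window of cycles (on x86 of this era it is derived
 * from cpu_khz), so the 12s hrtimer gets its five chances to increment
 * hrtimer_interrupts between consecutive NMIs.
 */
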
static void watchdog_nmi_disable(int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}

#else
static int watchdog_nmi_enable(int cpu) { return 0; }
static void watchdog_nmi_disable(int cpu) { return; }
#endif /* CONFIG_PERF_EVENTS_NMI */

/* prepare/enable/disable routines */
static int watchdog_prepare_cpu(int cpu)
{
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        WARN_ON(per_cpu(softlockup_watchdog, cpu));
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        return 0;
}

static int watchdog_enable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);

        /* enable the perf event */
        if (watchdog_nmi_enable(cpu) != 0)
                return -1;

        /* create the watchdog thread */
        if (!p) {
                p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
                        return -1;
                }
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
                per_cpu(softlockup_watchdog, cpu) = p;
                wake_up_process(p);
        }

        /* if any cpu succeeds, watchdog is considered enabled for the system */
        watchdog_enabled = 1;

        return 0;
}

static void watchdog_disable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        /*
         * cancel the timer first to stop incrementing the stats
         * and waking up the kthread
         */
        hrtimer_cancel(hrtimer);

        /* disable the perf event */
        watchdog_nmi_disable(cpu);

        /* stop the watchdog thread */
        if (p) {
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
}

static void watchdog_enable_all_cpus(void)
{
        int cpu;
        int result = 0;

        for_each_online_cpu(cpu)
                result += watchdog_enable(cpu);

        if (result)
                printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
}

static void watchdog_disable_all_cpus(void)
{
        int cpu;

        for_each_online_cpu(cpu)
                watchdog_disable(cpu);

        /* if all watchdogs are disabled, then they are disabled for the system */
        watchdog_enabled = 0;
}

/* sysctl functions */
#ifdef CONFIG_SYSCTL
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */
int proc_dowatchdog_enabled(struct ctl_table *table, int write,
                            void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, buffer, length, ppos);

        if (watchdog_enabled)
                watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();
        return 0;
}

int proc_dowatchdog_thresh(struct ctl_table *table, int write,
                           void __user *buffer,
                           size_t *lenp, loff_t *ppos)
{
        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
#endif /* CONFIG_SYSCTL */

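/*
 * Example usage from userspace, via the entries wired up in
 * kernel/sysctl.c of this era:
 *
 *      echo 0 > /proc/sys/kernel/nmi_watchdog      # watchdog_disable_all_cpus()
 *      echo 1 > /proc/sys/kernel/nmi_watchdog      # watchdog_enable_all_cpus()
 *      echo 30 > /proc/sys/kernel/watchdog_thresh  # adjust softlockup_thresh
 */
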
/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                if (watchdog_prepare_cpu(hotcpu))
                        return NOTIFY_BAD;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (watchdog_enable(hotcpu))
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                watchdog_disable(hotcpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                watchdog_disable(hotcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static int __init spawn_watchdog_task(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        if (no_watchdog)
                return 0;

        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        WARN_ON(err == NOTIFY_BAD);

        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);

        atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

        return 0;
}
early_initcall(spawn_watchdog_task);