2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
5 #include <linux/kallsyms.h>
6 #include <linux/kprobes.h>
7 #include <linux/uaccess.h>
8 #include <linux/utsname.h>
9 #include <linux/hardirq.h>
10 #include <linux/kdebug.h>
11 #include <linux/module.h>
12 #include <linux/ptrace.h>
13 #include <linux/ftrace.h>
14 #include <linux/kexec.h>
15 #include <linux/bug.h>
16 #include <linux/nmi.h>
17 #include <linux/sysfs.h>
19 #include <asm/stacktrace.h>
20 #include <linux/unwind.h>
/*
 * Oops/backtrace tuning knobs.
 * NOTE(review): this listing is gapped — the #else/#endif lines that
 * pair with the CONFIG_STACK_UNWIND #ifdef below are not visible here.
 */
23 int panic_on_unrecovered_nmi;
/* Bytes of machine code dumped around the faulting IP; "code_bytes=" boot arg */
25 unsigned int code_bytes = 64;
/* Max stack words printed by the raw stack dump; "kstack=" boot arg */
26 int kstack_depth_to_print = 3 * STACKSLOTS_PER_LINE;
27 #ifdef CONFIG_STACK_UNWIND
/* Backtrace mode selected by "call_trace=" (see call_trace_setup below) */
28 static int call_trace = 1;
/* Without CONFIG_STACK_UNWIND, call_trace is pinned (presumably via #else) */
30 #define call_trace (-1)
/* Monotonic oops counter shown as "[#N]" in the die banner (__die) */
32 static int die_counter;
/*
 * printk_address - print one backtrace line: " [<addr>] symbol".
 * @reliable: when 0, the symbol is prefixed with "? " to mark an address
 *            found by stack scanning rather than by frame-pointer walk.
 * NOTE(review): function braces are not visible in this excerpt.
 */
34 void printk_address(unsigned long address, int reliable)
36 printk(" [<%p>] %s%pB\n", (void *) address,
37 reliable ? "" : "? ", (void *) address);
40 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * When the function-graph tracer is active, return addresses on the stack
 * are patched to point at return_to_handler.  Recover the real return
 * address from task->ret_stack and report it (reliable) to the callback.
 * NOTE(review): several lines are missing from this excerpt (task
 * assignment, early returns, *graph bookkeeping).
 */
42 print_ftrace_graph_addr(unsigned long addr, void *data,
43 const struct stacktrace_ops *ops,
44 struct thread_info *tinfo, int *graph)
46 struct task_struct *task;
47 unsigned long ret_addr;
/* Only addresses hijacked by the graph tracer need translation */
50 if (addr != (unsigned long)return_to_handler)
54 index = task->curr_ret_stack;
/* Bail if there is no shadow stack or we already consumed this depth */
56 if (!task->ret_stack || index < *graph)
60 ret_addr = task->ret_stack[index].ret;
/* Report the real (reliable) return address instead of the trampoline */
62 ops->address(data, ret_addr, 1);
/*
 * Stub variant — presumably the !CONFIG_FUNCTION_GRAPH_TRACER branch
 * (the #else/#endif lines are not visible in this excerpt).
 */
68 print_ftrace_graph_addr(unsigned long addr, void *data,
69 const struct stacktrace_ops *ops,
70 struct thread_info *tinfo, int *graph)
/*
 * dump_trace_unwind - walk frames with the DWARF2 unwinder, reporting each
 * kernel PC to ops->address() as reliable (1).  Stops when unwind() fails,
 * the PC becomes 0, or the unwinder crosses into user mode.
 * NOTE(review): the loop body and return statements are partly missing from
 * this excerpt; the page-boundary condition at the end is truncated.
 */
74 int asmlinkage dump_trace_unwind(struct unwind_frame_info *info,
75 const struct stacktrace_ops *ops, void *data)
78 #ifdef CONFIG_STACK_UNWIND
79 unsigned long sp = UNW_SP(info);
/* Nothing to do if we already start in user mode */
81 if (arch_unw_user_mode(info))
83 while (unwind(info) == 0 && UNW_PC(info)) {
85 ops->address(data, UNW_PC(info), 1);
86 if (arch_unw_user_mode(info))
/* Compare stack pages — presumably a progress/stuck check (truncated) */
88 if ((sp & ~(PAGE_SIZE - 1)) == (UNW_SP(info) & ~(PAGE_SIZE - 1))
/*
 * try_stack_unwind - attempt an exact DWARF2 backtrace before falling back
 * to the heuristic stack scanner.  Picks the unwinder entry point based on
 * whether we have regs, are tracing current, or a blocked/running task.
 * On a partial unwind (depending on call_trace mode) it warns and updates
 * *stack so the caller can continue with an inexact scan.
 * NOTE(review): braces, returns and the unw_ret checks are partly missing
 * from this excerpt.
 */
97 int try_stack_unwind(struct task_struct *task, struct pt_regs *regs,
98 unsigned long **stack, unsigned long *bp,
99 const struct stacktrace_ops *ops, void *data)
101 #ifdef CONFIG_STACK_UNWIND
103 struct unwind_frame_info info;
/* Exception context: unwind starting from the trapped registers */
108 if (unwind_init_frame_info(&info, task, regs) == 0)
109 unw_ret = dump_trace_unwind(&info, ops, data);
/* Tracing ourselves: capture our own running frame */
110 } else if (task == current)
111 unw_ret = unwind_init_running(&info, dump_trace_unwind, ops, data);
/* Task running on another CPU — presumably cannot be unwound safely */
113 else if (task->on_cpu)
/* Sleeping task: unwind from its saved context */
116 else if (unwind_init_blocked(&info, task) == 0)
117 unw_ret = dump_trace_unwind(&info, ops, data);
/* call_trace==1 (newfallback): warn when the unwinder stopped in-kernel */
119 if (call_trace == 1 && !arch_unw_user_mode(&info)) {
120 ops->warning_symbol(data, "DWARF2 unwinder stuck at %s\n",
/* Resume the old scanner from where the unwinder got stuck */
122 if (UNW_SP(&info) >= PAGE_OFFSET) {
123 ops->warning(data, "Leftover inexact backtrace:\n");
124 *stack = (void *)UNW_SP(&info);
128 } else if (call_trace >= 1)
130 ops->warning(data, "Full inexact backtrace again:\n");
132 ops->warning(data, "Inexact backtrace:\n");
138 * x86-64 can have up to three kernel stacks:
141 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
/*
 * valid_stack_ptr - true if @p (with @size bytes of headroom) lies inside
 * the current stack region: either the special exception stack ending at
 * @end, or the task stack described by @tinfo.
 * NOTE(review): the return statements around these checks are partly
 * missing from this excerpt.
 */
144 static inline int valid_stack_ptr(struct thread_info *tinfo,
145 void *p, unsigned int size, void *end)
/* On an exception stack: must fall within [end - THREAD_SIZE, end) */
149 if (p < end && p >= (end-THREAD_SIZE))
/* On the task stack: strictly inside thread_info's THREAD_SIZE region */
154 return p > t && p < t + THREAD_SIZE - size;
/*
 * print_context_stack - heuristic stack walker.  Scans every word on the
 * stack; words that look like kernel text addresses are reported.  A word
 * sitting exactly at bp + sizeof(long) is a frame-pointer-saved return
 * address and is reported as reliable (1); anything else as a guess (0).
 * NOTE(review): the loop epilogue (stack++ / return) is not visible in
 * this excerpt.
 */
158 print_context_stack(struct thread_info *tinfo,
159 unsigned long *stack, unsigned long bp,
160 const struct stacktrace_ops *ops, void *data,
161 unsigned long *end, int *graph)
163 struct stack_frame *frame = (struct stack_frame *)bp;
165 while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
169 if (__kernel_text_address(addr)) {
/* Word is the saved return address of the current frame: trustworthy */
170 if ((unsigned long) stack == bp + sizeof(long)) {
171 ops->address(data, addr, 1);
/* Advance to the next frame in the FP chain */
172 frame = frame->next_frame;
173 bp = (unsigned long) frame;
/* Stray text address found by scanning: report as unreliable */
175 ops->address(data, addr, 0);
/* Translate ftrace-graph trampoline addresses to real return addresses */
177 print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
183 EXPORT_SYMBOL_GPL(print_context_stack);
/*
 * print_context_stack_bp - strict frame-pointer walker.  Follows only the
 * frame-pointer chain (no scanning), reporting every return address as
 * reliable; stops at the first address that is not kernel text.  Returns
 * the final frame pointer reached.
 */
186 print_context_stack_bp(struct thread_info *tinfo,
187 unsigned long *stack, unsigned long bp,
188 const struct stacktrace_ops *ops, void *data,
189 unsigned long *end, int *graph)
191 struct stack_frame *frame = (struct stack_frame *)bp;
192 unsigned long *ret_addr = &frame->return_address;
194 while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
195 unsigned long addr = *ret_addr;
/* A non-text return address means the chain is broken — stop */
197 if (!__kernel_text_address(addr))
200 ops->address(data, addr, 1);
201 frame = frame->next_frame;
202 ret_addr = &frame->return_address;
203 print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
206 return (unsigned long)frame;
208 EXPORT_SYMBOL_GPL(print_context_stack_bp);
/*
 * Default stacktrace_ops callbacks used by show_trace_log_lvl().
 * @data is the printk log-level prefix string in each of them.
 */
212 print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
215 print_symbol(msg, symbol);
219 static void print_trace_warning(void *data, char *msg)
221 printk("%s%s\n", (char *)data, msg);
/* Announce a transition onto a named (IRQ/exception) stack */
224 static int print_trace_stack(void *data, char *name)
226 printk("%s <%s> ", (char *)data, name);
231 * Print one address/symbol entries per line.
233 static void print_trace_address(void *data, unsigned long addr, int reliable)
/* Long backtraces can take a while — keep the NMI watchdog quiet */
235 touch_nmi_watchdog();
237 printk_address(addr, reliable);
240 static const struct stacktrace_ops print_trace_ops = {
241 .warning = print_trace_warning,
242 .warning_symbol = print_trace_warning_symbol,
243 .stack = print_trace_stack,
244 .address = print_trace_address,
245 .walk_stack = print_context_stack,
/*
 * show_trace_log_lvl - print "Call Trace:" then walk the stack with the
 * default print_trace_ops, prefixing every line with @log_lvl.
 */
249 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
250 unsigned long *stack, unsigned long bp, char *log_lvl)
252 printk("%sCall Trace:\n", log_lvl);
253 dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
/* show_trace - show_trace_log_lvl() with no log-level prefix */
256 void show_trace(struct task_struct *task, struct pt_regs *regs,
257 unsigned long *stack, unsigned long bp)
259 show_trace_log_lvl(task, regs, stack, bp, "");
/* show_stack - dump @task's raw stack (no regs, bp=0) with no prefix */
262 void show_stack(struct task_struct *task, unsigned long *sp)
264 show_stack_log_lvl(task, NULL, sp, 0, "");
268 * The architecture-independent dump_stack generator
270 void dump_stack(void)
/* Capture our own frame pointer so the trace starts at the caller */
275 bp = stack_frame(current, NULL);
/* One-line banner: pid, comm, taint flags, kernel release/version */
276 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
277 current->pid, current->comm, print_tainted(),
278 init_utsname()->release,
279 (int)strcspn(init_utsname()->version, " "),
280 init_utsname()->version);
281 show_trace(NULL, NULL, &stack, bp);
283 EXPORT_SYMBOL(dump_stack);
/* Serializes oops output across CPUs; die_owner allows nested oopses on
 * the CPU that already holds the lock. */
285 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
286 static int die_owner = -1;
287 static unsigned int die_nest_count;
/*
 * oops_begin - disable interrupts and take die_lock; returns the saved
 * flags for oops_end().  A CPU that oopses while already printing an oops
 * (cpu == die_owner) is let through rather than deadlocking.
 * NOTE(review): the owner/nest bookkeeping lines are missing from this
 * excerpt.
 */
289 unsigned __kprobes long oops_begin(void)
296 /* racy, but better than risking deadlock. */
297 raw_local_irq_save(flags);
298 cpu = smp_processor_id();
299 if (!arch_spin_trylock(&die_lock)) {
300 if (cpu == die_owner)
301 /* nested oops. should stop eventually */;
303 arch_spin_lock(&die_lock);
311 EXPORT_SYMBOL_GPL(oops_begin);
/*
 * oops_end - counterpart of oops_begin(): optionally kexec-crash, taint
 * the kernel, drop die_lock when the nest count reaches zero, restore
 * irqs, and panic if the oops is fatal (signr / panic_on_oops — the
 * surrounding checks are not visible in this excerpt).
 */
313 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
/* Hand over to the crash kernel if one is loaded */
315 if (regs && kexec_should_crash(current))
320 add_taint(TAINT_DIE);
323 /* Nest count reaches zero, release the lock. */
324 arch_spin_unlock(&die_lock);
325 raw_local_irq_restore(flags);
/* Dying in interrupt context can never be recovered */
331 panic("Fatal exception in interrupt");
333 panic("Fatal exception");
/*
 * __die - print the oops report proper: banner with error code and die
 * counter, config markers, die-chain notification, registers, and a short
 * IP/SP summary so the key facts survive even if the oops scrolls away.
 * NOTE(review): several lines (printk of the banner head, returns, the
 * kernel-mode else branch) are missing from this excerpt; the visible
 * EIP/SS:ESP lines are presumably the 32-bit summary and RIP/RSP the
 * 64-bit one.
 */
337 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
344 "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
345 #ifdef CONFIG_PREEMPT
351 #ifdef CONFIG_DEBUG_PAGEALLOC
352 printk("DEBUG_PAGEALLOC");
/* Give die-chain listeners (kgdb, kdb, ...) a chance to take over */
355 if (notify_die(DIE_OOPS, str, regs, err,
356 current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
359 show_registers(regs);
361 if (user_mode_vm(regs)) {
363 ss = regs->ss & 0xffff;
365 sp = kernel_stack_pointer(regs);
368 printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
369 print_symbol("%s", regs->ip);
370 printk(" SS:ESP %04x:%08lx\n", ss, sp);
372 /* Executive summary in case the oops scrolled away */
373 printk(KERN_ALERT "RIP ");
374 printk_address(regs->ip, 1);
375 printk(" RSP <%016lx>\n", regs->sp);
381 * This is gone through when something in the kernel has done something bad
382 * and is about to be terminated:
384 void die(const char *str, struct pt_regs *regs, long err)
386 unsigned long flags = oops_begin();
/* Kernel-mode faults may be annotated BUG()/WARN() sites — report those */
389 if (!user_mode_vm(regs))
390 report_bug(regs->ip, regs);
/* __die()'s return presumably selects the signal/suppression (lines
 * between these two calls are missing from this excerpt) */
392 if (__die(str, regs, err))
394 oops_end(flags, regs, sig);
/* Parse the "kstack=" boot parameter: depth of the raw stack dump */
397 static int __init kstack_setup(char *s)
401 kstack_depth_to_print = simple_strtoul(s, NULL, 0);
404 early_param("kstack", kstack_setup);
/* Parse "code_bytes=": how much code to dump around the faulting IP,
 * clamped (presumably to 8192 — the clamping assignment is not visible) */
406 static int __init code_bytes_setup(char *s)
408 code_bytes = simple_strtoul(s, NULL, 0);
409 if (code_bytes > 8192)
414 __setup("code_bytes=", code_bytes_setup);
416 #ifdef CONFIG_STACK_UNWIND
/*
 * Parse "call_trace=": selects the backtrace strategy used by
 * try_stack_unwind().  The assignments for each keyword are missing from
 * this excerpt; "old" = scanner only, "both", "newfallback" (default
 * behavior of call_trace==1), "new" = DWARF2 unwinder only.
 */
417 static int __init call_trace_setup(char *s)
421 if (strcmp(s, "old") == 0)
423 else if (strcmp(s, "both") == 0)
425 else if (strcmp(s, "newfallback") == 0)
427 else if (strcmp(s, "new") == 0)
431 early_param("call_trace", call_trace_setup);