2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (c) 2006, 2007-2009 Silicon Graphics, Inc. All Rights Reserved.
8 * Common code for doing accurate backtraces on i386 and x86_64, including
9 * printing the values of arguments.
12 #include <linux/slab.h>
13 #include <linux/init.h>
14 #include <linux/kallsyms.h>
15 #include <linux/kdb.h>
16 #include <linux/kdbprivate.h>
17 #include <linux/ctype.h>
18 #include <linux/string.h>
19 #include <linux/stringify.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/nmi.h>
23 #include <asm/asm-offsets.h>
24 #include <asm/system.h>
/* Emit a kdb debug message, but only when basic-block debugging
 * (KDB_DEBUG(BB)) is enabled.  Wrapped in do { } while (0) so the macro
 * expands to a single statement and is safe in unbraced if/else bodies;
 * the previous bare '{ ... }' form made 'if (x) KDB_DEBUG_BB(...); else'
 * a syntax error.
 */
#define KDB_DEBUG_BB(fmt, ...) \
	do { if (KDB_DEBUG(BB)) kdb_printf(fmt, ## __VA_ARGS__); } while (0)
/* Print a signed offset as "<prefix>+0x<val><suffix>" or
 * "<prefix>-0x<val><suffix>".  The offset argument is fully parenthesized
 * in the expansion because callers may pass an arbitrary expression; note
 * that it is still evaluated more than once, so 'offset' must not have
 * side effects.
 */
#define KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix) \
	kdb_printf(prefix "%c0x%x" suffix, \
		   (offset) >= 0 ? '+' : '-', \
		   (offset) >= 0 ? (offset) : -(offset))
/* As KDB_DEBUG_BB_OFFSET_PRINTF, but only prints when basic-block
 * debugging (KDB_DEBUG(BB)) is enabled.  do { } while (0) keeps the
 * expansion a single statement so it is safe in unbraced if/else bodies.
 */
#define KDB_DEBUG_BB_OFFSET(offset, prefix, suffix) \
	do { if (KDB_DEBUG(BB)) KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix); } while (0)
35 #define BB_CHECK(expr, val, ret) \
37 if (unlikely(expr)) { \
38 kdb_printf("%s, line %d: BB_CHECK(" #expr ") failed " \
40 __FUNCTION__, __LINE__, (long)val); \
48 /* Use BBRG_Rxx for both i386 and x86_64. RAX through R15 must be at the end,
49 * starting with RAX. Some of these codes do not reflect actual registers,
50 * such codes are special cases when parsing the record of register changes.
51 * When updating BBRG_ entries, update bbrg_name as well.
56 BBRG_UNDEFINED = 0, /* Register contents are undefined */
57 BBRG_OSP, /* original stack pointer on entry to function */
76 const static char *bbrg_name[] = {
77 [BBRG_UNDEFINED] = "undefined",
97 /* Map a register name to its register code. This includes the sub-register
98 * addressable fields, e.g. parts of rax can be addressed as ax, al, ah, eax.
99 * The list is sorted so it can be binary chopped, sort command is:
100 * LANG=C sort -t '"' -k2
103 struct bb_reg_code_map {
104 enum bb_reg_code reg;
108 const static struct bb_reg_code_map
109 bb_reg_code_map[] = {
135 { BBRG_R10, "r10d" },
136 { BBRG_R10, "r10l" },
137 { BBRG_R10, "r10w" },
139 { BBRG_R11, "r11d" },
140 { BBRG_R11, "r11l" },
141 { BBRG_R11, "r11w" },
143 { BBRG_R12, "r12d" },
144 { BBRG_R12, "r12l" },
145 { BBRG_R12, "r12w" },
147 { BBRG_R13, "r13d" },
148 { BBRG_R13, "r13l" },
149 { BBRG_R13, "r13w" },
151 { BBRG_R14, "r14d" },
152 { BBRG_R14, "r14l" },
153 { BBRG_R14, "r14w" },
155 { BBRG_R15, "r15d" },
156 { BBRG_R15, "r15l" },
157 { BBRG_R15, "r15w" },
180 /* Record register contents in terms of the values that were passed to this
181 * function, IOW track which registers contain an input value. A register's
182 * contents can be undefined, it can contain an input register value or it can
183 * contain an offset from the original stack pointer.
185 * This structure is used to represent the current contents of the integer
186 * registers, it is held in an array that is indexed by BBRG_xxx. The element
187 * for BBRG_xxx indicates what input value is currently in BBRG_xxx. When
188 * 'value' is BBRG_OSP then register BBRG_xxx contains a stack pointer,
189 * pointing at 'offset' from the original stack pointer on entry to the
190 * function. When 'value' is not BBRG_OSP then element BBRG_xxx contains the
191 * original contents of an input register and offset is ignored.
193 * An input register 'value' can be stored in more than one register and/or in
194 * more than one memory location.
197 struct bb_reg_contains
199 enum bb_reg_code value: 8;
203 /* Note: the offsets in struct bb_mem_contains in this code are _NOT_ offsets
204 * from OSP, they are offsets from current RSP. It fits better with the way
205 * that struct pt_regs is built, some code pushes extra data before pt_regs so
206 * working with OSP relative offsets gets messy. struct bb_mem_contains
207 * entries must be in descending order of RSP offset.
/* Bitmask with one bit per BBRG_ register code (0 .. BBRG_R15), used to
 * flag slots in the expected memory/register state that may legitimately
 * be undefined (see skip_mem/skip_regs in struct bb_name_state).
 */
typedef struct { DECLARE_BITMAP(bits, BBRG_R15+1); } bbrgmask_t;
/* Build a bbrgmask_t bits[0] word with the bit for register BBRG_<reg>
 * set.  Shift 1UL, not 1: the result is stored in an unsigned long bitmap
 * word (bbrgmask_t.bits[0]) and a plain-int shift would overflow if a
 * register code ever reached bit 31.
 */
#define BB_SKIP(reg) (1UL << (BBRG_ ## reg))
212 struct bb_mem_contains {
213 short offset_address;
214 enum bb_reg_code value: 8;
217 /* Transfer of control to a label outside the current function. If the
218 * transfer is to a known common restore path that expects known registers
219 * and/or a known memory state (e.g. struct pt_regs) then do a sanity check on
220 * the state at this point.
223 struct bb_name_state {
224 const char *name; /* target function */
225 bfd_vma address; /* Address of target function */
226 const char *fname; /* optional from function name */
227 const struct bb_mem_contains *mem; /* expected memory state */
228 const struct bb_reg_contains *regs; /* expected register state */
229 const unsigned short mem_size; /* ARRAY_SIZE(mem) */
230 const unsigned short regs_size; /* ARRAY_SIZE(regs) */
231 const short osp_offset; /* RSP in regs == OSP+osp_offset */
232 const bbrgmask_t skip_mem; /* Some slots in mem may be undefined */
233 const bbrgmask_t skip_regs; /* Some slots in regs may be undefined */
236 /* NS (NAME_STATE) macros define the register and memory state when we transfer
237 * control to or start decoding a special case name. Use NS when the target
238 * label always has the same state. Use NS_FROM and specify the source label
239 * if the target state is slightly different depending on where it is branched
240 * from. This gives better state checking, by isolating the special cases.
242 * Note: for the same target label, NS_FROM entries must be followed by a
246 #define NS_FROM(iname, ifname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \
252 .mem_size = ARRAY_SIZE(imem), \
253 .regs_size = ARRAY_SIZE(iregs), \
254 .skip_mem.bits[0] = iskip_mem, \
255 .skip_regs.bits[0] = iskip_regs, \
256 .osp_offset = iosp_offset, \
/* Shorter forms for the common cases */
/* Full state check: both memory and register expectations, plus an OSP
 * offset, valid from any source label (ifname == NULL).
 */
#define NS(iname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \
	NS_FROM(iname, NULL, imem, iregs, iskip_mem, iskip_regs, iosp_offset)
/* Memory-only check: register state is ignored (empty no_regs array). */
#define NS_MEM(iname, imem, iskip_mem) \
	NS_FROM(iname, NULL, imem, no_regs, iskip_mem, 0, 0)
/* Memory-only check restricted to branches arriving from function ifname. */
#define NS_MEM_FROM(iname, ifname, imem, iskip_mem) \
	NS_FROM(iname, ifname, imem, no_regs, iskip_mem, 0, 0)
/* Register-only check: memory state is ignored (empty no_memory array). */
#define NS_REG(iname, iregs, iskip_regs) \
	NS_FROM(iname, NULL, no_memory, iregs, 0, iskip_regs, 0)
/* Register-only check restricted to branches arriving from function ifname. */
#define NS_REG_FROM(iname, ifname, iregs, iskip_regs) \
	NS_FROM(iname, ifname, no_memory, iregs, 0, iskip_regs, 0)
273 bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src);
/* Names of the module and function currently being analysed.  bb_func_name
 * is consulted by the arch helpers below (e.g. bb_asmlinkage_arch);
 * presumably both are set by the backtrace driver before decoding starts —
 * the assignment is outside this chunk, so confirm against the caller.
 */
static const char *bb_mod_name, *bb_func_name;
278 bb_noret(const char *name)
280 if (strcmp(name, "panic") == 0 ||
281 strcmp(name, "do_exit") == 0 ||
282 strcmp(name, "do_group_exit") == 0 ||
283 strcmp(name, "complete_and_exit") == 0)
288 /*============================================================================*/
290 /* Most of the basic block code and data is common to x86_64 and i386. This */
291 /* large ifdef contains almost all of the differences between the two */
294 /* Make sure you update the correct section of this ifdef. */
296 /*============================================================================*/
300 /* Registers that can be used to pass parameters, in the order that parameters
304 const static enum bb_reg_code
314 const static enum bb_reg_code
315 bb_preserved_reg[] = {
325 static const struct bb_mem_contains full_pt_regs[] = {
342 static const struct bb_mem_contains full_pt_regs_plus_1[] = {
360 * Going into error_exit we have the hardware pushed error_code on the stack
361 * plus a full pt_regs
363 static const struct bb_mem_contains error_code_full_pt_regs[] = {
364 { 0x78, BBRG_UNDEFINED },
381 static const struct bb_mem_contains partial_pt_regs[] = {
392 static const struct bb_mem_contains partial_pt_regs_plus_1[] = {
403 static const struct bb_mem_contains partial_pt_regs_plus_2[] = {
414 static const struct bb_mem_contains no_memory[] = {
416 /* Hardware has already pushed an error_code on the stack. Use undefined just
417 * to set the initial stack offset.
419 static const struct bb_mem_contains error_code[] = {
420 { 0x0, BBRG_UNDEFINED },
422 /* error_code plus original rax */
423 static const struct bb_mem_contains error_code_rax[] = {
424 { 0x8, BBRG_UNDEFINED },
428 static const struct bb_reg_contains all_regs[] = {
429 [BBRG_RAX] = { BBRG_RAX, 0 },
430 [BBRG_RBX] = { BBRG_RBX, 0 },
431 [BBRG_RCX] = { BBRG_RCX, 0 },
432 [BBRG_RDX] = { BBRG_RDX, 0 },
433 [BBRG_RDI] = { BBRG_RDI, 0 },
434 [BBRG_RSI] = { BBRG_RSI, 0 },
435 [BBRG_RBP] = { BBRG_RBP, 0 },
436 [BBRG_RSP] = { BBRG_OSP, 0 },
437 [BBRG_R8 ] = { BBRG_R8, 0 },
438 [BBRG_R9 ] = { BBRG_R9, 0 },
439 [BBRG_R10] = { BBRG_R10, 0 },
440 [BBRG_R11] = { BBRG_R11, 0 },
441 [BBRG_R12] = { BBRG_R12, 0 },
442 [BBRG_R13] = { BBRG_R13, 0 },
443 [BBRG_R14] = { BBRG_R14, 0 },
444 [BBRG_R15] = { BBRG_R15, 0 },
446 static const struct bb_reg_contains no_regs[] = {
449 static struct bb_name_state bb_special_cases[] = {
451 /* First the cases that pass data only in memory. We do not check any
452 * register state for these cases.
455 /* Simple cases, no exceptions */
456 NS_MEM("ia32_ptregs_common", partial_pt_regs_plus_1, 0),
457 NS_MEM("ia32_sysret", partial_pt_regs, 0),
458 NS_MEM("int_careful", partial_pt_regs, 0),
459 NS_MEM("ia32_badarg", partial_pt_regs, 0),
460 NS_MEM("int_restore_rest", full_pt_regs, 0),
461 NS_MEM("int_signal", full_pt_regs, 0),
462 NS_MEM("int_very_careful", partial_pt_regs, 0),
463 NS_MEM("ptregscall_common", full_pt_regs_plus_1, 0),
464 NS_MEM("ret_from_intr", partial_pt_regs_plus_2, 0),
465 NS_MEM("stub32_clone", partial_pt_regs_plus_1, 0),
466 NS_MEM("stub32_execve", partial_pt_regs_plus_1, 0),
467 NS_MEM("stub32_fork", partial_pt_regs_plus_1, 0),
468 NS_MEM("stub32_iopl", partial_pt_regs_plus_1, 0),
469 NS_MEM("stub32_rt_sigreturn", partial_pt_regs_plus_1, 0),
470 NS_MEM("stub32_sigaltstack", partial_pt_regs_plus_1, 0),
471 NS_MEM("stub32_sigreturn", partial_pt_regs_plus_1, 0),
472 NS_MEM("stub32_vfork", partial_pt_regs_plus_1, 0),
473 NS_MEM("stub_clone", partial_pt_regs_plus_1, 0),
474 NS_MEM("stub_execve", partial_pt_regs_plus_1, 0),
475 NS_MEM("stub_fork", partial_pt_regs_plus_1, 0),
476 NS_MEM("stub_iopl", partial_pt_regs_plus_1, 0),
477 NS_MEM("stub_rt_sigreturn", partial_pt_regs_plus_1, 0),
478 NS_MEM("stub_sigaltstack", partial_pt_regs_plus_1, 0),
479 NS_MEM("stub_vfork", partial_pt_regs_plus_1, 0),
480 NS_MEM("sysenter_auditsys", partial_pt_regs,
481 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11)),
483 NS_MEM("paranoid_exit", error_code_full_pt_regs, 0),
485 NS_MEM_FROM("ia32_badsys", "ia32_sysenter_target",
487 /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on
488 * some paths. It also stomps on RAX.
490 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
492 NS_MEM_FROM("ia32_badsys", "ia32_cstar_target",
494 /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some
495 * paths. It also stomps on RAX. Even more confusing, instead
496 * of storing RCX it stores RBP. WTF?
498 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
499 BB_SKIP(RAX) | BB_SKIP(RCX)),
500 NS_MEM_FROM("ia32_badsys", "ia32_syscall",
502 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11)),
503 NS_MEM("ia32_badsys", partial_pt_regs, 0),
505 #ifdef CONFIG_AUDITSYSCALL
506 NS_MEM_FROM("int_with_check", "sysexit_audit", partial_pt_regs,
507 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
509 NS_MEM_FROM("int_with_check", "ia32_cstar_target", partial_pt_regs,
510 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
511 BB_SKIP(RAX) | BB_SKIP(RCX)),
513 NS_MEM("int_with_check", no_memory, 0),
515 /* Various bits of code branch to int_ret_from_sys_call, with slightly
516 * different missing values in pt_regs.
518 NS_MEM_FROM("int_ret_from_sys_call", "ret_from_fork",
521 NS_MEM_FROM("int_ret_from_sys_call", "stub_execve",
523 BB_SKIP(RAX) | BB_SKIP(RCX)),
524 NS_MEM_FROM("int_ret_from_sys_call", "stub_rt_sigreturn",
526 BB_SKIP(RAX) | BB_SKIP(RCX)),
527 NS_MEM_FROM("int_ret_from_sys_call", "kernel_execve",
530 NS_MEM_FROM("int_ret_from_sys_call", "ia32_syscall",
532 /* ia32_syscall only saves RDI through RCX. */
533 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
535 NS_MEM_FROM("int_ret_from_sys_call", "ia32_sysenter_target",
537 /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on
538 * some paths. It also stomps on RAX.
540 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
542 NS_MEM_FROM("int_ret_from_sys_call", "ia32_cstar_target",
544 /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some
545 * paths. It also stomps on RAX. Even more confusing, instead
546 * of storing RCX it stores RBP. WTF?
548 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
549 BB_SKIP(RAX) | BB_SKIP(RCX)),
550 NS_MEM_FROM("int_ret_from_sys_call", "ia32_badsys",
551 partial_pt_regs, BB_SKIP(RAX)),
552 NS_MEM("int_ret_from_sys_call", partial_pt_regs, 0),
554 #ifdef CONFIG_PREEMPT
555 NS_MEM("retint_kernel", partial_pt_regs, BB_SKIP(RAX)),
556 #endif /* CONFIG_PREEMPT */
558 NS_MEM("retint_careful", partial_pt_regs, BB_SKIP(RAX)),
560 /* Horrible hack: For a brand new x86_64 task, switch_to() branches to
561 * ret_from_fork with a totally different stack state from all the
562 * other tasks that come out of switch_to(). This non-standard state
563 * cannot be represented so just ignore the branch from switch_to() to
564 * ret_from_fork. Due to inlining and linker labels, switch_to() can
565 * appear as several different function labels, including schedule,
566 * context_switch and __sched_text_start.
568 NS_MEM_FROM("ret_from_fork", "schedule", no_memory, 0),
569 NS_MEM_FROM("ret_from_fork", "__schedule", no_memory, 0),
570 NS_MEM_FROM("ret_from_fork", "__sched_text_start", no_memory, 0),
571 NS_MEM_FROM("ret_from_fork", "context_switch", no_memory, 0),
572 NS_MEM("ret_from_fork", full_pt_regs, 0),
574 NS_MEM_FROM("ret_from_sys_call", "ret_from_fork",
577 NS_MEM("ret_from_sys_call", partial_pt_regs, 0),
579 NS_MEM("retint_restore_args",
581 BB_SKIP(RAX) | BB_SKIP(RCX)),
583 NS_MEM("retint_swapgs",
585 BB_SKIP(RAX) | BB_SKIP(RCX)),
587 /* Now the cases that pass data in registers. We do not check any
588 * memory state for these cases.
591 NS_REG("bad_put_user",
592 all_regs, BB_SKIP(RBX)),
594 NS_REG("bad_get_user",
595 all_regs, BB_SKIP(RAX) | BB_SKIP(RDX)),
597 NS_REG("bad_to_user",
599 BB_SKIP(RAX) | BB_SKIP(RCX)),
601 NS_REG("ia32_ptregs_common",
605 NS_REG("copy_user_generic_unrolled",
607 BB_SKIP(RAX) | BB_SKIP(RCX)),
609 NS_REG("copy_user_generic_string",
611 BB_SKIP(RAX) | BB_SKIP(RCX)),
617 /* Finally the cases that pass data in both registers and memory.
620 NS("invalid_TSS", error_code, all_regs, 0, 0, 0),
621 NS("segment_not_present", error_code, all_regs, 0, 0, 0),
622 NS("alignment_check", error_code, all_regs, 0, 0, 0),
623 NS("page_fault", error_code, all_regs, 0, 0, 0),
624 NS("general_protection", error_code, all_regs, 0, 0, 0),
625 NS("error_entry", error_code_rax, all_regs, 0, BB_SKIP(RAX), -0x10),
626 NS("error_exit", error_code_full_pt_regs, no_regs, 0, 0, 0x30),
627 NS("common_interrupt", error_code, all_regs, 0, 0, -0x8),
628 NS("save_args", error_code, all_regs, 0, 0, -0x50),
629 NS("int3", no_memory, all_regs, 0, 0, -0x80),
632 static const char *bb_spurious[] = {
636 "system_call_after_swapgs",
637 "system_call_fastpath",
643 #ifdef CONFIG_AUDITSYSCALL
648 "int_ret_from_sys_call",
654 /* common_interrupt */
657 "retint_with_reschedule",
660 "retint_restore_args",
666 #ifdef CONFIG_PREEMPT
668 #endif /* CONFIG_PREEMPT */
672 "paranoid_userspace",
679 #ifdef CONFIG_TRACE_IRQFLAGS
688 /* ia32_sysenter_target */
691 "sysexit_from_sys_call",
692 #ifdef CONFIG_AUDITSYSCALL
697 /* ia32_cstar_target */
700 "sysretl_from_sys_call",
701 #ifdef CONFIG_AUDITSYSCALL
710 #ifdef CONFIG_HIBERNATION
714 #endif /* CONFIG_HIBERNATION */
715 #ifdef CONFIG_KPROBES
718 /* kretprobe_trampoline_holder */
719 "kretprobe_trampoline",
720 #endif /* CONFIG_KPROBES */
722 /* relocate_kernel */
723 "relocate_new_kernel",
724 #endif /* CONFIG_KEXEC */
725 #ifdef CONFIG_PARAVIRT_XEN
726 /* arch/i386/xen/xen-asm.S */
727 "xen_irq_enable_direct_end",
728 "xen_irq_disable_direct_end",
729 "xen_save_fl_direct_end",
730 "xen_restore_fl_direct_end",
731 "xen_iret_start_crit",
735 #endif /* CONFIG_XEN */
738 static const char *bb_hardware_handlers[] = {
752 bb_hardware_pushed_arch(kdb_machreg_t rsp,
753 const struct kdb_activation_record *ar)
755 /* x86_64 interrupt stacks are 16 byte aligned and you must get the
756 * next rsp from stack, it cannot be statically calculated. Do not
757 * include the word at rsp, it is pushed by hardware but is treated as
758 * a normal software return value.
760 * When an IST switch occurs (e.g. NMI) then the saved rsp points to
761 * another stack entirely. Assume that the IST stack is 16 byte
762 * aligned and just return the size of the hardware data on this stack.
763 * The stack unwind code will take care of the stack switch.
765 kdb_machreg_t saved_rsp = *((kdb_machreg_t *)rsp + 3);
766 int hardware_pushed = saved_rsp - rsp - KDB_WORD_SIZE;
767 if (hardware_pushed < 4 * KDB_WORD_SIZE ||
768 saved_rsp < ar->stack.logical_start ||
769 saved_rsp >= ar->stack.logical_end)
770 return 4 * KDB_WORD_SIZE;
772 return hardware_pushed;
776 bb_start_block0(void)
778 bb_reg_code_set_value(BBRG_RAX, BBRG_RAX);
779 bb_reg_code_set_value(BBRG_RBX, BBRG_RBX);
780 bb_reg_code_set_value(BBRG_RCX, BBRG_RCX);
781 bb_reg_code_set_value(BBRG_RDX, BBRG_RDX);
782 bb_reg_code_set_value(BBRG_RDI, BBRG_RDI);
783 bb_reg_code_set_value(BBRG_RSI, BBRG_RSI);
784 bb_reg_code_set_value(BBRG_RBP, BBRG_RBP);
785 bb_reg_code_set_value(BBRG_RSP, BBRG_OSP);
786 bb_reg_code_set_value(BBRG_R8, BBRG_R8);
787 bb_reg_code_set_value(BBRG_R9, BBRG_R9);
788 bb_reg_code_set_value(BBRG_R10, BBRG_R10);
789 bb_reg_code_set_value(BBRG_R11, BBRG_R11);
790 bb_reg_code_set_value(BBRG_R12, BBRG_R12);
791 bb_reg_code_set_value(BBRG_R13, BBRG_R13);
792 bb_reg_code_set_value(BBRG_R14, BBRG_R14);
793 bb_reg_code_set_value(BBRG_R15, BBRG_R15);
796 /* x86_64 does not have a special case for __switch_to */
799 bb_fixup_switch_to(char *p)
804 bb_asmlinkage_arch(void)
806 return strncmp(bb_func_name, "__down", 6) == 0 ||
807 strncmp(bb_func_name, "__up", 4) == 0 ||
808 strncmp(bb_func_name, "stub_", 5) == 0 ||
809 strcmp(bb_func_name, "ret_from_fork") == 0 ||
810 strcmp(bb_func_name, "ptregscall_common") == 0;
813 #else /* !CONFIG_X86_64 */
815 /* Registers that can be used to pass parameters, in the order that parameters
819 const static enum bb_reg_code
826 const static enum bb_reg_code
827 bb_preserved_reg[] = {
835 static const struct bb_mem_contains full_pt_regs[] = {
844 static const struct bb_mem_contains no_memory[] = {
846 /* Hardware has already pushed an error_code on the stack. Use undefined just
847 * to set the initial stack offset.
849 static const struct bb_mem_contains error_code[] = {
850 { 0x0, BBRG_UNDEFINED },
852 /* rbx already pushed */
853 static const struct bb_mem_contains rbx_pushed[] = {
856 #ifdef CONFIG_MATH_EMULATION
857 static const struct bb_mem_contains mem_fpu_reg_round[] = {
863 #endif /* CONFIG_MATH_EMULATION */
865 static const struct bb_reg_contains all_regs[] = {
866 [BBRG_RAX] = { BBRG_RAX, 0 },
867 [BBRG_RBX] = { BBRG_RBX, 0 },
868 [BBRG_RCX] = { BBRG_RCX, 0 },
869 [BBRG_RDX] = { BBRG_RDX, 0 },
870 [BBRG_RDI] = { BBRG_RDI, 0 },
871 [BBRG_RSI] = { BBRG_RSI, 0 },
872 [BBRG_RBP] = { BBRG_RBP, 0 },
873 [BBRG_RSP] = { BBRG_OSP, 0 },
875 static const struct bb_reg_contains no_regs[] = {
877 #ifdef CONFIG_MATH_EMULATION
878 static const struct bb_reg_contains reg_fpu_reg_round[] = {
879 [BBRG_RBP] = { BBRG_OSP, -0x4 },
880 [BBRG_RSP] = { BBRG_OSP, -0x10 },
882 #endif /* CONFIG_MATH_EMULATION */
884 static struct bb_name_state bb_special_cases[] = {
886 /* First the cases that pass data only in memory. We do not check any
887 * register state for these cases.
890 /* Simple cases, no exceptions */
891 NS_MEM("check_userspace", full_pt_regs, 0),
892 NS_MEM("device_not_available_emulate", full_pt_regs, 0),
893 NS_MEM("ldt_ss", full_pt_regs, 0),
894 NS_MEM("no_singlestep", full_pt_regs, 0),
895 NS_MEM("restore_all", full_pt_regs, 0),
896 NS_MEM("restore_nocheck", full_pt_regs, 0),
897 NS_MEM("restore_nocheck_notrace", full_pt_regs, 0),
898 NS_MEM("ret_from_exception", full_pt_regs, 0),
899 NS_MEM("ret_from_fork", full_pt_regs, 0),
900 NS_MEM("ret_from_intr", full_pt_regs, 0),
901 NS_MEM("work_notifysig", full_pt_regs, 0),
902 NS_MEM("work_pending", full_pt_regs, 0),
904 #ifdef CONFIG_PREEMPT
905 NS_MEM("resume_kernel", full_pt_regs, 0),
906 #endif /* CONFIG_PREEMPT */
908 NS_MEM("common_interrupt", error_code, 0),
909 NS_MEM("error_code", error_code, 0),
911 NS_MEM("bad_put_user", rbx_pushed, 0),
913 NS_MEM_FROM("resume_userspace", "syscall_badsys",
914 full_pt_regs, BB_SKIP(RAX)),
915 NS_MEM_FROM("resume_userspace", "syscall_fault",
916 full_pt_regs, BB_SKIP(RAX)),
917 NS_MEM_FROM("resume_userspace", "syscall_trace_entry",
918 full_pt_regs, BB_SKIP(RAX)),
919 /* Too difficult to trace through the various vm86 functions for now.
920 * They are C functions that start off with some memory state, fiddle
921 * the registers then jmp directly to resume_userspace. For the
922 * moment, just assume that they are valid and do no checks.
924 NS_FROM("resume_userspace", "do_int",
925 no_memory, no_regs, 0, 0, 0),
926 NS_FROM("resume_userspace", "do_sys_vm86",
927 no_memory, no_regs, 0, 0, 0),
928 NS_FROM("resume_userspace", "handle_vm86_fault",
929 no_memory, no_regs, 0, 0, 0),
930 NS_FROM("resume_userspace", "handle_vm86_trap",
931 no_memory, no_regs, 0, 0, 0),
932 NS_MEM("resume_userspace", full_pt_regs, 0),
934 NS_MEM_FROM("syscall_badsys", "ia32_sysenter_target",
935 full_pt_regs, BB_SKIP(RBP)),
936 NS_MEM("syscall_badsys", full_pt_regs, 0),
938 NS_MEM_FROM("syscall_call", "syscall_trace_entry",
939 full_pt_regs, BB_SKIP(RAX)),
940 NS_MEM("syscall_call", full_pt_regs, 0),
942 NS_MEM_FROM("syscall_exit", "syscall_trace_entry",
943 full_pt_regs, BB_SKIP(RAX)),
944 NS_MEM("syscall_exit", full_pt_regs, 0),
946 NS_MEM_FROM("syscall_exit_work", "ia32_sysenter_target",
947 full_pt_regs, BB_SKIP(RAX) | BB_SKIP(RBP)),
948 NS_MEM_FROM("syscall_exit_work", "system_call",
949 full_pt_regs, BB_SKIP(RAX)),
950 NS_MEM("syscall_exit_work", full_pt_regs, 0),
952 NS_MEM_FROM("syscall_trace_entry", "ia32_sysenter_target",
953 full_pt_regs, BB_SKIP(RBP)),
954 NS_MEM_FROM("syscall_trace_entry", "system_call",
955 full_pt_regs, BB_SKIP(RAX)),
956 NS_MEM("syscall_trace_entry", full_pt_regs, 0),
958 /* Now the cases that pass data in registers. We do not check any
959 * memory state for these cases.
962 NS_REG("syscall_fault", all_regs, 0),
964 NS_REG("bad_get_user", all_regs,
965 BB_SKIP(RAX) | BB_SKIP(RDX)),
967 /* Finally the cases that pass data in both registers and memory.
970 /* This entry is redundant now because bb_fixup_switch_to() hides the
971 * jmp __switch_to case, however the entry is left here as
974 * NS("__switch_to", no_memory, no_regs, 0, 0, 0),
977 NS("iret_exc", no_memory, all_regs, 0, 0, 0x20),
979 #ifdef CONFIG_MATH_EMULATION
980 NS("fpu_reg_round", mem_fpu_reg_round, reg_fpu_reg_round, 0, 0, 0),
981 #endif /* CONFIG_MATH_EMULATION */
984 static const char *bb_spurious[] = {
985 /* ret_from_exception */
990 #ifdef CONFIG_PREEMPT
992 #endif /* CONFIG_PREEMPT */
993 /* ia32_sysenter_target */
1001 "restore_nocheck_notrace",
1003 /* do not include iret_exc, it is in a .fixup section */
1008 "work_notifysig_v86",
1009 #endif /* CONFIG_VM86 */
1012 /* device_not_available */
1013 "device_not_available_emulate",
1015 "debug_esp_fix_insn",
1016 "debug_stack_correct",
1018 "nmi_stack_correct",
1020 "nmi_debug_stack_check",
1022 #ifdef CONFIG_HIBERNATION
1026 #endif /* CONFIG_HIBERNATION */
1027 #ifdef CONFIG_KPROBES
1029 "jprobe_return_end",
1030 #endif /* CONFIG_KPROBES */
1032 /* relocate_kernel */
1033 "relocate_new_kernel",
1034 #endif /* CONFIG_KEXEC */
1035 #ifdef CONFIG_MATH_EMULATION
1036 /* assorted *.S files in arch/i386/math_emu */
1038 "Denorm_shift_more_than_32",
1039 "Denorm_shift_more_than_63",
1040 "Denorm_shift_more_than_64",
1041 "Do_unmasked_underflow",
1042 "Exp_not_underflow",
1045 "fpu_reg_round_signed_special_exit",
1046 "fpu_reg_round_special_exit",
1056 "L_bugged_denorm_486",
1060 "LCheck_24_round_up",
1061 "LCheck_53_round_up",
1062 "LCheck_Round_Overflow",
1063 "LCheck_truncate_24",
1064 "LCheck_truncate_53",
1065 "LCheck_truncate_64",
1066 "LDenormal_adj_exponent",
1109 "L_no_precision_loss",
1115 "L_precision_lost_down",
1116 "L_precision_lost_up",
1117 "LPrevent_2nd_overflow",
1118 "LPrevent_3rd_overflow",
1121 "LResult_Normalised",
1124 "LRound_nearest_24",
1125 "LRound_nearest_53",
1126 "LRound_nearest_64",
1131 "L_round_the_result",
1136 "LSecond_div_not_1",
1144 "L_Store_significand",
1150 "L_underflow_to_zero",
1158 "sqrt_get_more_precision",
1159 "sqrt_more_prec_large",
1160 "sqrt_more_prec_ok",
1161 "sqrt_more_prec_small",
1163 "sqrt_near_exact_large",
1164 "sqrt_near_exact_ok",
1165 "sqrt_near_exact_small",
1166 "sqrt_near_exact_x",
1167 "sqrt_prelim_no_adjust",
1168 "sqrt_round_result",
1169 "sqrt_stage_2_done",
1170 "sqrt_stage_2_error",
1171 "sqrt_stage_2_finish",
1172 "sqrt_stage_2_positive",
1173 "sqrt_stage_3_error",
1174 "sqrt_stage_3_finished",
1175 "sqrt_stage_3_no_error",
1176 "sqrt_stage_3_positive",
1177 "Unmasked_underflow",
1178 "xExp_not_underflow",
1179 #endif /* CONFIG_MATH_EMULATION */
1182 static const char *bb_hardware_handlers[] = {
1183 "ret_from_exception",
1188 "coprocessor_error",
1189 "simd_coprocessor_error",
1190 "device_not_available",
1197 "coprocessor_segment_overrun",
1199 "segment_not_present",
1201 "general_protection",
1206 "spurious_interrupt_bug",
1210 bb_hardware_pushed_arch(kdb_machreg_t rsp,
1211 const struct kdb_activation_record *ar)
1213 return (2 * KDB_WORD_SIZE);
1217 bb_start_block0(void)
1219 bb_reg_code_set_value(BBRG_RAX, BBRG_RAX);
1220 bb_reg_code_set_value(BBRG_RBX, BBRG_RBX);
1221 bb_reg_code_set_value(BBRG_RCX, BBRG_RCX);
1222 bb_reg_code_set_value(BBRG_RDX, BBRG_RDX);
1223 bb_reg_code_set_value(BBRG_RDI, BBRG_RDI);
1224 bb_reg_code_set_value(BBRG_RSI, BBRG_RSI);
1225 bb_reg_code_set_value(BBRG_RBP, BBRG_RBP);
1226 bb_reg_code_set_value(BBRG_RSP, BBRG_OSP);
1229 /* The i386 code that switches stack in a context switch is an extremely
1230 * special case. It saves the rip pointing to a label that is not otherwise
1231 * referenced, saves the current rsp then pushes a word. The magic code that
1232 * resumes the new task picks up the saved rip and rsp, effectively referencing
1233 * a label that otherwise is not used and ignoring the pushed word.
1235 * The simplest way to handle this very strange case is to recognise jmp
1236 * address <__switch_to> and treat it as a popfl instruction. This avoids
1237 * terminating the block on this jmp and removes one word from the stack state,
1238 * which is the end effect of all the magic code.
1240 * Called with the instruction line, starting after the first ':'.
1244 bb_fixup_switch_to(char *p)
1247 p += strspn(p, " \t"); /* start of instruction */
1248 if (strncmp(p, "jmp", 3))
1250 p += strcspn(p, " \t"); /* end of instruction */
1251 p += strspn(p, " \t"); /* start of address */
1252 p += strcspn(p, " \t"); /* end of address */
1253 p += strspn(p, " \t"); /* start of comment */
1254 if (strcmp(p, "<__switch_to>") == 0)
1255 strcpy(p1, "popfl");
1259 bb_asmlinkage_arch(void)
1261 return strcmp(bb_func_name, "ret_from_exception") == 0 ||
1262 strcmp(bb_func_name, "syscall_trace_entry") == 0;
1265 #endif /* CONFIG_X86_64 */
1268 /*============================================================================*/
1270 /* Common code and data. */
1272 /*============================================================================*/
1275 /* Tracking registers by decoding the instructions is quite a bit harder than
1276 * doing the same tracking using compiler generated information. Register
1277 * contents can remain in the same register, they can be copied to other
1278 * registers, they can be stored on stack or they can be modified/overwritten.
1279 * At any one time, there are 0 or more copies of the original value that was
1280 * supplied in each register on input to the current function. If a register
1281 * exists in multiple places, one copy of that register is the master version,
1282 * the others are temporary copies which may or may not be destroyed before the
1283 * end of the function.
1285 * The compiler knows which copy of a register is the master and which are
1286 * temporary copies, which makes it relatively easy to track register contents
1287 * as they are saved and restored. Without that compiler based knowledge, this
1288 * code has to track _every_ possible copy of each register, simply because we
1289 * do not know which is the master copy and which are temporary copies which
1290 * may be destroyed later.
1292 * It gets worse: registers that contain parameters can be copied to other
1293 * registers which are then saved on stack in a lower level function. Also the
1294 * stack pointer may be held in multiple registers (typically RSP and RBP)
1295 * which contain different offsets from the base of the stack on entry to this
1296 * function. All of which means that we have to track _all_ register
1297 * movements, or at least as much as possible.
1299 * Start with the basic block that contains the start of the function, by
1300 * definition all registers contain their initial value. Track each
1301 * instruction's effect on register contents, this includes reading from a
1302 * parameter register before any write to that register, IOW the register
1303 * really does contain a parameter. The register state is represented by a
1304 * dynamically sized array with each entry containing :-
1307 * Location it is copied to (another register or stack + offset)
1309 * Besides the register tracking array, we track which parameter registers are
1310 * read before being written, to determine how many parameters are passed in
1311 * registers. We also track which registers contain stack pointers, including
1312 * their offset from the original stack pointer on entry to the function.
1314 * At each exit from the current basic block (via JMP instruction or drop
1315 * through), the register state is cloned to form the state on input to the
1316 * target basic block and the target is marked for processing using this state.
1317 * When there are multiple ways to enter a basic block (e.g. several JMP
1318 * instructions referencing the same target) then there will be multiple sets
1319 * of register state to form the "input" for that basic block, there is no
1320 * guarantee that all paths to that block will have the same register state.
1322 * As each target block is processed, all the known sets of register state are
1323 * merged to form a suitable subset of the state which agrees with all the
1324 * inputs. The most common case is where one path to this block copies a
1325 * register to another register but another path does not, therefore the copy
1326 * is only a temporary and should not be propagated into this block.
1328 * If the target block already has an input state from the current transfer
1329 * point and the new input state is identical to the previous input state then
1330 * we have reached a steady state for the arc from the current location to the
1331 * target block. Therefore there is no need to process the target block again.
1333 * The steps of "process a block, create state for target block(s), pick a new
1334 * target block, merge state for target block, process target block" will
1335 * continue until all the state changes have propagated all the way down the
1336 * basic block tree, including round any cycles in the tree. The merge step
1337 * only deletes tracking entries from the input state(s), it never adds a
1338 * tracking entry. Therefore the overall algorithm is guaranteed to converge
1339 * to a steady state, the worst possible case is that every tracking entry into
1340 * a block is deleted, which will result in an empty output state.
1342 * As each instruction is decoded, it is checked to see if this is the point at
1343 * which execution left this function. This can be a call to another function
1344 * (actually the return address to this function) or is the instruction which
1345 * was about to be executed when an interrupt occurred (including an oops).
1346 * Save the register state at this point.
1348 * We always know what the registers contain when execution left this function.
1349 * For an interrupt, the registers are in struct pt_regs. For a call to
1350 * another function, we have already deduced the register state on entry to the
1351 * other function by unwinding to the start of that function. Given the
1352 * register state on exit from this function plus the known register contents
1353 * on entry to the next function, we can determine the stack pointer value on
1354 * input to this function. That in turn lets us calculate the address of input
1355 * registers that have been stored on stack, giving us the input parameters.
1356 * Finally the stack pointer gives us the return address which is the exit
1357 * point from the calling function, repeat the unwind process on that function.
1359 * The data that tracks which registers contain input parameters is function
1360 * global, not local to any basic block. To determine which input registers
1361 * contain parameters, we have to decode the entire function. Otherwise an
1362 * exit early in the function might not have read any parameters yet.
1365 /* Record memory contents in terms of the values that were passed to this
1366 * function, IOW track which memory locations contain an input value. A memory
1367 * location's contents can be undefined, it can contain an input register value
1368 * or it can contain an offset from the original stack pointer.
1370 * This structure is used to record register contents that have been stored in
1371 * memory. Location (BBRG_OSP + 'offset_address') contains the input value
1372 * from register 'value'. When 'value' is BBRG_OSP then offset_value contains
1373 * the offset from the original stack pointer that was stored in this memory
1374 * location. When 'value' is not BBRG_OSP then the memory location contains
1375 * the original contents of an input register and offset_value is ignored.
1377 * An input register 'value' can be stored in more than one register and/or in
1378 * more than one memory location.
/* One tracked stack slot: location (BBRG_OSP + offset_address) holds the
 * input value 'value'.  NOTE(review): this listing elides lines here (the
 * braces and the offset_value member are not visible in this chunk).
 */
1381 struct bb_memory_contains
1383 short offset_address;
/* 8-bit bitfield; assumes every bb_reg_code value fits in 8 bits -- TODO confirm */
1384 enum bb_reg_code value: 8;
1388 /* Track the register state in each basic block. */
1392 /* Indexed by register value 'reg - BBRG_RAX' */
1393 struct bb_reg_contains contains[KDB_INT_REGISTERS];
1396 /* dynamic size for memory locations, see mem_count */
/* memory[0] is the pre-C99 zero-length-array idiom for a trailing
 * variable-length region; C99+ would spell this memory[].
 */
1397 struct bb_memory_contains memory[0];
/* Current and exit-point register state, plus sizing/parameter counters. */
1400 static struct bb_reg_state *bb_reg_state, *bb_exit_state;
1401 static int bb_reg_state_max, bb_reg_params, bb_memory_params;
1409 /* Contains the actual hex value of a register, plus a valid bit. Indexed by
1410 * register value 'reg - BBRG_RAX'
1412 static struct bb_actual bb_actual[KDB_INT_REGISTERS];
/* Bounds of the function currently being analysed. */
1414 static bfd_vma bb_func_start, bb_func_end;
/* Addresses of well-known kernel entry/exit points, resolved elsewhere and
 * special-cased by the unwinder.
 */
1415 static bfd_vma bb_common_interrupt, bb_error_entry, bb_ret_from_intr,
1416 bb_thread_return, bb_sync_regs, bb_save_v86_state,
1417 bb__sched_text_start, bb__sched_text_end,
1418 bb_save_args, bb_save_rest, bb_save_paranoid;
1420 /* Record jmp instructions, both conditional and unconditional. These form the
1421 * arcs between the basic blocks. This is also used to record the state when
1422 * one block drops through into the next.
1424 * A bb can have multiple associated bb_jmp entries, one for each jcc
1425 * instruction plus at most one bb_jmp for the drop through case. If a bb
1426 * drops through to the next bb then the drop through bb_jmp entry will be the
1427 * last entry in the set of bb_jmp's that are associated with the bb. This is
1428 * enforced by the fact that jcc entries are added during the disassembly phase
1429 * of pass 1, the drop through entries are added near the end of pass 1.
1431 * At address 'from' in this block, we have a jump to address 'to'. The
1432 * register state at 'from' is copied to the target block.
/* bb_jmp members (partial view): cloned register state for the arc, and a
 * flag marking a fall-through arc rather than an explicit jmp/jcc.
 */
1439 struct bb_reg_state *state;
1440 unsigned int drop_through: 1;
1446 /* The end address of a basic block is sloppy. It can be the first
1447 * byte of the last instruction in the block or it can be the last byte
/* bb members (partial view): 'changed' marks blocks needing reprocessing,
 * 'drop_through' marks blocks that fall into their successor.
 */
1451 unsigned int changed: 1;
1452 unsigned int drop_through: 1;
/* Sorted array of basic blocks and the unsorted array of arcs between them. */
1455 static struct bb **bb_list, *bb_curr;
1456 static int bb_max, bb_count;
1458 static struct bb_jmp *bb_jmp_list;
1459 static int bb_jmp_max, bb_jmp_count;
1461 /* Add a new bb entry to the list. This does an insert sort. */
/* Allocate a zeroed struct bb and insert-sort it into bb_list, keyed by
 * 'order' (a start or end address).  Grows bb_list when full by allocating
 * a larger array and copying (the growth increment line is elided here).
 */
1464 bb_new(bfd_vma order)
1470 if (bb_count == bb_max) {
1471 struct bb **bb_list_new;
1473 bb_list_new = debug_kmalloc(bb_max*sizeof(*bb_list_new),
1476 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
1480 memcpy(bb_list_new, bb_list, bb_count*sizeof(*bb_list));
1481 debug_kfree(bb_list);
1482 bb_list = bb_list_new;
1484 bb = debug_kmalloc(sizeof(*bb), GFP_ATOMIC);
1486 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
/* zeroed bb: start/end 0 means "not yet known" in the pass 1 fixups */
1490 memset(bb, 0, sizeof(*bb));
/* find first entry whose start or end sorts after 'order' ... */
1491 for (i = 0; i < bb_count; ++i) {
1493 if ((p->start && p->start > order) ||
1494 (p->end && p->end > order))
/* ... then shift the tail up one slot to make room */
1497 for (j = bb_count-1; j >= i; --j)
1498 bb_list[j+1] = bb_list[j];
1504 /* Add a new bb_jmp entry to the list. This list is not sorted. */
/* Append a new arc (from -> to) to the unsorted bb_jmp_list, growing the
 * array when full.  'drop_through' distinguishes fall-through arcs from
 * explicit jumps.  The cloned register state is attached later.
 */
1506 static struct bb_jmp *
1507 bb_jmp_new(bfd_vma from, bfd_vma to, unsigned int drop_through)
1509 struct bb_jmp *bb_jmp;
1512 if (bb_jmp_count == bb_jmp_max) {
1513 struct bb_jmp *bb_jmp_list_new;
1516 debug_kmalloc(bb_jmp_max*sizeof(*bb_jmp_list_new),
1518 if (!bb_jmp_list_new) {
1519 kdb_printf("\n\n%s: out of debug_kmalloc\n",
1524 memcpy(bb_jmp_list_new, bb_jmp_list,
1525 bb_jmp_count*sizeof(*bb_jmp_list));
1526 debug_kfree(bb_jmp_list);
1527 bb_jmp_list = bb_jmp_list_new;
1529 bb_jmp = bb_jmp_list + bb_jmp_count++;
1530 bb_jmp->from = from;
1532 bb_jmp->drop_through = drop_through;
/* state is filled in when the arc is traversed in pass 2 */
1533 bb_jmp->state = NULL;
/* (Function header elided in this listing.)  Removes entry i from bb_list by
 * shifting the tail down one slot and NULL-ing the vacated last slot.
 * NOTE(review): src and dst of this memcpy overlap within the same array;
 * memmove would be the strictly-correct choice -- confirm kernel memcpy
 * semantics make the forward copy safe here.
 */
1540 struct bb *bb = bb_list[i];
1541 memcpy(bb_list+i, bb_list+i+1, (bb_count-i-1)*sizeof(*bb_list));
1542 bb_list[--bb_count] = NULL;
/* Record a basic block boundary.  'start'/'end' may each be 0 meaning
 * "unknown"; duplicates are returned rather than re-added.
 */
1547 bb_add(bfd_vma start, bfd_vma end)
1551 /* Ignore basic blocks whose start address is outside the current
1552 * function. These occur for call instructions and for tail recursion.
1555 (start < bb_func_start || start >= bb_func_end))
/* reuse an existing entry that already records this start or end */
1557 for (i = 0; i < bb_count; ++i) {
1559 if ((start && bb->start == start) ||
1560 (end && bb->end == end))
/* otherwise insert a new entry, sorted by whichever address is known */
1563 bb = bb_new(start ? start : end);
/* Record an arc from -> to, deduplicating against existing arcs with the
 * same endpoints and drop_through flag before allocating a new one.
 */
1571 static struct bb_jmp *
1572 bb_jmp_add(bfd_vma from, bfd_vma to, unsigned int drop_through)
1575 struct bb_jmp *bb_jmp;
1576 for (i = 0, bb_jmp = bb_jmp_list; i < bb_jmp_count; ++i, ++bb_jmp) {
1577 if (bb_jmp->from == from &&
1579 bb_jmp->drop_through == drop_through)
1582 bb_jmp = bb_jmp_new(from, to, drop_through);
/* Address of the instruction being decoded, and the recorded exit address. */
1586 static unsigned long bb_curr_addr, bb_exit_addr;
1587 static char bb_buffer[256]; /* A bit too big to go on stack */
1589 /* Computed jmp uses 'jmp *addr(,%reg,[48])' where 'addr' is the start of a
1590 * table of addresses that point into the current function. Run the table and
1591 * generate bb starts for each target address plus a bb_jmp from this address
1592 * to the target address.
1594 * Only called for 'jmp' instructions, with the pointer starting at 'jmp'.
/* Handle 'jmp *table(,%reg,scale)': walk the jump table at 'table', adding a
 * bb start plus an arc from the current address for every word that points
 * back into the current function.  Stops at the first address outside the
 * function or on a read failure.
 */
1598 bb_pass1_computed_jmp(char *p)
1600 unsigned long table, scale;
1603 p += strcspn(p, " \t"); /* end of instruction */
1604 p += strspn(p, " \t"); /* start of address */
1607 table = simple_strtoul(p, &p, 0);
/* only the 'disp(,%reg,scale)' form is a computed jmp through a table */
1608 if (strncmp(p, "(,%", 3) != 0)
1611 p += strcspn(p, ","); /* end of reg */
1614 scale = simple_strtoul(p, &p, 0);
/* table entries must be exactly one machine word apart */
1615 if (scale != KDB_WORD_SIZE || strcmp(p, ")"))
1617 while (!bb_giveup) {
1618 if (kdb_getword(&addr, table, sizeof(addr)))
1620 if (addr < bb_func_start || addr >= bb_func_end)
1622 bb = bb_add(addr, 0);
1624 bb_jmp_add(bb_curr_addr, addr, 0);
1625 table += KDB_WORD_SIZE;
1629 /* Pass 1, identify the start and end of each basic block */
/* Disassembler fprintf callback for pass 1.  Accumulates output fragments in
 * bb_buffer until a complete line (ending '\n') is seen, then scans the
 * instruction text for block-ending opcodes and records bb boundaries.
 */
1632 bb_dis_pass1(PTR file, const char *fmt, ...)
1634 int l = strlen(bb_buffer);
1638 vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap);
1640 if ((p = strchr(bb_buffer, '\n'))) {
1642 /* ret[q], iret[q], sysexit, sysret, ud2a or jmp[q] end a
1643 * block. As does a call to a function marked noret.
1646 p += strcspn(p, ":");
1648 bb_fixup_switch_to(p);
1649 p += strspn(p, " \t"); /* start of instruction */
1650 if (strncmp(p, "ret", 3) == 0 ||
1651 strncmp(p, "iret", 4) == 0 ||
1652 strncmp(p, "sysexit", 7) == 0 ||
1653 strncmp(p, "sysret", 6) == 0 ||
1654 strncmp(p, "ud2a", 4) == 0 ||
1655 strncmp(p, "jmp", 3) == 0) {
/* jmp may be a computed jump through a table; expand it */
1656 if (strncmp(p, "jmp", 3) == 0)
1657 bb_pass1_computed_jmp(p);
1658 bb_add(0, bb_curr_addr);
1660 if (strncmp(p, "call", 4) == 0) {
1661 strsep(&p, " \t"); /* end of opcode */
1663 p += strspn(p, " \t"); /* operand(s) */
/* extract the '<symbol>' name from the call target */
1664 if (p && strchr(p, '<')) {
1665 p = strchr(p, '<') + 1;
/* NOTE(review): assumes a matching '>' always follows '<' in
 * disassembler output -- strchr returning NULL here would oops.
 */
1666 *strchr(p, '>') = '\0';
1668 bb_add(0, bb_curr_addr);
/* line consumed; reset the accumulator for the next instruction */
1672 bb_buffer[0] = '\0';
/* Disassembler print_address callback for pass 1.  Prints the target address
 * with its nearest symbol, then records the target as a bb start and adds an
 * arc from the current instruction.
 */
1678 bb_printaddr_pass1(bfd_vma addr, disassemble_info *dip)
1680 kdb_symtab_t symtab;
1681 unsigned int offset;
1683 /* disasm only calls the printaddr routine for the target of jmp, loop
1684 * or call instructions, i.e. the start of a basic block. call is
1685 * ignored by bb_add because the target address is outside the current
1688 dip->fprintf_func(dip->stream, "0x%lx", addr);
1689 kdbnearsym(addr, &symtab);
1690 if (symtab.sym_name) {
1691 dip->fprintf_func(dip->stream, " <%s", symtab.sym_name);
1692 if ((offset = addr - symtab.sym_start))
1693 dip->fprintf_func(dip->stream, "+0x%x", offset);
1694 dip->fprintf_func(dip->stream, ">");
1696 bb = bb_add(addr, 0);
1698 bb_jmp_add(bb_curr_addr, addr, 0);
/* (Function header elided in this listing; this is the body of pass 1.)
 * Disassemble the whole function to find basic-block starts and ends, then
 * fix up the bb_list so every surviving entry has both a start and an end.
 */
1707 struct bb_jmp *bb_jmp;
/* NOTE(review): bitwise '|' rather than logical '||' between the two debug
 * flag tests; harmless for flag values but worth confirming it is deliberate.
 */
1709 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
1710 kdb_printf("%s: func_name %s func_start " kdb_bfd_vma_fmt0
1711 " func_end " kdb_bfd_vma_fmt0 "\n",
/* route disassembler output through the pass 1 callbacks above */
1716 kdb_di.fprintf_func = bb_dis_pass1;
1717 kdb_di.print_address_func = bb_printaddr_pass1;
1719 bb_add(bb_func_start, 0);
/* verify the whole function is readable before disassembling it */
1720 for (bb_curr_addr = bb_func_start;
1721 bb_curr_addr < bb_func_end;
1724 if (kdb_getarea(c, bb_curr_addr)) {
1725 kdb_printf("%s: unreadable function code at ",
1727 kdb_symbol_print(bb_curr_addr, NULL, KDB_SP_DEFAULT);
1728 kdb_printf(", giving up\n");
/* disassemble every instruction; callbacks record bb boundaries */
1733 for (addr = bb_func_start; addr < bb_func_end; ) {
1734 bb_curr_addr = addr;
1735 addr += kdba_id_printinsn(addr, &kdb_di);
1736 kdb_di.fprintf_func(NULL, "\n");
1741 /* Special case: a block consisting of a single instruction which is
1742 * both the target of a jmp and is also an ending instruction, so we
1743 * add two blocks using the same address, one as a start and one as an
1744 * end, in no guaranteed order. The end must be ordered after the
1747 for (i = 0; i < bb_count-1; ++i) {
1748 struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1];
1749 if (bb1->end && bb1->end == bb2->start) {
1751 bb_list[i+1] = bb_list[i];
1756 /* Some bb have a start address, some have an end address. Collapse
1757 * them into entries that have both start and end addresses. The first
1758 * entry is guaranteed to have a start address.
1760 for (i = 0; i < bb_count-1; ++i) {
1761 struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1];
/* fall-through boundary: close bb1 just before bb2 and add the arc */
1765 bb1->end = bb2->start - 1;
1766 bb1->drop_through = 1;
1767 bb_jmp_add(bb1->end, bb2->start, 1);
1769 bb1->end = bb2->end;
/* last block extends to the end of the function */
1773 bb = bb_list[bb_count-1];
1775 bb->end = bb_func_end - 1;
1777 /* It would be nice to check that all bb have a valid start and end
1778 * address but there is just too much garbage code in the kernel to do
1779 * that check. Aligned functions in assembler code mean that there is
1780 * space between the end of one function and the start of the next and
1781 * that space contains previous code from the assembler's buffers. It
1782 * looks like dead code with nothing that branches to it, so no start
1783 * address. do_sys_vm86() ends with 'jmp resume_userspace' which the C
1784 * compiler does not know about so gcc appends the normal exit code,
1785 * again nothing branches to this dangling code.
1787 * The best we can do is delete bb entries with no start address.
1789 for (i = 0; i < bb_count; ++i) {
1790 struct bb *bb = bb_list[i];
/* sanity pass: any remaining entry without both addresses is an error */
1794 for (i = 0; i < bb_count; ++i) {
1795 struct bb *bb = bb_list[i];
1797 kdb_printf("%s: incomplete bb state\n", __FUNCTION__);
/* debug dump of the final block list and arc list */
1807 kdb_printf("%s: end\n", __FUNCTION__);
1808 for (i = 0; i < bb_count; ++i) {
1810 kdb_printf(" bb[%d] start "
1812 " end " kdb_bfd_vma_fmt0
1814 i, bb->start, bb->end, bb->drop_through);
1817 for (i = 0; i < bb_jmp_count; ++i) {
1818 bb_jmp = bb_jmp_list + i;
1819 kdb_printf(" bb_jmp[%d] from "
1821 " to " kdb_bfd_vma_fmt0
1822 " drop_through %d\n",
1823 i, bb_jmp->from, bb_jmp->to, bb_jmp->drop_through);
1827 /* Pass 2, record register changes in each basic block */
1829 /* For each opcode that we care about, indicate how it uses its operands. Most
1830 * opcodes can be handled generically because they completely specify their
1831 * operands in the instruction, however many opcodes have side effects such as
1832 * reading or writing rax or updating rsp. Instructions that change registers
1833 * that are not listed in the operands must be handled as special cases. In
1834 * addition, instructions that copy registers while preserving their contents
1835 * (push, pop, mov) or change the contents in a well defined way (add with an
1836 * immediate, lea) must be handled as special cases in order to track the
1837 * register contents.
1839 * The tables below only list opcodes that are actually used in the Linux
1840 * kernel, so they omit most of the floating point and all of the SSE type
1841 * instructions. The operand usage entries only cater for accesses to memory
1842 * and to the integer registers, accesses to floating point registers and flags
1843 * are not relevant for kernel backtraces.
/* Operand-usage codes.  The first 15 values are a bitmask (rs=1, rd=2, ws=4,
 * wd=8); opcode-specific values follow (elided in this listing).
 */
1846 enum bb_operand_usage {
1848 /* generic entries. because xchg can do any combinations of
1849 * read src, write src, read dst and write dst we need to
1850 * define all 16 possibilities. These are ordered by rs = 1,
1851 * rd = 2, ws = 4, wd = 8, bb_usage_x*() functions rely on this
1854 BBOU_RS = 1, /* read src */ /* 1 */
1855 BBOU_RD, /* read dst */ /* 2 */
1857 BBOU_WS, /* write src */ /* 4 */
1860 BBOU_RSRDWS, /* 7 */
1861 BBOU_WD, /* write dst */ /* 8 */
1864 BBOU_RSRDWD, /* 11 */
1866 BBOU_RSWSWD, /* 13 */
1867 BBOU_RDWSWD, /* 14 */
1868 BBOU_RSRDWSWD, /* 15 */
1869 /* opcode specific entries */
/* Table entry tying an opcode prefix string to its usage code (the length
 * member is elided in this listing).
 */
1915 struct bb_opcode_usage {
1917 enum bb_operand_usage usage;
1921 /* This table is sorted in alphabetical order of opcode, except that the
1922 * trailing '"' is treated as a high value. For example, 'in' sorts after
1923 * 'inc', 'bt' after 'btc'. This modified sort order ensures that shorter
1924 * opcodes come after long ones. A normal sort would put 'in' first, so 'in'
1925 * would match both 'inc' and 'in'. When adding any new entries to this table,
1926 * be careful to put shorter entries last in their group.
1928 * To automatically sort the table (in vi)
1929 * Mark the first and last opcode line with 'a and 'b
1931 * !'bsed -e 's/"}/}}/' | LANG=C sort -t '"' -k2 | sed -e 's/}}/"}/'
1933 * If a new instruction has to be added, first consider if it affects registers
1934 * other than those listed in the operands. Also consider if you want to track
1935 * the results of issuing the instruction, IOW can you extract useful
1936 * information by looking in detail at the modified registers or memory. If
1937 * either test is true then you need a special case to handle the instruction.
1939 * The generic entries at the start of enum bb_operand_usage all have one thing
1940 * in common, if a register or memory location is updated then that location
1941 * becomes undefined, i.e. we lose track of anything that was previously saved
1942 * in that location. So only use a generic BBOU_* value when the result of the
1943 * instruction cannot be calculated exactly _and_ when all the affected
1944 * registers are listed in the operands.
1948 * 'call' does not generate a known result, but as a side effect of call,
1949 * several scratch registers become undefined, so it needs a special BBOU_CALL
1952 * 'adc' generates a variable result, it depends on the carry flag, so 'adc'
1953 * gets a generic entry. 'add' can generate an exact result (add with
1954 * immediate on a register that points to the stack) or it can generate an
1955 * unknown result (add a variable, or add immediate to a register that does not
1956 * contain a stack pointer) so 'add' has its own BBOU_ADD entry.
/* Opcode -> operand-usage table.  Sorted per the comment above: alphabetical
 * with the trailing '"' high, so longer opcodes ('inc') precede their
 * prefixes ('in').  Each entry is {prefix length, usage, opcode prefix}.
 */
1959 static const struct bb_opcode_usage
1960 bb_opcode_usage_all[] = {
1961 {3, BBOU_RSRDWD, "adc"},
1962 {3, BBOU_ADD, "add"},
1963 {3, BBOU_AND, "and"},
1964 {3, BBOU_RSWD, "bsf"},
1965 {3, BBOU_RSWD, "bsr"},
1966 {5, BBOU_RSWS, "bswap"},
1967 {3, BBOU_RSRDWD, "btc"},
1968 {3, BBOU_RSRDWD, "btr"},
1969 {3, BBOU_RSRDWD, "bts"},
1970 {2, BBOU_RSRD, "bt"},
1971 {4, BBOU_CALL, "call"},
1972 {4, BBOU_CBW, "cbtw"}, /* Intel cbw */
1973 {3, BBOU_NOP, "clc"},
1974 {3, BBOU_NOP, "cld"},
1975 {7, BBOU_RS, "clflush"},
1976 {4, BBOU_NOP, "clgi"},
1977 {3, BBOU_NOP, "cli"},
1978 {4, BBOU_CWD, "cltd"}, /* Intel cdq */
1979 {4, BBOU_CBW, "cltq"}, /* Intel cdqe */
1980 {4, BBOU_NOP, "clts"},
1981 {4, BBOU_CMOV, "cmov"},
1982 {9, BBOU_CMPXCHGD,"cmpxchg16"},
1983 {8, BBOU_CMPXCHGD,"cmpxchg8"},
1984 {7, BBOU_CMPXCHG, "cmpxchg"},
1985 {3, BBOU_RSRD, "cmp"},
1986 {5, BBOU_CPUID, "cpuid"},
1987 {4, BBOU_CWD, "cqto"}, /* Intel cqo */
1988 {4, BBOU_CWD, "cwtd"}, /* Intel cwd */
1989 {4, BBOU_CBW, "cwtl"}, /* Intel cwde */
1990 {4, BBOU_NOP, "data"}, /* alternative ASM_NOP<n> generates data16 on x86_64 */
1991 {3, BBOU_RSWS, "dec"},
1992 {3, BBOU_DIV, "div"},
1993 {5, BBOU_RS, "fdivl"},
1994 {5, BBOU_NOP, "finit"},
1995 {6, BBOU_RS, "fistpl"},
1996 {4, BBOU_RS, "fldl"},
1997 {4, BBOU_RS, "fmul"},
1998 {6, BBOU_NOP, "fnclex"},
1999 {6, BBOU_NOP, "fninit"},
2000 {6, BBOU_RS, "fnsave"},
2001 {7, BBOU_NOP, "fnsetpm"},
2002 {6, BBOU_RS, "frstor"},
2003 {5, BBOU_WS, "fstsw"},
2004 {5, BBOU_RS, "fsubp"},
2005 {5, BBOU_NOP, "fwait"},
2006 {7, BBOU_RS, "fxrstor"},
2007 {6, BBOU_RS, "fxsave"},
2008 {3, BBOU_NOP, "hlt"},
2009 {4, BBOU_IDIV, "idiv"},
2010 {4, BBOU_IMUL, "imul"},
2011 {3, BBOU_RSWS, "inc"},
2012 {3, BBOU_NOP, "int"},
2013 {7, BBOU_RSRD, "invlpga"},
2014 {6, BBOU_RS, "invlpg"},
2015 {2, BBOU_RSWD, "in"},
2016 {4, BBOU_IRET, "iret"},
2018 {4, BBOU_LAHF, "lahf"},
2019 {3, BBOU_RSWD, "lar"},
2020 {5, BBOU_RS, "lcall"},
2021 {5, BBOU_LEAVE, "leave"},
2022 {3, BBOU_LEA, "lea"},
2023 {6, BBOU_NOP, "lfence"},
2024 {4, BBOU_RS, "lgdt"},
2025 {4, BBOU_RS, "lidt"},
2026 {4, BBOU_RS, "ljmp"},
2027 {4, BBOU_RS, "lldt"},
2028 {4, BBOU_RS, "lmsw"},
2029 {4, BBOU_LODS, "lods"},
2030 {4, BBOU_LOOP, "loop"},
2031 {4, BBOU_NOP, "lret"},
2032 {3, BBOU_RSWD, "lsl"},
2033 {3, BBOU_LSS, "lss"},
2034 {3, BBOU_RS, "ltr"},
2035 {6, BBOU_NOP, "mfence"},
2036 {7, BBOU_MONITOR, "monitor"},
2037 {4, BBOU_MOVS, "movs"},
2038 {3, BBOU_MOV, "mov"},
2039 {3, BBOU_MUL, "mul"},
2040 {5, BBOU_MWAIT, "mwait"},
2041 {3, BBOU_RSWS, "neg"},
2042 {3, BBOU_NOP, "nop"},
2043 {3, BBOU_RSWS, "not"},
2044 {2, BBOU_RSRDWD, "or"},
2045 {4, BBOU_OUTS, "outs"},
2046 {3, BBOU_RSRD, "out"},
2047 {5, BBOU_NOP, "pause"},
2048 {4, BBOU_POPF, "popf"},
2049 {3, BBOU_POP, "pop"},
2050 {8, BBOU_RS, "prefetch"},
2051 {5, BBOU_PUSHF, "pushf"},
2052 {4, BBOU_PUSH, "push"},
2053 {3, BBOU_RSRDWD, "rcl"},
2054 {3, BBOU_RSRDWD, "rcr"},
2055 {5, BBOU_RDMSR, "rdmsr"},
2056 {5, BBOU_RDMSR, "rdpmc"}, /* same side effects as rdmsr */
2057 {5, BBOU_RDTSC, "rdtsc"},
2058 {3, BBOU_RET, "ret"},
2059 {3, BBOU_RSRDWD, "rol"},
2060 {3, BBOU_RSRDWD, "ror"},
2061 {4, BBOU_SAHF, "sahf"},
2062 {3, BBOU_RSRDWD, "sar"},
2063 {3, BBOU_RSRDWD, "sbb"},
2064 {4, BBOU_SCAS, "scas"},
2065 {3, BBOU_WS, "set"},
2066 {6, BBOU_NOP, "sfence"},
2067 {4, BBOU_WS, "sgdt"},
2068 {3, BBOU_RSRDWD, "shl"},
2069 {3, BBOU_RSRDWD, "shr"},
2070 {4, BBOU_WS, "sidt"},
2071 {4, BBOU_WS, "sldt"},
2072 {3, BBOU_NOP, "stc"},
2073 {3, BBOU_NOP, "std"},
2074 {4, BBOU_NOP, "stgi"},
2075 {3, BBOU_NOP, "sti"},
2076 {4, BBOU_SCAS, "stos"},
2077 {4, BBOU_WS, "strl"},
2078 {3, BBOU_WS, "str"},
2079 {3, BBOU_SUB, "sub"},
2080 {6, BBOU_NOP, "swapgs"},
2081 {7, BBOU_SYSEXIT, "sysexit"},
2082 {6, BBOU_SYSRET, "sysret"},
2083 {4, BBOU_NOP, "test"},
2084 {4, BBOU_NOP, "ud2a"},
2085 {7, BBOU_RS, "vmclear"},
2086 {8, BBOU_NOP, "vmlaunch"},
2087 {6, BBOU_RS, "vmload"},
2088 {7, BBOU_RS, "vmptrld"},
2089 {6, BBOU_WD, "vmread"}, /* vmread src is an encoding, not a register */
2090 {8, BBOU_NOP, "vmresume"},
2091 {5, BBOU_RS, "vmrun"},
2092 {6, BBOU_RS, "vmsave"},
2093 {7, BBOU_WD, "vmwrite"}, /* vmwrite src is an encoding, not a register */
2094 {3, BBOU_NOP, "vmxoff"},
2095 {6, BBOU_NOP, "wbinvd"},
2096 {5, BBOU_WRMSR, "wrmsr"},
2097 {4, BBOU_XADD, "xadd"},
2098 {4, BBOU_XCHG, "xchg"},
2099 {3, BBOU_XOR, "xor"},
2100 {4, BBOU_NOP, "xrstor"},
2101 {4, BBOU_NOP, "xsave"},
2102 {10, BBOU_WS, "xstore-rng"},
2105 /* To speed up searching, index bb_opcode_usage_all by the first letter of each
/* Per-letter index: pointer to the first entry for that letter (the size
 * member is elided in this listing).  26 slots, one per 'a'..'z'.
 */
2109 const struct bb_opcode_usage *opcode;
2111 } bb_opcode_usage[26];
/* Decoded operand (partial view: the string members such as base/index/
 * segment and the disp/scale fields are elided in this listing).
 */
2119 enum bb_reg_code base_rc; /* UNDEFINED or RAX through R15 */
2120 enum bb_reg_code index_rc; /* UNDEFINED or RAX through R15 */
2121 unsigned int present :1;
2122 unsigned int disp_present :1;
2123 unsigned int indirect :1; /* must be combined with reg or memory */
2124 unsigned int immediate :1; /* exactly one of these 3 must be set */
2125 unsigned int reg :1;
2126 unsigned int memory :1;
/* Fully decoded instruction: matched table entry plus up to three operands. */
2132 const struct bb_opcode_usage *match;
2133 struct bb_operand src;
2134 struct bb_operand dst;
2135 struct bb_operand dst2;
2138 static struct bb_decode bb_decode;
/* Map a register name (with leading '%') to its bb_reg_code via binary
 * search of the sorted bb_reg_code_map table; BBRG_UNDEFINED if not found.
 */
2140 static enum bb_reg_code
2141 bb_reg_map(const char *reg)
2144 const struct bb_reg_code_map *p;
2146 hi = ARRAY_SIZE(bb_reg_code_map) - 1;
2148 int mid = (hi + lo) / 2;
2149 p = bb_reg_code_map + mid;
/* reg+1 skips the '%' prefix of the assembler register name */
2150 c = strcmp(p->name, reg+1);
2158 return BBRG_UNDEFINED;
/* Parse one AT&T-syntax operand string into *operand: segment prefix,
 * immediate ('$'), displacement, '(base,index,scale)' memory form, or
 * register.  Validates that exactly one of immediate/reg/memory is set,
 * then resolves base/index names to register codes.
 */
2162 bb_parse_operand(char *str, struct bb_operand *operand)
2166 operand->present = 1;
2167 /* extract any segment prefix */
2168 if (p[0] == '%' && p[1] && p[2] == 's' && p[3] == ':') {
2169 operand->memory = 1;
2170 operand->segment = p;
2174 /* extract displacement, base, index, scale */
2176 /* jmp/call *disp(%reg), *%reg or *0xnnn */
2177 operand->indirect = 1;
/* '$imm' immediate operand */
2185 operand->immediate = 1;
2186 operand->disp_present = 1;
2187 operand->disp = simple_strtoul(p+1, &p, 0);
/* bare number: a displacement, with sign applied separately */
2188 } else if (isdigit(*p)) {
2189 operand->memory = 1;
2190 operand->disp_present = 1;
2191 operand->disp = simple_strtoul(p, &p, 0) * sign;
2196 } else if (*p == '(') {
2197 operand->memory = 1;
2198 operand->base = ++p;
2199 p += strcspn(p, ",)");
/* '(,index,scale)' form has an empty base */
2200 if (p == operand->base)
2201 operand->base = NULL;
2204 operand->index = ++p;
2205 p += strcspn(p, ",)");
2206 if (p == operand->index)
2207 operand->index = NULL;
2211 operand->scale = simple_strtoul(p+1, &p, 0);
2215 kdb_printf("%s: unexpected token '%c' after disp '%s'\n",
2216 __FUNCTION__, *p, str);
/* exactly one operand class must have been decoded, and indirect
 * cannot be combined with immediate
 */
2219 if ((operand->immediate + operand->reg + operand->memory != 1) ||
2220 (operand->indirect && operand->immediate)) {
2221 kdb_printf("%s: incorrect decode '%s' N %d I %d R %d M %d\n",
2223 operand->indirect, operand->immediate, operand->reg,
2228 operand->base_rc = bb_reg_map(operand->base);
2230 operand->index_rc = bb_reg_map(operand->index);
/* Debug-print a decoded operand: class flags ('N' indirect, I/R/M class),
 * then the operand in roughly AT&T form, then the resolved register codes.
 */
2234 bb_print_operand(const char *type, const struct bb_operand *operand)
2236 if (!operand->present)
2238 kdb_printf(" %s %c%c: ",
2240 operand->indirect ? 'N' : ' ',
2241 operand->immediate ? 'I' :
2242 operand->reg ? 'R' :
2243 operand->memory ? 'M' :
2246 if (operand->segment)
2247 kdb_printf("%s:", operand->segment);
2248 if (operand->immediate) {
2249 kdb_printf("$0x%lx", operand->disp);
2250 } else if (operand->reg) {
2251 if (operand->indirect)
2253 kdb_printf("%s", operand->base);
2254 } else if (operand->memory) {
2255 if (operand->indirect && (operand->base || operand->index))
2257 if (operand->disp_present) {
2258 kdb_printf("0x%lx", operand->disp);
2260 if (operand->base || operand->index || operand->scale) {
2263 kdb_printf("%s", operand->base);
2264 if (operand->index || operand->scale)
2267 kdb_printf("%s", operand->index);
2269 kdb_printf(",%d", operand->scale);
2273 if (operand->base_rc)
2274 kdb_printf(" base_rc %d (%s)",
2275 operand->base_rc, bbrg_name[operand->base_rc]);
2276 if (operand->index_rc)
2277 kdb_printf(" index_rc %d (%s)",
2279 bbrg_name[operand->index_rc]);
/* Debug-print the matched opcode: optional prefix, the text as disassembled,
 * the table entry that matched it, and its usage code.
 */
2284 bb_print_opcode(void)
2286 const struct bb_opcode_usage *o = bb_decode.match;
2288 if (bb_decode.prefix)
2289 kdb_printf("%s ", bb_decode.prefix);
2290 kdb_printf("opcode '%s' matched by '%s', usage %d\n",
2291 bb_decode.opcode, o->opcode, o->usage);
/* Look up bb_decode.opcode in the per-letter index of bb_opcode_usage_all.
 * Prefix match (strncmp with the table entry's length) relies on the table's
 * modified sort order so shorter prefixes come after longer ones.  Unmatched
 * opcodes are reported, rate-limited to a few messages.
 */
2295 bb_parse_opcode(void)
2298 const struct bb_opcode_usage *o;
2299 static int bb_parse_opcode_error_limit = 5;
/* index by first letter; non-alphabetic first bytes fall outside 0..25 */
2300 c = bb_decode.opcode[0] - 'a';
2301 if (c < 0 || c >= ARRAY_SIZE(bb_opcode_usage))
2303 o = bb_opcode_usage[c].opcode;
2306 for (i = 0; i < bb_opcode_usage[c].size; ++i, ++o) {
2307 if (strncmp(bb_decode.opcode, o->opcode, o->length) == 0) {
2308 bb_decode.match = o;
/* rate-limit the "no match" diagnostics */
2315 if (!bb_parse_opcode_error_limit)
2317 --bb_parse_opcode_error_limit;
2318 kdb_printf("%s: no match at [%s]%s " kdb_bfd_vma_fmt0 " - '%s'\n",
2320 bb_mod_name, bb_func_name, bb_curr_addr,
/* True iff 'reg' denotes one of the tracked integer registers. */
2326 bb_is_int_reg(enum bb_reg_code reg)
2328 return reg >= BBRG_RAX && reg < (BBRG_RAX + KDB_INT_REGISTERS);
/* True iff the operand is a plain 'disp(%reg)' memory reference: base
 * register only, no index, no scale (remaining condition lines elided).
 */
2332 bb_is_simple_memory(const struct bb_operand *operand)
2334 return operand->memory &&
2335 bb_is_int_reg(operand->base_rc) &&
2336 !operand->index_rc &&
2337 operand->scale == 0 &&
/* True iff the operand is a bare absolute displacement: memory with no base,
 * index, scale or segment (remaining condition lines elided).
 */
2342 bb_is_static_disp(const struct bb_operand *operand)
2344 return operand->memory &&
2345 !operand->base_rc &&
2346 !operand->index_rc &&
2347 operand->scale == 0 &&
2348 !operand->segment &&
/* Checked accessors for the per-register tracking state (bb_reg_state) and
 * for the actual register values (bb_actual).  Each BB_CHECK verifies the
 * argument is a tracked integer register before indexing by reg - BBRG_RAX.
 */
2352 static enum bb_reg_code
2353 bb_reg_code_value(enum bb_reg_code reg)
2355 BB_CHECK(!bb_is_int_reg(reg), reg, 0);
2356 return bb_reg_state->contains[reg - BBRG_RAX].value;
2360 bb_reg_code_offset(enum bb_reg_code reg)
2362 BB_CHECK(!bb_is_int_reg(reg), reg, 0);
2363 return bb_reg_state->contains[reg - BBRG_RAX].offset;
2367 bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src)
2369 BB_CHECK(!bb_is_int_reg(dst), dst, );
2370 bb_reg_state->contains[dst - BBRG_RAX].value = src;
2374 bb_reg_code_set_offset(enum bb_reg_code dst, short offset)
2376 BB_CHECK(!bb_is_int_reg(dst), dst, );
2377 bb_reg_state->contains[dst - BBRG_RAX].offset = offset;
/* True iff 'reg' currently holds a copy of the original stack pointer. */
2381 bb_is_osp_defined(enum bb_reg_code reg)
2383 if (bb_is_int_reg(reg))
2384 return bb_reg_code_value(reg) == BBRG_OSP;
2390 bb_actual_value(enum bb_reg_code reg)
2392 BB_CHECK(!bb_is_int_reg(reg), reg, 0);
2393 return bb_actual[reg - BBRG_RAX].value;
2397 bb_actual_valid(enum bb_reg_code reg)
2399 BB_CHECK(!bb_is_int_reg(reg), reg, 0);
2400 return bb_actual[reg - BBRG_RAX].valid;
2404 bb_actual_set_value(enum bb_reg_code reg, bfd_vma value)
2406 BB_CHECK(!bb_is_int_reg(reg), reg, );
2407 bb_actual[reg - BBRG_RAX].value = value;
2411 bb_actual_set_valid(enum bb_reg_code reg, int valid)
2413 BB_CHECK(!bb_is_int_reg(reg), reg, );
2414 bb_actual[reg - BBRG_RAX].valid = valid;
2417 /* The scheduler code switches RSP then does PUSH, it is not an error for RSP
2418 * to be undefined in this area of the code.
2421 bb_is_scheduler_address(void)
/* true iff the current instruction lies inside the scheduler text section */
2423 return bb_curr_addr >= bb__sched_text_start &&
2424 bb_curr_addr < bb__sched_text_end;
2428 bb_reg_read(enum bb_reg_code reg)
2431 if (!bb_is_int_reg(reg) ||
2432 bb_reg_code_value(reg) != reg)
2435 i < min_t(unsigned int, REGPARM, ARRAY_SIZE(bb_param_reg));
2437 if (reg == bb_param_reg[i]) {
2442 bb_reg_params = max(bb_reg_params, r);
/* Debug dump of a register state: one line per tracked register (with OSP
 * offset where applicable) followed by one line per in-use memory slot.
 */
2446 bb_do_reg_state_print(const struct bb_reg_state *s)
2448 int i, offset_address, offset_value;
2449 const struct bb_memory_contains *c;
2450 enum bb_reg_code value;
2451 kdb_printf(" bb_reg_state %p\n", s);
2452 for (i = 0; i < ARRAY_SIZE(s->contains); ++i) {
2453 value = s->contains[i].value;
2454 offset_value = s->contains[i].offset;
2455 kdb_printf(" %s = %s",
2456 bbrg_name[i + BBRG_RAX], bbrg_name[value]);
/* the offset is only meaningful for stack-pointer copies */
2457 if (value == BBRG_OSP)
2458 KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", "");
2461 for (i = 0, c = s->memory; i < s->mem_count; ++i, ++c) {
2462 offset_address = c->offset_address;
2464 offset_value = c->offset_value;
2465 kdb_printf(" slot %d offset_address %c0x%x %s",
2467 offset_address >= 0 ? '+' : '-',
2468 offset_address >= 0 ? offset_address : -offset_address,
2470 if (value == BBRG_OSP)
2471 KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", "");
/* Debug-gated wrapper around bb_do_reg_state_print(). */
2477 bb_reg_state_print(const struct bb_reg_state *s)
2480 bb_do_reg_state_print(s);
2483 /* Set register 'dst' to contain the value from 'src'. This includes reading
2484 * from 'src' and writing to 'dst'. The offset value is copied iff 'src'
2485 * contains a stack pointer.
2487 * Be very careful about the context here. 'dst' and 'src' reflect integer
2488 * registers by name, _not_ by the value of their contents. "mov %rax,%rsi"
2489 * will call this function as bb_reg_set_reg(BBRG_RSI, BBRG_RAX), which
2490 * reflects what the assembler code is doing. However we need to track the
2491 * _values_ in the registers, not their names. IOW, we really care about "what
2492 * value does rax contain when it is copied into rsi?", so we can record the
2493 * fact that we now have two copies of that value, one in rax and one in rsi.
/* See the comment block above: copy the tracked *contents* of register 'src'
 * into register 'dst' (value, plus offset when src holds a stack pointer).
 * A non-integer 'src' (e.g. BBRG_UNDEFINED) makes dst undefined.
 */
2497 bb_reg_set_reg(enum bb_reg_code dst, enum bb_reg_code src)
2499 enum bb_reg_code src_value = BBRG_UNDEFINED;
2500 short offset_value = 0;
2501 KDB_DEBUG_BB(" %s = %s", bbrg_name[dst], bbrg_name[src]);
2502 if (bb_is_int_reg(src)) {
2504 src_value = bb_reg_code_value(src);
2505 KDB_DEBUG_BB(" (%s", bbrg_name[src_value]);
/* the offset only travels with stack-pointer copies */
2506 if (bb_is_osp_defined(src)) {
2507 offset_value = bb_reg_code_offset(src);
2508 KDB_DEBUG_BB_OFFSET(offset_value, "", "");
2512 if (bb_is_int_reg(dst)) {
2513 bb_reg_code_set_value(dst, src_value);
2514 bb_reg_code_set_offset(dst, offset_value);
/* Convenience wrapper: mark 'dst' as containing an undefined value. */
2520 bb_reg_set_undef(enum bb_reg_code dst)
2522 bb_reg_set_reg(dst, BBRG_UNDEFINED);
2525 /* Delete any record of a stored register held in osp + 'offset' */
/* Forget any tracked register stored at stack location osp + 'offset'.
 * Slots are cleared in place; the slot count only shrinks when the freed
 * slot happens to be the last one.
 */
2528 bb_delete_memory(short offset)
2531 struct bb_memory_contains *c;
2532 for (i = 0, c = bb_reg_state->memory;
2533 i < bb_reg_state->mem_count;
2535 if (c->offset_address == offset &&
2536 c->value != BBRG_UNDEFINED) {
2537 KDB_DEBUG_BB(" delete %s from ",
2538 bbrg_name[c->value]);
2539 KDB_DEBUG_BB_OFFSET(offset, "osp", "");
2540 KDB_DEBUG_BB(" slot %d\n",
2541 (int)(c - bb_reg_state->memory));
/* fill every byte with BBRG_UNDEFINED; this only clears the slot
 * correctly because BBRG_UNDEFINED is 0 (see the enum definition)
 */
2542 memset(c, BBRG_UNDEFINED, sizeof(*c));
2543 if (i == bb_reg_state->mem_count - 1)
2544 --bb_reg_state->mem_count;
2549 /* Set memory location *('dst' + 'offset_address') to contain the supplied
2550 * value and offset. 'dst' is assumed to be a register that contains a stack
/* Record that stack location *(dst + offset_address) now holds 'value' (and
 * 'offset_value' when value is BBRG_OSP).  'dst' must hold a stack pointer;
 * its own OSP offset is folded in so slots are keyed by absolute osp offset.
 * Reuses a matching or free slot, growing bb_reg_state (realloc-style) by 5
 * slots when the table is full.
 */
2555 bb_memory_set_reg_value(enum bb_reg_code dst, short offset_address,
2556 enum bb_reg_code value, short offset_value)
2559 struct bb_memory_contains *c, *free = NULL;
2560 BB_CHECK(!bb_is_osp_defined(dst), dst, );
2561 KDB_DEBUG_BB(" *(%s", bbrg_name[dst]);
2562 KDB_DEBUG_BB_OFFSET(offset_address, "", "");
/* normalise to an offset from the original stack pointer */
2563 offset_address += bb_reg_code_offset(dst);
2564 KDB_DEBUG_BB_OFFSET(offset_address, " osp", ") = ");
2565 KDB_DEBUG_BB("%s", bbrg_name[value]);
2566 if (value == BBRG_OSP)
2567 KDB_DEBUG_BB_OFFSET(offset_value, "", "");
/* prefer an existing slot for this address, else the first free slot */
2568 for (i = 0, c = bb_reg_state->memory;
2569 i < bb_reg_state_max;
2571 if (c->offset_address == offset_address)
2573 else if (c->value == BBRG_UNDEFINED && !free)
/* no slot available: grow the flexible array by reallocating the
 * whole bb_reg_state and zero-filling (BBRG_UNDEFINED == 0) the
 * new slots
 */
2577 struct bb_reg_state *new, *old = bb_reg_state;
2578 size_t old_size, new_size;
2580 old_size = sizeof(*old) + bb_reg_state_max *
2581 sizeof(old->memory[0]);
2582 slot = bb_reg_state_max;
2583 bb_reg_state_max += 5;
2584 new_size = sizeof(*new) + bb_reg_state_max *
2585 sizeof(new->memory[0]);
2586 new = debug_kmalloc(new_size, GFP_ATOMIC);
2588 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
2591 memcpy(new, old, old_size);
2592 memset((char *)new + old_size, BBRG_UNDEFINED,
2593 new_size - old_size);
2596 free = bb_reg_state->memory + slot;
2600 int slot = free - bb_reg_state->memory;
2601 free->offset_address = offset_address;
2602 free->value = value;
2603 free->offset_value = offset_value;
2604 KDB_DEBUG_BB(" slot %d", slot);
2605 bb_reg_state->mem_count = max(bb_reg_state->mem_count, slot+1);
2610 /* Set memory location *('dst' + 'offset') to contain the value from register
2611 * 'src'. 'dst' is assumed to be a register that contains a stack pointer.
2612 * This differs from bb_memory_set_reg_value because it takes a src register
2613 * which contains a value and possibly an offset, bb_memory_set_reg_value is
2614 * passed the value and offset directly.
/* Record a store of register 'src' to *('dst' + offset_address). Reads
 * the value/offset out of 'src' and forwards to bb_memory_set_reg_value;
 * storing an undefined register instead deletes any stale record for
 * that stack word.
 */
2618 bb_memory_set_reg(enum bb_reg_code dst, enum bb_reg_code src,
2619 short offset_address)
2622 enum bb_reg_code value;
2623 BB_CHECK(!bb_is_osp_defined(dst), dst, );
/* Only integer registers are tracked; anything else is ignored */
2624 if (!bb_is_int_reg(src))
2626 value = bb_reg_code_value(src);
2627 if (value == BBRG_UNDEFINED) {
2628 bb_delete_memory(offset_address + bb_reg_code_offset(dst));
2631 offset_value = bb_reg_code_offset(src);
2633 bb_memory_set_reg_value(dst, offset_address, value, offset_value);
2636 /* Set register 'dst' to contain the value from memory *('src' + offset_address).
2637 * 'src' is assumed to be a register that contains a stack pointer.
/* Record a load into register 'dst' from *('src' + offset_address),
 * where 'src' maps the original stack pointer. On a slot hit the
 * recorded value (and osp offset, if the value is BBRG_OSP) is copied
 * into 'dst'; with no matching slot 'dst' becomes undefined.
 */
2641 bb_reg_set_memory(enum bb_reg_code dst, enum bb_reg_code src, short offset_address)
2644 struct bb_memory_contains *s;
2645 BB_CHECK(!bb_is_osp_defined(src), src, );
2646 KDB_DEBUG_BB(" %s = *(%s",
2647 bbrg_name[dst], bbrg_name[src]);
2648 KDB_DEBUG_BB_OFFSET(offset_address, "", ")");
/* Translate to an osp-relative offset before searching the slots */
2649 offset_address += bb_reg_code_offset(src);
2650 KDB_DEBUG_BB_OFFSET(offset_address, " (osp", ")");
2651 for (i = 0, s = bb_reg_state->memory;
2652 i < bb_reg_state->mem_count;
2654 if (s->offset_address == offset_address && bb_is_int_reg(dst)) {
2655 bb_reg_code_set_value(dst, s->value);
2656 KDB_DEBUG_BB(" value %s", bbrg_name[s->value]);
2657 if (s->value == BBRG_OSP) {
2658 bb_reg_code_set_offset(dst, s->offset_value);
2659 KDB_DEBUG_BB_OFFSET(s->offset_value, "", "");
2661 bb_reg_code_set_offset(dst, 0);
/* No record of this stack word: dst is now undefined */
2667 bb_reg_set_reg(dst, BBRG_UNDEFINED);
2672 /* A generic read from an operand. */
/* A generic read from an operand: mark base/index registers as read.
 * For a simple memory operand addressed off the tracked stack pointer
 * (and not an lea, which computes an address without reading memory),
 * also update bb_memory_params with the highest word offset touched —
 * presumably used later to estimate arguments passed on the stack
 * (TODO confirm against the rest of the file).
 */
2675 bb_read_operand(const struct bb_operand *operand)
2678 if (operand->base_rc)
2679 bb_reg_read(operand->base_rc);
2680 if (operand->index_rc)
2681 bb_reg_read(operand->index_rc);
2682 if (bb_is_simple_memory(operand) &&
2683 bb_is_osp_defined(operand->base_rc) &&
2684 bb_decode.match->usage != BBOU_LEA) {
/* Round the byte offset up to a whole number of machine words */
2685 m = (bb_reg_code_offset(operand->base_rc) + operand->disp +
2686 KDB_WORD_SIZE - 1) / KDB_WORD_SIZE;
2687 bb_memory_params = max(bb_memory_params, m);
2691 /* A generic write to an operand, resulting in an undefined value in that
2692 * location. All well defined operands are handled separately, this function
2693 * only handles the opcodes where the result is undefined.
/* A generic write to an operand, resulting in an undefined value at the
 * destination. A memory destination reads its base/index registers to
 * form the address; a register destination simply becomes undefined.
 * A write through the tracked stack pointer invalidates any memory
 * record for the word-aligned stack slot being overwritten.
 */
2697 bb_write_operand(const struct bb_operand *operand)
2699 enum bb_reg_code base_rc = operand->base_rc;
2700 if (operand->memory) {
2702 bb_reg_read(base_rc);
2703 if (operand->index_rc)
2704 bb_reg_read(operand->index_rc);
2705 } else if (operand->reg && base_rc) {
2706 bb_reg_set_undef(base_rc);
2708 if (bb_is_simple_memory(operand) && bb_is_osp_defined(base_rc)) {
2710 offset = bb_reg_code_offset(base_rc) + operand->disp;
/* Align down to the start of the containing machine word */
2711 offset = ALIGN(offset - KDB_WORD_SIZE + 1, KDB_WORD_SIZE);
2712 bb_delete_memory(offset);
2716 /* Adjust a register that contains a stack pointer */
/* Adjust a register that maps the original stack pointer by 'adjust'
 * bytes. When RSP moves upwards (stack shrinks), every memory record in
 * the vacated range is deleted, one word at a time.
 * NOTE(review): the line applying 'adjust' to 'offset' falls in a gap of
 * this excerpt (between 2723 and 2725).
 */
2719 bb_adjust_osp(enum bb_reg_code reg, int adjust)
2721 int offset = bb_reg_code_offset(reg), old_offset = offset;
2722 KDB_DEBUG_BB(" %s osp offset ", bbrg_name[reg]);
2723 KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", " -> ");
2725 bb_reg_code_set_offset(reg, offset);
2726 KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", "\n");
2727 /* When RSP is adjusted upwards, it invalidates any memory
2728 * stored between the old and current stack offsets.
2730 if (reg == BBRG_RSP) {
2731 while (old_offset < bb_reg_code_offset(reg)) {
2732 bb_delete_memory(old_offset);
2733 old_offset += KDB_WORD_SIZE;
2738 /* The current instruction adjusts a register that contains a stack pointer.
2739 * Direction is 1 or -1, depending on whether the instruction is add/lea or
/* The current instruction adjusts a register mapping the stack pointer.
 * 'direction' is +1 (add/lea) or -1 (sub). An immediate (or lea)
 * adjustment is applied precisely; any other source means a variable
 * adjustment, after which the register's osp mapping is unknown.
 */
2744 bb_adjust_osp_instruction(int direction)
2746 enum bb_reg_code dst_reg = bb_decode.dst.base_rc;
2747 if (bb_decode.src.immediate ||
2748 bb_decode.match->usage == BBOU_LEA /* lea has its own checks */) {
2749 int adjust = direction * bb_decode.src.disp;
2750 bb_adjust_osp(dst_reg, adjust);
2752 /* variable stack adjustment, osp offset is not well defined */
2753 KDB_DEBUG_BB(" %s osp offset ", bbrg_name[dst_reg]);
2754 KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(dst_reg), "", " -> undefined\n");
2755 bb_reg_code_set_value(dst_reg, BBRG_UNDEFINED);
2756 bb_reg_code_set_offset(dst_reg, 0);
2760 /* Some instructions using memory have an explicit length suffix (b, w, l, q).
2761 * The equivalent instructions using a register imply the length from the
2762 * register name. Deduce the operand length.
/* Deduce an operand's length in bits, first from an explicit opcode
 * suffix (b/w/l/q), then — when no suffix decided it — from the
 * register name's spelling (e.g. "%al" vs "%ax" vs "%eax" vs "%rax").
 * NOTE(review): most of the switch arms are missing from this excerpt;
 * the visible lines only show the dispatch structure.
 */
2766 bb_operand_length(const struct bb_operand *operand, char opcode_suffix)
2769 switch (opcode_suffix) {
2783 if (l == 0 && operand->reg) {
2784 switch (strlen(operand->base)) {
2786 switch (operand->base[2]) {
/* 'r' as second character indicates a 64-bit register name */
2796 if (operand->base[1] == 'r')
/* Size in bytes of a bb_reg_state including its in-use memory slots;
 * used when copying/comparing states (see bb_transfer).
 */
2807 bb_reg_state_size(const struct bb_reg_state *state)
2809 return sizeof(*state) +
2810 state->mem_count * sizeof(state->memory[0]);
2813 /* Canonicalize the current bb_reg_state so it can be compared against
2814 * previously created states. Sort the memory entries in descending order of
2815 * offset_address (stack grows down). Empty slots are moved to the end of the
/* Canonicalize bb_reg_state so equivalent states compare equal with
 * memcmp: bubble-style pass ordering memory entries by descending
 * offset_address with undefined slots pushed to the end, then recompute
 * mem_count as one past the last defined slot.
 * NOTE(review): the swap body and loop increments fall in gaps of this
 * excerpt; only the comparison chain is visible.
 */
2820 bb_reg_state_canonicalize(void)
2822 int i, order, changed;
2823 struct bb_memory_contains *p1, *p2, temp;
2826 for (i = 0, p1 = bb_reg_state->memory;
2827 i < bb_reg_state->mem_count-1;
2830 if (p2->value == BBRG_UNDEFINED) {
2832 } else if (p1->value == BBRG_UNDEFINED) {
2834 } else if (p1->offset_address < p2->offset_address) {
2836 } else if (p1->offset_address > p2->offset_address) {
/* Second pass: mem_count becomes the index just past the last defined
 * slot */
2849 for (i = 0, p1 = bb_reg_state->memory;
2850 i < bb_reg_state_max;
2852 if (p1->value != BBRG_UNDEFINED)
2853 bb_reg_state->mem_count = i + 1;
2855 bb_reg_state_print(bb_reg_state);
/* Check a transfer to address 'to' against the table of known special
 * cases (hand-written assembler labels, mainly entry.S). When a table
 * entry matches, verify that the current register state and the tracked
 * stack memory agree with the expected state for that label, printing a
 * diagnostic for each mismatch up to max_errors.
 * NOTE(review): several control-flow lines (returns, loop increments,
 * closing braces) fall in gaps of this excerpt.
 */
2859 bb_special_case(bfd_vma to)
2861 int i, j, rsp_offset, expect_offset, offset, errors = 0, max_errors = 40;
2862 enum bb_reg_code reg, expect_value, value;
2863 struct bb_name_state *r;
/* Find a table entry whose address (and optional function name)
 * matches this transfer target */
2865 for (i = 0, r = bb_special_cases;
2866 i < ARRAY_SIZE(bb_special_cases);
2868 if (to == r->address &&
2869 (r->fname == NULL || strcmp(bb_func_name, r->fname) == 0))
2872 /* Some inline assembler code has jumps to .fixup sections which result
2873 * in out of line transfers with undefined state, ignore them.
2875 if (strcmp(bb_func_name, "strnlen_user") == 0 ||
2876 strcmp(bb_func_name, "copy_from_user") == 0)
2881 /* Check the running registers match */
2882 for (reg = BBRG_RAX; reg < r->regs_size; ++reg) {
2883 expect_value = r->regs[reg].value;
2884 if (test_bit(expect_value, r->skip_regs.bits)) {
2885 /* this regs entry is not defined for this label */
2888 if (expect_value == BBRG_UNDEFINED)
2890 expect_offset = r->regs[reg].offset;
2891 value = bb_reg_code_value(reg);
2892 offset = bb_reg_code_offset(reg);
2893 if (expect_value == value &&
2894 (value != BBRG_OSP || r->osp_offset == offset))
2896 kdb_printf("%s: Expected %s to contain %s",
2899 bbrg_name[expect_value]);
2901 KDB_DEBUG_BB_OFFSET_PRINTF(r->osp_offset, "", "");
2902 kdb_printf(". It actually contains %s", bbrg_name[value]);
2904 KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", "");
2907 if (max_errors-- == 0)
2910 /* Check that any memory data on stack matches */
2912 while (i < bb_reg_state->mem_count &&
2914 expect_value = r->mem[j].value;
2915 if (test_bit(expect_value, r->skip_mem.bits) ||
2916 expect_value == BBRG_UNDEFINED) {
2917 /* this memory slot is not defined for this label */
/* Compare stack offsets relative to the current RSP so the check is
 * independent of the absolute osp offset */
2921 rsp_offset = bb_reg_state->memory[i].offset_address -
2922 bb_reg_code_offset(BBRG_RSP);
2924 r->mem[j].offset_address) {
2925 /* extra slots in memory are OK */
2927 } else if (rsp_offset <
2928 r->mem[j].offset_address) {
2929 /* Required memory slot is missing */
2930 kdb_printf("%s: Invalid bb_reg_state.memory, "
2931 "missing memory entry[%d] %s\n",
2932 __FUNCTION__, j, bbrg_name[expect_value]);
2934 if (max_errors-- == 0)
2938 if (bb_reg_state->memory[i].offset_value ||
2939 bb_reg_state->memory[i].value != expect_value) {
2940 /* memory slot is present but contains wrong
2943 kdb_printf("%s: Invalid bb_reg_state.memory, "
2944 "wrong value in slot %d, "
2945 "should be %s, it is %s\n",
2947 bbrg_name[expect_value],
2948 bbrg_name[bb_reg_state->memory[i].value]);
2950 if (max_errors-- == 0)
/* Any expected (non-skipped) entries left over mean the stack ran out
 * before all pt_regs slots were verified */
2957 while (j < r->mem_size) {
2958 expect_value = r->mem[j].value;
2959 if (test_bit(expect_value, r->skip_mem.bits) ||
2960 expect_value == BBRG_UNDEFINED)
2965 if (j != r->mem_size) {
2966 /* Hit end of memory before testing all the pt_reg slots */
2967 kdb_printf("%s: Invalid bb_reg_state.memory, "
2968 "missing trailing entries\n",
2971 if (max_errors-- == 0)
2978 kdb_printf("%s: on transfer to %s\n", __FUNCTION__, r->name);
2983 /* Transfer of control to a label outside the current function. If the
2984 * transfer is to a known common code path then do a sanity check on the state
/* Sanity check on exit from a function: every callee-preserved register
 * in bb_preserved_reg must still contain its entry value, modulo a
 * list of hand-coded assembler exceptions. 'type' != 0 marks a
 * sysret/sysexit exit, which legitimately clobbers RSP/RBP. Errors are
 * reported via kdb_printf and funneled into BB_CHECK at the end.
 * NOTE(review): several 'continue'/brace lines fall in gaps of this
 * excerpt.
 */
2989 bb_sanity_check(int type)
2991 enum bb_reg_code expect, actual;
2992 int i, offset, error = 0;
2994 for (i = 0; i < ARRAY_SIZE(bb_preserved_reg); ++i) {
2995 expect = bb_preserved_reg[i];
2996 actual = bb_reg_code_value(expect);
2997 offset = bb_reg_code_offset(expect);
2998 if (expect == actual)
3000 /* type == 1 is sysret/sysexit, ignore RSP */
3001 if (type && expect == BBRG_RSP)
3003 /* type == 1 is sysret/sysexit, ignore RBP for i386 */
3004 /* We used to have "#ifndef CONFIG_X86_64" for the type=1 RBP
3005 * test; however, x86_64 can run ia32 compatible mode and
3006 * hit this problem. Perform the following test anyway!
3008 if (type && expect == BBRG_RBP)
3010 /* RSP should contain OSP+0. Except for ptregscall_common and
3011 * ia32_ptregs_common, they get a partial pt_regs, fudge the
3012 * stack to make it a full pt_regs then reverse the effect on
3013 * exit, so the offset is -0x50 on exit.
3015 if (expect == BBRG_RSP &&
3016 bb_is_osp_defined(expect) &&
3019 (strcmp(bb_func_name, "ptregscall_common") == 0 ||
3020 strcmp(bb_func_name, "ia32_ptregs_common") == 0))))
3022 /* The put_user and save_paranoid functions are special.
3023 * %rbx gets clobbered */
3024 if (expect == BBRG_RBX &&
3025 (strncmp(bb_func_name, "__put_user_", 11) == 0 ||
3026 strcmp(bb_func_name, "save_paranoid") == 0))
3028 /* Ignore rbp and rsp for error_entry */
3029 if ((strcmp(bb_func_name, "error_entry") == 0) &&
3030 (expect == BBRG_RBX ||
3031 (expect == BBRG_RSP && bb_is_osp_defined(expect) && offset == -0x10)))
3033 kdb_printf("%s: Expected %s, got %s",
3035 bbrg_name[expect], bbrg_name[actual]);
3037 KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", "");
/* Any accumulated mismatch trips the BB_CHECK diagnostic */
3041 BB_CHECK(error, error, );
3044 /* Transfer of control. Follow the arc and save the current state as input to
3045 * another basic block.
/* Follow a control transfer arc from 'from' to 'to' (drop_through set
 * for fall-through arcs) and save the canonicalized register state as
 * an input to the target basic block. Reuses identical states between
 * arcs to save memory; only marks the target for rescanning when its
 * input state actually changed.
 * NOTE(review): loop bodies, 'found' handling and several braces fall
 * in gaps of this excerpt.
 */
3049 bb_transfer(bfd_vma from, bfd_vma to, unsigned int drop_through)
3053 struct bb* bb = NULL; /*stupid gcc */
3054 struct bb_jmp *bb_jmp;
3055 struct bb_reg_state *state;
3056 bb_reg_state_canonicalize();
/* Find the bb_jmp entry describing this arc, if already recorded */
3058 for (i = 0; i < bb_jmp_count; ++i) {
3059 bb_jmp = bb_jmp_list + i;
3060 if (bb_jmp->from == from &&
3062 bb_jmp->drop_through == drop_through) {
3068 /* Transfer outside the current function. Check the special
3069 * cases (mainly in entry.S) first. If it is not a known
3070 * special case then check if the target address is the start
3071 * of a function or not. If it is the start of a function then
3072 * assume tail recursion and require that the state be the same
3073 * as on entry. Otherwise assume out of line code (e.g.
3074 * spinlock contention path) and ignore it, the state can be
3077 kdb_symtab_t symtab;
3078 if (bb_special_case(to))
3080 kdbnearsym(to, &symtab);
3081 if (symtab.sym_start != to)
3087 /* Only print this message when the kernel is compiled with
3088 * -fno-optimize-sibling-calls. Otherwise it would print a
3089 * message for every tail recursion call. If you see the
3090 * message below then you probably have an assembler label that
3091 * is not listed in the special cases.
3093 kdb_printf(" not matched: from "
3095 " to " kdb_bfd_vma_fmt0
3096 " drop_through %d bb_jmp[%d]\n",
3097 from, to, drop_through, i);
3098 #endif /* NO_SIBLINGS */
3101 KDB_DEBUG_BB(" matched: from " kdb_bfd_vma_fmt0
3102 " to " kdb_bfd_vma_fmt0
3103 " drop_through %d bb_jmp[%d]\n",
3104 from, to, drop_through, i);
/* Locate the target basic block for the 'to' address */
3106 for (i = 0; i < bb_count; ++i) {
3108 if (bb->start == to) {
3113 BB_CHECK(!found, to, );
3114 /* If the register state for this arc has already been set (we are
3115 * rescanning the block that originates the arc) and the state is the
3116 * same as the previous state for this arc then this input to the
3117 * target block is the same as last time, so there is no need to rescan
3120 state = bb_jmp->state;
3121 size = bb_reg_state_size(bb_reg_state);
/* ref_count is excluded from the comparison by copying it over first */
3123 bb_reg_state->ref_count = state->ref_count;
3124 if (memcmp(state, bb_reg_state, size) == 0) {
3125 KDB_DEBUG_BB(" no state change\n");
3128 if (--state->ref_count == 0)
3130 bb_jmp->state = NULL;
3132 /* New input state is required. To save space, check if any other arcs
3133 * have the same state and reuse them where possible. The overall set
3134 * of inputs to the target block is now different so the target block
3135 * must be rescanned.
3138 for (i = 0; i < bb_jmp_count; ++i) {
3139 state = bb_jmp_list[i].state;
3142 bb_reg_state->ref_count = state->ref_count;
3143 if (memcmp(state, bb_reg_state, size) == 0) {
3144 KDB_DEBUG_BB(" reuse bb_jmp[%d]\n", i);
3145 bb_jmp->state = state;
/* No identical state exists: allocate and populate a fresh copy */
3150 state = debug_kmalloc(size, GFP_ATOMIC);
3152 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
3156 memcpy(state, bb_reg_state, size);
3157 state->ref_count = 1;
3158 bb_jmp->state = state;
3159 KDB_DEBUG_BB(" new state %p\n", state);
3162 /* Isolate the processing for 'mov' so it can be used for 'xadd'/'xchg' as
3165 * xadd/xchg expect this function to return BBOU_NOP for special cases,
3166 * otherwise it returns BBOU_RSWD. All special cases must be handled entirely
3167 * within this function, including doing bb_read_operand or bb_write_operand
/* Track the effect of a 'mov' (also reused by xadd/xchg simulation).
 * Handles register-to-register copies, register-to-stack stores and
 * stack-to-register loads, plus a list of hand-coded assembler special
 * cases (jprobe_return, math_abort, sysenter fixup, scheduler stack
 * switch, restore_image, ...). Returns BBOU_NOP when the move was fully
 * handled here, otherwise falls back to generic read/write usage.
 * NOTE(review): several return statements and closing braces fall in
 * gaps of this excerpt.
 */
static enum bb_operand_usage
3172 bb_usage_mov(const struct bb_operand *src, const struct bb_operand *dst, int l)
3174 int full_register_src, full_register_dst;
/* 'l' is the opcode length, so opcode[l] is the size suffix (if any) */
3175 full_register_src = bb_operand_length(src, bb_decode.opcode[l])
3176 == KDB_WORD_SIZE * 8;
3177 full_register_dst = bb_operand_length(dst, bb_decode.opcode[l])
3178 == KDB_WORD_SIZE * 8;
3179 /* If both src and dst are full integer registers then record the
3183 bb_is_int_reg(src->base_rc) &&
3185 bb_is_int_reg(dst->base_rc) &&
3186 full_register_src &&
3187 full_register_dst) {
3188 /* Special case for the code that switches stacks in
3189 * jprobe_return. That code must modify RSP but it does it in
3190 * a well defined manner. Do not invalidate RSP.
3192 if (src->base_rc == BBRG_RBX &&
3193 dst->base_rc == BBRG_RSP &&
3194 strcmp(bb_func_name, "jprobe_return") == 0) {
3195 bb_read_operand(src);
3198 /* math_abort takes the equivalent of a longjmp structure and
3199 * resets the stack. Ignore this, it leaves RSP well defined.
3201 if (dst->base_rc == BBRG_RSP &&
3202 strcmp(bb_func_name, "math_abort") == 0) {
3203 bb_read_operand(src);
3206 bb_reg_set_reg(dst->base_rc, src->base_rc);
3209 /* If the move is from a full integer register to stack then record it.
3212 bb_is_simple_memory(dst) &&
3213 bb_is_osp_defined(dst->base_rc) &&
3214 full_register_src) {
3215 /* Ugly special case. Initializing list heads on stack causes
3216 * false references to stack variables when the list head is
3217 * used. Static code analysis cannot detect that the list head
3218 * has been changed by a previous execution loop and that a
3219 * basic block is only executed after the list head has been
3222 * These false references can result in valid stack variables
3223 * being incorrectly cleared on some logic paths. Ignore
3224 * stores to stack variables which point to themselves or to
3225 * the previous word so the list head initialization is not
3228 if (bb_is_osp_defined(src->base_rc)) {
3229 int stack1 = bb_reg_code_offset(src->base_rc);
3230 int stack2 = bb_reg_code_offset(dst->base_rc) +
3232 if (stack1 == stack2 ||
3233 stack1 == stack2 - KDB_WORD_SIZE)
3236 bb_memory_set_reg(dst->base_rc, src->base_rc, dst->disp);
3239 /* If the move is from stack to a full integer register then record it.
3241 if (bb_is_simple_memory(src) &&
3242 bb_is_osp_defined(src->base_rc) &&
3244 bb_is_int_reg(dst->base_rc) &&
3245 full_register_dst) {
3246 #ifdef CONFIG_X86_32
3247 #ifndef TSS_sysenter_sp0
3248 #define TSS_sysenter_sp0 SYSENTER_stack_sp0
3250 /* mov from TSS_sysenter_sp0+offset to esp to fix up the
3251 * sysenter stack, it leaves esp well defined. mov
3252 * TSS_ysenter_sp0+offset(%esp),%esp is followed by up to 5
3253 * push instructions to mimic the hardware stack push. If
3254 * TSS_sysenter_sp0 is offset then only 3 words will be
3257 if (dst->base_rc == BBRG_RSP &&
3258 src->disp >= TSS_sysenter_sp0 &&
3259 bb_is_osp_defined(BBRG_RSP)) {
3261 pushes = src->disp == TSS_sysenter_sp0 ? 5 : 3;
3262 bb_reg_code_set_offset(BBRG_RSP,
3263 bb_reg_code_offset(BBRG_RSP) +
3264 pushes * KDB_WORD_SIZE);
3265 KDB_DEBUG_BB_OFFSET(
3266 bb_reg_code_offset(BBRG_RSP),
3267 " sysenter fixup, RSP",
3271 #endif /* CONFIG_X86_32 */
3272 bb_read_operand(src);
3273 bb_reg_set_memory(dst->base_rc, src->base_rc, src->disp);
3276 /* move %gs:0x<nn>,%rsp is used to unconditionally switch to another
3277 * stack. Ignore this special case, it is handled by the stack
3281 strcmp(src->segment, "%gs") == 0 &&
3283 dst->base_rc == BBRG_RSP)
3285 /* move %reg,%reg is a nop */
3290 strcmp(src->base, dst->base) == 0)
3292 /* Special case for the code that switches stacks in the scheduler
3293 * (switch_to()). That code must modify RSP but it does it in a well
3294 * defined manner. Do not invalidate RSP.
3297 dst->base_rc == BBRG_RSP &&
3298 full_register_dst &&
3299 bb_is_scheduler_address()) {
3300 bb_read_operand(src);
3303 /* Special case for the code that switches stacks in resume from
3304 * hibernation code. That code must modify RSP but it does it in a
3305 * well defined manner. Do not invalidate RSP.
3309 dst->base_rc == BBRG_RSP &&
3310 full_register_dst &&
3311 strcmp(bb_func_name, "restore_image") == 0) {
3312 bb_read_operand(src);
/* Track 'xadd' by simulating it as mov steps through a scratch register
 * (tmp = src + dst; src = dst; dst = tmp), reusing bb_usage_mov's
 * special-case handling. The scratch register's previous tracked state
 * is saved and restored so the simulation leaves no trace.
 * NOTE(review): loop increments, the early-return path after the first
 * bb_usage_mov, and the final return fall in gaps of this excerpt.
 */
static enum bb_operand_usage
3319 bb_usage_xadd(const struct bb_operand *src, const struct bb_operand *dst)
3321 /* Simulate xadd as a series of instructions including mov, that way we
3322 * get the benefit of all the special cases already handled by
3325 * tmp = src + dst, src = dst, dst = tmp.
3327 * For tmp, pick a register that is undefined. If all registers are
3328 * defined then pick one that is not being used by xadd.
3330 enum bb_reg_code reg = BBRG_UNDEFINED;
3331 struct bb_operand tmp;
3332 struct bb_reg_contains save_tmp;
3333 enum bb_operand_usage usage;
/* First preference: any register whose value is already undefined */
3335 for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
3336 if (bb_reg_code_value(reg) == BBRG_UNDEFINED) {
/* Fallback: any register not referenced by either xadd operand */
3342 for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
3343 if (reg != src->base_rc &&
3344 reg != src->index_rc &&
3345 reg != dst->base_rc &&
3346 reg != dst->index_rc &&
3351 KDB_DEBUG_BB(" %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]);
3352 save_tmp = bb_reg_state->contains[reg - BBRG_RAX];
3353 bb_reg_set_undef(reg);
3354 memset(&tmp, 0, sizeof(tmp));
/* Build a fake register operand "%<name>" for the scratch register */
3357 tmp.base = debug_kmalloc(strlen(bbrg_name[reg]) + 2, GFP_ATOMIC);
3360 strcpy(tmp.base + 1, bbrg_name[reg]);
3363 bb_read_operand(src);
3364 bb_read_operand(dst);
3365 if (bb_usage_mov(src, dst, sizeof("xadd")-1) == BBOU_NOP)
3368 usage = BBOU_RSRDWS;
3369 bb_usage_mov(&tmp, dst, sizeof("xadd")-1);
3370 KDB_DEBUG_BB(" %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]);
3371 bb_reg_state->contains[reg - BBRG_RAX] = save_tmp;
3372 debug_kfree(tmp.base);
/* Track 'xchg' by simulating it as three mov steps through a scratch
 * register (mov dst,tmp; mov src,dst; mov tmp,src), reusing
 * bb_usage_mov's special-case handling. The rs/rd/ws/wd flags are
 * cleared along the way when a mov step was fully handled (BBOU_NOP),
 * and the result is the OR of whatever generic usage remains.
 * NOTE(review): loop increments, the flag-clearing lines after each
 * bb_usage_mov, and several braces fall in gaps of this excerpt.
 */
static enum bb_operand_usage
3377 bb_usage_xchg(const struct bb_operand *src, const struct bb_operand *dst)
3379 /* Simulate xchg as a series of mov instructions, that way we get the
3380 * benefit of all the special cases already handled by BBOU_MOV.
3382 * mov dst,tmp; mov src,dst; mov tmp,src;
3384 * For tmp, pick a register that is undefined. If all registers are
3385 * defined then pick one that is not being used by xchg.
3387 enum bb_reg_code reg = BBRG_UNDEFINED;
3388 int rs = BBOU_RS, rd = BBOU_RD, ws = BBOU_WS, wd = BBOU_WD;
3389 struct bb_operand tmp;
3390 struct bb_reg_contains save_tmp;
/* First preference: any register whose value is already undefined */
3392 for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
3393 if (bb_reg_code_value(reg) == BBRG_UNDEFINED) {
/* Fallback: any register not referenced by either xchg operand */
3399 for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
3400 if (reg != src->base_rc &&
3401 reg != src->index_rc &&
3402 reg != dst->base_rc &&
3403 reg != dst->index_rc &&
3408 KDB_DEBUG_BB(" %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]);
3409 save_tmp = bb_reg_state->contains[reg - BBRG_RAX];
3410 memset(&tmp, 0, sizeof(tmp));
/* Build a fake register operand "%<name>" for the scratch register */
3413 tmp.base = debug_kmalloc(strlen(bbrg_name[reg]) + 2, GFP_ATOMIC);
3416 strcpy(tmp.base + 1, bbrg_name[reg]);
3419 if (bb_usage_mov(dst, &tmp, sizeof("xchg")-1) == BBOU_NOP)
3421 if (bb_usage_mov(src, dst, sizeof("xchg")-1) == BBOU_NOP) {
3425 if (bb_usage_mov(&tmp, src, sizeof("xchg")-1) == BBOU_NOP)
3427 KDB_DEBUG_BB(" %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]);
3428 bb_reg_state->contains[reg - BBRG_RAX] = save_tmp;
3429 debug_kfree(tmp.base);
3430 return rs | rd | ws | wd;
3433 /* Invalidate all the scratch registers */
/* Mark every integer register as undefined except those listed in
 * bb_preserved_reg (the callee-saved set); used after calls, since the
 * callee may clobber all scratch registers.
 */
3436 bb_invalidate_scratch_reg(void)
3439 for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
3440 for (j = 0; j < ARRAY_SIZE(bb_preserved_reg); ++j) {
3441 if (i == bb_preserved_reg[j])
3444 bb_reg_set_undef(i);
/* Handle a computed jump through a jump table at src->disp: walk the
 * table word by word, creating a transfer arc for each entry that lands
 * inside the current function; stop at the first entry outside the
 * function, on a read failure, or when analysis has given up.
 */
3451 bb_pass2_computed_jmp(const struct bb_operand *src)
3453 unsigned long table = src->disp;
3455 while (!bb_giveup) {
3456 if (kdb_getword(&addr, table, sizeof(addr)))
3458 if (addr < bb_func_start || addr >= bb_func_end)
3460 bb_transfer(bb_curr_addr, addr, 0);
3461 table += KDB_WORD_SIZE;
3465 /* The current instruction has been decoded and all the information is in
3466 * bb_decode. Based on the opcode, track any operand usage that we care about.
3472 enum bb_operand_usage usage = bb_decode.match->usage;
3473 struct bb_operand *src = &bb_decode.src;
3474 struct bb_operand *dst = &bb_decode.dst;
3475 struct bb_operand *dst2 = &bb_decode.dst2;
3476 int opcode_suffix, operand_length;
3478 /* First handle all the special usage cases, and map them to a generic
3479 * case after catering for the side effects.
3482 if (usage == BBOU_IMUL &&
3483 src->present && !dst->present && !dst2->present) {
3484 /* single operand imul, same effects as mul */
3488 /* AT&T syntax uses movs<l1><l2> for move with sign extension, instead
3489 * of the Intel movsx. The AT&T syntax causes problems for the opcode
3490 * mapping; movs with sign extension needs to be treated as a generic
3491 * read src, write dst, but instead it falls under the movs I/O
3492 * instruction. Fix it.
3494 if (usage == BBOU_MOVS && strlen(bb_decode.opcode) > 5)
3497 /* This switch statement deliberately does not use 'default' at the top
3498 * level. That way the compiler will complain if a new BBOU_ enum is
3499 * added above and not explicitly handled here.
3502 case BBOU_UNKNOWN: /* drop through */
3503 case BBOU_RS: /* drop through */
3504 case BBOU_RD: /* drop through */
3505 case BBOU_RSRD: /* drop through */
3506 case BBOU_WS: /* drop through */
3507 case BBOU_RSWS: /* drop through */
3508 case BBOU_RDWS: /* drop through */
3509 case BBOU_RSRDWS: /* drop through */
3510 case BBOU_WD: /* drop through */
3511 case BBOU_RSWD: /* drop through */
3512 case BBOU_RDWD: /* drop through */
3513 case BBOU_RSRDWD: /* drop through */
3514 case BBOU_WSWD: /* drop through */
3515 case BBOU_RSWSWD: /* drop through */
3516 case BBOU_RDWSWD: /* drop through */
3518 break; /* ignore generic usage for now */
3520 /* Special case for add instructions that adjust registers
3521 * which are mapping the stack.
3523 if (dst->reg && bb_is_osp_defined(dst->base_rc)) {
3524 bb_adjust_osp_instruction(1);
3527 usage = BBOU_RSRDWD;
3531 /* Special case when trying to round the stack pointer
3532 * to achieve byte alignment
3534 if (dst->reg && dst->base_rc == BBRG_RSP &&
3535 src->immediate && strncmp(bb_func_name, "efi_call", 8) == 0) {
3538 usage = BBOU_RSRDWD;
3542 bb_reg_state_print(bb_reg_state);
3544 if (bb_is_static_disp(src)) {
3545 /* save_args is special. It saves
3546 * a partial pt_regs onto the stack and switches
3547 * to the interrupt stack.
3549 if (src->disp == bb_save_args) {
3550 bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x48);
3551 bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x40);
3552 bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x38);
3553 bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x30);
3554 bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x28);
3555 bb_memory_set_reg(BBRG_RSP, BBRG_R8, 0x20);
3556 bb_memory_set_reg(BBRG_RSP, BBRG_R9, 0x18);
3557 bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x10);
3558 bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x08);
3559 bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0);
3560 /* This is actually on the interrupt stack,
3561 * but we fudge it so the unwind works.
3563 bb_memory_set_reg_value(BBRG_RSP, -0x8, BBRG_RBP, 0);
3564 bb_reg_set_reg(BBRG_RBP, BBRG_RSP);
3565 bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
3567 /* save_rest juggles the stack frame to append the
3568 * rest of the pt_regs onto a stack where SAVE_ARGS
3569 * or save_args has already been done.
3571 else if (src->disp == bb_save_rest) {
3572 bb_memory_set_reg(BBRG_RSP, BBRG_RBX, 0x30);
3573 bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0x28);
3574 bb_memory_set_reg(BBRG_RSP, BBRG_R12, 0x20);
3575 bb_memory_set_reg(BBRG_RSP, BBRG_R13, 0x18);
3576 bb_memory_set_reg(BBRG_RSP, BBRG_R14, 0x10);
3577 bb_memory_set_reg(BBRG_RSP, BBRG_R15, 0x08);
3579 /* error_entry and save_paranoid save a full pt_regs.
3580 * Break out so the scratch registers aren't invalidated.
3582 else if (src->disp == bb_error_entry || src->disp == bb_save_paranoid) {
3583 bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x70);
3584 bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x68);
3585 bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x60);
3586 bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x58);
3587 bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x50);
3588 bb_memory_set_reg(BBRG_RSP, BBRG_R8, 0x48);
3589 bb_memory_set_reg(BBRG_RSP, BBRG_R9, 0x40);
3590 bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x38);
3591 bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x30);
3592 bb_memory_set_reg(BBRG_RSP, BBRG_RBX, 0x28);
3593 bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0x20);
3594 bb_memory_set_reg(BBRG_RSP, BBRG_R12, 0x18);
3595 bb_memory_set_reg(BBRG_RSP, BBRG_R13, 0x10);
3596 bb_memory_set_reg(BBRG_RSP, BBRG_R14, 0x08);
3597 bb_memory_set_reg(BBRG_RSP, BBRG_R15, 0);
3601 /* Invalidate the scratch registers */
3602 bb_invalidate_scratch_reg();
3604 /* These special cases need scratch registers invalidated first */
3605 if (bb_is_static_disp(src)) {
3606 /* Function sync_regs and save_v86_state are special.
3607 * Their return value is the new stack pointer
3609 if (src->disp == bb_sync_regs) {
3610 bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
3611 } else if (src->disp == bb_save_v86_state) {
3612 bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
3613 bb_adjust_osp(BBRG_RAX, +KDB_WORD_SIZE);
3618 /* Convert word in RAX. Read RAX, write RAX */
3619 bb_reg_read(BBRG_RAX);
3620 bb_reg_set_undef(BBRG_RAX);
3624 /* cmove %gs:0x<nn>,%rsp is used to conditionally switch to
3625 * another stack. Ignore this special case, it is handled by
3626 * the stack unwinding code.
3629 strcmp(src->segment, "%gs") == 0 &&
3631 dst->base_rc == BBRG_RSP)
3637 /* Read RAX, write RAX plus src read, dst write */
3638 bb_reg_read(BBRG_RAX);
3639 bb_reg_set_undef(BBRG_RAX);
3643 /* Read RAX, RBX, RCX, RDX, write RAX, RDX plus src read/write */
3644 bb_reg_read(BBRG_RAX);
3645 bb_reg_read(BBRG_RBX);
3646 bb_reg_read(BBRG_RCX);
3647 bb_reg_read(BBRG_RDX);
3648 bb_reg_set_undef(BBRG_RAX);
3649 bb_reg_set_undef(BBRG_RDX);
3653 /* Read RAX, write RAX, RBX, RCX, RDX */
3654 bb_reg_read(BBRG_RAX);
3655 bb_reg_set_undef(BBRG_RAX);
3656 bb_reg_set_undef(BBRG_RBX);
3657 bb_reg_set_undef(BBRG_RCX);
3658 bb_reg_set_undef(BBRG_RDX);
3662 /* Convert word in RAX, RDX. Read RAX, write RDX */
3663 bb_reg_read(BBRG_RAX);
3664 bb_reg_set_undef(BBRG_RDX);
3667 case BBOU_DIV: /* drop through */
3669 /* The 8 bit variants only affect RAX, the 16, 32 and 64 bit
3670 * variants affect RDX as well.
3674 opcode_suffix = bb_decode.opcode[3];
3677 opcode_suffix = bb_decode.opcode[4];
3680 opcode_suffix = 'q';
3683 operand_length = bb_operand_length(src, opcode_suffix);
3684 bb_reg_read(BBRG_RAX);
3685 bb_reg_set_undef(BBRG_RAX);
3686 if (operand_length != 8) {
3687 bb_reg_read(BBRG_RDX);
3688 bb_reg_set_undef(BBRG_RDX);
3693 /* Only the two and three operand forms get here. The one
3694 * operand form is treated as mul.
3696 if (dst2->present) {
3697 /* The three operand form is a special case, read the first two
3698 * operands, write the third.
3700 bb_read_operand(src);
3701 bb_read_operand(dst);
3702 bb_write_operand(dst2);
3705 usage = BBOU_RSRDWD;
3713 if (bb_is_static_disp(src))
3714 bb_transfer(bb_curr_addr, src->disp, 0);
3715 else if (src->indirect &&
3717 src->base == NULL &&
3719 src->scale == KDB_WORD_SIZE)
3720 bb_pass2_computed_jmp(src);
3725 bb_reg_set_undef(BBRG_RAX);
3729 /* dst = src + disp. Often used to calculate offsets into the
3730 * stack, so check if it uses a stack pointer.
3733 if (bb_is_simple_memory(src)) {
3734 if (bb_is_osp_defined(src->base_rc)) {
3735 bb_reg_set_reg(dst->base_rc, src->base_rc);
3736 bb_adjust_osp_instruction(1);
3738 } else if (src->disp == 0 &&
3739 src->base_rc == dst->base_rc) {
3740 /* lea 0(%reg),%reg is generated by i386
3744 } else if (src->disp == 4096 &&
3745 (src->base_rc == BBRG_R8 ||
3746 src->base_rc == BBRG_RDI) &&
3747 strcmp(bb_func_name, "relocate_kernel") == 0) {
3748 /* relocate_kernel: setup a new stack at the
3749 * end of the physical control page, using
3750 * (x86_64) lea 4096(%r8),%rsp or (i386) lea
3758 /* RSP = RBP; RBP = *(RSP); RSP += KDB_WORD_SIZE; */
3759 bb_reg_set_reg(BBRG_RSP, BBRG_RBP);
3760 if (bb_is_osp_defined(BBRG_RSP))
3761 bb_reg_set_memory(BBRG_RBP, BBRG_RSP, 0);
3763 bb_reg_set_undef(BBRG_RBP);
3764 if (bb_is_osp_defined(BBRG_RSP))
3765 bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
3766 /* common_interrupt uses leave in a non-standard manner */
3767 if (strcmp(bb_func_name, "common_interrupt") != 0)
3772 /* Read RSI, write RAX, RSI */
3773 bb_reg_read(BBRG_RSI);
3774 bb_reg_set_undef(BBRG_RAX);
3775 bb_reg_set_undef(BBRG_RSI);
3779 /* Read and write RCX */
3780 bb_reg_read(BBRG_RCX);
3781 bb_reg_set_undef(BBRG_RCX);
3782 if (bb_is_static_disp(src))
3783 bb_transfer(bb_curr_addr, src->disp, 0);
3787 /* lss offset(%esp),%esp leaves esp well defined */
3789 dst->base_rc == BBRG_RSP &&
3790 bb_is_simple_memory(src) &&
3791 src->base_rc == BBRG_RSP) {
3792 bb_adjust_osp(BBRG_RSP, 2*KDB_WORD_SIZE + src->disp);
3799 /* Read RAX, RCX, RDX */
3800 bb_reg_set_undef(BBRG_RAX);
3801 bb_reg_set_undef(BBRG_RCX);
3802 bb_reg_set_undef(BBRG_RDX);
3806 usage = bb_usage_mov(src, dst, sizeof("mov")-1);
3809 /* Read RSI, RDI, write RSI, RDI */
3810 bb_reg_read(BBRG_RSI);
3811 bb_reg_read(BBRG_RDI);
3812 bb_reg_set_undef(BBRG_RSI);
3813 bb_reg_set_undef(BBRG_RDI);
3817 /* imul (one operand form only) or mul. Read RAX. If the
3818 * operand length is not 8 then write RDX.
3820 if (bb_decode.opcode[0] == 'i')
3821 opcode_suffix = bb_decode.opcode[4];
3823 opcode_suffix = bb_decode.opcode[3];
3824 operand_length = bb_operand_length(src, opcode_suffix);
3825 bb_reg_read(BBRG_RAX);
3826 if (operand_length != 8)
3827 bb_reg_set_undef(BBRG_RDX);
3832 bb_reg_read(BBRG_RAX);
3833 bb_reg_read(BBRG_RCX);
3839 /* Read RSI, RDX, write RSI */
3840 bb_reg_read(BBRG_RSI);
3841 bb_reg_read(BBRG_RDX);
3842 bb_reg_set_undef(BBRG_RSI);
3846 /* Complicated by the fact that you can pop from top of stack
3847 * to a stack location, for this case the destination location
3848 * is calculated after adjusting RSP. Analysis of the kernel
3849 * code shows that gcc only uses this strange format to get the
3850 * flags into a local variable, e.g. pushf; popl 0x10(%esp); so
3851 * I am going to ignore this special case.
3854 if (!bb_is_osp_defined(BBRG_RSP)) {
3855 if (!bb_is_scheduler_address()) {
3856 kdb_printf("pop when BBRG_RSP is undefined?\n");
3861 bb_reg_set_memory(src->base_rc, BBRG_RSP, 0);
3864 /* pop %rsp does not adjust rsp */
3866 src->base_rc != BBRG_RSP)
3867 bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
3871 /* Do not care about flags, just adjust RSP */
3872 if (!bb_is_osp_defined(BBRG_RSP)) {
3873 if (!bb_is_scheduler_address()) {
3874 kdb_printf("popf when BBRG_RSP is undefined?\n");
3878 bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
3883 /* Complicated by the fact that you can push from a stack
3884 * location to top of stack, the source location is calculated
3885 * before adjusting RSP. Analysis of the kernel code shows
3886 * that gcc only uses this strange format to restore the flags
3887 * from a local variable, e.g. pushl 0x10(%esp); popf; so I am
3888 * going to ignore this special case.
3891 if (!bb_is_osp_defined(BBRG_RSP)) {
3892 if (!bb_is_scheduler_address()) {
3893 kdb_printf("push when BBRG_RSP is undefined?\n");
3897 bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
3899 bb_reg_code_offset(BBRG_RSP) <= 0)
3900 bb_memory_set_reg(BBRG_RSP, src->base_rc, 0);
3904 /* Do not care about flags, just adjust RSP */
3905 if (!bb_is_osp_defined(BBRG_RSP)) {
3906 if (!bb_is_scheduler_address()) {
3907 kdb_printf("pushf when BBRG_RSP is undefined?\n");
3911 bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
3916 /* Read RCX, write RAX, RDX */
3917 bb_reg_read(BBRG_RCX);
3918 bb_reg_set_undef(BBRG_RAX);
3919 bb_reg_set_undef(BBRG_RDX);
3923 /* Write RAX, RDX */
3924 bb_reg_set_undef(BBRG_RAX);
3925 bb_reg_set_undef(BBRG_RDX);
3930 if (src->immediate && bb_is_osp_defined(BBRG_RSP)) {
3931 bb_adjust_osp(BBRG_RSP, src->disp);
3933 /* Functions that restore state which was saved by another
3934 * function or build new kernel stacks. We cannot verify what
3935 * is being restored so skip the sanity check.
3937 if (strcmp(bb_func_name, "restore_image") == 0 ||
3938 strcmp(bb_func_name, "relocate_kernel") == 0 ||
3939 strcmp(bb_func_name, "identity_mapped") == 0 ||
3940 strcmp(bb_func_name, "xen_iret_crit_fixup") == 0 ||
3941 strcmp(bb_func_name, "math_abort") == 0 ||
3942 strcmp(bb_func_name, "save_args") == 0 ||
3943 strcmp(bb_func_name, "kretprobe_trampoline_holder") == 0)
3949 bb_reg_read(BBRG_RAX);
3953 /* Read RAX, RDI, write RDI */
3954 bb_reg_read(BBRG_RAX);
3955 bb_reg_read(BBRG_RDI);
3956 bb_reg_set_undef(BBRG_RDI);
3960 /* Special case for sub instructions that adjust registers
3961 * which are mapping the stack.
3963 if (dst->reg && bb_is_osp_defined(dst->base_rc)) {
3964 bb_adjust_osp_instruction(-1);
3967 usage = BBOU_RSRDWD;
3979 /* Read RCX, RAX, RDX */
3980 bb_reg_read(BBRG_RCX);
3981 bb_reg_read(BBRG_RAX);
3982 bb_reg_read(BBRG_RDX);
3986 usage = bb_usage_xadd(src, dst);
3989 /* i386 do_IRQ with 4K stacks does xchg %ebx,%esp; call
3990 * irq_handler; mov %ebx,%esp; to switch stacks. Ignore this
3991 * stack switch when tracking registers, it is handled by
3992 * higher level backtrace code. Convert xchg %ebx,%esp to mov
3993 * %esp,%ebx so the later mov %ebx,%esp becomes a NOP and the
3994 * stack remains defined so we can backtrace through do_IRQ's
3997 * Ditto for do_softirq.
4001 src->base_rc == BBRG_RBX &&
4002 dst->base_rc == BBRG_RSP &&
4003 (strcmp(bb_func_name, "do_IRQ") == 0 ||
4004 strcmp(bb_func_name, "do_softirq") == 0)) {
4005 strcpy(bb_decode.opcode, "mov");
4006 usage = bb_usage_mov(dst, src, sizeof("mov")-1);
4008 usage = bb_usage_xchg(src, dst);
4012 /* xor %reg,%reg only counts as a register write, the original
4013 * contents of reg are irrelevant.
4015 if (src->reg && dst->reg && src->base_rc == dst->base_rc)
4018 usage = BBOU_RSRDWD;
4022 /* The switch statement above handled all the special cases. Every
4023 * opcode should now have a usage of NOP or one of the generic cases.
4025 if (usage == BBOU_UNKNOWN || usage == BBOU_NOP) {
4027 } else if (usage >= BBOU_RS && usage <= BBOU_RSRDWSWD) {
4028 if (usage & BBOU_RS)
4029 bb_read_operand(src);
4030 if (usage & BBOU_RD)
4031 bb_read_operand(dst);
4032 if (usage & BBOU_WS)
4033 bb_write_operand(src);
4034 if (usage & BBOU_WD)
4035 bb_write_operand(dst);
4037 kdb_printf("%s: opcode not fully handled\n", __FUNCTION__);
4038 if (!KDB_DEBUG(BB)) {
4040 if (bb_decode.src.present)
4041 bb_print_operand("src", &bb_decode.src);
4042 if (bb_decode.dst.present)
4043 bb_print_operand("dst", &bb_decode.dst);
4044 if (bb_decode.dst2.present)
4045 bb_print_operand("dst2", &bb_decode.dst2);
/*
 * bb_parse_buffer: split one disassembled instruction line (held in the
 * global bb_buffer) into an optional prefix, an opcode, and up to three
 * operand strings, then parse each operand into bb_decode.{src,dst,dst2}.
 * NOTE(review): this listing is a partial extract; original lines are
 * missing between the numbered statements, so the comments here describe
 * only the code that is visible.
 */
4052 bb_parse_buffer(void)
4054 char *p, *src, *dst = NULL, *dst2 = NULL;
/* Reset the decode state before parsing this instruction. */
4057 memset(&bb_decode, 0, sizeof(bb_decode));
4058 KDB_DEBUG_BB(" '%s'\n", p);
4059 p += strcspn(p, ":"); /* skip address and function name+offset: */
4061 kdb_printf("%s: cannot find ':' in buffer '%s'\n",
4062 __FUNCTION__, bb_buffer);
4066 p += strspn(p, " \t"); /* step to opcode */
/* "(bad)" is the disassembler's marker for an undecodable byte stream. */
4067 if (strncmp(p, "(bad)", 5) == 0)
4069 /* separate any opcode prefix */
4070 if (strncmp(p, "lock", 4) == 0 ||
4071 strncmp(p, "rep", 3) == 0 ||
4072 strncmp(p, "rex", 3) == 0 ||
4073 strncmp(p, "addr", 4) == 0) {
4074 bb_decode.prefix = p;
4075 p += strcspn(p, " \t");
4077 p += strspn(p, " \t");
4079 bb_decode.opcode = p;
4080 strsep(&p, " \t"); /* step to end of opcode */
4081 if (bb_parse_opcode())
4085 p += strspn(p, " \t"); /* step to operand(s) */
4089 p = strsep(&p, " \t"); /* strip comments after operands */
4090 /* split 'src','dst' but ignore ',' inside '(' ')' */
4094 } else if (*p == ')') {
4096 } else if (*p == ',' && paren == 0) {
/* Parse whichever operand strings were found; bb_giveup aborts early. */
4105 bb_parse_operand(src, &bb_decode.src);
4107 bb_print_operand("src", &bb_decode.src);
4108 if (dst && !bb_giveup) {
4109 bb_parse_operand(dst, &bb_decode.dst);
4111 bb_print_operand("dst", &bb_decode.dst);
4113 if (dst2 && !bb_giveup) {
4114 bb_parse_operand(dst2, &bb_decode.dst2);
4116 bb_print_operand("dst2", &bb_decode.dst2);
/*
 * bb_dis_pass2: fprintf-style callback handed to the disassembler for
 * pass 2.  Accumulates formatted output into bb_buffer; when a complete
 * line (terminated by '\n') has been built, it is handed on for analysis
 * and the buffer is reset.  Partial extract - intermediate lines
 * (e.g. the va_start/va_end pairing for 'ap') are not visible here.
 */
4124 bb_dis_pass2(PTR file, const char *fmt, ...)
4127 int l = strlen(bb_buffer);
/* Append at the current end of bb_buffer, bounded by the buffer size. */
4130 vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap);
4132 if ((p = strchr(bb_buffer, '\n'))) {
4135 p += strcspn(p, ":");
/* Special-case fixup, presumably for the __switch_to call site - TODO confirm. */
4137 bb_fixup_switch_to(p);
4139 bb_buffer[0] = '\0';
/*
 * bb_printaddr_pass2: address-printing callback for the disassembler.
 * Prints the raw address, then if a nearby symbol is known, appends
 * " <symbol+0xoffset>" in the usual kernel style.
 */
4145 bb_printaddr_pass2(bfd_vma addr, disassemble_info *dip)
4147 kdb_symtab_t symtab;
4148 unsigned int offset;
4149 dip->fprintf_func(dip->stream, "0x%lx", addr);
4150 kdbnearsym(addr, &symtab);
4151 if (symtab.sym_name) {
4152 dip->fprintf_func(dip->stream, " <%s", symtab.sym_name);
/* Only print the +offset part when the address is not exactly at the symbol. */
4153 if ((offset = addr - symtab.sym_start))
4154 dip->fprintf_func(dip->stream, "+0x%x", offset);
4155 dip->fprintf_func(dip->stream, ">");
/* Set the starting register and memory state for the current bb */
/*
 * bb_start_block0_special: some assembler entry points (listed in
 * bb_special_cases) start with a non-standard register/memory state.
 * When the current function matches one of those entries, seed the
 * running register state and any known memory contents from the table.
 * Partial extract - loop bodies have missing lines.
 */
4162 bb_start_block0_special(void)
4165 short offset_address;
4166 enum bb_reg_code reg, value;
4167 struct bb_name_state *r;
/* Find the special-case entry whose address matches this function. */
4168 for (i = 0, r = bb_special_cases;
4169 i < ARRAY_SIZE(bb_special_cases);
4171 if (bb_func_start == r->address && r->fname == NULL)
4176 /* Set the running registers */
4177 for (reg = BBRG_RAX; reg < r->regs_size; ++reg) {
4178 value = r->regs[reg].value;
4179 if (test_bit(value, r->skip_regs.bits)) {
4180 /* this regs entry is not defined for this label */
4183 bb_reg_code_set_value(reg, value);
4184 bb_reg_code_set_offset(reg, r->regs[reg].offset);
4186 /* Set any memory contents, e.g. pt_regs. Adjust RSP as required. */
/* First pass: find the deepest offset so RSP can be adjusted once. */
4188 for (i = 0; i < r->mem_size; ++i) {
4189 offset_address = max_t(int,
4190 r->mem[i].offset_address + KDB_WORD_SIZE,
4193 if (bb_reg_code_offset(BBRG_RSP) > -offset_address)
4194 bb_adjust_osp(BBRG_RSP, -offset_address - bb_reg_code_offset(BBRG_RSP));
/* Second pass: record which register each memory slot holds. */
4195 for (i = 0; i < r->mem_size; ++i) {
4196 value = r->mem[i].value;
4197 if (test_bit(value, r->skip_mem.bits)) {
4198 /* this memory entry is not defined for this label */
4201 bb_memory_set_reg_value(BBRG_RSP, r->mem[i].offset_address,
/* Once saved to memory, the live register copy is no longer trusted. */
4203 bb_reg_set_undef(value);
/*
 * bb_pass2_start_block: compute the input register/memory state for basic
 * block 'number' by merging the exit states of all jmp edges that target
 * it.  Registers or memory slots that differ between any two inputs are
 * demoted to undefined/deleted - the analysis only keeps facts that hold
 * on every path into the block.  Partial extract; some control-flow lines
 * are missing between the numbered statements.
 */
4209 bb_pass2_start_block(int number)
4211 int i, j, k, first, changed;
4213 struct bb_jmp *bb_jmp;
4214 struct bb_reg_state *state;
4215 struct bb_memory_contains *c1, *c2;
/* Start from a zeroed state sized for the maximum memory entries. */
4216 bb_reg_state->mem_count = bb_reg_state_max;
4217 size = bb_reg_state_size(bb_reg_state);
4218 memset(bb_reg_state, 0, size);
4221 /* The first block is assumed to have well defined inputs */
4223 /* Some assembler labels have non-standard entry
4226 bb_start_block0_special();
4227 bb_reg_state_print(bb_reg_state);
4231 /* Merge all the input states for the current bb together */
4234 for (i = 0; i < bb_jmp_count; ++i) {
4235 bb_jmp = bb_jmp_list + i;
/* Skip edges that do not land on the current block. */
4236 if (bb_jmp->to != bb_curr->start)
4238 state = bb_jmp->state;
/* The first contributing state is copied wholesale ... */
4242 size = bb_reg_state_size(state);
4243 memcpy(bb_reg_state, state, size);
4244 KDB_DEBUG_BB(" first state %p\n", state);
4245 bb_reg_state_print(bb_reg_state);
/* ... subsequent states are merged against it. */
4250 KDB_DEBUG_BB(" merging state %p\n", state);
4251 /* Merge the register states */
4252 for (j = 0; j < ARRAY_SIZE(state->contains); ++j) {
4253 if (memcmp(bb_reg_state->contains + j,
4254 state->contains + j,
4255 sizeof(bb_reg_state->contains[0]))) {
4256 /* Different states for this register from two
4257 * or more inputs, make it undefined.
4259 if (bb_reg_state->contains[j].value ==
4261 KDB_DEBUG_BB(" ignoring %s\n",
4262 bbrg_name[j + BBRG_RAX]);
4264 bb_reg_set_undef(BBRG_RAX + j);
4270 /* Merge the memory states. This relies on both
4271 * bb_reg_state->memory and state->memory being sorted in
4272 * descending order, with undefined entries at the end.
4274 c1 = bb_reg_state->memory;
/* Walk the two sorted memory lists in lockstep. */
4277 while (j < bb_reg_state->mem_count &&
4278 k < state->mem_count) {
4279 if (c1->offset_address < c2->offset_address) {
4280 KDB_DEBUG_BB_OFFSET(c2->offset_address,
4281 " ignoring c2->offset_address ",
4287 if (c1->offset_address > c2->offset_address) {
4288 /* Memory location is not in all input states,
4289 * delete the memory location.
4291 bb_delete_memory(c1->offset_address);
4297 if (memcmp(c1, c2, sizeof(*c1))) {
4298 /* Same location, different contents, delete
4299 * the memory location.
4301 bb_delete_memory(c1->offset_address);
4302 KDB_DEBUG_BB_OFFSET(c2->offset_address,
4303 " ignoring c2->offset_address ",
/* Anything left only in the accumulated state is not common - drop it. */
4312 while (j < bb_reg_state->mem_count) {
4313 bb_delete_memory(c1->offset_address);
4320 KDB_DEBUG_BB(" final state\n");
4321 bb_reg_state_print(bb_reg_state);
/* We have reached the exit point from the current function, either a call to
 * the next function or the instruction that was about to executed when an
 * interrupt occurred. Save the current register state in bb_exit_state.
 */
4331 bb_save_exit_state(void)
/* Discard any previously saved exit state before taking a new snapshot. */
4334 debug_kfree(bb_exit_state);
4335 bb_exit_state = NULL;
4336 bb_reg_state_canonicalize();
4337 size = bb_reg_state_size(bb_reg_state);
4338 bb_exit_state = debug_kmalloc(size, GFP_ATOMIC);
4339 if (!bb_exit_state) {
4340 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
4344 memcpy(bb_exit_state, bb_reg_state, size);
/*
 * bb_pass2_do_changed_blocks: iterate over all basic blocks, reprocessing
 * any block marked 'changed' whose number of missing input states is at
 * most 'allow_missing'.  Returns non-zero if some blocks are still
 * changed (caller should retry with a larger allow_missing), 0 when all
 * blocks are settled.  Partial extract; some loop-control lines are
 * missing from this listing.
 */
4348 bb_pass2_do_changed_blocks(int allow_missing)
4350 int i, j, missing, changed, maxloops;
4352 struct bb_jmp *bb_jmp;
4353 KDB_DEBUG_BB("\n %s: allow_missing %d\n", __FUNCTION__, allow_missing);
4354 /* Absolute worst case is we have to iterate over all the basic blocks
4355 * in an "out of order" state, each iteration losing one register or
4356 * memory state. Any more loops than that is a bug. "out of order"
4357 * means that the layout of blocks in memory does not match the logic
4358 * flow through those blocks so (for example) block 27 comes before
4359 * block 2. To allow for out of order blocks, multiply maxloops by the
4362 maxloops = (KDB_INT_REGISTERS + bb_reg_state_max) * bb_count;
4366 for (i = 0; i < bb_count; ++i) {
4367 bb_curr = bb_list[i];
4368 if (!bb_curr->changed)
/* Count how many of this block's input edges have no state yet. */
4371 for (j = 0, bb_jmp = bb_jmp_list;
4374 if (bb_jmp->to == bb_curr->start &&
4378 if (missing > allow_missing)
4380 bb_curr->changed = 0;
4382 KDB_DEBUG_BB("\n bb[%d]\n", i);
4383 bb_pass2_start_block(i);
/* Disassemble and analyze every instruction in the block. */
4384 for (addr = bb_curr->start;
4385 addr <= bb_curr->end; ) {
4386 bb_curr_addr = addr;
4387 if (addr == bb_exit_addr)
4388 bb_save_exit_state();
4389 addr += kdba_id_printinsn(addr, &kdb_di);
4390 kdb_di.fprintf_func(NULL, "\n");
4394 if (!bb_exit_state) {
4395 /* ATTRIB_NORET functions are a problem with
4396 * the current gcc. Allow the trailing address
4399 if (addr == bb_exit_addr ||
4400 addr == bb_exit_addr + 1)
4401 bb_save_exit_state();
/* Fall-through blocks feed their exit state into the next block. */
4403 if (bb_curr->drop_through)
4404 bb_transfer(bb_curr->end,
4405 bb_list[i+1]->start, 1);
4407 if (maxloops-- == 0) {
4408 kdb_printf("\n\n%s maxloops reached\n",
4415 for (i = 0; i < bb_count; ++i) {
4416 bb_curr = bb_list[i];
4417 if (bb_curr->changed)
4418 return 1; /* more to do, increase allow_missing */
4420 return 0; /* all blocks done */
/* Assume that the current function is a pass through function that does not
 * refer to its register parameters. Exclude known asmlinkage functions and
 * assume the other functions actually use their registers.
 */
4429 bb_assume_pass_through(void)
4431 static int first_time = 1;
/* Known asmlinkage / special functions: never treat these as pass-through. */
4432 if (strncmp(bb_func_name, "sys_", 4) == 0 ||
4433 strncmp(bb_func_name, "compat_sys_", 11) == 0 ||
4434 strcmp(bb_func_name, "schedule") == 0 ||
4435 strcmp(bb_func_name, "do_softirq") == 0 ||
4436 strcmp(bb_func_name, "printk") == 0 ||
4437 strcmp(bb_func_name, "vprintk") == 0 ||
4438 strcmp(bb_func_name, "preempt_schedule") == 0 ||
4439 strcmp(bb_func_name, "start_kernel") == 0 ||
4440 strcmp(bb_func_name, "csum_partial") == 0 ||
4441 strcmp(bb_func_name, "csum_partial_copy_generic") == 0 ||
4442 strcmp(bb_func_name, "math_state_restore") == 0 ||
4443 strcmp(bb_func_name, "panic") == 0 ||
4444 strcmp(bb_func_name, "kdb_printf") == 0 ||
4445 strcmp(bb_func_name, "kdb_interrupt") == 0)
4447 if (bb_asmlinkage_arch())
/* Otherwise assume REGPARM register parameters were passed straight through. */
4449 bb_reg_params = REGPARM;
4451 kdb_printf(" %s has memory parameters but no register "
4452 "parameters.\n Assuming it is a 'pass "
4453 "through' function that does not refer to "
4454 "its register\n parameters and setting %d "
4455 "register parameters\n",
4456 bb_func_name, REGPARM);
4460 kdb_printf(" Assuming %s is 'pass through' with %d register "
4462 bb_func_name, REGPARM);
/*
 * NOTE(review): the function header is not visible in this extract;
 * from the body this is presumably bb_pass2(), the driver for the second
 * analysis pass - confirm against the full source.  It installs the
 * pass-2 disassembler callbacks, allocates the working register state,
 * then repeatedly processes changed blocks with an increasing tolerance
 * for missing input states until everything converges.
 * NOTE(review): 'KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)' uses bitwise-or
 * where logical-or reads more naturally; harmless for flag tests but
 * worth confirming it is intentional.
 */
4469 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4470 kdb_printf("%s: start\n", __FUNCTION__);
4472 kdb_di.fprintf_func = bb_dis_pass2;
4473 kdb_di.print_address_func = bb_printaddr_pass2;
4475 bb_reg_state = debug_kmalloc(sizeof(*bb_reg_state), GFP_ATOMIC);
4476 if (!bb_reg_state) {
4477 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
/* Seed the work list: block 0 always needs processing. */
4481 bb_list[0]->changed = 1;
4483 /* If a block does not have all its input states available then it is
4484 * possible for a register to initially appear to hold a known value,
4485 * but when other inputs are available then it becomes a variable
4486 * value. The initial false state of "known" can generate false values
4487 * for other registers and can even make it look like stack locations
4488 * are being changed.
4490 * To avoid these false positives, only process blocks which have all
4491 * their inputs defined. That gives a clean depth first traversal of
4492 * the tree, except for loops. If there are any loops, then start
4493 * processing blocks with one missing input, then two missing inputs
4496 * Absolute worst case is we have to iterate over all the jmp entries,
4497 * each iteration allowing one more missing input. Any more loops than
4498 * that is a bug. Watch out for the corner case of 0 jmp entries.
4500 for (allow_missing = 0; allow_missing <= bb_jmp_count; ++allow_missing) {
4501 if (!bb_pass2_do_changed_blocks(allow_missing))
4506 if (allow_missing > bb_jmp_count) {
4507 kdb_printf("\n\n%s maxloops reached\n",
/* A function with memory parameters implies all register params are used. */
4513 if (bb_memory_params && bb_reg_params)
4514 bb_reg_params = REGPARM;
4518 bb_assume_pass_through();
4519 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
4520 kdb_printf("%s: end bb_reg_params %d bb_memory_params %d\n",
4521 __FUNCTION__, bb_reg_params, bb_memory_params);
4522 if (bb_exit_state) {
4523 kdb_printf("%s: bb_exit_state at " kdb_bfd_vma_fmt0 "\n",
4524 __FUNCTION__, bb_exit_addr);
4525 bb_do_reg_state_print(bb_exit_state);
/*
 * NOTE(review): function header not visible in this extract; from the
 * body this is presumably bb_cleanup() - it releases every allocation
 * made during basic block analysis (block list, per-edge states, the
 * working register state and the saved exit state) and zeroes the
 * associated counters so a fresh analysis can start.
 */
4535 struct bb_reg_state *state;
4540 debug_kfree(bb_list);
4542 bb_count = bb_max = 0;
/* Edge states are reference counted; free only when the last ref drops. */
4543 for (i = 0; i < bb_jmp_count; ++i) {
4544 state = bb_jmp_list[i].state;
4545 if (state && --state->ref_count == 0)
4548 debug_kfree(bb_jmp_list);
4550 bb_jmp_count = bb_jmp_max = 0;
4551 debug_kfree(bb_reg_state);
4552 bb_reg_state = NULL;
4553 bb_reg_state_max = 0;
4554 debug_kfree(bb_exit_state);
4555 bb_exit_state = NULL;
4556 bb_reg_params = bb_memory_params = 0;
/*
 * bb_spurious_global_label: return whether func_name is one of the known
 * global labels that sit in the middle of assembler code (bb_spurious[])
 * and therefore must not be treated as a real function boundary.
 */
4561 bb_spurious_global_label(const char *func_name)
4564 for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) {
4565 if (strcmp(bb_spurious[i], func_name) == 0)
/* Given the current actual register contents plus the exit state deduced from
 * a basic block analysis of the current function, rollback the actual register
 * contents to the values they had on entry to this function.
 */
/*
 * Partial extract: lines are missing between the numbered statements, so
 * some control flow (e.g. the initialization of 'i' before line 4610) is
 * not visible here - what looks like use-before-init below is presumably
 * resolved by the missing lines; confirm against the full source.
 */
4577 bb_actual_rollback(const struct kdb_activation_record *ar)
4579 int i, offset_address;
4580 struct bb_memory_contains *c;
4581 enum bb_reg_code reg;
4582 unsigned long address, osp = 0;
4583 struct bb_actual new[ARRAY_SIZE(bb_actual)];
4586 if (!bb_exit_state) {
4587 kdb_printf("%s: no bb_exit_state, cannot rollback\n",
/* Work on a copy of the deduced exit state. */
4592 memcpy(bb_reg_state, bb_exit_state, bb_reg_state_size(bb_exit_state));
4593 memset(new, 0, sizeof(new));
4595 /* The most important register for obtaining saved state is rsp so get
4596 * its new value first. Prefer rsp if it is valid, then other
4597 * registers. Saved values of rsp in memory are unusable without a
4598 * register that points to memory.
4600 if (!bb_actual_valid(BBRG_RSP)) {
4601 kdb_printf("%s: no starting value for RSP, cannot rollback\n",
4606 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4607 kdb_printf("%s: rsp " kdb_bfd_vma_fmt0,
4608 __FUNCTION__, bb_actual_value(BBRG_RSP));
4610 if (!bb_is_osp_defined(i)) {
/* Fall back to any register that still tracks the original stack pointer. */
4611 for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
4612 if (bb_is_osp_defined(i) && bb_actual_valid(i))
4616 if (bb_is_osp_defined(i) && bb_actual_valid(i)) {
4617 osp = new[BBRG_RSP - BBRG_RAX].value =
4618 bb_actual_value(i) - bb_reg_code_offset(i);
4619 new[BBRG_RSP - BBRG_RAX].valid = 1;
4620 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4621 kdb_printf(" -> osp " kdb_bfd_vma_fmt0 "\n", osp);
4623 bb_actual_set_valid(BBRG_RSP, 0);
4624 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4625 kdb_printf(" -> undefined\n");
4626 kdb_printf("%s: no ending value for RSP, cannot rollback\n",
4632 /* Now the other registers. First look at register values that have
4633 * been copied to other registers.
4635 for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
4636 reg = bb_reg_code_value(i);
4637 if (bb_is_int_reg(reg)) {
4638 new[reg - BBRG_RAX] = bb_actual[i - BBRG_RAX];
4639 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
4640 kdb_printf("%s: %s is in %s ",
4644 if (bb_actual_valid(i))
4645 kdb_printf(" -> " kdb_bfd_vma_fmt0 "\n",
4646 bb_actual_value(i));
4648 kdb_printf("(invalid)\n");
4653 /* Finally register values that have been saved on stack */
4654 for (i = 0, c = bb_reg_state->memory;
4655 i < bb_reg_state->mem_count;
4657 offset_address = c->offset_address;
4659 if (!bb_is_int_reg(reg))
4661 address = osp + offset_address;
/* Only trust stack slots inside the logical extent of this stack. */
4662 if (address < ar->stack.logical_start ||
4663 address >= ar->stack.logical_end) {
4664 new[reg - BBRG_RAX].value = 0;
4665 new[reg - BBRG_RAX].valid = 0;
4666 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4667 kdb_printf("%s: %s -> undefined\n",
4671 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
4672 kdb_printf("%s: %s -> *(osp",
4675 KDB_DEBUG_BB_OFFSET_PRINTF(offset_address, "", " ");
4676 kdb_printf(kdb_bfd_vma_fmt0, address);
4678 new[reg - BBRG_RAX].value = *(bfd_vma *)address;
4679 new[reg - BBRG_RAX].valid = 1;
4680 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4681 kdb_printf(") = " kdb_bfd_vma_fmt0 "\n",
4682 new[reg - BBRG_RAX].value);
/* Commit the rolled-back register set. */
4686 memcpy(bb_actual, new, sizeof(bb_actual));
/* Return true if the current function is an interrupt handler */
/*
 * Recognition is two-stage: first a table of known hardware handlers,
 * then a heuristic - if the next instruction is a jmp (rel8 or rel32)
 * to one of the common interrupt exit paths, this is a handler.
 */
4692 bb_interrupt_handler(kdb_machreg_t rip)
4694 unsigned long disp8, disp32, target, addr = (unsigned long)rip;
4695 unsigned char code[5];
4698 for (i = 0; i < ARRAY_SIZE(bb_hardware_handlers); ++i)
4699 if (strcmp(bb_func_name, bb_hardware_handlers[i]) == 0)
4702 /* Given the large number of interrupt handlers, it is easiest to look
4703 * at the next instruction and see if it is a jmp to the common exit
4706 if (kdb_getarea(code, addr) ||
4707 kdb_getword(&disp32, addr+1, 4) ||
4708 kdb_getword(&disp8, addr+1, 1))
4709 return 0; /* not a valid code address */
/* 0xe9: jmp with 32-bit relative displacement (5-byte instruction). */
4710 if (code[0] == 0xe9) {
4711 target = addr + (s32) disp32 + 5; /* jmp disp32 */
4712 if (target == bb_ret_from_intr ||
4713 target == bb_common_interrupt ||
4714 target == bb_error_entry)
/* 0xeb: jmp with 8-bit relative displacement (2-byte instruction). */
4717 if (code[0] == 0xeb) {
4718 target = addr + (s8) disp8 + 2; /* jmp disp8 */
4719 if (target == bb_ret_from_intr ||
4720 target == bb_common_interrupt ||
4721 target == bb_error_entry)
/* Copy argument information that was deduced by the basic block analysis and
 * rollback into the kdb stack activation record.
 */
4733 bb_arguments(struct kdb_activation_record *ar)
4736 enum bb_reg_code reg;
4738 ar->args = bb_reg_params + bb_memory_params;
4739 bitmap_zero(ar->valid.bits, KDBA_MAXARGS);
/* Register-passed arguments come from the rolled-back register values. */
4740 for (i = 0; i < bb_reg_params; ++i) {
4741 reg = bb_param_reg[i];
4742 if (bb_actual_valid(reg)) {
4743 ar->arg[i] = bb_actual_value(reg);
4744 set_bit(i, ar->valid.bits);
/* Memory-passed arguments need a valid RSP to be read off the stack. */
4747 if (!bb_actual_valid(BBRG_RSP))
4749 rsp = bb_actual_value(BBRG_RSP);
4750 for (i = bb_reg_params; i < ar->args; ++i) {
4751 rsp += KDB_WORD_SIZE;
4752 if (kdb_getarea(ar->arg[i], rsp) == 0)
4753 set_bit(i, ar->valid.bits);
/* Given an exit address from a function, decompose the entire function into
 * basic blocks and determine the register state at the exit point.
 */
4762 kdb_bb(unsigned long exit)
4764 kdb_symtab_t symtab;
4765 if (!kdbnearsym(exit, &symtab)) {
4766 kdb_printf("%s: address " kdb_bfd_vma_fmt0 " not recognised\n",
4767 __FUNCTION__, exit);
4771 bb_exit_addr = exit;
4772 bb_mod_name = symtab.mod_name;
4773 bb_func_name = symtab.sym_name;
4774 bb_func_start = symtab.sym_start;
4775 bb_func_end = symtab.sym_end;
4776 /* Various global labels exist in the middle of assembler code and have
4777 * a non-standard state. Ignore these labels and use the start of the
4778 * previous label instead.
/* Walk backwards past spurious labels to the real containing function. */
4780 while (bb_spurious_global_label(symtab.sym_name)) {
4781 if (!kdbnearsym(symtab.sym_start - 1, &symtab))
4783 bb_func_start = symtab.sym_start;
4785 bb_mod_name = symtab.mod_name;
4786 bb_func_name = symtab.sym_name;
4787 bb_func_start = symtab.sym_start;
4788 /* Ignore spurious labels past this point and use the next non-spurious
4789 * label as the end point.
4791 if (kdbnearsym(bb_func_end, &symtab)) {
4792 while (bb_spurious_global_label(symtab.sym_name)) {
4793 bb_func_end = symtab.sym_end;
4794 if (!kdbnearsym(symtab.sym_end + 1, &symtab))
/* Report the failing address when analysis gives up partway through. */
4802 kdb_printf("%s: " kdb_bfd_vma_fmt0
4803 " [%s]%s failed at " kdb_bfd_vma_fmt0 "\n\n",
4805 bb_mod_name, bb_func_name, bb_curr_addr);
/*
 * kdb_bb1: kdb command handler - run a basic block analysis on the single
 * address supplied as an argument, with BB debug output forced on.
 */
4809 kdb_bb1(int argc, const char **argv)
4811 int diag, nextarg = 1;
4813 unsigned long offset;
4815 bb_cleanup(); /* in case previous command was interrupted */
4816 kdba_id_init(&kdb_di);
4818 return KDB_ARGCOUNT;
4819 diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
/* Temporarily enable BB debug tracing for this one command. */
4825 kdb_flags |= KDB_DEBUG_FLAG_BB << KDB_DEBUG_FLAG_SHIFT;
4828 kdb_restore_flags();
4829 kdbnearsym_cleanup();
/* Run a basic block analysis on every function in the base kernel. Used as a
 * global sanity check to find errors in the basic block code.
 */
4838 kdb_bb_all(int argc, const char **argv)
4841 const char *symname;
4843 int i, max_errors = 20;
4844 struct bb_name_state *r;
/* Print the build configuration first so failure reports are reproducible. */
4845 kdb_printf("%s: build variables:"
4846 " CCVERSION \"" __stringify(CCVERSION) "\""
4847 #ifdef CONFIG_X86_64
4850 #ifdef CONFIG_4KSTACKS
4853 #ifdef CONFIG_PREEMPT
4859 #ifdef CONFIG_FRAME_POINTER
4860 " CONFIG_FRAME_POINTER"
4862 #ifdef CONFIG_TRACE_IRQFLAGS
4863 " CONFIG_TRACE_IRQFLAGS"
4865 #ifdef CONFIG_HIBERNATION
4866 " CONFIG_HIBERNATION"
4868 #ifdef CONFIG_KPROBES
4874 #ifdef CONFIG_MATH_EMULATION
4875 " CONFIG_MATH_EMULATION"
4877 #ifdef CONFIG_PARAVIRT_XEN
4880 #ifdef CONFIG_DEBUG_INFO
4881 " CONFIG_DEBUG_INFO"
4886 " REGPARM=" __stringify(REGPARM)
4887 "\n\n", __FUNCTION__);
/* Sanity check the hand-maintained tables against kallsyms. */
4888 for (i = 0, r = bb_special_cases;
4889 i < ARRAY_SIZE(bb_special_cases);
4892 kdb_printf("%s: cannot find special_case name %s\n",
4893 __FUNCTION__, r->name);
4895 for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) {
4896 if (!kallsyms_lookup_name(bb_spurious[i]))
4897 kdb_printf("%s: cannot find spurious label %s\n",
4898 __FUNCTION__, bb_spurious[i]);
/* Find the start of the kernel text segment. */
4900 while ((symname = kdb_walk_kallsyms(&pos))) {
4901 if (strcmp(symname, "_stext") == 0 ||
4902 strcmp(symname, "stext") == 0)
4906 kdb_printf("%s: cannot find _stext\n", __FUNCTION__);
4909 kdba_id_init(&kdb_di);
/* Walk every text symbol up to _etext and analyze it. */
4911 while ((symname = kdb_walk_kallsyms(&pos))) {
4912 if (strcmp(symname, "_etext") == 0)
4916 /* x86_64 has some 16 bit functions that appear between stext
4917 * and _etext. Skip them.
4919 if (strcmp(symname, "verify_cpu") == 0 ||
4920 strcmp(symname, "verify_cpu_noamd") == 0 ||
4921 strcmp(symname, "verify_cpu_sse_test") == 0 ||
4922 strcmp(symname, "verify_cpu_no_longmode") == 0 ||
4923 strcmp(symname, "verify_cpu_sse_ok") == 0 ||
4924 strcmp(symname, "mode_seta") == 0 ||
4925 strcmp(symname, "bad_address") == 0 ||
4926 strcmp(symname, "wakeup_code") == 0 ||
4927 strcmp(symname, "wakeup_code_start") == 0 ||
4928 strcmp(symname, "wakeup_start") == 0 ||
4929 strcmp(symname, "wakeup_32_vector") == 0 ||
4930 strcmp(symname, "wakeup_32") == 0 ||
4931 strcmp(symname, "wakeup_long64_vector") == 0 ||
4932 strcmp(symname, "wakeup_long64") == 0 ||
4933 strcmp(symname, "gdta") == 0 ||
4934 strcmp(symname, "idt_48a") == 0 ||
4935 strcmp(symname, "gdt_48a") == 0 ||
4936 strcmp(symname, "bogus_real_magic") == 0 ||
4937 strcmp(symname, "bogus_64_magic") == 0 ||
4938 strcmp(symname, "no_longmode") == 0 ||
4939 strcmp(symname, "mode_set") == 0 ||
4940 strcmp(symname, "mode_seta") == 0 ||
4941 strcmp(symname, "setbada") == 0 ||
4942 strcmp(symname, "check_vesa") == 0 ||
4943 strcmp(symname, "check_vesaa") == 0 ||
4944 strcmp(symname, "_setbada") == 0 ||
4945 strcmp(symname, "wakeup_stack_begin") == 0 ||
4946 strcmp(symname, "wakeup_stack") == 0 ||
4947 strcmp(symname, "wakeup_level4_pgt") == 0 ||
4948 strcmp(symname, "acpi_copy_wakeup_routine") == 0 ||
4949 strcmp(symname, "wakeup_end") == 0 ||
4950 strcmp(symname, "do_suspend_lowlevel_s4bios") == 0 ||
4951 strcmp(symname, "do_suspend_lowlevel") == 0 ||
4952 strcmp(symname, "wakeup_pmode_return") == 0 ||
4953 strcmp(symname, "restore_registers") == 0)
4955 /* __kprobes_text_end contains branches to the middle of code,
4956 * with undefined states.
4958 if (strcmp(symname, "__kprobes_text_end") == 0)
4960 /* Data in the middle of the text segment :( */
4961 if (strcmp(symname, "level2_kernel_pgt") == 0 ||
4962 strcmp(symname, "level3_kernel_pgt") == 0)
4964 if (bb_spurious_global_label(symname))
4966 if ((addr = kallsyms_lookup_name(symname)) == 0)
4968 // kdb_printf("BB " kdb_bfd_vma_fmt0 " %s\n", addr, symname);
4969 bb_cleanup(); /* in case previous command was interrupted */
4970 kdbnearsym_cleanup();
/* Keep the NMI watchdog happy during this long-running scan. */
4972 touch_nmi_watchdog();
4974 if (max_errors-- == 0) {
4975 kdb_printf("%s: max_errors reached, giving up\n",
4985 kdbnearsym_cleanup();
4990 *=============================================================================
4992 * Everything above this line is doing basic block analysis, function by
4993 * function. Everything below this line uses the basic block data to do a
4994 * complete backtrace over all functions that are used by a process.
4996 *=============================================================================
5000 /*============================================================================*/
5002 /* Most of the backtrace code and data is common to x86_64 and i386. This */
5003 /* large ifdef contains all of the differences between the two architectures. */
5005 /* Make sure you update the correct section of this ifdef. */
5007 /*============================================================================*/
5014 #ifdef CONFIG_X86_64
5016 #define ARCH_NORMAL_PADDING (16 * 8)
5018 /* x86_64 has multiple alternate stacks, with different sizes and different
5019 * offsets to get the link from one stack to the next. All of the stacks are
5020 * in the per_cpu area: either in the orig_ist or irq_stack_ptr. Debug events
5021 * can even have multiple nested stacks within the single physical stack,
5022 * each nested stack has its own link and some of those links are wrong.
5024 * Consistent it's not!
5026 * Do not assume that these stacks are aligned on their size.
5028 #define INTERRUPT_STACK (N_EXCEPTION_STACKS + 1)
/*
 * kdba_get_stack_info_alternate (x86_64 variant): determine whether addr
 * falls inside one of the alternate (exception or interrupt) stacks and,
 * if so, fill in ar->stack with its physical/logical bounds, the link to
 * the next stack, and a descriptive id.  When cpu is unknown the search
 * covers every online cpu.  Partial extract; some declarations and the
 * enclosing control flow around the per-cpu search are not visible.
 */
5030 kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
5031 struct kdb_activation_record *ar)
5035 unsigned int total_size;
5036 unsigned int nested_size;
/* Table of the alternate stacks, indexed by IST slot (INTERRUPT_STACK last). */
5038 } *sdp, stack_data[] = {
5039 [STACKFAULT_STACK - 1] = { "stackfault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5040 [DOUBLEFAULT_STACK - 1] = { "doublefault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5041 [NMI_STACK - 1] = { "nmi", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5042 [DEBUG_STACK - 1] = { "debug", DEBUG_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5043 [MCE_STACK - 1] = { "machine check", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5044 [INTERRUPT_STACK - 1] = { "interrupt", IRQ_STACK_SIZE, IRQ_STACK_SIZE, IRQ_STACK_SIZE - sizeof(void *) },
5046 unsigned long total_start = 0, total_size, total_end;
5048 extern unsigned long kdba_orig_ist(int, int);
5050 for (sd = 0, sdp = stack_data;
5051 sd < ARRAY_SIZE(stack_data);
5053 total_size = sdp->total_size;
5055 continue; /* in case stack_data[] has any holes */
5057 /* Arbitrary address which can be on any cpu, see if it
5058 * falls within any of the alternate stacks
5061 for_each_online_cpu(c) {
/* Interrupt stack top comes from irq_stack_ptr; IST stacks from orig_ist. */
5062 if (sd == INTERRUPT_STACK - 1)
5063 total_end = (unsigned long)per_cpu(irq_stack_ptr, c);
5065 total_end = per_cpu(orig_ist, c).ist[sd];
5066 total_start = total_end - total_size;
5067 if (addr >= total_start && addr < total_end) {
5076 /* Only check the supplied or found cpu */
5077 if (sd == INTERRUPT_STACK - 1)
5078 total_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
5080 total_end = per_cpu(orig_ist, cpu).ist[sd];
5081 total_start = total_end - total_size;
5082 if (addr >= total_start && addr < total_end) {
5089 /* find which nested stack the address is in */
5090 while (addr > total_start + sdp->nested_size)
5091 total_start += sdp->nested_size;
5092 ar->stack.physical_start = total_start;
5093 ar->stack.physical_end = total_start + sdp->nested_size;
5094 ar->stack.logical_start = total_start;
5095 ar->stack.logical_end = total_start + sdp->next;
/* The word at logical_end links to the previous stack. */
5096 ar->stack.next = *(unsigned long *)ar->stack.logical_end;
5097 ar->stack.id = sdp->id;
5099 /* Nasty: when switching to the interrupt stack, the stack state of the
5100 * caller is split over two stacks, the original stack and the
5101 * interrupt stack. One word (the previous frame pointer) is stored on
5102 * the interrupt stack, the rest of the interrupt data is in the old
5103 * frame. To make the interrupted stack state look as though it is
5104 * contiguous, copy the missing word from the interrupt stack to the
5105 * original stack and adjust the new stack pointer accordingly.
5108 if (sd == INTERRUPT_STACK - 1) {
5109 *(unsigned long *)(ar->stack.next - KDB_WORD_SIZE) =
5111 ar->stack.next -= KDB_WORD_SIZE;
5115 /* rip is not in the thread struct for x86_64. We know that the stack value
5116 * was saved in schedule near the label thread_return. Setting rip to
5117 * thread_return lets the stack trace find that we are in schedule and
5118 * correctly decode its prologue.
/* Return a synthetic rip for a blocked x86_64 task.
 * NOTE(review): bb_thread_return is resolved via kallsyms_lookup_name() at
 * module init (see kdba_bt_x86_init below); it will be 0 if the
 * "thread_return" symbol is absent — TODO confirm callers tolerate that.
 * The task pointer p is unused here; the i386 variant reads p->thread.ip.
 */
5121 static kdb_machreg_t
5122 kdba_bt_stack_rip(const struct task_struct *p)
5124 return bb_thread_return;
5127 #else /* !CONFIG_X86_64 */
5129 #define ARCH_NORMAL_PADDING (19 * 4)
5131 #ifdef CONFIG_4KSTACKS
5132 static struct thread_info **kdba_hardirq_ctx, **kdba_softirq_ctx;
5133 #endif /* CONFIG_4KSTACKS */
5135 /* On a 4K stack kernel, hardirq_ctx and softirq_ctx are [NR_CPUS] arrays. The
5136 * first element of each per-cpu stack is a struct thread_info.
/* i386 variant: decide whether addr lies in a per-cpu hardirq/softirq stack
 * and, if so, fill in ar->stack. With CONFIG_4KSTACKS disabled this body is
 * compiled out and ar->stack is left untouched (caller pre-zeroes it).
 */
5139 kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
5140 struct kdb_activation_record *ar)
5142 #ifdef CONFIG_4KSTACKS
5143 struct thread_info *tinfo;
/* Round addr down to the base of its THREAD_SIZE-aligned stack. */
5144 tinfo = (struct thread_info *)(addr & -THREAD_SIZE);
5146 /* Arbitrary address, see if it falls within any of the irq
/* NOTE(review): this loop appears to handle the cpu < 0 case by scanning
 * every online cpu for a matching irq stack — confirm against the lines
 * elided from this listing. */
5150 for_each_online_cpu(cpu) {
5151 if (tinfo == kdba_hardirq_ctx[cpu] ||
5152 tinfo == kdba_softirq_ctx[cpu]) {
/* Supplied (or just discovered) cpu: check its two irq stacks. */
5160 if (tinfo == kdba_hardirq_ctx[cpu] ||
5161 tinfo == kdba_softirq_ctx[cpu]) {
5162 ar->stack.physical_start = (kdb_machreg_t)tinfo;
5163 ar->stack.physical_end = ar->stack.physical_start + THREAD_SIZE;
/* The leading struct thread_info is not usable stack space. */
5164 ar->stack.logical_start = ar->stack.physical_start +
5165 sizeof(struct thread_info);
5166 ar->stack.logical_end = ar->stack.physical_end;
/* previous_esp links back to the interrupted stack. */
5167 ar->stack.next = tinfo->previous_esp;
5168 if (tinfo == kdba_hardirq_ctx[cpu])
5169 ar->stack.id = "hardirq_ctx";
5171 ar->stack.id = "softirq_ctx";
5173 #endif /* CONFIG_4KSTACKS */
5176 /* rip is in the thread struct for i386 */
/* Return the saved instruction pointer for a blocked i386 task, taken
 * directly from the task's thread struct. */
5178 static kdb_machreg_t
5179 kdba_bt_stack_rip(const struct task_struct *p)
5181 return p->thread.ip;
5184 #endif /* CONFIG_X86_64 */
5186 /* Given an address which claims to be on a stack, an optional cpu number and
5187 * an optional task address, get information about the stack.
5189 * t == NULL, cpu < 0 indicates an arbitrary stack address with no associated
5190 * struct task, the address can be in an alternate stack or any task's normal
5193 * t != NULL, cpu >= 0 indicates a running task, the address can be in an
5194 * alternate stack or that task's normal stack.
5196 * t == NULL, cpu < 0 indicates a blocked task, the address can only be in that
5197 * task's normal stack.
5199 * t == NULL, cpu >= 0 is not a valid combination.
/* On success ar->stack is filled in (physical/logical bounds, next link,
 * id string); on failure it is left zeroed by the memset below, which is
 * what callers test via ar->stack.physical_start / logical_start. */
5203 kdba_get_stack_info(kdb_machreg_t rsp, int cpu,
5204 struct kdb_activation_record *ar,
5205 const struct task_struct *t)
5207 struct thread_info *tinfo;
5208 struct task_struct *g, *p;
/* Zero first so an unmatched address reports "no stack". */
5209 memset(&ar->stack, 0, sizeof(ar->stack));
5211 kdb_printf("%s: " RSP "=0x%lx cpu=%d task=%p\n",
5212 __FUNCTION__, rsp, cpu, t);
/* Alternate (irq/exception) stacks are only possible for a running task
 * or an arbitrary address; a blocked task (t && cpu < 0) skips this. */
5213 if (t == NULL || cpu >= 0) {
5214 kdba_get_stack_info_alternate(rsp, cpu, ar);
5215 if (ar->stack.logical_start)
/* Not an alternate stack: treat as a normal THREAD_SIZE-aligned task
 * stack whose base holds struct thread_info. */
5218 rsp &= -THREAD_SIZE;
5219 tinfo = (struct thread_info *)rsp;
5221 /* Arbitrary stack address without an associated task, see if
5222 * it falls within any normal process stack, including the idle
5225 kdb_do_each_thread(g, p) {
5226 if (tinfo == task_thread_info(p)) {
5230 } kdb_while_each_thread(g, p);
/* NOTE(review): this second scan presumably checks each cpu's idle
 * task, which kdb_do_each_thread does not cover — the assignment of p
 * is in lines elided from this listing; confirm there. */
5231 for_each_online_cpu(cpu) {
5233 if (tinfo == task_thread_info(p)) {
5240 kdb_printf("%s: found task %p\n", __FUNCTION__, t);
5241 } else if (cpu >= 0) {
/* Running task: cross-check against kdb's per-cpu running process. */
5243 struct kdb_running_process *krp = kdb_running_process + cpu;
5244 if (krp->p != t || tinfo != task_thread_info(t))
5247 kdb_printf("%s: running task %p\n", __FUNCTION__, t);
/* Blocked task: address must be on that task's own stack. */
5250 if (tinfo != task_thread_info(t))
5253 kdb_printf("%s: blocked task %p\n", __FUNCTION__, t);
5256 ar->stack.physical_start = rsp;
5257 ar->stack.physical_end = rsp + THREAD_SIZE;
5258 ar->stack.logical_start = rsp + sizeof(struct thread_info);
/* ARCH_NORMAL_PADDING reserves the arch-specific area at the top of
 * the stack (e.g. 19*4 bytes on i386, per the #define above). */
5259 ar->stack.logical_end = ar->stack.physical_end - ARCH_NORMAL_PADDING;
5261 ar->stack.id = "normal";
/* Debug dump of the resolved stack, gated on KDB_DEBUG(ARA). */
5264 if (ar->stack.physical_start && KDB_DEBUG(ARA)) {
5265 kdb_printf("%s: ar->stack\n", __FUNCTION__);
5266 kdb_printf(" physical_start=0x%lx\n", ar->stack.physical_start);
5267 kdb_printf(" physical_end=0x%lx\n", ar->stack.physical_end);
5268 kdb_printf(" logical_start=0x%lx\n", ar->stack.logical_start);
5269 kdb_printf(" logical_end=0x%lx\n", ar->stack.logical_end);
5270 kdb_printf(" next=0x%lx\n", ar->stack.next);
5271 kdb_printf(" id=%s\n", ar->stack.id);
5272 kdb_printf(" set MDCOUNT %ld\n",
5273 (ar->stack.physical_end - ar->stack.physical_start) /
5275 kdb_printf(" mds " kdb_machreg_fmt0 "\n",
5276 ar->stack.physical_start);
/* Print one backtrace line: stack pointer, symbolic rip, then (optionally)
 * up to argcount argument values from ar, marking any whose value could not
 * be recovered as "invalid". With BTSYMARG set, arguments that resolve to
 * kernel symbols are also printed symbolically on extra lines.
 */
5281 bt_print_one(kdb_machreg_t rip, kdb_machreg_t rsp,
5282 const struct kdb_activation_record *ar,
5283 const kdb_symtab_t *symtab, int argcount)
/* Environment tunables: BTSYMARG = symbolize argument values,
 * NOSECT = suppress section information. */
5288 kdbgetintenv("BTSYMARG", &btsymarg);
5289 kdbgetintenv("NOSECT", &nosect);
5291 kdb_printf(kdb_machreg_fmt0, rsp);
5292 kdb_symbol_print(rip, symtab,
5293 KDB_SP_SPACEB|KDB_SP_VALUE);
5294 if (argcount && ar->args) {
5295 int i, argc = ar->args;
/* Clamp to the caller-requested maximum number of arguments. */
5297 if (argc > argcount)
5299 for (i = 0; i < argc; i++) {
/* ar->valid.bits flags which argument slots were recovered. */
5302 if (test_bit(i, ar->valid.bits))
5303 kdb_printf("0x%lx", ar->arg[i]);
5305 kdb_printf("invalid");
5310 if (symtab->sym_name) {
/* Section info (suppressed by NOSECT in elided lines above). */
5314 if (symtab->sec_name && symtab->sec_start)
5315 kdb_printf(" 0x%lx 0x%lx",
5316 symtab->sec_start, symtab->sec_end);
5317 kdb_printf(" 0x%lx 0x%lx\n",
5318 symtab->sym_start, symtab->sym_end);
/* Second pass: print symbol names for argument values, if requested. */
5321 if (argcount && ar->args && btsymarg) {
5322 int i, argc = ar->args;
5323 kdb_symtab_t arg_symtab;
5324 for (i = 0; i < argc; i++) {
5325 kdb_machreg_t arg = ar->arg[i];
5326 if (test_bit(i, ar->valid.bits) &&
5327 kdbnearsym(arg, &arg_symtab)) {
5328 kdb_printf(" ARG %2d ", i);
5329 kdb_symbol_print(arg, &arg_symtab,
5330 KDB_SP_DEFAULT|KDB_SP_NEWLINE);
/* Switch the backtrace to the next stack in the chain (ar->stack.next),
 * updating *rsp and the tracked BBRG_RSP value. Leaving the interrupt stack
 * clears *suppress (see comment below). Cannot-resolve is reported but not
 * fatal to the caller.
 */
5337 kdba_bt_new_stack(struct kdb_activation_record *ar, kdb_machreg_t *rsp,
5338 int *count, int *suppress)
5340 /* Nasty: save_args builds a partial pt_regs, with r15 through
5341 * rbx not being filled in. It passes struct pt_regs* to do_IRQ (in
5342 * rdi) but the stack pointer is not adjusted to account for r15
5343 * through rbx. This has two effects :-
5345 * (1) struct pt_regs on an external interrupt actually overlaps with
5346 * the local stack area used by do_IRQ. Not only are r15-rbx
5347 * undefined, the area that claims to hold their values can even
5348 * change as the irq is processed.
5350 * (2) The back stack pointer saved for the new frame is not pointing
5351 * at pt_regs, it is pointing at rbx within the pt_regs passed to
5354 * There is nothing that I can do about (1) but I have to fix (2)
5355 * because kdb backtrace looks for the "start" address of pt_regs as it
5356 * walks back through the stacks. When switching from the interrupt
5357 * stack to another stack, we have to assume that pt_regs has been
5358 * seen and turn off backtrace suppression.
5360 int probable_pt_regs = strcmp(ar->stack.id, "interrupt") == 0;
/* Follow the saved link to the previous stack. */
5361 *rsp = ar->stack.next;
5363 kdb_printf("new " RSP "=" kdb_machreg_fmt0 "\n", *rsp);
5364 bb_actual_set_value(BBRG_RSP, *rsp);
/* Re-resolve stack bounds for the new rsp; no task/cpu association. */
5365 kdba_get_stack_info(*rsp, -1, ar, NULL);
5366 if (!ar->stack.physical_start) {
5367 kdb_printf("+++ Cannot resolve next stack\n");
5368 } else if (!*suppress) {
5369 kdb_printf(" ======================= <%s>\n",
5373 if (probable_pt_regs)
5381 * addr Address provided to 'bt' command, if any.
5383 * p Pointer to task for 'btp' command.
5387 * zero for success, a kdb diagnostic if error
5391 * Ultimately all the bt* commands come through this routine. If
5392 * old_style is 0 then it uses the basic block analysis to get an accurate
5393 * backtrace with arguments, otherwise it falls back to the old method of
5394 * printing anything on stack that looks like a kernel address.
5396 * Allowing for the stack data pushed by the hardware is tricky. We
5397 * deduce the presence of hardware pushed data by looking for interrupt
5398 * handlers, either by name or by the code that they contain. This
5399 * information must be applied to the next function up the stack, because
5400 * the hardware data is above the saved rip for the interrupted (next)
5403 * To make things worse, the amount of data pushed is arch specific and
5404 * may depend on the rsp for the next function, not the current function.
5405 * The number of bytes pushed by hardware cannot be calculated until we
5406 * are actually processing the stack for the interrupted function and have
5409 * It is also possible for an interrupt to occur in user space and for the
5410 * interrupt handler to also be interrupted. Check the code selector
5411 * whenever the previous function is an interrupt handler and stop
5412 * backtracing if the interrupt was not in kernel space.
5416 kdba_bt_stack(kdb_machreg_t addr, int argcount, const struct task_struct *p,
5419 struct kdb_activation_record ar;
5420 kdb_machreg_t rip = 0, rsp = 0, prev_rsp, cs;
5421 kdb_symtab_t symtab;
5422 int rip_at_rsp = 0, count = 0, btsp = 0, suppress,
5423 interrupt_handler = 0, prev_interrupt_handler = 0, hardware_pushed,
5425 struct pt_regs *regs = NULL;
5427 kdbgetintenv("BTSP", &btsp);
5429 memset(&ar, 0, sizeof(ar));
5431 kdb_printf("Using old style backtrace, unreliable with no arguments\n");
5434 * The caller may have supplied an address at which the stack traceback
5435 * operation should begin. This address is assumed by this code to
5436 * point to a return address on the stack to be traced back.
5438 * Warning: type in the wrong address and you will get garbage in the
/* Case 1: explicit address from 'bt <addr>' — read the saved rip at rsp. */
5443 kdb_getword(&rip, rsp, sizeof(rip));
5446 kdba_get_stack_info(rsp, -1, &ar, NULL);
/* Case 2: task running on a cpu — use its kdb-saved register state. */
5449 struct kdb_running_process *krp =
5450 kdb_running_process + task_cpu(p);
5455 krp->seqno >= kdb_seqno - 1 &&
5456 !KDB_NULL_REGS(regs)) {
5457 /* valid saved state, continue processing */
5460 ("Process did not save state, cannot backtrace\n");
5464 kdba_getregcontents(XCS, regs, &cs);
/* Only kernel-mode stacks can be walked. */
5465 if ((cs & 0xffff) != __KERNEL_CS) {
5466 kdb_printf("Stack is not in kernel space, backtrace not available\n");
5469 rip = krp->arch.ARCH_RIP;
5470 rsp = krp->arch.ARCH_RSP;
5471 kdba_get_stack_info(rsp, kdb_process_cpu(p), &ar, p);
5473 /* Not on cpu, assume blocked. Blocked tasks do not
5474 * have pt_regs. p->thread contains some data, alas
5475 * what it contains differs between i386 and x86_64.
5477 rip = kdba_bt_stack_rip(p);
5480 kdba_get_stack_info(rsp, -1, &ar, p);
5483 if (!ar.stack.physical_start) {
5484 kdb_printf(RSP "=0x%lx is not in a valid kernel stack, backtrace not available\n",
/* Seed the basic-block register state: only RSP is known at the start. */
5488 memset(&bb_actual, 0, sizeof(bb_actual));
5489 bb_actual_set_value(BBRG_RSP, rsp);
5490 bb_actual_set_valid(BBRG_RSP, 1);
5492 kdb_printf(RSP "%*s" RIP "%*sFunction (args)\n",
5493 2*KDB_WORD_SIZE, " ",
5494 2*KDB_WORD_SIZE, " ");
5495 if (ar.stack.next && !suppress)
5496 kdb_printf(" ======================= <%s>\n",
5500 /* Run through all the stacks */
5501 while (ar.stack.physical_start) {
5503 rip = *(kdb_machreg_t *)rsp;
5504 /* I wish that gcc was fixed to include a nop
5505 * instruction after ATTRIB_NORET functions. The lack
5506 * of a nop means that the return address points to the
5507 * start of next function, so fudge it to point to one
5510 * No, we cannot just decrement all rip values.
5511 * Sometimes an rip legally points to the start of a
5512 * function, e.g. interrupted code or hand crafted
5516 kdbnearsym(rip, &symtab);
5517 if (rip == symtab.sym_start) {
5520 kdb_printf("\tprev_noret, " RIP
5525 kdbnearsym(rip, &symtab);
5527 if (__kernel_text_address(rip) && !suppress) {
5528 bt_print_one(rip, rsp, &ar, &symtab, 0);
/* Reaching the saved pt_regs means real frames follow: stop suppressing. */
5531 if (rsp == (unsigned long)regs) {
5532 if (ar.stack.next && suppress)
5533 kdb_printf(" ======================= <%s>\n",
5540 if (rsp >= ar.stack.logical_end) {
5543 kdba_bt_new_stack(&ar, &rsp, &count, &suppress);
5548 /* Start each analysis with no dynamic data from the
5549 * previous kdb_bb() run.
5555 prev_interrupt_handler = interrupt_handler;
5556 interrupt_handler = bb_interrupt_handler(rip);
/* If the previous frame was an interrupt handler, account for the
 * hardware-pushed data above the saved rip (arch specific size). */
5559 if (prev_interrupt_handler) {
5560 cs = *((kdb_machreg_t *)rsp + 1) & 0xffff;
5562 bb_hardware_pushed_arch(rsp, &ar);
5565 hardware_pushed = 0;
5567 rsp += sizeof(rip) + hardware_pushed;
5569 kdb_printf("%s: " RSP " "
5571 " -> " kdb_machreg_fmt0
5572 " hardware_pushed %d"
5573 " prev_interrupt_handler %d"
5579 prev_interrupt_handler,
5581 if (rsp >= ar.stack.logical_end &&
5583 kdba_bt_new_stack(&ar, &rsp, &count,
5588 bb_actual_set_value(BBRG_RSP, rsp);
/* Replay the function's basic blocks to recover register state. */
5593 bb_actual_rollback(&ar);
/* Sanity: a frame's rollback must never move rsp downwards. */
5596 if (bb_actual_value(BBRG_RSP) < rsp) {
5597 kdb_printf("%s: " RSP " is going backwards, "
5598 kdb_machreg_fmt0 " -> "
5599 kdb_machreg_fmt0 "\n",
5602 bb_actual_value(BBRG_RSP));
5608 bt_print_one(rip, prev_rsp, &ar, &symtab, argcount);
5611 /* Functions that terminate the backtrace */
5612 if (strcmp(bb_func_name, "cpu_idle") == 0 ||
5613 strcmp(bb_func_name, "child_rip") == 0)
5615 if (rsp >= ar.stack.logical_end &&
5618 if (rsp <= (unsigned long)regs &&
5619 bb_actual_value(BBRG_RSP) > (unsigned long)regs) {
5620 if (ar.stack.next && suppress)
5621 kdb_printf(" ======================= <%s>\n",
/* Interrupt arrived from user space: nothing more to walk. */
5626 if (cs != __KERNEL_CS) {
5627 kdb_printf("Reached user space\n");
5630 rsp = bb_actual_value(BBRG_RSP);
5632 prev_noret = bb_noret(bb_func_name);
5639 kdbnearsym_cleanup();
5642 kdb_printf("bt truncated, count limit reached\n");
5644 } else if (suppress) {
5646 ("bt did not find pt_regs - no trace produced. Suggest 'set BTSP 1'\n");
5656 * Do a backtrace starting at a specified stack address. Use this if the
5657 * heuristics get the stack decode wrong.
5660 * addr Address provided to 'bt' command.
5665 * zero for success, a kdb diagnostic if error
5669 * mds %rsp comes in handy when examining the stack to do a manual
5673 int kdba_bt_address(kdb_machreg_t addr, int argcount)
5676 kdba_id_init(&kdb_di); /* kdb_bb needs this done once */
/* Try the accurate basic-block backtrace first (old_style = 0)... */
5677 ret = kdba_bt_stack(addr, argcount, NULL, 0);
/* ...and fall back to the old-style scan on failure (old_style = 1). */
5679 ret = kdba_bt_stack(addr, argcount, NULL, 1);
5686 * Do a backtrace for a specified process.
5689 * p Struct task pointer extracted by 'bt' command.
5694 * zero for success, a kdb diagnostic if error
5699 int kdba_bt_process(const struct task_struct *p, int argcount)
5702 kdba_id_init(&kdb_di); /* kdb_bb needs this done once */
/* Accurate backtrace first, then old-style fallback — mirrors
 * kdba_bt_address above. */
5703 ret = kdba_bt_stack(0, argcount, p, 0);
5705 ret = kdba_bt_stack(0, argcount, p, 1);
/* Module init: register the bb debug commands, index the opcode usage table
 * by first letter, and resolve the assembler entry-point symbols that the
 * basic-block analyser needs via kallsyms.
 * NOTE(review): kallsyms_lookup_name() yields 0 for symbols missing in this
 * kernel config; the bb code is presumed to treat 0 as "not present".
 */
5709 static int __init kdba_bt_x86_init(void)
5712 struct bb_name_state *r;
5714 kdb_register_repeat("bb1", kdb_bb1, "<vaddr>", "Analyse one basic block", 0, KDB_REPEAT_NONE);
5715 kdb_register_repeat("bb_all", kdb_bb_all, "", "Backtrace check on all built in functions", 0, KDB_REPEAT_NONE);
5717 /* Split the opcode usage table by the first letter of each set of
5718 * opcodes, for faster mapping of opcode to its operand usage.
5720 for (i = 0; i < ARRAY_SIZE(bb_opcode_usage_all); ++i) {
5721 c = bb_opcode_usage_all[i].opcode[0] - 'a';
5724 bb_opcode_usage[c].opcode = bb_opcode_usage_all + i;
5726 ++bb_opcode_usage[c].size;
/* Resolve the special assembler labels the backtracer must recognise. */
5729 bb_common_interrupt = kallsyms_lookup_name("common_interrupt");
5730 bb_error_entry = kallsyms_lookup_name("error_entry");
5731 bb_ret_from_intr = kallsyms_lookup_name("ret_from_intr");
5732 bb_thread_return = kallsyms_lookup_name("thread_return");
5733 bb_sync_regs = kallsyms_lookup_name("sync_regs");
5734 bb_save_v86_state = kallsyms_lookup_name("save_v86_state");
5735 bb__sched_text_start = kallsyms_lookup_name("__sched_text_start");
5736 bb__sched_text_end = kallsyms_lookup_name("__sched_text_end");
5737 bb_save_args = kallsyms_lookup_name("save_args");
5738 bb_save_rest = kallsyms_lookup_name("save_rest");
5739 bb_save_paranoid = kallsyms_lookup_name("save_paranoid");
/* Resolve each special-case function's address from its name. */
5740 for (i = 0, r = bb_special_cases;
5741 i < ARRAY_SIZE(bb_special_cases);
5743 r->address = kallsyms_lookup_name(r->name);
5746 #ifdef CONFIG_4KSTACKS
/* 4K-stack kernels: locate the per-cpu irq context arrays. */
5747 kdba_hardirq_ctx = (struct thread_info **)kallsyms_lookup_name("hardirq_ctx");
5748 kdba_softirq_ctx = (struct thread_info **)kallsyms_lookup_name("softirq_ctx");
5749 #endif /* CONFIG_4KSTACKS */
/* Module exit: unregister the two commands added in kdba_bt_x86_init. */
5754 static void __exit kdba_bt_x86_exit(void)
5756 kdb_unregister("bb1");
5757 kdb_unregister("bb_all");
5760 module_init(kdba_bt_x86_init)
5761 module_exit(kdba_bt_x86_exit)