2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (c) 2006, 2007-2009 Silicon Graphics, Inc. All Rights Reserved.
8 * Common code for doing accurate backtraces on i386 and x86_64, including
9 * printing the values of arguments.
12 #include <linux/init.h>
13 #include <linux/kallsyms.h>
14 #include <linux/kdb.h>
15 #include <linux/kdbprivate.h>
16 #include <linux/ctype.h>
17 #include <linux/string.h>
18 #include <linux/stringify.h>
19 #include <linux/kernel.h>
20 #include <linux/sched.h>
21 #include <linux/nmi.h>
22 #include <asm/asm-offsets.h>
23 #include <asm/system.h>
/* Print a debug message iff the BB debug flag is set.  Wrapped in
 * do { } while (0) so the macro expands to a single statement and is
 * safe inside an unbraced if/else (the original bare { } block would
 * leave a stray ';' that breaks a following 'else').
 */
#define KDB_DEBUG_BB(fmt, ...) \
	do { \
		if (KDB_DEBUG(BB)) \
			kdb_printf(fmt, ## __VA_ARGS__); \
	} while (0)
/* Print "<prefix>{+|-}0x<abs(offset)><suffix>" via kdb_printf.  The
 * offset argument is now fully parenthesized: the original expansion
 * used bare 'offset' and '-offset', so passing a compound expression
 * such as 'a - b' would negate only the first term (-a - b instead of
 * -(a - b)).  Note the argument is still evaluated more than once, so
 * callers must not pass expressions with side effects.
 */
#define KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix) \
	kdb_printf(prefix "%c0x%x" suffix, \
		   (offset) >= 0 ? '+' : '-', \
		   (offset) >= 0 ? (offset) : -(offset))
/* Conditional form of KDB_DEBUG_BB_OFFSET_PRINTF: only prints when the
 * BB debug flag is set.  do { } while (0) makes the macro a single
 * statement, safe in unbraced if/else bodies (the original bare { }
 * block was not).
 */
#define KDB_DEBUG_BB_OFFSET(offset, prefix, suffix) \
	do { \
		if (KDB_DEBUG(BB)) \
			KDB_DEBUG_BB_OFFSET_PRINTF(offset, prefix, suffix); \
	} while (0)
34 #define BB_CHECK(expr, val, ret) \
36 if (unlikely(expr)) { \
37 kdb_printf("%s, line %d: BB_CHECK(" #expr ") failed " \
39 __FUNCTION__, __LINE__, (long)val); \
47 /* Use BBRG_Rxx for both i386 and x86_64. RAX through R15 must be at the end,
48 * starting with RAX. Some of these codes do not reflect actual registers,
49 * such codes are special cases when parsing the record of register changes.
50 * When updating BBRG_ entries, update bbrg_name as well.
55 BBRG_UNDEFINED = 0, /* Register contents are undefined */
56 BBRG_OSP, /* original stack pointer on entry to function */
75 const static char *bbrg_name[] = {
76 [BBRG_UNDEFINED] = "undefined",
96 /* Map a register name to its register code. This includes the sub-register
97 * addressable fields, e.g. parts of rax can be addressed as ax, al, ah, eax.
98 * The list is sorted so it can be binary chopped, sort command is:
99 * LANG=C sort -t '"' -k2
102 struct bb_reg_code_map {
103 enum bb_reg_code reg;
107 const static struct bb_reg_code_map
108 bb_reg_code_map[] = {
134 { BBRG_R10, "r10d" },
135 { BBRG_R10, "r10l" },
136 { BBRG_R10, "r10w" },
138 { BBRG_R11, "r11d" },
139 { BBRG_R11, "r11l" },
140 { BBRG_R11, "r11w" },
142 { BBRG_R12, "r12d" },
143 { BBRG_R12, "r12l" },
144 { BBRG_R12, "r12w" },
146 { BBRG_R13, "r13d" },
147 { BBRG_R13, "r13l" },
148 { BBRG_R13, "r13w" },
150 { BBRG_R14, "r14d" },
151 { BBRG_R14, "r14l" },
152 { BBRG_R14, "r14w" },
154 { BBRG_R15, "r15d" },
155 { BBRG_R15, "r15l" },
156 { BBRG_R15, "r15w" },
179 /* Record register contents in terms of the values that were passed to this
180 * function, IOW track which registers contain an input value. A register's
181 * contents can be undefined, it can contain an input register value or it can
182 * contain an offset from the original stack pointer.
184 * This structure is used to represent the current contents of the integer
185 * registers, it is held in an array that is indexed by BBRG_xxx. The element
186 * for BBRG_xxx indicates what input value is currently in BBRG_xxx. When
187 * 'value' is BBRG_OSP then register BBRG_xxx contains a stack pointer,
188 * pointing at 'offset' from the original stack pointer on entry to the
189 * function. When 'value' is not BBRG_OSP then element BBRG_xxx contains the
190 * original contents of an input register and offset is ignored.
192 * An input register 'value' can be stored in more than one register and/or in
193 * more than one memory location.
196 struct bb_reg_contains
198 enum bb_reg_code value: 8;
202 /* Note: the offsets in struct bb_mem_contains in this code are _NOT_ offsets
203 * from OSP, they are offsets from current RSP. It fits better with the way
204 * that struct pt_regs is built, some code pushes extra data before pt_regs so
205 * working with OSP relative offsets gets messy. struct bb_mem_contains
206 * entries must be in descending order of RSP offset.
209 typedef struct { DECLARE_BITMAP(bits, BBRG_R15+1); } bbrgmask_t;
210 #define BB_SKIP(reg) (1 << (BBRG_ ## reg))
211 struct bb_mem_contains {
212 short offset_address;
213 enum bb_reg_code value: 8;
216 /* Transfer of control to a label outside the current function. If the
217 * transfer is to a known common restore path that expects known registers
218 * and/or a known memory state (e.g. struct pt_regs) then do a sanity check on
219 * the state at this point.
222 struct bb_name_state {
223 const char *name; /* target function */
224 bfd_vma address; /* Address of target function */
225 const char *fname; /* optional from function name */
226 const struct bb_mem_contains *mem; /* expected memory state */
227 const struct bb_reg_contains *regs; /* expected register state */
228 const unsigned short mem_size; /* ARRAY_SIZE(mem) */
229 const unsigned short regs_size; /* ARRAY_SIZE(regs) */
230 const short osp_offset; /* RSP in regs == OSP+osp_offset */
231 const bbrgmask_t skip_mem; /* Some slots in mem may be undefined */
232 const bbrgmask_t skip_regs; /* Some slots in regs may be undefined */
235 /* NS (NAME_STATE) macros define the register and memory state when we transfer
236 * control to or start decoding a special case name. Use NS when the target
237 * label always has the same state. Use NS_FROM and specify the source label
238 * if the target state is slightly different depending on where it is branched
239 * from. This gives better state checking, by isolating the special cases.
241 * Note: for the same target label, NS_FROM entries must be followed by a
245 #define NS_FROM(iname, ifname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \
251 .mem_size = ARRAY_SIZE(imem), \
252 .regs_size = ARRAY_SIZE(iregs), \
253 .skip_mem.bits[0] = iskip_mem, \
254 .skip_regs.bits[0] = iskip_regs, \
255 .osp_offset = iosp_offset, \
/* Shorter forms for the common cases */

/* NS: check both memory and register state; source label unrestricted. */
#define NS(iname, imem, iregs, iskip_mem, iskip_regs, iosp_offset) \
	NS_FROM(iname, NULL, imem, iregs, iskip_mem, iskip_regs, iosp_offset)

/* NS_MEM: check memory state only; registers are not checked (no_regs). */
#define NS_MEM(iname, imem, iskip_mem) \
	NS_FROM(iname, NULL, imem, no_regs, iskip_mem, 0, 0)

/* NS_MEM_FROM: as NS_MEM, but only applies when branched from ifname. */
#define NS_MEM_FROM(iname, ifname, imem, iskip_mem) \
	NS_FROM(iname, ifname, imem, no_regs, iskip_mem, 0, 0)

/* NS_REG: check register state only; memory is not checked (no_memory). */
#define NS_REG(iname, iregs, iskip_regs) \
	NS_FROM(iname, NULL, no_memory, iregs, 0, iskip_regs, 0)

/* NS_REG_FROM: as NS_REG, but only applies when branched from ifname. */
#define NS_REG_FROM(iname, ifname, iregs, iskip_regs) \
	NS_FROM(iname, ifname, no_memory, iregs, 0, iskip_regs, 0)
272 bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src);
274 static const char *bb_mod_name, *bb_func_name;
277 bb_noret(const char *name)
279 if (strcmp(name, "panic") == 0 ||
280 strcmp(name, "do_exit") == 0 ||
281 strcmp(name, "do_group_exit") == 0 ||
282 strcmp(name, "complete_and_exit") == 0)
287 /*============================================================================*/
289 /* Most of the basic block code and data is common to x86_64 and i386. This */
290 /* large ifdef contains almost all of the differences between the two */
293 /* Make sure you update the correct section of this ifdef. */
295 /*============================================================================*/
299 /* Registers that can be used to pass parameters, in the order that parameters
303 const static enum bb_reg_code
313 const static enum bb_reg_code
314 bb_preserved_reg[] = {
324 static const struct bb_mem_contains full_pt_regs[] = {
341 static const struct bb_mem_contains full_pt_regs_plus_1[] = {
359 * Going into error_exit we have the hardware pushed error_code on the stack
360 * plus a full pt_regs
362 static const struct bb_mem_contains error_code_full_pt_regs[] = {
363 { 0x78, BBRG_UNDEFINED },
380 static const struct bb_mem_contains partial_pt_regs[] = {
391 static const struct bb_mem_contains partial_pt_regs_plus_1[] = {
402 static const struct bb_mem_contains partial_pt_regs_plus_2[] = {
413 static const struct bb_mem_contains no_memory[] = {
415 /* Hardware has already pushed an error_code on the stack. Use undefined just
416 * to set the initial stack offset.
418 static const struct bb_mem_contains error_code[] = {
419 { 0x0, BBRG_UNDEFINED },
421 /* error_code plus original rax */
422 static const struct bb_mem_contains error_code_rax[] = {
423 { 0x8, BBRG_UNDEFINED },
427 static const struct bb_reg_contains all_regs[] = {
428 [BBRG_RAX] = { BBRG_RAX, 0 },
429 [BBRG_RBX] = { BBRG_RBX, 0 },
430 [BBRG_RCX] = { BBRG_RCX, 0 },
431 [BBRG_RDX] = { BBRG_RDX, 0 },
432 [BBRG_RDI] = { BBRG_RDI, 0 },
433 [BBRG_RSI] = { BBRG_RSI, 0 },
434 [BBRG_RBP] = { BBRG_RBP, 0 },
435 [BBRG_RSP] = { BBRG_OSP, 0 },
436 [BBRG_R8 ] = { BBRG_R8, 0 },
437 [BBRG_R9 ] = { BBRG_R9, 0 },
438 [BBRG_R10] = { BBRG_R10, 0 },
439 [BBRG_R11] = { BBRG_R11, 0 },
440 [BBRG_R12] = { BBRG_R12, 0 },
441 [BBRG_R13] = { BBRG_R13, 0 },
442 [BBRG_R14] = { BBRG_R14, 0 },
443 [BBRG_R15] = { BBRG_R15, 0 },
445 static const struct bb_reg_contains no_regs[] = {
448 static struct bb_name_state bb_special_cases[] = {
450 /* First the cases that pass data only in memory. We do not check any
451 * register state for these cases.
454 /* Simple cases, no exceptions */
455 NS_MEM("ia32_ptregs_common", partial_pt_regs_plus_1, 0),
456 NS_MEM("ia32_sysret", partial_pt_regs, 0),
457 NS_MEM("int_careful", partial_pt_regs, 0),
458 NS_MEM("ia32_badarg", partial_pt_regs, 0),
459 NS_MEM("int_restore_rest", full_pt_regs, 0),
460 NS_MEM("int_signal", full_pt_regs, 0),
461 NS_MEM("int_very_careful", partial_pt_regs, 0),
462 NS_MEM("ptregscall_common", full_pt_regs_plus_1, 0),
463 NS_MEM("ret_from_intr", partial_pt_regs_plus_2, 0),
464 NS_MEM("stub32_clone", partial_pt_regs_plus_1, 0),
465 NS_MEM("stub32_execve", partial_pt_regs_plus_1, 0),
466 NS_MEM("stub32_fork", partial_pt_regs_plus_1, 0),
467 NS_MEM("stub32_iopl", partial_pt_regs_plus_1, 0),
468 NS_MEM("stub32_rt_sigreturn", partial_pt_regs_plus_1, 0),
469 NS_MEM("stub32_sigaltstack", partial_pt_regs_plus_1, 0),
470 NS_MEM("stub32_sigreturn", partial_pt_regs_plus_1, 0),
471 NS_MEM("stub32_vfork", partial_pt_regs_plus_1, 0),
472 NS_MEM("stub_clone", partial_pt_regs_plus_1, 0),
473 NS_MEM("stub_execve", partial_pt_regs_plus_1, 0),
474 NS_MEM("stub_fork", partial_pt_regs_plus_1, 0),
475 NS_MEM("stub_iopl", partial_pt_regs_plus_1, 0),
476 NS_MEM("stub_rt_sigreturn", partial_pt_regs_plus_1, 0),
477 NS_MEM("stub_sigaltstack", partial_pt_regs_plus_1, 0),
478 NS_MEM("stub_vfork", partial_pt_regs_plus_1, 0),
479 NS_MEM("sysenter_auditsys", partial_pt_regs,
480 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11)),
482 NS_MEM("paranoid_exit", error_code_full_pt_regs, 0),
484 NS_MEM_FROM("ia32_badsys", "ia32_sysenter_target",
486 /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on
487 * some paths. It also stomps on RAX.
489 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
491 NS_MEM_FROM("ia32_badsys", "ia32_cstar_target",
493 /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some
494 * paths. It also stomps on RAX. Even more confusing, instead
495 * of storing RCX it stores RBP. WTF?
497 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
498 BB_SKIP(RAX) | BB_SKIP(RCX)),
499 NS_MEM_FROM("ia32_badsys", "ia32_syscall",
501 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11)),
502 NS_MEM("ia32_badsys", partial_pt_regs, 0),
504 #ifdef CONFIG_AUDITSYSCALL
505 NS_MEM_FROM("int_with_check", "sysexit_audit", partial_pt_regs,
506 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
508 NS_MEM_FROM("int_with_check", "ia32_cstar_target", partial_pt_regs,
509 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
510 BB_SKIP(RAX) | BB_SKIP(RCX)),
512 NS_MEM("int_with_check", no_memory, 0),
514 /* Various bits of code branch to int_ret_from_sys_call, with slightly
515 * different missing values in pt_regs.
517 NS_MEM_FROM("int_ret_from_sys_call", "ret_from_fork",
520 NS_MEM_FROM("int_ret_from_sys_call", "stub_execve",
522 BB_SKIP(RAX) | BB_SKIP(RCX)),
523 NS_MEM_FROM("int_ret_from_sys_call", "stub_rt_sigreturn",
525 BB_SKIP(RAX) | BB_SKIP(RCX)),
526 NS_MEM_FROM("int_ret_from_sys_call", "kernel_execve",
529 NS_MEM_FROM("int_ret_from_sys_call", "ia32_syscall",
531 /* ia32_syscall only saves RDI through RCX. */
532 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
534 NS_MEM_FROM("int_ret_from_sys_call", "ia32_sysenter_target",
536 /* ia32_sysenter_target uses CLEAR_RREGS to clear R8-R11 on
537 * some paths. It also stomps on RAX.
539 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
541 NS_MEM_FROM("int_ret_from_sys_call", "ia32_cstar_target",
543 /* ia32_cstar_target uses CLEAR_RREGS to clear R8-R11 on some
544 * paths. It also stomps on RAX. Even more confusing, instead
545 * of storing RCX it stores RBP. WTF?
547 BB_SKIP(R8) | BB_SKIP(R9) | BB_SKIP(R10) | BB_SKIP(R11) |
548 BB_SKIP(RAX) | BB_SKIP(RCX)),
549 NS_MEM_FROM("int_ret_from_sys_call", "ia32_badsys",
550 partial_pt_regs, BB_SKIP(RAX)),
551 NS_MEM("int_ret_from_sys_call", partial_pt_regs, 0),
553 #ifdef CONFIG_PREEMPT
554 NS_MEM("retint_kernel", partial_pt_regs, BB_SKIP(RAX)),
555 #endif /* CONFIG_PREEMPT */
557 NS_MEM("retint_careful", partial_pt_regs, BB_SKIP(RAX)),
559 /* Horrible hack: For a brand new x86_64 task, switch_to() branches to
560 * ret_from_fork with a totally different stack state from all the
561 * other tasks that come out of switch_to(). This non-standard state
562 * cannot be represented so just ignore the branch from switch_to() to
563 * ret_from_fork. Due to inlining and linker labels, switch_to() can
564 * appear as several different function labels, including schedule,
565 * context_switch and __sched_text_start.
567 NS_MEM_FROM("ret_from_fork", "schedule", no_memory, 0),
568 NS_MEM_FROM("ret_from_fork", "__schedule", no_memory, 0),
569 NS_MEM_FROM("ret_from_fork", "__sched_text_start", no_memory, 0),
570 NS_MEM_FROM("ret_from_fork", "context_switch", no_memory, 0),
571 NS_MEM("ret_from_fork", full_pt_regs, 0),
573 NS_MEM_FROM("ret_from_sys_call", "ret_from_fork",
576 NS_MEM("ret_from_sys_call", partial_pt_regs, 0),
578 NS_MEM("retint_restore_args",
580 BB_SKIP(RAX) | BB_SKIP(RCX)),
582 NS_MEM("retint_swapgs",
584 BB_SKIP(RAX) | BB_SKIP(RCX)),
586 /* Now the cases that pass data in registers. We do not check any
587 * memory state for these cases.
590 NS_REG("bad_put_user",
591 all_regs, BB_SKIP(RBX)),
593 NS_REG("bad_get_user",
594 all_regs, BB_SKIP(RAX) | BB_SKIP(RDX)),
596 NS_REG("bad_to_user",
598 BB_SKIP(RAX) | BB_SKIP(RCX)),
600 NS_REG("ia32_ptregs_common",
604 NS_REG("copy_user_generic_unrolled",
606 BB_SKIP(RAX) | BB_SKIP(RCX)),
608 NS_REG("copy_user_generic_string",
610 BB_SKIP(RAX) | BB_SKIP(RCX)),
616 /* Finally the cases that pass data in both registers and memory.
619 NS("invalid_TSS", error_code, all_regs, 0, 0, 0),
620 NS("segment_not_present", error_code, all_regs, 0, 0, 0),
621 NS("alignment_check", error_code, all_regs, 0, 0, 0),
622 NS("page_fault", error_code, all_regs, 0, 0, 0),
623 NS("general_protection", error_code, all_regs, 0, 0, 0),
624 NS("error_entry", error_code_rax, all_regs, 0, BB_SKIP(RAX), -0x10),
625 NS("error_exit", error_code_full_pt_regs, no_regs, 0, 0, 0x30),
626 NS("common_interrupt", error_code, all_regs, 0, 0, -0x8),
627 NS("save_args", error_code, all_regs, 0, 0, -0x50),
628 NS("int3", no_memory, all_regs, 0, 0, -0x80),
631 static const char *bb_spurious[] = {
635 "system_call_after_swapgs",
636 "system_call_fastpath",
642 #ifdef CONFIG_AUDITSYSCALL
647 "int_ret_from_sys_call",
653 /* common_interrupt */
656 "retint_with_reschedule",
659 "retint_restore_args",
665 #ifdef CONFIG_PREEMPT
667 #endif /* CONFIG_PREEMPT */
671 "paranoid_userspace",
678 #ifdef CONFIG_TRACE_IRQFLAGS
687 /* ia32_sysenter_target */
690 "sysexit_from_sys_call",
691 #ifdef CONFIG_AUDITSYSCALL
696 /* ia32_cstar_target */
699 "sysretl_from_sys_call",
700 #ifdef CONFIG_AUDITSYSCALL
709 #ifdef CONFIG_HIBERNATION
713 #endif /* CONFIG_HIBERNATION */
714 #ifdef CONFIG_KPROBES
717 /* kretprobe_trampoline_holder */
718 "kretprobe_trampoline",
719 #endif /* CONFIG_KPROBES */
721 /* relocate_kernel */
722 "relocate_new_kernel",
723 #endif /* CONFIG_KEXEC */
725 /* arch/i386/xen/xen-asm.S */
726 "xen_irq_enable_direct_end",
727 "xen_irq_disable_direct_end",
728 "xen_save_fl_direct_end",
729 "xen_restore_fl_direct_end",
730 "xen_iret_start_crit",
734 #endif /* CONFIG_XEN */
737 static const char *bb_hardware_handlers[] = {
751 bb_hardware_pushed_arch(kdb_machreg_t rsp,
752 const struct kdb_activation_record *ar)
754 /* x86_64 interrupt stacks are 16 byte aligned and you must get the
755 * next rsp from stack, it cannot be statically calculated. Do not
756 * include the word at rsp, it is pushed by hardware but is treated as
757 * a normal software return value.
759 * When an IST switch occurs (e.g. NMI) then the saved rsp points to
760 * another stack entirely. Assume that the IST stack is 16 byte
761 * aligned and just return the size of the hardware data on this stack.
762 * The stack unwind code will take care of the stack switch.
764 kdb_machreg_t saved_rsp = *((kdb_machreg_t *)rsp + 3);
765 int hardware_pushed = saved_rsp - rsp - KDB_WORD_SIZE;
766 if (hardware_pushed < 4 * KDB_WORD_SIZE ||
767 saved_rsp < ar->stack.logical_start ||
768 saved_rsp >= ar->stack.logical_end)
769 return 4 * KDB_WORD_SIZE;
771 return hardware_pushed;
775 bb_start_block0(void)
777 bb_reg_code_set_value(BBRG_RAX, BBRG_RAX);
778 bb_reg_code_set_value(BBRG_RBX, BBRG_RBX);
779 bb_reg_code_set_value(BBRG_RCX, BBRG_RCX);
780 bb_reg_code_set_value(BBRG_RDX, BBRG_RDX);
781 bb_reg_code_set_value(BBRG_RDI, BBRG_RDI);
782 bb_reg_code_set_value(BBRG_RSI, BBRG_RSI);
783 bb_reg_code_set_value(BBRG_RBP, BBRG_RBP);
784 bb_reg_code_set_value(BBRG_RSP, BBRG_OSP);
785 bb_reg_code_set_value(BBRG_R8, BBRG_R8);
786 bb_reg_code_set_value(BBRG_R9, BBRG_R9);
787 bb_reg_code_set_value(BBRG_R10, BBRG_R10);
788 bb_reg_code_set_value(BBRG_R11, BBRG_R11);
789 bb_reg_code_set_value(BBRG_R12, BBRG_R12);
790 bb_reg_code_set_value(BBRG_R13, BBRG_R13);
791 bb_reg_code_set_value(BBRG_R14, BBRG_R14);
792 bb_reg_code_set_value(BBRG_R15, BBRG_R15);
795 /* x86_64 does not have a special case for __switch_to */
798 bb_fixup_switch_to(char *p)
803 bb_asmlinkage_arch(void)
805 return strncmp(bb_func_name, "__down", 6) == 0 ||
806 strncmp(bb_func_name, "__up", 4) == 0 ||
807 strncmp(bb_func_name, "stub_", 5) == 0 ||
808 strcmp(bb_func_name, "ret_from_fork") == 0 ||
809 strcmp(bb_func_name, "ptregscall_common") == 0;
812 #else /* !CONFIG_X86_64 */
814 /* Registers that can be used to pass parameters, in the order that parameters
818 const static enum bb_reg_code
825 const static enum bb_reg_code
826 bb_preserved_reg[] = {
834 static const struct bb_mem_contains full_pt_regs[] = {
843 static const struct bb_mem_contains no_memory[] = {
845 /* Hardware has already pushed an error_code on the stack. Use undefined just
846 * to set the initial stack offset.
848 static const struct bb_mem_contains error_code[] = {
849 { 0x0, BBRG_UNDEFINED },
851 /* rbx already pushed */
852 static const struct bb_mem_contains rbx_pushed[] = {
855 #ifdef CONFIG_MATH_EMULATION
856 static const struct bb_mem_contains mem_fpu_reg_round[] = {
862 #endif /* CONFIG_MATH_EMULATION */
864 static const struct bb_reg_contains all_regs[] = {
865 [BBRG_RAX] = { BBRG_RAX, 0 },
866 [BBRG_RBX] = { BBRG_RBX, 0 },
867 [BBRG_RCX] = { BBRG_RCX, 0 },
868 [BBRG_RDX] = { BBRG_RDX, 0 },
869 [BBRG_RDI] = { BBRG_RDI, 0 },
870 [BBRG_RSI] = { BBRG_RSI, 0 },
871 [BBRG_RBP] = { BBRG_RBP, 0 },
872 [BBRG_RSP] = { BBRG_OSP, 0 },
874 static const struct bb_reg_contains no_regs[] = {
876 #ifdef CONFIG_MATH_EMULATION
877 static const struct bb_reg_contains reg_fpu_reg_round[] = {
878 [BBRG_RBP] = { BBRG_OSP, -0x4 },
879 [BBRG_RSP] = { BBRG_OSP, -0x10 },
881 #endif /* CONFIG_MATH_EMULATION */
883 static struct bb_name_state bb_special_cases[] = {
885 /* First the cases that pass data only in memory. We do not check any
886 * register state for these cases.
889 /* Simple cases, no exceptions */
890 NS_MEM("check_userspace", full_pt_regs, 0),
891 NS_MEM("device_not_available_emulate", full_pt_regs, 0),
892 NS_MEM("ldt_ss", full_pt_regs, 0),
893 NS_MEM("no_singlestep", full_pt_regs, 0),
894 NS_MEM("restore_all", full_pt_regs, 0),
895 NS_MEM("restore_nocheck", full_pt_regs, 0),
896 NS_MEM("restore_nocheck_notrace", full_pt_regs, 0),
897 NS_MEM("ret_from_exception", full_pt_regs, 0),
898 NS_MEM("ret_from_fork", full_pt_regs, 0),
899 NS_MEM("ret_from_intr", full_pt_regs, 0),
900 NS_MEM("work_notifysig", full_pt_regs, 0),
901 NS_MEM("work_pending", full_pt_regs, 0),
903 #ifdef CONFIG_PREEMPT
904 NS_MEM("resume_kernel", full_pt_regs, 0),
905 #endif /* CONFIG_PREEMPT */
907 NS_MEM("common_interrupt", error_code, 0),
908 NS_MEM("error_code", error_code, 0),
910 NS_MEM("bad_put_user", rbx_pushed, 0),
912 NS_MEM_FROM("resume_userspace", "syscall_badsys",
913 full_pt_regs, BB_SKIP(RAX)),
914 NS_MEM_FROM("resume_userspace", "syscall_fault",
915 full_pt_regs, BB_SKIP(RAX)),
916 NS_MEM_FROM("resume_userspace", "syscall_trace_entry",
917 full_pt_regs, BB_SKIP(RAX)),
918 /* Too difficult to trace through the various vm86 functions for now.
919 * They are C functions that start off with some memory state, fiddle
920 * the registers then jmp directly to resume_userspace. For the
921 * moment, just assume that they are valid and do no checks.
923 NS_FROM("resume_userspace", "do_int",
924 no_memory, no_regs, 0, 0, 0),
925 NS_FROM("resume_userspace", "do_sys_vm86",
926 no_memory, no_regs, 0, 0, 0),
927 NS_FROM("resume_userspace", "handle_vm86_fault",
928 no_memory, no_regs, 0, 0, 0),
929 NS_FROM("resume_userspace", "handle_vm86_trap",
930 no_memory, no_regs, 0, 0, 0),
931 NS_MEM("resume_userspace", full_pt_regs, 0),
933 NS_MEM_FROM("syscall_badsys", "ia32_sysenter_target",
934 full_pt_regs, BB_SKIP(RBP)),
935 NS_MEM("syscall_badsys", full_pt_regs, 0),
937 NS_MEM_FROM("syscall_call", "syscall_trace_entry",
938 full_pt_regs, BB_SKIP(RAX)),
939 NS_MEM("syscall_call", full_pt_regs, 0),
941 NS_MEM_FROM("syscall_exit", "syscall_trace_entry",
942 full_pt_regs, BB_SKIP(RAX)),
943 NS_MEM("syscall_exit", full_pt_regs, 0),
945 NS_MEM_FROM("syscall_exit_work", "ia32_sysenter_target",
946 full_pt_regs, BB_SKIP(RAX) | BB_SKIP(RBP)),
947 NS_MEM_FROM("syscall_exit_work", "system_call",
948 full_pt_regs, BB_SKIP(RAX)),
949 NS_MEM("syscall_exit_work", full_pt_regs, 0),
951 NS_MEM_FROM("syscall_trace_entry", "ia32_sysenter_target",
952 full_pt_regs, BB_SKIP(RBP)),
953 NS_MEM_FROM("syscall_trace_entry", "system_call",
954 full_pt_regs, BB_SKIP(RAX)),
955 NS_MEM("syscall_trace_entry", full_pt_regs, 0),
957 /* Now the cases that pass data in registers. We do not check any
958 * memory state for these cases.
961 NS_REG("syscall_fault", all_regs, 0),
963 NS_REG("bad_get_user", all_regs,
964 BB_SKIP(RAX) | BB_SKIP(RDX)),
966 /* Finally the cases that pass data in both registers and memory.
969 /* This entry is redundant now because bb_fixup_switch_to() hides the
970 * jmp __switch_to case, however the entry is left here as
973 * NS("__switch_to", no_memory, no_regs, 0, 0, 0),
976 NS("iret_exc", no_memory, all_regs, 0, 0, 0x20),
978 #ifdef CONFIG_MATH_EMULATION
979 NS("fpu_reg_round", mem_fpu_reg_round, reg_fpu_reg_round, 0, 0, 0),
980 #endif /* CONFIG_MATH_EMULATION */
983 static const char *bb_spurious[] = {
984 /* ret_from_exception */
989 #ifdef CONFIG_PREEMPT
991 #endif /* CONFIG_PREEMPT */
992 /* ia32_sysenter_target */
1000 "restore_nocheck_notrace",
1002 /* do not include iret_exc, it is in a .fixup section */
1007 "work_notifysig_v86",
1008 #endif /* CONFIG_VM86 */
1011 /* device_not_available */
1012 "device_not_available_emulate",
1014 "debug_esp_fix_insn",
1015 "debug_stack_correct",
1017 "nmi_stack_correct",
1019 "nmi_debug_stack_check",
1021 #ifdef CONFIG_HIBERNATION
1025 #endif /* CONFIG_HIBERNATION */
1026 #ifdef CONFIG_KPROBES
1028 "jprobe_return_end",
1029 #endif /* CONFIG_KPROBES */
1031 /* relocate_kernel */
1032 "relocate_new_kernel",
1033 #endif /* CONFIG_KEXEC */
1034 #ifdef CONFIG_MATH_EMULATION
1035 /* assorted *.S files in arch/i386/math_emu */
1037 "Denorm_shift_more_than_32",
1038 "Denorm_shift_more_than_63",
1039 "Denorm_shift_more_than_64",
1040 "Do_unmasked_underflow",
1041 "Exp_not_underflow",
1044 "fpu_reg_round_signed_special_exit",
1045 "fpu_reg_round_special_exit",
1055 "L_bugged_denorm_486",
1059 "LCheck_24_round_up",
1060 "LCheck_53_round_up",
1061 "LCheck_Round_Overflow",
1062 "LCheck_truncate_24",
1063 "LCheck_truncate_53",
1064 "LCheck_truncate_64",
1065 "LDenormal_adj_exponent",
1108 "L_no_precision_loss",
1114 "L_precision_lost_down",
1115 "L_precision_lost_up",
1116 "LPrevent_2nd_overflow",
1117 "LPrevent_3rd_overflow",
1120 "LResult_Normalised",
1123 "LRound_nearest_24",
1124 "LRound_nearest_53",
1125 "LRound_nearest_64",
1130 "L_round_the_result",
1135 "LSecond_div_not_1",
1143 "L_Store_significand",
1149 "L_underflow_to_zero",
1157 "sqrt_get_more_precision",
1158 "sqrt_more_prec_large",
1159 "sqrt_more_prec_ok",
1160 "sqrt_more_prec_small",
1162 "sqrt_near_exact_large",
1163 "sqrt_near_exact_ok",
1164 "sqrt_near_exact_small",
1165 "sqrt_near_exact_x",
1166 "sqrt_prelim_no_adjust",
1167 "sqrt_round_result",
1168 "sqrt_stage_2_done",
1169 "sqrt_stage_2_error",
1170 "sqrt_stage_2_finish",
1171 "sqrt_stage_2_positive",
1172 "sqrt_stage_3_error",
1173 "sqrt_stage_3_finished",
1174 "sqrt_stage_3_no_error",
1175 "sqrt_stage_3_positive",
1176 "Unmasked_underflow",
1177 "xExp_not_underflow",
1178 #endif /* CONFIG_MATH_EMULATION */
1181 static const char *bb_hardware_handlers[] = {
1182 "ret_from_exception",
1187 "coprocessor_error",
1188 "simd_coprocessor_error",
1189 "device_not_available",
1196 "coprocessor_segment_overrun",
1198 "segment_not_present",
1200 "general_protection",
1205 "spurious_interrupt_bug",
1209 bb_hardware_pushed_arch(kdb_machreg_t rsp,
1210 const struct kdb_activation_record *ar)
1212 return (2 * KDB_WORD_SIZE);
1216 bb_start_block0(void)
1218 bb_reg_code_set_value(BBRG_RAX, BBRG_RAX);
1219 bb_reg_code_set_value(BBRG_RBX, BBRG_RBX);
1220 bb_reg_code_set_value(BBRG_RCX, BBRG_RCX);
1221 bb_reg_code_set_value(BBRG_RDX, BBRG_RDX);
1222 bb_reg_code_set_value(BBRG_RDI, BBRG_RDI);
1223 bb_reg_code_set_value(BBRG_RSI, BBRG_RSI);
1224 bb_reg_code_set_value(BBRG_RBP, BBRG_RBP);
1225 bb_reg_code_set_value(BBRG_RSP, BBRG_OSP);
1228 /* The i386 code that switches stack in a context switch is an extremely
1229 * special case. It saves the rip pointing to a label that is not otherwise
1230 * referenced, saves the current rsp then pushes a word. The magic code that
1231 * resumes the new task picks up the saved rip and rsp, effectively referencing
1232 * a label that otherwise is not used and ignoring the pushed word.
1234 * The simplest way to handle this very strange case is to recognise jmp
1235 * address <__switch_to> and treat it as a popfl instruction. This avoids
1236 * terminating the block on this jmp and removes one word from the stack state,
1237 * which is the end effect of all the magic code.
1239 * Called with the instruction line, starting after the first ':'.
1243 bb_fixup_switch_to(char *p)
1246 p += strspn(p, " \t"); /* start of instruction */
1247 if (strncmp(p, "jmp", 3))
1249 p += strcspn(p, " \t"); /* end of instruction */
1250 p += strspn(p, " \t"); /* start of address */
1251 p += strcspn(p, " \t"); /* end of address */
1252 p += strspn(p, " \t"); /* start of comment */
1253 if (strcmp(p, "<__switch_to>") == 0)
1254 strcpy(p1, "popfl");
1258 bb_asmlinkage_arch(void)
1260 return strcmp(bb_func_name, "ret_from_exception") == 0 ||
1261 strcmp(bb_func_name, "syscall_trace_entry") == 0;
1264 #endif /* CONFIG_X86_64 */
1267 /*============================================================================*/
1269 /* Common code and data. */
1271 /*============================================================================*/
1274 /* Tracking registers by decoding the instructions is quite a bit harder than
1275 * doing the same tracking using compiler generated information. Register
1276 * contents can remain in the same register, they can be copied to other
1277 * registers, they can be stored on stack or they can be modified/overwritten.
1278 * At any one time, there are 0 or more copies of the original value that was
1279 * supplied in each register on input to the current function. If a register
1280 * exists in multiple places, one copy of that register is the master version,
1281 * the others are temporary copies which may or may not be destroyed before the
1282 * end of the function.
1284 * The compiler knows which copy of a register is the master and which are
1285 * temporary copies, which makes it relatively easy to track register contents
1286 * as they are saved and restored. Without that compiler based knowledge, this
1287 * code has to track _every_ possible copy of each register, simply because we
1288 * do not know which is the master copy and which are temporary copies which
1289 * may be destroyed later.
1291 * It gets worse: registers that contain parameters can be copied to other
1292 * registers which are then saved on stack in a lower level function. Also the
1293 * stack pointer may be held in multiple registers (typically RSP and RBP)
1294 * which contain different offsets from the base of the stack on entry to this
1295 * function. All of which means that we have to track _all_ register
1296 * movements, or at least as much as possible.
1298 * Start with the basic block that contains the start of the function, by
1299 * definition all registers contain their initial value. Track each
1300 * instruction's effect on register contents, this includes reading from a
1301 * parameter register before any write to that register, IOW the register
1302 * really does contain a parameter. The register state is represented by a
1303 * dynamically sized array with each entry containing :-
1306 * Location it is copied to (another register or stack + offset)
1308 * Besides the register tracking array, we track which parameter registers are
1309 * read before being written, to determine how many parameters are passed in
1310 * registers. We also track which registers contain stack pointers, including
1311 * their offset from the original stack pointer on entry to the function.
1313 * At each exit from the current basic block (via JMP instruction or drop
1314 * through), the register state is cloned to form the state on input to the
1315 * target basic block and the target is marked for processing using this state.
1316 * When there are multiple ways to enter a basic block (e.g. several JMP
1317 * instructions referencing the same target) then there will be multiple sets
1318 * of register state to form the "input" for that basic block, there is no
1319 * guarantee that all paths to that block will have the same register state.
1321 * As each target block is processed, all the known sets of register state are
1322 * merged to form a suitable subset of the state which agrees with all the
1323 * inputs. The most common case is where one path to this block copies a
1324 * register to another register but another path does not, therefore the copy
 * is only a temporary and should not be propagated into this block.
1327 * If the target block already has an input state from the current transfer
1328 * point and the new input state is identical to the previous input state then
1329 * we have reached a steady state for the arc from the current location to the
1330 * target block. Therefore there is no need to process the target block again.
1332 * The steps of "process a block, create state for target block(s), pick a new
1333 * target block, merge state for target block, process target block" will
 * continue until all the state changes have propagated all the way down the
1335 * basic block tree, including round any cycles in the tree. The merge step
1336 * only deletes tracking entries from the input state(s), it never adds a
1337 * tracking entry. Therefore the overall algorithm is guaranteed to converge
1338 * to a steady state, the worst possible case is that every tracking entry into
1339 * a block is deleted, which will result in an empty output state.
1341 * As each instruction is decoded, it is checked to see if this is the point at
1342 * which execution left this function. This can be a call to another function
1343 * (actually the return address to this function) or is the instruction which
1344 * was about to be executed when an interrupt occurred (including an oops).
1345 * Save the register state at this point.
1347 * We always know what the registers contain when execution left this function.
1348 * For an interrupt, the registers are in struct pt_regs. For a call to
1349 * another function, we have already deduced the register state on entry to the
1350 * other function by unwinding to the start of that function. Given the
1351 * register state on exit from this function plus the known register contents
1352 * on entry to the next function, we can determine the stack pointer value on
1353 * input to this function. That in turn lets us calculate the address of input
1354 * registers that have been stored on stack, giving us the input parameters.
1355 * Finally the stack pointer gives us the return address which is the exit
1356 * point from the calling function, repeat the unwind process on that function.
1358 * The data that tracks which registers contain input parameters is function
1359 * global, not local to any basic block. To determine which input registers
1360 * contain parameters, we have to decode the entire function. Otherwise an
1361 * exit early in the function might not have read any parameters yet.
1364 /* Record memory contents in terms of the values that were passed to this
1365 * function, IOW track which memory locations contain an input value. A memory
1366 * location's contents can be undefined, it can contain an input register value
1367 * or it can contain an offset from the original stack pointer.
1369 * This structure is used to record register contents that have been stored in
1370 * memory. Location (BBRG_OSP + 'offset_address') contains the input value
1371 * from register 'value'. When 'value' is BBRG_OSP then offset_value contains
1372 * the offset from the original stack pointer that was stored in this memory
1373 * location. When 'value' is not BBRG_OSP then the memory location contains
1374 * the original contents of an input register and offset_value is ignored.
1376 * An input register 'value' can be stored in more than one register and/or in
1377 * more than one memory location.
/* One tracked stack slot: location (BBRG_OSP + offset_address) holds the
 * input value 'value' (see the block comment above).  NOTE(review): interior
 * lines of this listing are elided; only visible members are shown.
 */
1380 struct bb_memory_contains
1382 short offset_address;
1383 enum bb_reg_code value: 8;
1387 /* Track the register state in each basic block. */
1391 /* Indexed by register value 'reg - BBRG_RAX' */
1392 struct bb_reg_contains contains[KDB_INT_REGISTERS];
1395 /* dynamic size for memory locations, see mem_count */
/* old-style zero-length trailing array (pre-C99 flexible array member) */
1396 struct bb_memory_contains memory[0];
/* Current working register state and the state captured at the exit point. */
1399 static struct bb_reg_state *bb_reg_state, *bb_exit_state;
1400 static int bb_reg_state_max, bb_reg_params, bb_memory_params;
1408 /* Contains the actual hex value of a register, plus a valid bit.  Indexed by
1409 * register value 'reg - BBRG_RAX'
1411 static struct bb_actual bb_actual[KDB_INT_REGISTERS];
/* Bounds of the function currently being analysed. */
1413 static bfd_vma bb_func_start, bb_func_end;
/* Addresses of well-known kernel entry/scheduler symbols — presumably
 * resolved by symbol lookup elsewhere in this file (not visible here).
 */
1414 static bfd_vma bb_common_interrupt, bb_error_entry, bb_ret_from_intr,
1415 bb_thread_return, bb_sync_regs, bb_save_v86_state,
1416 bb__sched_text_start, bb__sched_text_end,
1417 bb_save_args, bb_save_rest, bb_save_paranoid;
1419 /* Record jmp instructions, both conditional and unconditional. These form the
1420 * arcs between the basic blocks. This is also used to record the state when
1421 * one block drops through into the next.
1423 * A bb can have multiple associated bb_jmp entries, one for each jcc
1424 * instruction plus at most one bb_jmp for the drop through case. If a bb
1425 * drops through to the next bb then the drop through bb_jmp entry will be the
1426 * last entry in the set of bb_jmp's that are associated with the bb. This is
1427 * enforced by the fact that jcc entries are added during the disassembly phase
1428 * of pass 1, the drop through entries are added near the end of pass 1.
1430 * At address 'from' in this block, we have a jump to address 'to'. The
1431 * register state at 'from' is copied to the target block.
/* NOTE(review): fragments of struct bb_jmp and struct bb; surrounding
 * member declarations are elided from this listing.
 */
1438 struct bb_reg_state *state;
1439 unsigned int drop_through: 1;
1445 /* The end address of a basic block is sloppy. It can be the first
1446 * byte of the last instruction in the block or it can be the last byte
1450 unsigned int changed: 1;
1451 unsigned int drop_through: 1;
/* Sorted array of basic blocks, plus the block being processed. */
1454 static struct bb **bb_list, *bb_curr;
1455 static int bb_max, bb_count;
/* Unsorted list of arcs between basic blocks. */
1457 static struct bb_jmp *bb_jmp_list;
1458 static int bb_jmp_max, bb_jmp_count;
1460 /* Add a new bb entry to the list. This does an insert sort. */
1463 bb_new(bfd_vma order)
/* grow bb_list when full (growth increment elided from this listing) */
1469 if (bb_count == bb_max) {
1470 struct bb **bb_list_new;
1472 bb_list_new = debug_kmalloc(bb_max*sizeof(*bb_list_new),
1475 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
1479 memcpy(bb_list_new, bb_list, bb_count*sizeof(*bb_list));
1480 debug_kfree(bb_list);
1481 bb_list = bb_list_new;
1483 bb = debug_kmalloc(sizeof(*bb), GFP_ATOMIC);
1485 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
1489 memset(bb, 0, sizeof(*bb));
/* find the insertion point: first entry whose start or end exceeds 'order' */
1490 for (i = 0; i < bb_count; ++i) {
1492 if ((p->start && p->start > order) ||
1493 (p->end && p->end > order))
/* shift the tail up one slot to make room at index i */
1496 for (j = bb_count-1; j >= i; --j)
1497 bb_list[j+1] = bb_list[j];
1503 /* Add a new bb_jmp entry to the list. This list is not sorted. */
1505 static struct bb_jmp *
1506 bb_jmp_new(bfd_vma from, bfd_vma to, unsigned int drop_through)
1508 struct bb_jmp *bb_jmp;
/* grow bb_jmp_list when full (growth increment elided from this listing) */
1511 if (bb_jmp_count == bb_jmp_max) {
1512 struct bb_jmp *bb_jmp_list_new;
1515 debug_kmalloc(bb_jmp_max*sizeof(*bb_jmp_list_new),
1517 if (!bb_jmp_list_new) {
1518 kdb_printf("\n\n%s: out of debug_kmalloc\n",
1523 memcpy(bb_jmp_list_new, bb_jmp_list,
1524 bb_jmp_count*sizeof(*bb_jmp_list));
1525 debug_kfree(bb_jmp_list);
1526 bb_jmp_list = bb_jmp_list_new;
/* append the new arc; its register state is filled in later */
1528 bb_jmp = bb_jmp_list + bb_jmp_count++;
1529 bb_jmp->from = from;
1531 bb_jmp->drop_through = drop_through;
1532 bb_jmp->state = NULL;
/* NOTE(review): body fragment of an elided function — removes entry i from
 * bb_list by compacting the array (presumably bb_delete; header not visible).
 */
1539 struct bb *bb = bb_list[i];
1540 memcpy(bb_list+i, bb_list+i+1, (bb_count-i-1)*sizeof(*bb_list));
1541 bb_list[--bb_count] = NULL;
/* Record a basic block boundary.  Either 'start' or 'end' may be 0 (unset);
 * duplicates are ignored, otherwise a new sorted entry is created via bb_new.
 */
1546 bb_add(bfd_vma start, bfd_vma end)
1550 /* Ignore basic blocks whose start address is outside the current
1551 * function. These occur for call instructions and for tail recursion.
1554 (start < bb_func_start || start >= bb_func_end))
/* already recorded? */
1556 for (i = 0; i < bb_count; ++i) {
1558 if ((start && bb->start == start) ||
1559 (end && bb->end == end))
1562 bb = bb_new(start ? start : end);
/* Record an arc 'from' -> 'to'; reuse an existing identical arc if present,
 * otherwise allocate one with bb_jmp_new.
 */
1570 static struct bb_jmp *
1571 bb_jmp_add(bfd_vma from, bfd_vma to, unsigned int drop_through)
1574 struct bb_jmp *bb_jmp;
1575 for (i = 0, bb_jmp = bb_jmp_list; i < bb_jmp_count; ++i, ++bb_jmp) {
1576 if (bb_jmp->from == from &&
1578 bb_jmp->drop_through == drop_through)
1581 bb_jmp = bb_jmp_new(from, to, drop_through);
/* Address of the instruction being disassembled, and the exit point. */
1585 static unsigned long bb_curr_addr, bb_exit_addr;
1586 static char bb_buffer[256]; /* A bit too big to go on stack */
1588 /* Computed jmp uses 'jmp *addr(,%reg,[48])' where 'addr' is the start of a
1589 * table of addresses that point into the current function. Run the table and
1590 * generate bb starts for each target address plus a bb_jmp from this address
1591 * to the target address.
1593 * Only called for 'jmp' instructions, with the pointer starting at 'jmp'.
1597 bb_pass1_computed_jmp(char *p)
1599 unsigned long table, scale;
/* parse "jmp *<table>(,%reg,<scale>)" out of the disassembly text */
1602 p += strcspn(p, " \t"); /* end of instruction */
1603 p += strspn(p, " \t"); /* start of address */
1606 table = simple_strtoul(p, &p, 0);
1607 if (strncmp(p, "(,%", 3) != 0)
1610 p += strcspn(p, ","); /* end of reg */
1613 scale = simple_strtoul(p, &p, 0);
/* only word-sized jump tables are handled */
1614 if (scale != KDB_WORD_SIZE || strcmp(p, ")"))
/* walk the table until an entry falls outside the current function */
1616 while (!bb_giveup) {
1617 if (kdb_getword(&addr, table, sizeof(addr)))
1619 if (addr < bb_func_start || addr >= bb_func_end)
1621 bb = bb_add(addr, 0);
1623 bb_jmp_add(bb_curr_addr, addr, 0);
1624 table += KDB_WORD_SIZE;
1628 /* Pass 1, identify the start and end of each basic block */
/* fprintf-style callback fed to the disassembler; accumulates output in
 * bb_buffer and inspects each complete line (terminated by '\n') for
 * block-ending instructions.
 */
1631 bb_dis_pass1(PTR file, const char *fmt, ...)
1633 int l = strlen(bb_buffer);
1637 vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap);
1639 if ((p = strchr(bb_buffer, '\n'))) {
1641 /* ret[q], iret[q], sysexit, sysret, ud2a or jmp[q] end a
1642 * block. As does a call to a function marked noret.
1645 p += strcspn(p, ":");
1647 bb_fixup_switch_to(p);
1648 p += strspn(p, " \t"); /* start of instruction */
1649 if (strncmp(p, "ret", 3) == 0 ||
1650 strncmp(p, "iret", 4) == 0 ||
1651 strncmp(p, "sysexit", 7) == 0 ||
1652 strncmp(p, "sysret", 6) == 0 ||
1653 strncmp(p, "ud2a", 4) == 0 ||
1654 strncmp(p, "jmp", 3) == 0) {
/* jmp may be a computed jump through a table */
1655 if (strncmp(p, "jmp", 3) == 0)
1656 bb_pass1_computed_jmp(p);
1657 bb_add(0, bb_curr_addr);
/* extract the callee name between '<' and '>' */
1659 if (strncmp(p, "call", 4) == 0) {
1660 strsep(&p, " \t"); /* end of opcode */
1662 p += strspn(p, " \t"); /* operand(s) */
1663 if (p && strchr(p, '<')) {
1664 p = strchr(p, '<') + 1;
1665 *strchr(p, '>') = '\0';
1667 bb_add(0, bb_curr_addr);
/* line consumed, reset the accumulator */
1671 bb_buffer[0] = '\0';
/* print_address callback for pass 1: print the target symbolically and
 * record it as a basic block start plus an arc from the current address.
 */
1677 bb_printaddr_pass1(bfd_vma addr, disassemble_info *dip)
1679 kdb_symtab_t symtab;
1680 unsigned int offset;
1682 /* disasm only calls the printaddr routine for the target of jmp, loop
1683 * or call instructions, i.e. the start of a basic block. call is
1684 * ignored by bb_add because the target address is outside the current
1687 dip->fprintf_func(dip->stream, "0x%lx", addr);
1688 kdbnearsym(addr, &symtab);
1689 if (symtab.sym_name) {
1690 dip->fprintf_func(dip->stream, " <%s", symtab.sym_name);
1691 if ((offset = addr - symtab.sym_start))
1692 dip->fprintf_func(dip->stream, "+0x%x", offset);
1693 dip->fprintf_func(dip->stream, ">");
1695 bb = bb_add(addr, 0);
1697 bb_jmp_add(bb_curr_addr, addr, 0);
/* NOTE(review): body of pass 1 (function header elided from this listing —
 * presumably bb_pass1).  Disassembles the whole function, then fixes up the
 * collected block list: orders start/end pairs, synthesises drop-through
 * arcs, deletes dangling entries and optionally dumps the result.
 */
1706 struct bb_jmp *bb_jmp;
1708 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
1709 kdb_printf("%s: func_name %s func_start " kdb_bfd_vma_fmt0
1710 " func_end " kdb_bfd_vma_fmt0 "\n",
/* route disassembler output through the pass-1 callbacks above */
1715 kdb_di.fprintf_func = bb_dis_pass1;
1716 kdb_di.print_address_func = bb_printaddr_pass1;
1718 bb_add(bb_func_start, 0);
/* first verify the whole function body is readable */
1719 for (bb_curr_addr = bb_func_start;
1720 bb_curr_addr < bb_func_end;
1723 if (kdb_getarea(c, bb_curr_addr)) {
1724 kdb_printf("%s: unreadable function code at ",
1726 kdb_symbol_print(bb_curr_addr, NULL, KDB_SP_DEFAULT);
1727 kdb_printf(", giving up\n");
/* disassemble every instruction; callbacks record block boundaries */
1732 for (addr = bb_func_start; addr < bb_func_end; ) {
1733 bb_curr_addr = addr;
1734 addr += kdba_id_printinsn(addr, &kdb_di);
1735 kdb_di.fprintf_func(NULL, "\n");
1740 /* Special case: a block consisting of a single instruction which is
1741 * both the target of a jmp and is also an ending instruction, so we
1742 * add two blocks using the same address, one as a start and one as an
1743 * end, in no guaranteed order. The end must be ordered after the
1746 for (i = 0; i < bb_count-1; ++i) {
1747 struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1];
1748 if (bb1->end && bb1->end == bb2->start) {
1750 bb_list[i+1] = bb_list[i];
1755 /* Some bb have a start address, some have an end address. Collapse
1756 * them into entries that have both start and end addresses. The first
1757 * entry is guaranteed to have a start address.
1759 for (i = 0; i < bb_count-1; ++i) {
1760 struct bb *bb1 = bb_list[i], *bb2 = bb_list[i+1];
/* a block that falls into its successor gets a drop-through arc */
1764 bb1->end = bb2->start - 1;
1765 bb1->drop_through = 1;
1766 bb_jmp_add(bb1->end, bb2->start, 1);
1768 bb1->end = bb2->end;
/* the last block ends where the function ends */
1772 bb = bb_list[bb_count-1];
1774 bb->end = bb_func_end - 1;
1776 /* It would be nice to check that all bb have a valid start and end
1777 * address but there is just too much garbage code in the kernel to do
1778 * that check. Aligned functions in assembler code mean that there is
1779 * space between the end of one function and the start of the next and
1780 * that space contains previous code from the assembler's buffers. It
1781 * looks like dead code with nothing that branches to it, so no start
1782 * address. do_sys_vm86() ends with 'jmp resume_userspace' which the C
1783 * compiler does not know about so gcc appends the normal exit code,
1784 * again nothing branches to this dangling code.
1786 * The best we can do is delete bb entries with no start address.
1788 for (i = 0; i < bb_count; ++i) {
1789 struct bb *bb = bb_list[i];
1793 for (i = 0; i < bb_count; ++i) {
1794 struct bb *bb = bb_list[i];
1796 kdb_printf("%s: incomplete bb state\n", __FUNCTION__);
/* debug dump of the final block and arc lists */
1806 kdb_printf("%s: end\n", __FUNCTION__);
1807 for (i = 0; i < bb_count; ++i) {
1809 kdb_printf("  bb[%d] start "
1811 " end " kdb_bfd_vma_fmt0
1813 i, bb->start, bb->end, bb->drop_through);
1816 for (i = 0; i < bb_jmp_count; ++i) {
1817 bb_jmp = bb_jmp_list + i;
1818 kdb_printf("  bb_jmp[%d] from "
1820 " to " kdb_bfd_vma_fmt0
1821 " drop_through %d\n",
1822 i, bb_jmp->from, bb_jmp->to, bb_jmp->drop_through);
1826 /* Pass 2, record register changes in each basic block */
1828 /* For each opcode that we care about, indicate how it uses its operands. Most
1829 * opcodes can be handled generically because they completely specify their
1830 * operands in the instruction, however many opcodes have side effects such as
1831 * reading or writing rax or updating rsp. Instructions that change registers
1832 * that are not listed in the operands must be handled as special cases. In
1833 * addition, instructions that copy registers while preserving their contents
1834 * (push, pop, mov) or change the contents in a well defined way (add with an
1835 * immediate, lea) must be handled as special cases in order to track the
1836 * register contents.
1838 * The tables below only list opcodes that are actually used in the Linux
1839 * kernel, so they omit most of the floating point and all of the SSE type
1840 * instructions. The operand usage entries only cater for accesses to memory
1841 * and to the integer registers, accesses to floating point registers and flags
1842 * are not relevant for kernel backtraces.
1845 enum bb_operand_usage {
1847 /* generic entries. because xchg can do any combinations of
1848 * read src, write src, read dst and write dst we need to
1849 * define all 16 possibilities. These are ordered by rs = 1,
1850 * rd = 2, ws = 4, wd = 8, bb_usage_x*() functions rely on this
1853 BBOU_RS = 1, /* read src */ /* 1 */
1854 BBOU_RD, /* read dst */ /* 2 */
1856 BBOU_WS, /* write src */ /* 4 */
1859 BBOU_RSRDWS, /* 7 */
1860 BBOU_WD, /* write dst */ /* 8 */
1863 BBOU_RSRDWD, /* 11 */
1865 BBOU_RSWSWD, /* 13 */
1866 BBOU_RDWSWD, /* 14 */
1867 BBOU_RSRDWSWD, /* 15 */
1868 /* opcode specific entries */
1914 struct bb_opcode_usage {
1916 enum bb_operand_usage usage;
1920 /* This table is sorted in alphabetical order of opcode, except that the
1921 * trailing '"' is treated as a high value. For example, 'in' sorts after
1922 * 'inc', 'bt' after 'btc'. This modified sort order ensures that shorter
1923 * opcodes come after long ones. A normal sort would put 'in' first, so 'in'
1924 * would match both 'inc' and 'in'. When adding any new entries to this table,
1925 * be careful to put shorter entries last in their group.
1927 * To automatically sort the table (in vi)
1928 * Mark the first and last opcode line with 'a and 'b
1930 * !'bsed -e 's/"}/}}/' | LANG=C sort -t '"' -k2 | sed -e 's/}}/"}/'
1932 * If a new instruction has to be added, first consider if it affects registers
1933 * other than those listed in the operands. Also consider if you want to track
1934 * the results of issuing the instruction, IOW can you extract useful
1935 * information by looking in detail at the modified registers or memory. If
1936 * either test is true then you need a special case to handle the instruction.
1938 * The generic entries at the start of enum bb_operand_usage all have one thing
1939 * in common, if a register or memory location is updated then that location
1940 * becomes undefined, i.e. we lose track of anything that was previously saved
1941 * in that location. So only use a generic BBOU_* value when the result of the
1942 * instruction cannot be calculated exactly _and_ when all the affected
1943 * registers are listed in the operands.
1947 * 'call' does not generate a known result, but as a side effect of call,
1948 * several scratch registers become undefined, so it needs a special BBOU_CALL
1951 * 'adc' generates a variable result, it depends on the carry flag, so 'adc'
1952 * gets a generic entry. 'add' can generate an exact result (add with
1953 * immediate on a register that points to the stack) or it can generate an
1954 * unknown result (add a variable, or add immediate to a register that does not
1955 * contain a stack pointer) so 'add' has its own BBOU_ADD entry.
/* Opcode -> operand-usage table; sorted per the comment above (alphabetical
 * with trailing '"' high so shorter opcodes follow longer ones).  Entry
 * format: {prefix length to match, usage class, opcode text}.
 */
1958 static const struct bb_opcode_usage
1959 bb_opcode_usage_all[] = {
1960 {3, BBOU_RSRDWD, "adc"},
1961 {3, BBOU_ADD, "add"},
1962 {3, BBOU_AND, "and"},
1963 {3, BBOU_RSWD, "bsf"},
1964 {3, BBOU_RSWD, "bsr"},
1965 {5, BBOU_RSWS, "bswap"},
1966 {3, BBOU_RSRDWD, "btc"},
1967 {3, BBOU_RSRDWD, "btr"},
1968 {3, BBOU_RSRDWD, "bts"},
1969 {2, BBOU_RSRD, "bt"},
1970 {4, BBOU_CALL, "call"},
1971 {4, BBOU_CBW, "cbtw"}, /* Intel cbw */
1972 {3, BBOU_NOP, "clc"},
1973 {3, BBOU_NOP, "cld"},
1974 {7, BBOU_RS, "clflush"},
1975 {4, BBOU_NOP, "clgi"},
1976 {3, BBOU_NOP, "cli"},
1977 {4, BBOU_CWD, "cltd"}, /* Intel cdq */
1978 {4, BBOU_CBW, "cltq"}, /* Intel cdqe */
1979 {4, BBOU_NOP, "clts"},
1980 {4, BBOU_CMOV, "cmov"},
1981 {9, BBOU_CMPXCHGD,"cmpxchg16"},
1982 {8, BBOU_CMPXCHGD,"cmpxchg8"},
1983 {7, BBOU_CMPXCHG, "cmpxchg"},
1984 {3, BBOU_RSRD, "cmp"},
1985 {5, BBOU_CPUID, "cpuid"},
1986 {4, BBOU_CWD, "cqto"}, /* Intel cqo */
1987 {4, BBOU_CWD, "cwtd"}, /* Intel cwd */
1988 {4, BBOU_CBW, "cwtl"}, /* Intel cwde */
1989 {4, BBOU_NOP, "data"}, /* alternative ASM_NOP<n> generates data16 on x86_64 */
1990 {3, BBOU_RSWS, "dec"},
1991 {3, BBOU_DIV, "div"},
1992 {5, BBOU_RS, "fdivl"},
1993 {5, BBOU_NOP, "finit"},
1994 {6, BBOU_RS, "fistpl"},
1995 {4, BBOU_RS, "fldl"},
1996 {4, BBOU_RS, "fmul"},
1997 {6, BBOU_NOP, "fnclex"},
1998 {6, BBOU_NOP, "fninit"},
1999 {6, BBOU_RS, "fnsave"},
2000 {7, BBOU_NOP, "fnsetpm"},
2001 {6, BBOU_RS, "frstor"},
2002 {5, BBOU_WS, "fstsw"},
2003 {5, BBOU_RS, "fsubp"},
2004 {5, BBOU_NOP, "fwait"},
2005 {7, BBOU_RS, "fxrstor"},
2006 {6, BBOU_RS, "fxsave"},
2007 {3, BBOU_NOP, "hlt"},
2008 {4, BBOU_IDIV, "idiv"},
2009 {4, BBOU_IMUL, "imul"},
2010 {3, BBOU_RSWS, "inc"},
2011 {3, BBOU_NOP, "int"},
2012 {7, BBOU_RSRD, "invlpga"},
2013 {6, BBOU_RS, "invlpg"},
2014 {2, BBOU_RSWD, "in"},
2015 {4, BBOU_IRET, "iret"},
2017 {4, BBOU_LAHF, "lahf"},
2018 {3, BBOU_RSWD, "lar"},
2019 {5, BBOU_RS, "lcall"},
2020 {5, BBOU_LEAVE, "leave"},
2021 {3, BBOU_LEA, "lea"},
2022 {6, BBOU_NOP, "lfence"},
2023 {4, BBOU_RS, "lgdt"},
2024 {4, BBOU_RS, "lidt"},
2025 {4, BBOU_RS, "ljmp"},
2026 {4, BBOU_RS, "lldt"},
2027 {4, BBOU_RS, "lmsw"},
2028 {4, BBOU_LODS, "lods"},
2029 {4, BBOU_LOOP, "loop"},
2030 {4, BBOU_NOP, "lret"},
2031 {3, BBOU_RSWD, "lsl"},
2032 {3, BBOU_LSS, "lss"},
2033 {3, BBOU_RS, "ltr"},
2034 {6, BBOU_NOP, "mfence"},
2035 {7, BBOU_MONITOR, "monitor"},
2036 {4, BBOU_MOVS, "movs"},
2037 {3, BBOU_MOV, "mov"},
2038 {3, BBOU_MUL, "mul"},
2039 {5, BBOU_MWAIT, "mwait"},
2040 {3, BBOU_RSWS, "neg"},
2041 {3, BBOU_NOP, "nop"},
2042 {3, BBOU_RSWS, "not"},
2043 {2, BBOU_RSRDWD, "or"},
2044 {4, BBOU_OUTS, "outs"},
2045 {3, BBOU_RSRD, "out"},
2046 {5, BBOU_NOP, "pause"},
2047 {4, BBOU_POPF, "popf"},
2048 {3, BBOU_POP, "pop"},
2049 {8, BBOU_RS, "prefetch"},
2050 {5, BBOU_PUSHF, "pushf"},
2051 {4, BBOU_PUSH, "push"},
2052 {3, BBOU_RSRDWD, "rcl"},
2053 {3, BBOU_RSRDWD, "rcr"},
2054 {5, BBOU_RDMSR, "rdmsr"},
2055 {5, BBOU_RDMSR, "rdpmc"}, /* same side effects as rdmsr */
2056 {5, BBOU_RDTSC, "rdtsc"},
2057 {3, BBOU_RET, "ret"},
2058 {3, BBOU_RSRDWD, "rol"},
2059 {3, BBOU_RSRDWD, "ror"},
2060 {4, BBOU_SAHF, "sahf"},
2061 {3, BBOU_RSRDWD, "sar"},
2062 {3, BBOU_RSRDWD, "sbb"},
2063 {4, BBOU_SCAS, "scas"},
2064 {3, BBOU_WS, "set"},
2065 {6, BBOU_NOP, "sfence"},
2066 {4, BBOU_WS, "sgdt"},
2067 {3, BBOU_RSRDWD, "shl"},
2068 {3, BBOU_RSRDWD, "shr"},
2069 {4, BBOU_WS, "sidt"},
2070 {4, BBOU_WS, "sldt"},
2071 {3, BBOU_NOP, "stc"},
2072 {3, BBOU_NOP, "std"},
2073 {4, BBOU_NOP, "stgi"},
2074 {3, BBOU_NOP, "sti"},
2075 {4, BBOU_SCAS, "stos"},
2076 {4, BBOU_WS, "strl"},
2077 {3, BBOU_WS, "str"},
2078 {3, BBOU_SUB, "sub"},
2079 {6, BBOU_NOP, "swapgs"},
2080 {7, BBOU_SYSEXIT, "sysexit"},
2081 {6, BBOU_SYSRET, "sysret"},
2082 {4, BBOU_NOP, "test"},
2083 {4, BBOU_NOP, "ud2a"},
2084 {7, BBOU_RS, "vmclear"},
2085 {8, BBOU_NOP, "vmlaunch"},
2086 {6, BBOU_RS, "vmload"},
2087 {7, BBOU_RS, "vmptrld"},
2088 {6, BBOU_WD, "vmread"}, /* vmread src is an encoding, not a register */
2089 {8, BBOU_NOP, "vmresume"},
2090 {5, BBOU_RS, "vmrun"},
2091 {6, BBOU_RS, "vmsave"},
2092 {7, BBOU_WD, "vmwrite"}, /* vmwrite src is an encoding, not a register */
2093 {3, BBOU_NOP, "vmxoff"},
2094 {6, BBOU_NOP, "wbinvd"},
2095 {5, BBOU_WRMSR, "wrmsr"},
2096 {4, BBOU_XADD, "xadd"},
2097 {4, BBOU_XCHG, "xchg"},
2098 {3, BBOU_XOR, "xor"},
2099 {4, BBOU_NOP, "xrstor"},
2100 {4, BBOU_NOP, "xsave"},
2101 {10, BBOU_WS, "xstore-rng"},
2104 /* To speed up searching, index bb_opcode_usage_all by the first letter of each
/* NOTE(review): fragments of the per-letter index, struct bb_operand and
 * struct bb_decode; several members are elided from this listing.
 */
2108 const struct bb_opcode_usage *opcode;
2110 } bb_opcode_usage[26];
2118 enum bb_reg_code base_rc; /* UNDEFINED or RAX through R15 */
2119 enum bb_reg_code index_rc; /* UNDEFINED or RAX through R15 */
2120 unsigned int present :1;
2121 unsigned int disp_present :1;
2122 unsigned int indirect :1; /* must be combined with reg or memory */
2123 unsigned int immediate :1; /* exactly one of these 3 must be set */
2124 unsigned int reg :1;
2125 unsigned int memory :1;
2131 const struct bb_opcode_usage *match;
2132 struct bb_operand src;
2133 struct bb_operand dst;
2134 struct bb_operand dst2;
/* decode state for the instruction currently being analysed */
2137 static struct bb_decode bb_decode;
/* Binary search bb_reg_code_map for register name 'reg' (reg+1 skips the
 * leading '%'); returns BBRG_UNDEFINED when the name is not found.
 */
2139 static enum bb_reg_code
2140 bb_reg_map(const char *reg)
2143 const struct bb_reg_code_map *p;
2145 hi = ARRAY_SIZE(bb_reg_code_map) - 1;
2147 int mid = (hi + lo) / 2;
2148 p = bb_reg_code_map + mid;
2149 c = strcmp(p->name, reg+1);
2157 return BBRG_UNDEFINED;
/* Parse one AT&T-syntax operand from the disassembly text into 'operand':
 * segment prefix, displacement, base, index, scale and the classification
 * bits (immediate/reg/memory, indirect).
 */
2161 bb_parse_operand(char *str, struct bb_operand *operand)
2165 operand->present = 1;
2166 /* extract any segment prefix */
2167 if (p[0] == '%' && p[1] && p[2] == 's' && p[3] == ':') {
2168 operand->memory = 1;
2169 operand->segment = p;
2173 /* extract displacement, base, index, scale */
2175 /* jmp/call *disp(%reg), *%reg or *0xnnn */
2176 operand->indirect = 1;
2184 operand->immediate = 1;
2185 operand->disp_present = 1;
2186 operand->disp = simple_strtoul(p+1, &p, 0);
2187 } else if (isdigit(*p)) {
2188 operand->memory = 1;
2189 operand->disp_present = 1;
2190 operand->disp = simple_strtoul(p, &p, 0) * sign;
2195 } else if (*p == '(') {
/* "(base,index,scale)" — empty fields become NULL */
2196 operand->memory = 1;
2197 operand->base = ++p;
2198 p += strcspn(p, ",)");
2199 if (p == operand->base)
2200 operand->base = NULL;
2203 operand->index = ++p;
2204 p += strcspn(p, ",)");
2205 if (p == operand->index)
2206 operand->index = NULL;
2210 operand->scale = simple_strtoul(p+1, &p, 0);
2214 kdb_printf("%s: unexpected token '%c' after disp '%s'\n",
2215 __FUNCTION__, *p, str);
/* sanity: exactly one of immediate/reg/memory; indirect immediates invalid */
2218 if ((operand->immediate + operand->reg + operand->memory != 1) ||
2219 (operand->indirect && operand->immediate)) {
2220 kdb_printf("%s: incorrect decode '%s' N %d I %d R %d M %d\n",
2222 operand->indirect, operand->immediate, operand->reg,
/* map textual base/index registers to BBRG_* codes */
2227 operand->base_rc = bb_reg_map(operand->base);
2229 operand->index_rc = bb_reg_map(operand->index);
/* Debug helper: dump a decoded operand.  The flag letters are N (indirect)
 * and I/R/M (immediate/register/memory classification).
 */
2233 bb_print_operand(const char *type, const struct bb_operand *operand)
2235 if (!operand->present)
2237 kdb_printf("  %s %c%c: ",
2239 operand->indirect ? 'N' : ' ',
2240 operand->immediate ? 'I' :
2241 operand->reg ? 'R' :
2242 operand->memory ? 'M' :
2245 if (operand->segment)
2246 kdb_printf("%s:", operand->segment);
2247 if (operand->immediate) {
2248 kdb_printf("$0x%lx", operand->disp);
2249 } else if (operand->reg) {
2250 if (operand->indirect)
2252 kdb_printf("%s", operand->base);
2253 } else if (operand->memory) {
2254 if (operand->indirect && (operand->base || operand->index))
2256 if (operand->disp_present) {
2257 kdb_printf("0x%lx", operand->disp);
2259 if (operand->base || operand->index || operand->scale) {
2262 kdb_printf("%s", operand->base);
2263 if (operand->index || operand->scale)
2266 kdb_printf("%s", operand->index);
2268 kdb_printf(",%d", operand->scale);
/* show the mapped BBRG_* codes for base/index, when defined */
2272 if (operand->base_rc)
2273 kdb_printf(" base_rc %d (%s)",
2274 operand->base_rc, bbrg_name[operand->base_rc]);
2275 if (operand->index_rc)
2276 kdb_printf(" index_rc %d (%s)",
2278 bbrg_name[operand->index_rc]);
/* Debug helper: show which table entry matched the current opcode. */
2283 bb_print_opcode(void)
2285 const struct bb_opcode_usage *o = bb_decode.match;
2287 if (bb_decode.prefix)
2288 kdb_printf("%s ", bb_decode.prefix);
2289 kdb_printf("opcode '%s' matched by '%s', usage %d\n",
2290 bb_decode.opcode, o->opcode, o->usage);
/* Look up bb_decode.opcode in the per-first-letter slice of the usage table;
 * on failure print at most bb_parse_opcode_error_limit diagnostics.
 */
2294 bb_parse_opcode(void)
2297 const struct bb_opcode_usage *o;
2298 static int bb_parse_opcode_error_limit = 5;
2299 c = bb_decode.opcode[0] - 'a';
2300 if (c < 0 || c >= ARRAY_SIZE(bb_opcode_usage))
2302 o = bb_opcode_usage[c].opcode;
/* linear scan within the letter's slice; table order makes prefix match safe */
2305 for (i = 0; i < bb_opcode_usage[c].size; ++i, ++o) {
2306 if (strncmp(bb_decode.opcode, o->opcode, o->length) == 0) {
2307 bb_decode.match = o;
2314 if (!bb_parse_opcode_error_limit)
2316 --bb_parse_opcode_error_limit;
2317 kdb_printf("%s: no match at [%s]%s " kdb_bfd_vma_fmt0 " - '%s'\n",
2319 bb_mod_name, bb_func_name, bb_curr_addr,
/* True iff 'reg' denotes one of the KDB_INT_REGISTERS integer registers. */
2325 bb_is_int_reg(enum bb_reg_code reg)
2327 return reg >= BBRG_RAX && reg < (BBRG_RAX + KDB_INT_REGISTERS);
/* True for a plain disp(base) memory operand: no index, no scale. */
2331 bb_is_simple_memory(const struct bb_operand *operand)
2333 return operand->memory &&
2334 bb_is_int_reg(operand->base_rc) &&
2335 !operand->index_rc &&
2336 operand->scale == 0 &&
/* True for an absolute memory operand: displacement only, no registers. */
2341 bb_is_static_disp(const struct bb_operand *operand)
2343 return operand->memory &&
2344 !operand->base_rc &&
2345 !operand->index_rc &&
2346 operand->scale == 0 &&
2347 !operand->segment &&
/* Return the tracked contents (a BBRG_* code) of integer register 'reg'. */
2351 static enum bb_reg_code
2352 bb_reg_code_value(enum bb_reg_code reg)
2354 BB_CHECK(!bb_is_int_reg(reg), reg, 0);
2355 return bb_reg_state->contains[reg - BBRG_RAX].value;
/* Return the tracked stack-pointer offset held in register 'reg'. */
2359 bb_reg_code_offset(enum bb_reg_code reg)
2361 BB_CHECK(!bb_is_int_reg(reg), reg, 0);
2362 return bb_reg_state->contains[reg - BBRG_RAX].offset;
/* Record that register 'dst' now contains the value code 'src'. */
2366 bb_reg_code_set_value(enum bb_reg_code dst, enum bb_reg_code src)
2368 BB_CHECK(!bb_is_int_reg(dst), dst, );
2369 bb_reg_state->contains[dst - BBRG_RAX].value = src;
/* Record the stack-pointer offset associated with register 'dst'. */
2373 bb_reg_code_set_offset(enum bb_reg_code dst, short offset)
2375 BB_CHECK(!bb_is_int_reg(dst), dst, );
2376 bb_reg_state->contains[dst - BBRG_RAX].offset = offset;
/* True iff register 'reg' is known to hold the original stack pointer. */
2380 bb_is_osp_defined(enum bb_reg_code reg)
2382 if (bb_is_int_reg(reg))
2383 return bb_reg_code_value(reg) == BBRG_OSP;
/* Return the actual (hex) value recorded for register 'reg'. */
2389 bb_actual_value(enum bb_reg_code reg)
2391 BB_CHECK(!bb_is_int_reg(reg), reg, 0);
2392 return bb_actual[reg - BBRG_RAX].value;
/* True iff the actual value recorded for 'reg' is valid. */
2396 bb_actual_valid(enum bb_reg_code reg)
2398 BB_CHECK(!bb_is_int_reg(reg), reg, 0);
2399 return bb_actual[reg - BBRG_RAX].valid;
/* Record the actual (hex) value of register 'reg'. */
2403 bb_actual_set_value(enum bb_reg_code reg, bfd_vma value)
2405 BB_CHECK(!bb_is_int_reg(reg), reg, );
2406 bb_actual[reg - BBRG_RAX].value = value;
/* Set or clear the valid bit on the actual value of register 'reg'. */
2410 bb_actual_set_valid(enum bb_reg_code reg, int valid)
2412 BB_CHECK(!bb_is_int_reg(reg), reg, );
2413 bb_actual[reg - BBRG_RAX].valid = valid;
2416 /* The scheduler code switches RSP then does PUSH, it is not an error for RSP
2417 * to be undefined in this area of the code.
/* True iff the current address lies inside the scheduler text section. */
2420 bb_is_scheduler_address(void)
2422 return bb_curr_addr >= bb__sched_text_start &&
2423 bb_curr_addr < bb__sched_text_end;
/* Note a read of register 'reg'.  If it still holds its original input value
 * and is one of the parameter-passing registers, bump bb_reg_params so we
 * know at least that many parameters arrive in registers.
 */
2427 bb_reg_read(enum bb_reg_code reg)
2430 if (!bb_is_int_reg(reg) ||
2431 bb_reg_code_value(reg) != reg)
2434 i < min_t(unsigned int, REGPARM, ARRAY_SIZE(bb_param_reg));
2436 if (reg == bb_param_reg[i]) {
2441 bb_reg_params = max(bb_reg_params, r);
/* Debug helper: dump a register state — each register's tracked contents,
 * then every in-use memory slot with its osp-relative address and value.
 */
2445 bb_do_reg_state_print(const struct bb_reg_state *s)
2447 int i, offset_address, offset_value;
2448 const struct bb_memory_contains *c;
2449 enum bb_reg_code value;
2450 kdb_printf("  bb_reg_state %p\n", s);
2451 for (i = 0; i < ARRAY_SIZE(s->contains); ++i) {
2452 value = s->contains[i].value;
2453 offset_value = s->contains[i].offset;
2454 kdb_printf("    %s = %s",
2455 bbrg_name[i + BBRG_RAX], bbrg_name[value]);
/* only stack pointers carry a meaningful offset */
2456 if (value == BBRG_OSP)
2457 KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", "");
2460 for (i = 0, c = s->memory; i < s->mem_count; ++i, ++c) {
2461 offset_address = c->offset_address;
2463 offset_value = c->offset_value;
2464 kdb_printf("    slot %d offset_address %c0x%x %s",
2466 offset_address >= 0 ? '+' : '-',
2467 offset_address >= 0 ? offset_address : -offset_address,
2469 if (value == BBRG_OSP)
2470 KDB_DEBUG_BB_OFFSET_PRINTF(offset_value, "", "");
/* Wrapper around bb_do_reg_state_print (gating condition elided here). */
2476 bb_reg_state_print(const struct bb_reg_state *s)
2479 bb_do_reg_state_print(s);
2482 /* Set register 'dst' to contain the value from 'src'. This includes reading
2483 * from 'src' and writing to 'dst'. The offset value is copied iff 'src'
2484 * contains a stack pointer.
2486 * Be very careful about the context here. 'dst' and 'src' reflect integer
2487 * registers by name, _not_ by the value of their contents. "mov %rax,%rsi"
2488 * will call this function as bb_reg_set_reg(BBRG_RSI, BBRG_RAX), which
2489 * reflects what the assembler code is doing. However we need to track the
2490 * _values_ in the registers, not their names. IOW, we really care about "what
2491 * value does rax contain when it is copied into rsi?", so we can record the
2492 * fact that we now have two copies of that value, one in rax and one in rsi.
2496 bb_reg_set_reg(enum bb_reg_code dst, enum bb_reg_code src)
2498 enum bb_reg_code src_value = BBRG_UNDEFINED;
2499 short offset_value = 0;
2500 KDB_DEBUG_BB("  %s = %s", bbrg_name[dst], bbrg_name[src]);
/* read side: resolve what 'src' currently contains */
2501 if (bb_is_int_reg(src)) {
2503 src_value = bb_reg_code_value(src);
2504 KDB_DEBUG_BB(" (%s", bbrg_name[src_value]);
2505 if (bb_is_osp_defined(src)) {
2506 offset_value = bb_reg_code_offset(src);
2507 KDB_DEBUG_BB_OFFSET(offset_value, "", "");
/* write side: propagate value (and offset) into 'dst' */
2511 if (bb_is_int_reg(dst)) {
2512 bb_reg_code_set_value(dst, src_value);
2513 bb_reg_code_set_offset(dst, offset_value);
/* Mark register 'dst' as containing an unknown value. */
2519 bb_reg_set_undef(enum bb_reg_code dst)
2521 bb_reg_set_reg(dst, BBRG_UNDEFINED);
2524 /* Delete any record of a stored register held in osp + 'offset' */
2527 bb_delete_memory(short offset)
2530 struct bb_memory_contains *c;
2531 for (i = 0, c = bb_reg_state->memory;
2532 i < bb_reg_state->mem_count;
2534 if (c->offset_address == offset &&
2535 c->value != BBRG_UNDEFINED) {
2536 KDB_DEBUG_BB("  delete %s from ",
2537 bbrg_name[c->value]);
2538 KDB_DEBUG_BB_OFFSET(offset, "osp", "");
2539 KDB_DEBUG_BB(" slot %d\n",
2540 (int)(c - bb_reg_state->memory));
/* relies on BBRG_UNDEFINED == 0, so memset clears the whole slot */
2541 memset(c, BBRG_UNDEFINED, sizeof(*c));
/* shrink mem_count when the freed slot was the last in use */
2542 if (i == bb_reg_state->mem_count - 1)
2543 --bb_reg_state->mem_count;
2548 /* Set memory location *('dst' + 'offset_address') to contain the supplied
2549 * value and offset. 'dst' is assumed to be a register that contains a stack
2554 bb_memory_set_reg_value(enum bb_reg_code dst, short offset_address,
2555 enum bb_reg_code value, short offset_value)
2558 struct bb_memory_contains *c, *free = NULL;
2559 BB_CHECK(!bb_is_osp_defined(dst), dst, );
2560 KDB_DEBUG_BB("  *(%s", bbrg_name[dst]);
2561 KDB_DEBUG_BB_OFFSET(offset_address, "", "");
/* normalise the address to be relative to the original stack pointer */
2562 offset_address += bb_reg_code_offset(dst);
2563 KDB_DEBUG_BB_OFFSET(offset_address, " osp", ") = ");
2564 KDB_DEBUG_BB("%s", bbrg_name[value]);
2565 if (value == BBRG_OSP)
2566 KDB_DEBUG_BB_OFFSET(offset_value, "", "");
/* find an existing slot for this address, else remember a free slot */
2567 for (i = 0, c = bb_reg_state->memory;
2568 i < bb_reg_state_max;
2570 if (c->offset_address == offset_address)
2572 else if (c->value == BBRG_UNDEFINED && !free)
/* no slot available: grow bb_reg_state by 5 slots, zero-filling the new
 * tail (BBRG_UNDEFINED == 0 makes memset correct here)
 */
2576 struct bb_reg_state *new, *old = bb_reg_state;
2577 size_t old_size, new_size;
2579 old_size = sizeof(*old) + bb_reg_state_max *
2580 sizeof(old->memory[0]);
2581 slot = bb_reg_state_max;
2582 bb_reg_state_max += 5;
2583 new_size = sizeof(*new) + bb_reg_state_max *
2584 sizeof(new->memory[0]);
2585 new = debug_kmalloc(new_size, GFP_ATOMIC);
2587 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
2590 memcpy(new, old, old_size);
2591 memset((char *)new + old_size, BBRG_UNDEFINED,
2592 new_size - old_size);
2595 free = bb_reg_state->memory + slot;
/* fill in the chosen slot and keep mem_count covering it */
2599 int slot = free - bb_reg_state->memory;
2600 free->offset_address = offset_address;
2601 free->value = value;
2602 free->offset_value = offset_value;
2603 KDB_DEBUG_BB(" slot %d", slot);
2604 bb_reg_state->mem_count = max(bb_reg_state->mem_count, slot+1);
2609 /* Set memory location *('dst' + 'offset') to contain the value from register
2610 * 'src'. 'dst' is assumed to be a register that contains a stack pointer.
2611 * This differs from bb_memory_set_reg_value because it takes a src register
2612 * which contains a value and possibly an offset, bb_memory_set_reg_value is
2613 * passed the value and offset directly.
2617 bb_memory_set_reg(enum bb_reg_code dst, enum bb_reg_code src,
2618 short offset_address)
2621 enum bb_reg_code value;
2622 BB_CHECK(!bb_is_osp_defined(dst), dst, );
/* Only integer registers carry tracked values; anything else is ignored. */
2623 if (!bb_is_int_reg(src))
2625 value = bb_reg_code_value(src);
/* Storing an undefined register invalidates whatever the slot held. */
2626 if (value == BBRG_UNDEFINED) {
2627 bb_delete_memory(offset_address + bb_reg_code_offset(dst));
2630 offset_value = bb_reg_code_offset(src);
2632 bb_memory_set_reg_value(dst, offset_address, value, offset_value);
2635 /* Set register 'dst' to contain the value from memory *('src' + offset_address).
2636 * 'src' is assumed to be a register that contains a stack pointer.
2640 bb_reg_set_memory(enum bb_reg_code dst, enum bb_reg_code src, short offset_address)
2643 struct bb_memory_contains *s;
2644 BB_CHECK(!bb_is_osp_defined(src), src, );
2645 KDB_DEBUG_BB(" %s = *(%s",
2646 bbrg_name[dst], bbrg_name[src]);
2647 KDB_DEBUG_BB_OFFSET(offset_address, "", ")");
/* Convert the src-relative address to an osp-relative address. */
2648 offset_address += bb_reg_code_offset(src);
2649 KDB_DEBUG_BB_OFFSET(offset_address, " (osp", ")");
/* Search the tracked memory slots for this address.
 * NOTE(review): listing elided — loop increment and break are missing here.
 */
2650 for (i = 0, s = bb_reg_state->memory;
2651 i < bb_reg_state->mem_count;
2653 if (s->offset_address == offset_address && bb_is_int_reg(dst)) {
2654 bb_reg_code_set_value(dst, s->value);
2655 KDB_DEBUG_BB(" value %s", bbrg_name[s->value]);
/* Only OSP-derived values carry a meaningful offset; others get 0. */
2656 if (s->value == BBRG_OSP) {
2657 bb_reg_code_set_offset(dst, s->offset_value);
2658 KDB_DEBUG_BB_OFFSET(s->offset_value, "", "");
2660 bb_reg_code_set_offset(dst, 0);
/* No matching slot: the load target becomes undefined. */
2666 bb_reg_set_reg(dst, BBRG_UNDEFINED);
2671 /* A generic read from an operand. */
2674 bb_read_operand(const struct bb_operand *operand)
2677 if (operand->base_rc)
2678 bb_reg_read(operand->base_rc);
2679 if (operand->index_rc)
2680 bb_reg_read(operand->index_rc);
/* A simple memory read through an osp-mapped base register looks like a
 * stack-parameter access; track the highest word index seen so the
 * argument printer knows how many parameters may be on the stack.
 * lea is excluded because it computes an address without reading memory.
 */
2681 if (bb_is_simple_memory(operand) &&
2682 bb_is_osp_defined(operand->base_rc) &&
2683 bb_decode.match->usage != BBOU_LEA) {
2684 m = (bb_reg_code_offset(operand->base_rc) + operand->disp +
2685 KDB_WORD_SIZE - 1) / KDB_WORD_SIZE;
2686 bb_memory_params = max(bb_memory_params, m);
2690 /* A generic write to an operand, resulting in an undefined value in that
2691 * location. All well defined operands are handled separately, this function
2692 * only handles the opcodes where the result is undefined.
2696 bb_write_operand(const struct bb_operand *operand)
2698 enum bb_reg_code base_rc = operand->base_rc;
/* Writing through memory still *reads* the base/index registers used to
 * form the address; writing to a register makes that register undefined.
 */
2699 if (operand->memory) {
2701 bb_reg_read(base_rc);
2702 if (operand->index_rc)
2703 bb_reg_read(operand->index_rc);
2704 } else if (operand->reg && base_rc) {
2705 bb_reg_set_undef(base_rc);
/* An undefined store to a tracked stack word invalidates that slot. */
2707 if (bb_is_simple_memory(operand) && bb_is_osp_defined(base_rc)) {
2709 offset = bb_reg_code_offset(base_rc) + operand->disp;
/* Round down to the containing word boundary before deleting. */
2710 offset = ALIGN(offset - KDB_WORD_SIZE + 1, KDB_WORD_SIZE);
2711 bb_delete_memory(offset);
2715 /* Adjust a register that contains a stack pointer */
2718 bb_adjust_osp(enum bb_reg_code reg, int adjust)
2720 int offset = bb_reg_code_offset(reg), old_offset = offset;
2721 KDB_DEBUG_BB(" %s osp offset ", bbrg_name[reg]);
2722 KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", " -> ");
/* NOTE(review): listing elided — the line applying 'adjust' to 'offset'
 * is missing from this excerpt; only the store of the result is visible.
 */
2724 bb_reg_code_set_offset(reg, offset);
2725 KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(reg), "", "\n");
2726 /* When RSP is adjusted upwards, it invalidates any memory
2727 * stored between the old and current stack offsets.
2729 if (reg == BBRG_RSP) {
2730 while (old_offset < bb_reg_code_offset(reg)) {
2731 bb_delete_memory(old_offset);
2732 old_offset += KDB_WORD_SIZE;
2737 /* The current instruction adjusts a register that contains a stack pointer.
2738 * Direction is 1 or -1, depending on whether the instruction is add/lea or
2743 bb_adjust_osp_instruction(int direction)
2745 enum bb_reg_code dst_reg = bb_decode.dst.base_rc;
/* An immediate (or lea displacement) gives a known, constant adjustment. */
2746 if (bb_decode.src.immediate ||
2747 bb_decode.match->usage == BBOU_LEA /* lea has its own checks */) {
2748 int adjust = direction * bb_decode.src.disp;
2749 bb_adjust_osp(dst_reg, adjust);
2751 /* variable stack adjustment, osp offset is not well defined */
2752 KDB_DEBUG_BB(" %s osp offset ", bbrg_name[dst_reg]);
2753 KDB_DEBUG_BB_OFFSET(bb_reg_code_offset(dst_reg), "", " -> undefined\n");
2754 bb_reg_code_set_value(dst_reg, BBRG_UNDEFINED);
2755 bb_reg_code_set_offset(dst_reg, 0);
2759 /* Some instructions using memory have an explicit length suffix (b, w, l, q).
2760 * The equivalent instructions using a register imply the length from the
2761 * register name. Deduce the operand length.
2765 bb_operand_length(const struct bb_operand *operand, char opcode_suffix)
/* First try the explicit AT&T-syntax suffix; if that yields nothing
 * (l == 0) and the operand is a register, fall back to deducing the
 * length from the register name's spelling.
 * NOTE(review): listing heavily elided — the suffix cases and the
 * register-name decoding branches are missing from this excerpt.
 */
2768 switch (opcode_suffix) {
2782 if (l == 0 && operand->reg) {
2783 switch (strlen(operand->base)) {
2785 switch (operand->base[2]) {
2795 if (operand->base[1] == 'r')
/* Return the number of bytes occupied by 'state', i.e. the fixed header
 * plus only the memory slots that are actually in use (mem_count), not the
 * full allocated capacity.
 */
2806 bb_reg_state_size(const struct bb_reg_state *state)
2808 return sizeof(*state) +
2809 state->mem_count * sizeof(state->memory[0]);
2812 /* Canonicalize the current bb_reg_state so it can be compared against
2813 * previously created states. Sort the memory entries in descending order of
2814 * offset_address (stack grows down). Empty slots are moved to the end of the
2819 bb_reg_state_canonicalize(void)
2821 int i, order, changed;
2822 struct bb_memory_contains *p1, *p2, temp;
/* Bubble-style pass over adjacent slot pairs: undefined slots sink to the
 * end, defined slots sort by descending offset_address.
 * NOTE(review): listing elided — the swap logic, 'order' assignments and
 * the repeat-until-!changed control are missing from this excerpt.
 */
2825 for (i = 0, p1 = bb_reg_state->memory;
2826 i < bb_reg_state->mem_count-1;
2829 if (p2->value == BBRG_UNDEFINED) {
2831 } else if (p1->value == BBRG_UNDEFINED) {
2833 } else if (p1->offset_address < p2->offset_address) {
2835 } else if (p1->offset_address > p2->offset_address) {
/* Recompute mem_count as (index of last defined slot) + 1 after sorting. */
2848 for (i = 0, p1 = bb_reg_state->memory;
2849 i < bb_reg_state_max;
2851 if (p1->value != BBRG_UNDEFINED)
2852 bb_reg_state->mem_count = i + 1;
2854 bb_reg_state_print(bb_reg_state);
/* Check a transfer to address 'to' against the table of known special-case
 * labels (mainly assembler entry points in entry.S).  If 'to' matches an
 * entry, verify that the current register and stack-memory state agrees
 * with what that label expects, printing diagnostics for each mismatch
 * (capped at max_errors).
 * NOTE(review): listing elided — return statements and several loop bodies
 * are missing from this excerpt, so the exact success/failure returns
 * cannot be confirmed here.
 */
2858 bb_special_case(bfd_vma to)
2860 int i, j, rsp_offset, expect_offset, offset, errors = 0, max_errors = 40;
2861 enum bb_reg_code reg, expect_value, value;
2862 struct bb_name_state *r;
/* Find a special-case entry matching both the address and (optionally)
 * the current function name.
 */
2864 for (i = 0, r = bb_special_cases;
2865 i < ARRAY_SIZE(bb_special_cases);
2867 if (to == r->address &&
2868 (r->fname == NULL || strcmp(bb_func_name, r->fname) == 0))
2871 /* Some inline assembler code has jumps to .fixup sections which result
2872 * in out of line transfers with undefined state, ignore them.
2874 if (strcmp(bb_func_name, "strnlen_user") == 0 ||
2875 strcmp(bb_func_name, "copy_from_user") == 0)
2880 /* Check the running registers match */
2881 for (reg = BBRG_RAX; reg < r->regs_size; ++reg) {
2882 expect_value = r->regs[reg].value;
2883 if (test_bit(expect_value, r->skip_regs.bits)) {
2884 /* this regs entry is not defined for this label */
2887 if (expect_value == BBRG_UNDEFINED)
2889 expect_offset = r->regs[reg].offset;
2890 value = bb_reg_code_value(reg);
2891 offset = bb_reg_code_offset(reg);
/* Register matches if value agrees, and for OSP-derived values the
 * offset must also agree with the label's expected osp_offset.
 */
2892 if (expect_value == value &&
2893 (value != BBRG_OSP || r->osp_offset == offset))
2895 kdb_printf("%s: Expected %s to contain %s",
2898 bbrg_name[expect_value]);
2900 KDB_DEBUG_BB_OFFSET_PRINTF(r->osp_offset, "", "");
2901 kdb_printf(". It actually contains %s", bbrg_name[value]);
2903 KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", "");
2906 if (max_errors-- == 0)
2909 /* Check that any memory data on stack matches */
/* Walk the tracked memory slots (i) and the label's expected pt_regs
 * slots (j) in parallel, comparing rsp-relative offsets.
 */
2911 while (i < bb_reg_state->mem_count &&
2913 expect_value = r->mem[j].value;
2914 if (test_bit(expect_value, r->skip_mem.bits) ||
2915 expect_value == BBRG_UNDEFINED) {
2916 /* this memory slot is not defined for this label */
2920 rsp_offset = bb_reg_state->memory[i].offset_address -
2921 bb_reg_code_offset(BBRG_RSP);
2923 r->mem[j].offset_address) {
2924 /* extra slots in memory are OK */
2926 } else if (rsp_offset <
2927 r->mem[j].offset_address) {
2928 /* Required memory slot is missing */
2929 kdb_printf("%s: Invalid bb_reg_state.memory, "
2930 "missing memory entry[%d] %s\n",
2931 __FUNCTION__, j, bbrg_name[expect_value]);
2933 if (max_errors-- == 0)
2937 if (bb_reg_state->memory[i].offset_value ||
2938 bb_reg_state->memory[i].value != expect_value) {
2939 /* memory slot is present but contains wrong
2942 kdb_printf("%s: Invalid bb_reg_state.memory, "
2943 "wrong value in slot %d, "
2944 "should be %s, it is %s\n",
2946 bbrg_name[expect_value],
2947 bbrg_name[bb_reg_state->memory[i].value]);
2949 if (max_errors-- == 0)
/* Any expected slots left over (beyond skippable/undefined ones) mean the
 * tracked memory ended before all pt_regs slots were verified.
 */
2956 while (j < r->mem_size) {
2957 expect_value = r->mem[j].value;
2958 if (test_bit(expect_value, r->skip_mem.bits) ||
2959 expect_value == BBRG_UNDEFINED)
2964 if (j != r->mem_size) {
2965 /* Hit end of memory before testing all the pt_reg slots */
2966 kdb_printf("%s: Invalid bb_reg_state.memory, "
2967 "missing trailing entries\n",
2970 if (max_errors-- == 0)
2977 kdb_printf("%s: on transfer to %s\n", __FUNCTION__, r->name);
2982 /* Transfer of control to a label outside the current function. If the
2983 * transfer is to a known common code path then do a sanity check on the state
/* 'type' != 0 indicates sysret/sysexit, which relaxes the RSP/RBP checks
 * below.  Verifies that every callee-preserved register still maps to
 * itself (or is excused by a known special case) and reports mismatches.
 */
2988 bb_sanity_check(int type)
2990 enum bb_reg_code expect, actual;
2991 int i, offset, error = 0;
2993 for (i = 0; i < ARRAY_SIZE(bb_preserved_reg); ++i) {
2994 expect = bb_preserved_reg[i];
2995 actual = bb_reg_code_value(expect);
2996 offset = bb_reg_code_offset(expect);
2997 if (expect == actual)
2999 /* type == 1 is sysret/sysexit, ignore RSP */
3000 if (type && expect == BBRG_RSP)
3002 /* type == 1 is sysret/sysexit, ignore RBP for i386 */
3003 /* We used to have "#ifndef CONFIG_X86_64" for the type=1 RBP
3004 * test; however, x86_64 can run ia32 compatible mode and
3005 * hit this problem. Perform the following test anyway!
3007 if (type && expect == BBRG_RBP)
3009 /* RSP should contain OSP+0. Except for ptregscall_common and
3010 * ia32_ptregs_common, they get a partial pt_regs, fudge the
3011 * stack to make it a full pt_regs then reverse the effect on
3012 * exit, so the offset is -0x50 on exit.
3014 if (expect == BBRG_RSP &&
3015 bb_is_osp_defined(expect) &&
3018 (strcmp(bb_func_name, "ptregscall_common") == 0 ||
3019 strcmp(bb_func_name, "ia32_ptregs_common") == 0))))
3021 /* The put_user and save_paranoid functions are special.
3022 * %rbx gets clobbered */
3023 if (expect == BBRG_RBX &&
3024 (strncmp(bb_func_name, "__put_user_", 11) == 0 ||
3025 strcmp(bb_func_name, "save_paranoid") == 0))
3027 /* Ignore rbp and rsp for error_entry */
3028 if ((strcmp(bb_func_name, "error_entry") == 0) &&
3029 (expect == BBRG_RBX ||
3030 (expect == BBRG_RSP && bb_is_osp_defined(expect) && offset == -0x10)))
/* No excuse applies: report the mismatch. */
3032 kdb_printf("%s: Expected %s, got %s",
3034 bbrg_name[expect], bbrg_name[actual]);
3036 KDB_DEBUG_BB_OFFSET_PRINTF(offset, "", "");
/* Any accumulated error aborts via the BB_CHECK diagnostic macro. */
3040 BB_CHECK(error, error, );
3043 /* Transfer of control. Follow the arc and save the current state as input to
3044 * another basic block.
3048 bb_transfer(bfd_vma from, bfd_vma to, unsigned int drop_through)
3052 struct bb* bb = NULL; /*stupid gcc */
3053 struct bb_jmp *bb_jmp;
3054 struct bb_reg_state *state;
/* States must be in canonical form before they can be memcmp-compared. */
3055 bb_reg_state_canonicalize();
/* Find the arc (from, to, drop_through) in the jump list.
 * NOTE(review): listing elided — the 'found' bookkeeping and the 'to'
 * comparison line are missing from this excerpt.
 */
3057 for (i = 0; i < bb_jmp_count; ++i) {
3058 bb_jmp = bb_jmp_list + i;
3059 if (bb_jmp->from == from &&
3061 bb_jmp->drop_through == drop_through) {
3067 /* Transfer outside the current function. Check the special
3068 * cases (mainly in entry.S) first. If it is not a known
3069 * special case then check if the target address is the start
3070 * of a function or not. If it is the start of a function then
3071 * assume tail recursion and require that the state be the same
3072 * as on entry. Otherwise assume out of line code (e.g.
3073 * spinlock contention path) and ignore it, the state can be
3076 kdb_symtab_t symtab;
3077 if (bb_special_case(to))
3079 kdbnearsym(to, &symtab);
3080 if (symtab.sym_start != to)
3086 /* Only print this message when the kernel is compiled with
3087 * -fno-optimize-sibling-calls. Otherwise it would print a
3088 * message for every tail recursion call. If you see the
3089 * message below then you probably have an assembler label that
3090 * is not listed in the special cases.
3092 kdb_printf(" not matched: from "
3094 " to " kdb_bfd_vma_fmt0
3095 " drop_through %d bb_jmp[%d]\n",
3096 from, to, drop_through, i);
3097 #endif /* NO_SIBLINGS */
3100 KDB_DEBUG_BB(" matched: from " kdb_bfd_vma_fmt0
3101 " to " kdb_bfd_vma_fmt0
3102 " drop_through %d bb_jmp[%d]\n",
3103 from, to, drop_through, i);
/* Locate the target basic block; it must exist (BB_CHECK below). */
3105 for (i = 0; i < bb_count; ++i) {
3107 if (bb->start == to) {
3112 BB_CHECK(!found, to, );
3113 /* If the register state for this arc has already been set (we are
3114 * rescanning the block that originates the arc) and the state is the
3115 * same as the previous state for this arc then this input to the
3116 * target block is the same as last time, so there is no need to rescan
3119 state = bb_jmp->state;
3120 size = bb_reg_state_size(bb_reg_state);
/* Temporarily copy ref_count so it does not defeat the memcmp. */
3122 bb_reg_state->ref_count = state->ref_count;
3123 if (memcmp(state, bb_reg_state, size) == 0) {
3124 KDB_DEBUG_BB(" no state change\n");
/* State changed: drop our reference to the stale saved state. */
3127 if (--state->ref_count == 0)
3129 bb_jmp->state = NULL;
3131 /* New input state is required. To save space, check if any other arcs
3132 * have the same state and reuse them where possible. The overall set
3133 * of inputs to the target block is now different so the target block
3134 * must be rescanned.
3137 for (i = 0; i < bb_jmp_count; ++i) {
3138 state = bb_jmp_list[i].state;
3141 bb_reg_state->ref_count = state->ref_count;
3142 if (memcmp(state, bb_reg_state, size) == 0) {
3143 KDB_DEBUG_BB(" reuse bb_jmp[%d]\n", i);
3144 bb_jmp->state = state;
/* No reusable state found: allocate and populate a fresh copy. */
3149 state = debug_kmalloc(size, GFP_ATOMIC);
3151 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
3155 memcpy(state, bb_reg_state, size);
3156 state->ref_count = 1;
3157 bb_jmp->state = state;
3158 KDB_DEBUG_BB(" new state %p\n", state);
3161 /* Isolate the processing for 'mov' so it can be used for 'xadd'/'xchg' as
3164 * xadd/xchg expect this function to return BBOU_NOP for special cases,
3165 * otherwise it returns BBOU_RSWD. All special cases must be handled entirely
3166 * within this function, including doing bb_read_operand or bb_write_operand
/* 'l' is the index into bb_decode.opcode of the (potential) length suffix,
 * i.e. the length of the bare mnemonic ("mov", "xadd", "xchg").
 */
3170 static enum bb_operand_usage
3171 bb_usage_mov(const struct bb_operand *src, const struct bb_operand *dst, int l)
3173 int full_register_src, full_register_dst;
3174 full_register_src = bb_operand_length(src, bb_decode.opcode[l])
3175 == KDB_WORD_SIZE * 8;
3176 full_register_dst = bb_operand_length(dst, bb_decode.opcode[l])
3177 == KDB_WORD_SIZE * 8;
3178 /* If both src and dst are full integer registers then record the
3182 bb_is_int_reg(src->base_rc) &&
3184 bb_is_int_reg(dst->base_rc) &&
3185 full_register_src &&
3186 full_register_dst) {
3187 /* Special case for the code that switches stacks in
3188 * jprobe_return. That code must modify RSP but it does it in
3189 * a well defined manner. Do not invalidate RSP.
3191 if (src->base_rc == BBRG_RBX &&
3192 dst->base_rc == BBRG_RSP &&
3193 strcmp(bb_func_name, "jprobe_return") == 0) {
3194 bb_read_operand(src);
3197 /* math_abort takes the equivalent of a longjmp structure and
3198 * resets the stack. Ignore this, it leaves RSP well defined.
3200 if (dst->base_rc == BBRG_RSP &&
3201 strcmp(bb_func_name, "math_abort") == 0) {
3202 bb_read_operand(src);
/* Normal register-to-register move: propagate the tracked value. */
3205 bb_reg_set_reg(dst->base_rc, src->base_rc);
3208 /* If the move is from a full integer register to stack then record it.
3211 bb_is_simple_memory(dst) &&
3212 bb_is_osp_defined(dst->base_rc) &&
3213 full_register_src) {
3214 /* Ugly special case. Initializing list heads on stack causes
3215 * false references to stack variables when the list head is
3216 * used. Static code analysis cannot detect that the list head
3217 * has been changed by a previous execution loop and that a
3218 * basic block is only executed after the list head has been
3221 * These false references can result in valid stack variables
3222 * being incorrectly cleared on some logic paths. Ignore
3223 * stores to stack variables which point to themselves or to
3224 * the previous word so the list head initialization is not
3227 if (bb_is_osp_defined(src->base_rc)) {
3228 int stack1 = bb_reg_code_offset(src->base_rc);
3229 int stack2 = bb_reg_code_offset(dst->base_rc) +
3231 if (stack1 == stack2 ||
3232 stack1 == stack2 - KDB_WORD_SIZE)
3235 bb_memory_set_reg(dst->base_rc, src->base_rc, dst->disp);
3238 /* If the move is from stack to a full integer register then record it.
3240 if (bb_is_simple_memory(src) &&
3241 bb_is_osp_defined(src->base_rc) &&
3243 bb_is_int_reg(dst->base_rc) &&
3244 full_register_dst) {
3245 #ifdef CONFIG_X86_32
3246 /* mov from TSS_sysenter_sp0+offset to esp to fix up the
3247 * sysenter stack, it leaves esp well defined. mov
3248 * TSS_ysenter_sp0+offset(%esp),%esp is followed by up to 5
3249 * push instructions to mimic the hardware stack push. If
3250 * TSS_sysenter_sp0 is offset then only 3 words will be
3253 if (dst->base_rc == BBRG_RSP &&
3254 src->disp >= TSS_sysenter_sp0 &&
3255 bb_is_osp_defined(BBRG_RSP)) {
3257 pushes = src->disp == TSS_sysenter_sp0 ? 5 : 3;
3258 bb_reg_code_set_offset(BBRG_RSP,
3259 bb_reg_code_offset(BBRG_RSP) +
3260 pushes * KDB_WORD_SIZE);
3261 KDB_DEBUG_BB_OFFSET(
3262 bb_reg_code_offset(BBRG_RSP),
3263 " sysenter fixup, RSP",
3267 #endif /* CONFIG_X86_32 */
3268 bb_read_operand(src);
3269 bb_reg_set_memory(dst->base_rc, src->base_rc, src->disp);
3272 /* move %gs:0x<nn>,%rsp is used to unconditionally switch to another
3273 * stack. Ignore this special case, it is handled by the stack
3277 strcmp(src->segment, "%gs") == 0 &&
3279 dst->base_rc == BBRG_RSP)
3281 /* move %reg,%reg is a nop */
3286 strcmp(src->base, dst->base) == 0)
3288 /* Special case for the code that switches stacks in the scheduler
3289 * (switch_to()). That code must modify RSP but it does it in a well
3290 * defined manner. Do not invalidate RSP.
3293 dst->base_rc == BBRG_RSP &&
3294 full_register_dst &&
3295 bb_is_scheduler_address()) {
3296 bb_read_operand(src);
3299 /* Special case for the code that switches stacks in resume from
3300 * hibernation code. That code must modify RSP but it does it in a
3301 * well defined manner. Do not invalidate RSP.
3305 dst->base_rc == BBRG_RSP &&
3306 full_register_dst &&
3307 strcmp(bb_func_name, "restore_image") == 0) {
3308 bb_read_operand(src);
3314 static enum bb_operand_usage
3315 bb_usage_xadd(const struct bb_operand *src, const struct bb_operand *dst)
3317 /* Simulate xadd as a series of instructions including mov, that way we
3318 * get the benefit of all the special cases already handled by
3321 * tmp = src + dst, src = dst, dst = tmp.
3323 * For tmp, pick a register that is undefined. If all registers are
3324 * defined then pick one that is not being used by xadd.
3326 enum bb_reg_code reg = BBRG_UNDEFINED;
3327 struct bb_operand tmp;
3328 struct bb_reg_contains save_tmp;
3329 enum bb_operand_usage usage;
/* First choice: any register whose tracked value is already undefined. */
3331 for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
3332 if (bb_reg_code_value(reg) == BBRG_UNDEFINED) {
/* Fallback: any register not referenced by either xadd operand. */
3338 for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
3339 if (reg != src->base_rc &&
3340 reg != src->index_rc &&
3341 reg != dst->base_rc &&
3342 reg != dst->index_rc &&
/* Save the chosen register's tracked contents so they can be restored
 * after the simulation; the scratch use must not leak into the state.
 */
3347 KDB_DEBUG_BB(" %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]);
3348 save_tmp = bb_reg_state->contains[reg - BBRG_RAX];
3349 bb_reg_set_undef(reg);
3350 memset(&tmp, 0, sizeof(tmp));
/* Build a synthetic register operand named "%<reg>" for the simulation.
 * NOTE(review): listing elided — the allocation-failure branch and the
 * leading '%' store are not visible in this excerpt.
 */
3353 tmp.base = debug_kmalloc(strlen(bbrg_name[reg]) + 2, GFP_ATOMIC);
3356 strcpy(tmp.base + 1, bbrg_name[reg]);
3359 bb_read_operand(src);
3360 bb_read_operand(dst);
/* sizeof("xadd")-1 == 4 is the suffix index passed to bb_usage_mov. */
3361 if (bb_usage_mov(src, dst, sizeof("xadd")-1) == BBOU_NOP)
3364 usage = BBOU_RSRDWS;
3365 bb_usage_mov(&tmp, dst, sizeof("xadd")-1);
3366 KDB_DEBUG_BB(" %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]);
3367 bb_reg_state->contains[reg - BBRG_RAX] = save_tmp;
3368 debug_kfree(tmp.base);
3372 static enum bb_operand_usage
3373 bb_usage_xchg(const struct bb_operand *src, const struct bb_operand *dst)
3375 /* Simulate xchg as a series of mov instructions, that way we get the
3376 * benefit of all the special cases already handled by BBOU_MOV.
3378 * mov dst,tmp; mov src,dst; mov tmp,src;
3380 * For tmp, pick a register that is undefined. If all registers are
3381 * defined then pick one that is not being used by xchg.
3383 enum bb_reg_code reg = BBRG_UNDEFINED;
/* Usage flags accumulated per simulated mov; cleared to 0 when the
 * corresponding mov turns out to be a special-case NOP.
 */
3384 int rs = BBOU_RS, rd = BBOU_RD, ws = BBOU_WS, wd = BBOU_WD;
3385 struct bb_operand tmp;
3386 struct bb_reg_contains save_tmp;
/* First choice: any register whose tracked value is already undefined. */
3388 for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
3389 if (bb_reg_code_value(reg) == BBRG_UNDEFINED) {
/* Fallback: any register not referenced by either xchg operand. */
3395 for (reg = BBRG_RAX; reg < BBRG_RAX + KDB_INT_REGISTERS; ++reg) {
3396 if (reg != src->base_rc &&
3397 reg != src->index_rc &&
3398 reg != dst->base_rc &&
3399 reg != dst->index_rc &&
/* Save the scratch register's tracked contents for restoration below. */
3404 KDB_DEBUG_BB(" %s saving tmp %s\n", __FUNCTION__, bbrg_name[reg]);
3405 save_tmp = bb_reg_state->contains[reg - BBRG_RAX];
3406 memset(&tmp, 0, sizeof(tmp));
/* Build a synthetic "%<reg>" operand.
 * NOTE(review): listing elided — allocation-failure handling and the
 * leading '%' store are not visible in this excerpt.
 */
3409 tmp.base = debug_kmalloc(strlen(bbrg_name[reg]) + 2, GFP_ATOMIC);
3412 strcpy(tmp.base + 1, bbrg_name[reg]);
/* Three simulated movs; sizeof("xchg")-1 == 4 is the suffix index. */
3415 if (bb_usage_mov(dst, &tmp, sizeof("xchg")-1) == BBOU_NOP)
3417 if (bb_usage_mov(src, dst, sizeof("xchg")-1) == BBOU_NOP) {
3421 if (bb_usage_mov(&tmp, src, sizeof("xchg")-1) == BBOU_NOP)
3423 KDB_DEBUG_BB(" %s restoring tmp %s\n", __FUNCTION__, bbrg_name[reg]);
3424 bb_reg_state->contains[reg - BBRG_RAX] = save_tmp;
3425 debug_kfree(tmp.base);
/* Combine whichever read/write flags survived the NOP special cases. */
3426 return rs | rd | ws | wd;
3429 /* Invalidate all the scratch registers */
3432 bb_invalidate_scratch_reg(void)
/* Every integer register NOT in the callee-preserved list becomes
 * undefined (typically applied after a call instruction).
 */
3435 for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
3436 for (j = 0; j < ARRAY_SIZE(bb_preserved_reg); ++j) {
3437 if (i == bb_preserved_reg[j])
3440 bb_reg_set_undef(i);
/* Handle a computed jump through a jump table at src->disp: walk the table
 * word by word and record a transfer arc for every entry that lands inside
 * the current function.  Stops at the first unreadable word or the first
 * address outside [bb_func_start, bb_func_end), or if analysis has been
 * abandoned (bb_giveup).
 */
3447 bb_pass2_computed_jmp(const struct bb_operand *src)
3449 unsigned long table = src->disp;
3451 while (!bb_giveup) {
3452 if (kdb_getword(&addr, table, sizeof(addr)))
3454 if (addr < bb_func_start || addr >= bb_func_end)
3456 bb_transfer(bb_curr_addr, addr, 0);
3457 table += KDB_WORD_SIZE;
3461 /* The current instruction has been decoded and all the information is in
3462 * bb_decode. Based on the opcode, track any operand usage that we care about.
3468 enum bb_operand_usage usage = bb_decode.match->usage;
3469 struct bb_operand *src = &bb_decode.src;
3470 struct bb_operand *dst = &bb_decode.dst;
3471 struct bb_operand *dst2 = &bb_decode.dst2;
3472 int opcode_suffix, operand_length;
3474 /* First handle all the special usage cases, and map them to a generic
3475 * case after catering for the side effects.
3478 if (usage == BBOU_IMUL &&
3479 src->present && !dst->present && !dst2->present) {
3480 /* single operand imul, same effects as mul */
3484 /* AT&T syntax uses movs<l1><l2> for move with sign extension, instead
3485 * of the Intel movsx. The AT&T syntax causes problems for the opcode
3486 * mapping; movs with sign extension needs to be treated as a generic
3487 * read src, write dst, but instead it falls under the movs I/O
3488 * instruction. Fix it.
3490 if (usage == BBOU_MOVS && strlen(bb_decode.opcode) > 5)
3493 /* This switch statement deliberately does not use 'default' at the top
3494 * level. That way the compiler will complain if a new BBOU_ enum is
3495 * added above and not explicitly handled here.
3498 case BBOU_UNKNOWN: /* drop through */
3499 case BBOU_RS: /* drop through */
3500 case BBOU_RD: /* drop through */
3501 case BBOU_RSRD: /* drop through */
3502 case BBOU_WS: /* drop through */
3503 case BBOU_RSWS: /* drop through */
3504 case BBOU_RDWS: /* drop through */
3505 case BBOU_RSRDWS: /* drop through */
3506 case BBOU_WD: /* drop through */
3507 case BBOU_RSWD: /* drop through */
3508 case BBOU_RDWD: /* drop through */
3509 case BBOU_RSRDWD: /* drop through */
3510 case BBOU_WSWD: /* drop through */
3511 case BBOU_RSWSWD: /* drop through */
3512 case BBOU_RDWSWD: /* drop through */
3514 break; /* ignore generic usage for now */
3516 /* Special case for add instructions that adjust registers
3517 * which are mapping the stack.
3519 if (dst->reg && bb_is_osp_defined(dst->base_rc)) {
3520 bb_adjust_osp_instruction(1);
3523 usage = BBOU_RSRDWD;
3527 /* Special case when trying to round the stack pointer
3528 * to achieve byte alignment
3530 if (dst->reg && dst->base_rc == BBRG_RSP &&
3531 src->immediate && strncmp(bb_func_name, "efi_call", 8) == 0) {
3534 usage = BBOU_RSRDWD;
3538 bb_reg_state_print(bb_reg_state);
3540 if (bb_is_static_disp(src)) {
3541 /* save_args is special. It saves
3542 * a partial pt_regs onto the stack and switches
3543 * to the interrupt stack.
3545 if (src->disp == bb_save_args) {
3546 bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x48);
3547 bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x40);
3548 bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x38);
3549 bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x30);
3550 bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x28);
3551 bb_memory_set_reg(BBRG_RSP, BBRG_R8, 0x20);
3552 bb_memory_set_reg(BBRG_RSP, BBRG_R9, 0x18);
3553 bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x10);
3554 bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x08);
3555 bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0);
3556 /* This is actually on the interrupt stack,
3557 * but we fudge it so the unwind works.
3559 bb_memory_set_reg_value(BBRG_RSP, -0x8, BBRG_RBP, 0);
3560 bb_reg_set_reg(BBRG_RBP, BBRG_RSP);
3561 bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
3563 /* save_rest juggles the stack frame to append the
3564 * rest of the pt_regs onto a stack where SAVE_ARGS
3565 * or save_args has already been done.
3567 else if (src->disp == bb_save_rest) {
3568 bb_memory_set_reg(BBRG_RSP, BBRG_RBX, 0x30);
3569 bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0x28);
3570 bb_memory_set_reg(BBRG_RSP, BBRG_R12, 0x20);
3571 bb_memory_set_reg(BBRG_RSP, BBRG_R13, 0x18);
3572 bb_memory_set_reg(BBRG_RSP, BBRG_R14, 0x10);
3573 bb_memory_set_reg(BBRG_RSP, BBRG_R15, 0x08);
3575 /* error_entry and save_paranoid save a full pt_regs.
3576 * Break out so the scratch registers aren't invalidated.
3578 else if (src->disp == bb_error_entry || src->disp == bb_save_paranoid) {
3579 bb_memory_set_reg(BBRG_RSP, BBRG_RDI, 0x70);
3580 bb_memory_set_reg(BBRG_RSP, BBRG_RSI, 0x68);
3581 bb_memory_set_reg(BBRG_RSP, BBRG_RDX, 0x60);
3582 bb_memory_set_reg(BBRG_RSP, BBRG_RCX, 0x58);
3583 bb_memory_set_reg(BBRG_RSP, BBRG_RAX, 0x50);
3584 bb_memory_set_reg(BBRG_RSP, BBRG_R8, 0x48);
3585 bb_memory_set_reg(BBRG_RSP, BBRG_R9, 0x40);
3586 bb_memory_set_reg(BBRG_RSP, BBRG_R10, 0x38);
3587 bb_memory_set_reg(BBRG_RSP, BBRG_R11, 0x30);
3588 bb_memory_set_reg(BBRG_RSP, BBRG_RBX, 0x28);
3589 bb_memory_set_reg(BBRG_RSP, BBRG_RBP, 0x20);
3590 bb_memory_set_reg(BBRG_RSP, BBRG_R12, 0x18);
3591 bb_memory_set_reg(BBRG_RSP, BBRG_R13, 0x10);
3592 bb_memory_set_reg(BBRG_RSP, BBRG_R14, 0x08);
3593 bb_memory_set_reg(BBRG_RSP, BBRG_R15, 0);
3597 /* Invalidate the scratch registers */
3598 bb_invalidate_scratch_reg();
3600 /* These special cases need scratch registers invalidated first */
3601 if (bb_is_static_disp(src)) {
3602 /* Function sync_regs and save_v86_state are special.
3603 * Their return value is the new stack pointer
3605 if (src->disp == bb_sync_regs) {
3606 bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
3607 } else if (src->disp == bb_save_v86_state) {
3608 bb_reg_set_reg(BBRG_RAX, BBRG_RSP);
3609 bb_adjust_osp(BBRG_RAX, +KDB_WORD_SIZE);
3614 /* Convert word in RAX. Read RAX, write RAX */
3615 bb_reg_read(BBRG_RAX);
3616 bb_reg_set_undef(BBRG_RAX);
3620 /* cmove %gs:0x<nn>,%rsp is used to conditionally switch to
3621 * another stack. Ignore this special case, it is handled by
3622 * the stack unwinding code.
3625 strcmp(src->segment, "%gs") == 0 &&
3627 dst->base_rc == BBRG_RSP)
3633 /* Read RAX, write RAX plus src read, dst write */
3634 bb_reg_read(BBRG_RAX);
3635 bb_reg_set_undef(BBRG_RAX);
3639 /* Read RAX, RBX, RCX, RDX, write RAX, RDX plus src read/write */
3640 bb_reg_read(BBRG_RAX);
3641 bb_reg_read(BBRG_RBX);
3642 bb_reg_read(BBRG_RCX);
3643 bb_reg_read(BBRG_RDX);
3644 bb_reg_set_undef(BBRG_RAX);
3645 bb_reg_set_undef(BBRG_RDX);
3649 /* Read RAX, write RAX, RBX, RCX, RDX */
3650 bb_reg_read(BBRG_RAX);
3651 bb_reg_set_undef(BBRG_RAX);
3652 bb_reg_set_undef(BBRG_RBX);
3653 bb_reg_set_undef(BBRG_RCX);
3654 bb_reg_set_undef(BBRG_RDX);
3658 /* Convert word in RAX, RDX. Read RAX, write RDX */
3659 bb_reg_read(BBRG_RAX);
3660 bb_reg_set_undef(BBRG_RDX);
3663 case BBOU_DIV: /* drop through */
3665 /* The 8 bit variants only affect RAX, the 16, 32 and 64 bit
3666 * variants affect RDX as well.
3670 opcode_suffix = bb_decode.opcode[3];
3673 opcode_suffix = bb_decode.opcode[4];
3676 opcode_suffix = 'q';
3679 operand_length = bb_operand_length(src, opcode_suffix);
3680 bb_reg_read(BBRG_RAX);
3681 bb_reg_set_undef(BBRG_RAX);
3682 if (operand_length != 8) {
3683 bb_reg_read(BBRG_RDX);
3684 bb_reg_set_undef(BBRG_RDX);
3689 /* Only the two and three operand forms get here. The one
3690 * operand form is treated as mul.
3692 if (dst2->present) {
3693 /* The three operand form is a special case, read the first two
3694 * operands, write the third.
3696 bb_read_operand(src);
3697 bb_read_operand(dst);
3698 bb_write_operand(dst2);
3701 usage = BBOU_RSRDWD;
3709 if (bb_is_static_disp(src))
3710 bb_transfer(bb_curr_addr, src->disp, 0);
3711 else if (src->indirect &&
3713 src->base == NULL &&
3715 src->scale == KDB_WORD_SIZE)
3716 bb_pass2_computed_jmp(src);
3721 bb_reg_set_undef(BBRG_RAX);
3725 /* dst = src + disp. Often used to calculate offsets into the
3726 * stack, so check if it uses a stack pointer.
3729 if (bb_is_simple_memory(src)) {
3730 if (bb_is_osp_defined(src->base_rc)) {
3731 bb_reg_set_reg(dst->base_rc, src->base_rc);
3732 bb_adjust_osp_instruction(1);
3734 } else if (src->disp == 0 &&
3735 src->base_rc == dst->base_rc) {
3736 /* lea 0(%reg),%reg is generated by i386
3740 } else if (src->disp == 4096 &&
3741 (src->base_rc == BBRG_R8 ||
3742 src->base_rc == BBRG_RDI) &&
3743 strcmp(bb_func_name, "relocate_kernel") == 0) {
3744 /* relocate_kernel: setup a new stack at the
3745 * end of the physical control page, using
3746 * (x86_64) lea 4096(%r8),%rsp or (i386) lea
3754 /* RSP = RBP; RBP = *(RSP); RSP += KDB_WORD_SIZE; */
3755 bb_reg_set_reg(BBRG_RSP, BBRG_RBP);
3756 if (bb_is_osp_defined(BBRG_RSP))
3757 bb_reg_set_memory(BBRG_RBP, BBRG_RSP, 0);
3759 bb_reg_set_undef(BBRG_RBP);
3760 if (bb_is_osp_defined(BBRG_RSP))
3761 bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
3762 /* common_interrupt uses leave in a non-standard manner */
3763 if (strcmp(bb_func_name, "common_interrupt") != 0)
3768 /* Read RSI, write RAX, RSI */
3769 bb_reg_read(BBRG_RSI);
3770 bb_reg_set_undef(BBRG_RAX);
3771 bb_reg_set_undef(BBRG_RSI);
3775 /* Read and write RCX */
3776 bb_reg_read(BBRG_RCX);
3777 bb_reg_set_undef(BBRG_RCX);
3778 if (bb_is_static_disp(src))
3779 bb_transfer(bb_curr_addr, src->disp, 0);
3783 /* lss offset(%esp),%esp leaves esp well defined */
3785 dst->base_rc == BBRG_RSP &&
3786 bb_is_simple_memory(src) &&
3787 src->base_rc == BBRG_RSP) {
3788 bb_adjust_osp(BBRG_RSP, 2*KDB_WORD_SIZE + src->disp);
3795 /* Read RAX, RCX, RDX */
3796 bb_reg_set_undef(BBRG_RAX);
3797 bb_reg_set_undef(BBRG_RCX);
3798 bb_reg_set_undef(BBRG_RDX);
3802 usage = bb_usage_mov(src, dst, sizeof("mov")-1);
3805 /* Read RSI, RDI, write RSI, RDI */
3806 bb_reg_read(BBRG_RSI);
3807 bb_reg_read(BBRG_RDI);
3808 bb_reg_set_undef(BBRG_RSI);
3809 bb_reg_set_undef(BBRG_RDI);
3813 /* imul (one operand form only) or mul. Read RAX. If the
3814 * operand length is not 8 then write RDX.
3816 if (bb_decode.opcode[0] == 'i')
3817 opcode_suffix = bb_decode.opcode[4];
3819 opcode_suffix = bb_decode.opcode[3];
3820 operand_length = bb_operand_length(src, opcode_suffix);
3821 bb_reg_read(BBRG_RAX);
3822 if (operand_length != 8)
3823 bb_reg_set_undef(BBRG_RDX);
3828 bb_reg_read(BBRG_RAX);
3829 bb_reg_read(BBRG_RCX);
3835 /* Read RSI, RDX, write RSI */
3836 bb_reg_read(BBRG_RSI);
3837 bb_reg_read(BBRG_RDX);
3838 bb_reg_set_undef(BBRG_RSI);
3842 /* Complicated by the fact that you can pop from top of stack
3843 * to a stack location, for this case the destination location
3844 * is calculated after adjusting RSP. Analysis of the kernel
3845 * code shows that gcc only uses this strange format to get the
3846 * flags into a local variable, e.g. pushf; popl 0x10(%esp); so
3847 * I am going to ignore this special case.
3850 if (!bb_is_osp_defined(BBRG_RSP)) {
3851 if (!bb_is_scheduler_address()) {
3852 kdb_printf("pop when BBRG_RSP is undefined?\n");
3857 bb_reg_set_memory(src->base_rc, BBRG_RSP, 0);
3860 /* pop %rsp does not adjust rsp */
3862 src->base_rc != BBRG_RSP)
3863 bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
3867 /* Do not care about flags, just adjust RSP */
3868 if (!bb_is_osp_defined(BBRG_RSP)) {
3869 if (!bb_is_scheduler_address()) {
3870 kdb_printf("popf when BBRG_RSP is undefined?\n");
3874 bb_adjust_osp(BBRG_RSP, KDB_WORD_SIZE);
3879 /* Complicated by the fact that you can push from a stack
3880 * location to top of stack, the source location is calculated
3881 * before adjusting RSP. Analysis of the kernel code shows
3882 * that gcc only uses this strange format to restore the flags
3883 * from a local variable, e.g. pushl 0x10(%esp); popf; so I am
3884 * going to ignore this special case.
3887 if (!bb_is_osp_defined(BBRG_RSP)) {
3888 if (!bb_is_scheduler_address()) {
3889 kdb_printf("push when BBRG_RSP is undefined?\n");
3893 bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
3895 bb_reg_code_offset(BBRG_RSP) <= 0)
3896 bb_memory_set_reg(BBRG_RSP, src->base_rc, 0);
3900 /* Do not care about flags, just adjust RSP */
3901 if (!bb_is_osp_defined(BBRG_RSP)) {
3902 if (!bb_is_scheduler_address()) {
3903 kdb_printf("pushf when BBRG_RSP is undefined?\n");
3907 bb_adjust_osp(BBRG_RSP, -KDB_WORD_SIZE);
3912 /* Read RCX, write RAX, RDX */
3913 bb_reg_read(BBRG_RCX);
3914 bb_reg_set_undef(BBRG_RAX);
3915 bb_reg_set_undef(BBRG_RDX);
3919 /* Write RAX, RDX */
3920 bb_reg_set_undef(BBRG_RAX);
3921 bb_reg_set_undef(BBRG_RDX);
3926 if (src->immediate && bb_is_osp_defined(BBRG_RSP)) {
3927 bb_adjust_osp(BBRG_RSP, src->disp);
3929 /* Functions that restore state which was saved by another
3930 * function or build new kernel stacks. We cannot verify what
3931 * is being restored so skip the sanity check.
3933 if (strcmp(bb_func_name, "restore_image") == 0 ||
3934 strcmp(bb_func_name, "relocate_kernel") == 0 ||
3935 strcmp(bb_func_name, "identity_mapped") == 0 ||
3936 strcmp(bb_func_name, "xen_iret_crit_fixup") == 0 ||
3937 strcmp(bb_func_name, "math_abort") == 0 ||
3938 strcmp(bb_func_name, "save_args") == 0 ||
3939 strcmp(bb_func_name, "kretprobe_trampoline_holder") == 0)
3945 bb_reg_read(BBRG_RAX);
3949 /* Read RAX, RDI, write RDI */
3950 bb_reg_read(BBRG_RAX);
3951 bb_reg_read(BBRG_RDI);
3952 bb_reg_set_undef(BBRG_RDI);
3956 /* Special case for sub instructions that adjust registers
3957 * which are mapping the stack.
3959 if (dst->reg && bb_is_osp_defined(dst->base_rc)) {
3960 bb_adjust_osp_instruction(-1);
3963 usage = BBOU_RSRDWD;
3975 /* Read RCX, RAX, RDX */
3976 bb_reg_read(BBRG_RCX);
3977 bb_reg_read(BBRG_RAX);
3978 bb_reg_read(BBRG_RDX);
3982 usage = bb_usage_xadd(src, dst);
3985 /* i386 do_IRQ with 4K stacks does xchg %ebx,%esp; call
3986 * irq_handler; mov %ebx,%esp; to switch stacks. Ignore this
3987 * stack switch when tracking registers, it is handled by
3988 * higher level backtrace code. Convert xchg %ebx,%esp to mov
3989 * %esp,%ebx so the later mov %ebx,%esp becomes a NOP and the
3990 * stack remains defined so we can backtrace through do_IRQ's
3993 * Ditto for do_softirq.
3997 src->base_rc == BBRG_RBX &&
3998 dst->base_rc == BBRG_RSP &&
3999 (strcmp(bb_func_name, "do_IRQ") == 0 ||
4000 strcmp(bb_func_name, "do_softirq") == 0)) {
4001 strcpy(bb_decode.opcode, "mov");
4002 usage = bb_usage_mov(dst, src, sizeof("mov")-1);
4004 usage = bb_usage_xchg(src, dst);
4008 /* xor %reg,%reg only counts as a register write, the original
4009 * contents of reg are irrelevant.
4011 if (src->reg && dst->reg && src->base_rc == dst->base_rc)
4014 usage = BBOU_RSRDWD;
4018 /* The switch statement above handled all the special cases. Every
4019 * opcode should now have a usage of NOP or one of the generic cases.
4021 if (usage == BBOU_UNKNOWN || usage == BBOU_NOP) {
4023 } else if (usage >= BBOU_RS && usage <= BBOU_RSRDWSWD) {
4024 if (usage & BBOU_RS)
4025 bb_read_operand(src);
4026 if (usage & BBOU_RD)
4027 bb_read_operand(dst);
4028 if (usage & BBOU_WS)
4029 bb_write_operand(src);
4030 if (usage & BBOU_WD)
4031 bb_write_operand(dst);
4033 kdb_printf("%s: opcode not fully handled\n", __FUNCTION__);
4034 if (!KDB_DEBUG(BB)) {
4036 if (bb_decode.src.present)
4037 bb_print_operand("src", &bb_decode.src);
4038 if (bb_decode.dst.present)
4039 bb_print_operand("dst", &bb_decode.dst);
4040 if (bb_decode.dst2.present)
4041 bb_print_operand("dst2", &bb_decode.dst2);
/* bb_parse_buffer: split one disassembled instruction line (held in the
 * global bb_buffer) into its components and record them in bb_decode:
 * the text up to ':' (address and function name+offset) is skipped, an
 * optional opcode prefix (lock/rep/rex/addr) is detached, the opcode is
 * isolated, and the remaining operand text is split into src/dst/dst2.
 * Commas inside '(' ')' do not separate operands. Each operand string is
 * handed to bb_parse_operand() unless a prior step set bb_giveup.
 * NOTE(review): this listing elides some source lines (the leading line
 * numbers are discontinuous), so error paths and loop bodies between the
 * visible statements are not fully shown here.
 */
4048 bb_parse_buffer(void)
4050 char *p, *src, *dst = NULL, *dst2 = NULL;
4053 memset(&bb_decode, 0, sizeof(bb_decode));
4054 KDB_DEBUG_BB(" '%s'\n", p);
/* Skip the "address <function+offset>:" prefix of the disassembly line. */
4055 p += strcspn(p, ":"); /* skip address and function name+offset: */
4057 kdb_printf("%s: cannot find ':' in buffer '%s'\n",
4058 __FUNCTION__, bb_buffer);
4062 p += strspn(p, " \t"); /* step to opcode */
/* "(bad)" marks an undecodable instruction in the disassembler output. */
4063 if (strncmp(p, "(bad)", 5) == 0)
4065 /* separate any opcode prefix */
4066 if (strncmp(p, "lock", 4) == 0 ||
4067 strncmp(p, "rep", 3) == 0 ||
4068 strncmp(p, "rex", 3) == 0 ||
4069 strncmp(p, "addr", 4) == 0) {
4070 bb_decode.prefix = p;
4071 p += strcspn(p, " \t");
4073 p += strspn(p, " \t");
4075 bb_decode.opcode = p;
4076 strsep(&p, " \t"); /* step to end of opcode */
4077 if (bb_parse_opcode())
4081 p += strspn(p, " \t"); /* step to operand(s) */
4085 p = strsep(&p, " \t"); /* strip comments after operands */
4086 /* split 'src','dst' but ignore ',' inside '(' ')' */
/* The paren counter tracks nesting so that e.g. "0x10(%esp,%eax),%ebx"
 * is split only at the top-level comma.  The '('-handling branch falls
 * in an elided line above these two. */
4090 } else if (*p == ')') {
4092 } else if (*p == ',' && paren == 0) {
/* Parse whichever operands were found; bb_giveup aborts further work. */
4101 bb_parse_operand(src, &bb_decode.src);
4103 bb_print_operand("src", &bb_decode.src);
4104 if (dst && !bb_giveup) {
4105 bb_parse_operand(dst, &bb_decode.dst);
4107 bb_print_operand("dst", &bb_decode.dst);
4109 if (dst2 && !bb_giveup) {
4110 bb_parse_operand(dst2, &bb_decode.dst2);
4112 bb_print_operand("dst2", &bb_decode.dst2);
/* bb_dis_pass2: fprintf-style callback installed into kdb_di for the second
 * disassembly pass.  Instead of printing, it appends the formatted text to
 * the global bb_buffer; once a complete line (terminated by '\n') has
 * accumulated, the line is post-processed (bb_fixup_switch_to on the text
 * after the ':' marker) and the buffer is reset for the next instruction.
 * NOTE(review): lines between the numbered statements are elided in this
 * listing; the full per-line processing is not all visible here.
 */
4120 bb_dis_pass2(PTR file, const char *fmt, ...)
4123 int l = strlen(bb_buffer);
/* Append at the current end of bb_buffer, bounded by the remaining space. */
4126 vsnprintf(bb_buffer + l, sizeof(bb_buffer) - l, fmt, ap);
4128 if ((p = strchr(bb_buffer, '\n'))) {
/* Step past the "address <func+off>:" prefix before fixups. */
4131 p += strcspn(p, ":");
4133 bb_fixup_switch_to(p);
4135 bb_buffer[0] = '\0';
/* bb_printaddr_pass2: address-printing callback for the second disassembly
 * pass.  Emits the raw hex address, then — when kdbnearsym() resolves it —
 * the nearest symbol in "<name+0xoffset>" form (offset suppressed when the
 * address is exactly the symbol start).
 */
4141 bb_printaddr_pass2(bfd_vma addr, disassemble_info *dip)
4143 kdb_symtab_t symtab;
4144 unsigned int offset;
4145 dip->fprintf_func(dip->stream, "0x%lx", addr);
4146 kdbnearsym(addr, &symtab);
4147 if (symtab.sym_name) {
4148 dip->fprintf_func(dip->stream, " <%s", symtab.sym_name);
/* Assignment-in-condition: only print "+0x%x" for a nonzero offset. */
4149 if ((offset = addr - symtab.sym_start))
4150 dip->fprintf_func(dip->stream, "+0x%x", offset);
4151 dip->fprintf_func(dip->stream, ">");
4155 /* Set the starting register and memory state for the current bb */
/* bb_start_block0_special: initialize the register and memory state for
 * block 0 of a function that has a non-standard entry state (assembler
 * labels listed in bb_special_cases).  The matching table entry supplies
 * per-register values/offsets and memory contents (e.g. a pt_regs layout);
 * skip_regs/skip_mem bitmaps mark entries that do not apply to this label.
 * NOTE(review): some lines are elided in this listing (loop increments,
 * continue/break statements); the control flow shown is incomplete.
 */
4158 bb_start_block0_special(void)
4161 short offset_address;
4162 enum bb_reg_code reg, value;
4163 struct bb_name_state *r;
/* Find the bb_special_cases entry for the current function start. */
4164 for (i = 0, r = bb_special_cases;
4165 i < ARRAY_SIZE(bb_special_cases);
4167 if (bb_func_start == r->address && r->fname == NULL)
4172 /* Set the running registers */
4173 for (reg = BBRG_RAX; reg < r->regs_size; ++reg) {
4174 value = r->regs[reg].value;
4175 if (test_bit(value, r->skip_regs.bits)) {
4176 /* this regs entry is not defined for this label */
4179 bb_reg_code_set_value(reg, value);
4180 bb_reg_code_set_offset(reg, r->regs[reg].offset);
4182 /* Set any memory contents, e.g. pt_regs. Adjust RSP as required. */
/* First pass over mem[]: find the deepest offset so RSP can be adjusted
 * once, before any memory contents are recorded. */
4184 for (i = 0; i < r->mem_size; ++i) {
4185 offset_address = max_t(int,
4186 r->mem[i].offset_address + KDB_WORD_SIZE,
4189 if (bb_reg_code_offset(BBRG_RSP) > -offset_address)
4190 bb_adjust_osp(BBRG_RSP, -offset_address - bb_reg_code_offset(BBRG_RSP));
/* Second pass: record which register value each stack slot holds. */
4191 for (i = 0; i < r->mem_size; ++i) {
4192 value = r->mem[i].value;
4193 if (test_bit(value, r->skip_mem.bits)) {
4194 /* this memory entry is not defined for this label */
4197 bb_memory_set_reg_value(BBRG_RSP, r->mem[i].offset_address,
/* A register whose entry value lives in memory is itself undefined. */
4199 bb_reg_set_undef(value);
/* bb_pass2_start_block: compute the input register/memory state for basic
 * block `number`.  Block 0 starts from well-defined inputs (with special
 * cases handled by bb_start_block0_special()); every other block merges the
 * exit states of all jmp entries that target it.  Registers that differ
 * between inputs become undefined; memory locations that differ or are
 * missing from any input are deleted.  The memory merge relies on both
 * state->memory arrays being sorted in descending order with undefined
 * entries at the end.
 * NOTE(review): this listing elides lines (loop steps, some branches); the
 * code below is the sampled subset only.
 */
4205 bb_pass2_start_block(int number)
4207 int i, j, k, first, changed;
4209 struct bb_jmp *bb_jmp;
4210 struct bb_reg_state *state;
4211 struct bb_memory_contains *c1, *c2;
/* Start from an all-zero state sized for the maximum memory entries. */
4212 bb_reg_state->mem_count = bb_reg_state_max;
4213 size = bb_reg_state_size(bb_reg_state);
4214 memset(bb_reg_state, 0, size);
4217 /* The first block is assumed to have well defined inputs */
4219 /* Some assembler labels have non-standard entry
4222 bb_start_block0_special();
4223 bb_reg_state_print(bb_reg_state);
4227 /* Merge all the input states for the current bb together */
4230 for (i = 0; i < bb_jmp_count; ++i) {
4231 bb_jmp = bb_jmp_list + i;
/* Only jmp entries that land on this block's start contribute. */
4232 if (bb_jmp->to != bb_curr->start)
4234 state = bb_jmp->state;
/* The first contributing state is copied wholesale; later ones merge. */
4238 size = bb_reg_state_size(state);
4239 memcpy(bb_reg_state, state, size);
4240 KDB_DEBUG_BB(" first state %p\n", state);
4241 bb_reg_state_print(bb_reg_state);
4246 KDB_DEBUG_BB(" merging state %p\n", state);
4247 /* Merge the register states */
4248 for (j = 0; j < ARRAY_SIZE(state->contains); ++j) {
4249 if (memcmp(bb_reg_state->contains + j,
4250 state->contains + j,
4251 sizeof(bb_reg_state->contains[0]))) {
4252 /* Different states for this register from two
4253 * or more inputs, make it undefined.
4255 if (bb_reg_state->contains[j].value ==
4257 KDB_DEBUG_BB(" ignoring %s\n",
4258 bbrg_name[j + BBRG_RAX]);
4260 bb_reg_set_undef(BBRG_RAX + j);
4266 /* Merge the memory states. This relies on both
4267 * bb_reg_state->memory and state->memory being sorted in
4268 * descending order, with undefined entries at the end.
4270 c1 = bb_reg_state->memory;
/* Two-cursor merge walk over the sorted memory lists. */
4273 while (j < bb_reg_state->mem_count &&
4274 k < state->mem_count) {
4275 if (c1->offset_address < c2->offset_address) {
4276 KDB_DEBUG_BB_OFFSET(c2->offset_address,
4277 " ignoring c2->offset_address ",
4283 if (c1->offset_address > c2->offset_address) {
4284 /* Memory location is not in all input states,
4285 * delete the memory location.
4287 bb_delete_memory(c1->offset_address);
4293 if (memcmp(c1, c2, sizeof(*c1))) {
4294 /* Same location, different contents, delete
4295 * the memory location.
4297 bb_delete_memory(c1->offset_address);
4298 KDB_DEBUG_BB_OFFSET(c2->offset_address,
4299 " ignoring c2->offset_address ",
/* Any leftover entries in the accumulated state were not present in
 * this input state, so they are deleted as well. */
4308 while (j < bb_reg_state->mem_count) {
4309 bb_delete_memory(c1->offset_address);
4316 KDB_DEBUG_BB(" final state\n");
4317 bb_reg_state_print(bb_reg_state);
4321 /* We have reached the exit point from the current function, either a call to
4322 * the next function or the instruction that was about to executed when an
4323 * interrupt occurred. Save the current register state in bb_exit_state.
/* bb_save_exit_state: snapshot the current register/memory state into a
 * freshly allocated bb_exit_state.  Any previous snapshot is freed first;
 * the live state is canonicalized before being sized and copied.  On
 * allocation failure a message is printed (the follow-up action falls on
 * an elided line of this listing).
 */
4327 bb_save_exit_state(void)
4330 debug_kfree(bb_exit_state);
4331 bb_exit_state = NULL;
4332 bb_reg_state_canonicalize();
4333 size = bb_reg_state_size(bb_reg_state);
4334 bb_exit_state = debug_kmalloc(size, GFP_ATOMIC);
4335 if (!bb_exit_state) {
4336 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
4340 memcpy(bb_exit_state, bb_reg_state, size);
/* bb_pass2_do_changed_blocks: one sweep of the pass-2 dataflow iteration.
 * For each basic block marked changed whose inputs are available (at most
 * `allow_missing` input states may be absent), replay its instructions
 * through the register-state analyser, saving the exit state when the
 * target address is reached, and propagate the result to the drop-through
 * successor.  Returns 1 if any block is still marked changed (caller should
 * retry with a larger allow_missing), 0 when all blocks are done.
 * NOTE(review): some lines are elided in this listing; loop steps and a
 * few statements between the numbered lines are not shown.
 */
4344 bb_pass2_do_changed_blocks(int allow_missing)
4346 int i, j, missing, changed, maxloops;
4348 struct bb_jmp *bb_jmp;
4349 KDB_DEBUG_BB("\n %s: allow_missing %d\n", __FUNCTION__, allow_missing);
4350 /* Absolute worst case is we have to iterate over all the basic blocks
4351 * in an "out of order" state, each iteration losing one register or
4352 * memory state. Any more loops than that is a bug. "out of order"
4353 * means that the layout of blocks in memory does not match the logic
4354 * flow through those blocks so (for example) block 27 comes before
4355 * block 2. To allow for out of order blocks, multiply maxloops by the
4358 maxloops = (KDB_INT_REGISTERS + bb_reg_state_max) * bb_count;
4362 for (i = 0; i < bb_count; ++i) {
4363 bb_curr = bb_list[i];
4364 if (!bb_curr->changed)
/* Count how many of this block's input states are still missing. */
4367 for (j = 0, bb_jmp = bb_jmp_list;
4370 if (bb_jmp->to == bb_curr->start &&
4374 if (missing > allow_missing)
4376 bb_curr->changed = 0;
4378 KDB_DEBUG_BB("\n bb[%d]\n", i);
4379 bb_pass2_start_block(i);
/* Replay each instruction in the block; kdba_id_printinsn() returns the
 * instruction length, advancing addr. */
4380 for (addr = bb_curr->start;
4381 addr <= bb_curr->end; ) {
4382 bb_curr_addr = addr;
4383 if (addr == bb_exit_addr)
4384 bb_save_exit_state();
4385 addr += kdba_id_printinsn(addr, &kdb_di);
4386 kdb_di.fprintf_func(NULL, "\n");
4390 if (!bb_exit_state) {
4391 /* ATTRIB_NORET functions are a problem with
4392 * the current gcc. Allow the trailing address
4395 if (addr == bb_exit_addr ||
4396 addr == bb_exit_addr + 1)
4397 bb_save_exit_state();
/* Propagate the exit state to the next sequential block when this
 * block falls through. */
4399 if (bb_curr->drop_through)
4400 bb_transfer(bb_curr->end,
4401 bb_list[i+1]->start, 1);
4403 if (maxloops-- == 0) {
4404 kdb_printf("\n\n%s maxloops reached\n",
4411 for (i = 0; i < bb_count; ++i) {
4412 bb_curr = bb_list[i];
4413 if (bb_curr->changed)
4414 return 1; /* more to do, increase allow_missing */
4416 return 0; /* all blocks done */
4419 /* Assume that the current function is a pass through function that does not
4420 * refer to its register parameters. Exclude known asmlinkage functions and
4421 * assume the other functions actually use their registers.
/* bb_assume_pass_through: heuristically treat the current function as a
 * "pass through" function — one that has memory parameters but does not
 * refer to its register parameters — and set bb_reg_params = REGPARM.
 * Known asmlinkage entry points, syscalls (sys_*/compat_sys_*) and a list
 * of special functions are excluded because they genuinely use (or do not
 * have) register parameters.  A detailed explanation is printed only the
 * first time (first_time); later calls print the short form.
 * NOTE(review): return statements and the first_time reset fall on elided
 * lines of this listing.
 */
4425 bb_assume_pass_through(void)
4427 static int first_time = 1;
4428 if (strncmp(bb_func_name, "sys_", 4) == 0 ||
4429 strncmp(bb_func_name, "compat_sys_", 11) == 0 ||
4430 strcmp(bb_func_name, "schedule") == 0 ||
4431 strcmp(bb_func_name, "do_softirq") == 0 ||
4432 strcmp(bb_func_name, "printk") == 0 ||
4433 strcmp(bb_func_name, "vprintk") == 0 ||
4434 strcmp(bb_func_name, "preempt_schedule") == 0 ||
4435 strcmp(bb_func_name, "start_kernel") == 0 ||
4436 strcmp(bb_func_name, "csum_partial") == 0 ||
4437 strcmp(bb_func_name, "csum_partial_copy_generic") == 0 ||
4438 strcmp(bb_func_name, "math_state_restore") == 0 ||
4439 strcmp(bb_func_name, "panic") == 0 ||
4440 strcmp(bb_func_name, "kdb_printf") == 0 ||
4441 strcmp(bb_func_name, "kdb_interrupt") == 0)
4443 if (bb_asmlinkage_arch())
4445 bb_reg_params = REGPARM;
/* First occurrence gets the long explanatory message... */
4447 kdb_printf(" %s has memory parameters but no register "
4448 "parameters.\n Assuming it is a 'pass "
4449 "through' function that does not refer to "
4450 "its register\n parameters and setting %d "
4451 "register parameters\n",
4452 bb_func_name, REGPARM);
/* ...subsequent occurrences get the short form. */
4456 kdb_printf(" Assuming %s is 'pass through' with %d register "
4458 bb_func_name, REGPARM);
/* NOTE(review): the function header for this body is not present in this
 * listing; from the statements it is presumably the pass-2 driver
 * (bb_pass2 or similar) — confirm against the full source.  It installs
 * the pass-2 disassembler callbacks, allocates bb_reg_state, then runs
 * bb_pass2_do_changed_blocks() with a progressively larger allow_missing
 * until all blocks settle, finally deriving the register-parameter count
 * (falling back to bb_assume_pass_through()) and printing debug summaries.
 */
4465 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4466 kdb_printf("%s: start\n", __FUNCTION__);
/* Route pass-2 disassembly output through the bb_* capture callbacks. */
4468 kdb_di.fprintf_func = bb_dis_pass2;
4469 kdb_di.print_address_func = bb_printaddr_pass2;
4471 bb_reg_state = debug_kmalloc(sizeof(*bb_reg_state), GFP_ATOMIC);
4472 if (!bb_reg_state) {
4473 kdb_printf("\n\n%s: out of debug_kmalloc\n", __FUNCTION__);
4477 bb_list[0]->changed = 1;
4479 /* If a block does not have all its input states available then it is
4480 * possible for a register to initially appear to hold a known value,
4481 * but when other inputs are available then it becomes a variable
4482 * value. The initial false state of "known" can generate false values
4483 * for other registers and can even make it look like stack locations
4484 * are being changed.
4486 * To avoid these false positives, only process blocks which have all
4487 * their inputs defined. That gives a clean depth first traversal of
4488 * the tree, except for loops. If there are any loops, then start
4489 * processing blocks with one missing input, then two missing inputs
4492 * Absolute worst case is we have to iterate over all the jmp entries,
4493 * each iteration allowing one more missing input. Any more loops than
4494 * that is a bug. Watch out for the corner case of 0 jmp entries.
4496 for (allow_missing = 0; allow_missing <= bb_jmp_count; ++allow_missing) {
4497 if (!bb_pass2_do_changed_blocks(allow_missing))
4502 if (allow_missing > bb_jmp_count) {
4503 kdb_printf("\n\n%s maxloops reached\n",
/* Memory parameters imply the register parameter slots are all used. */
4509 if (bb_memory_params && bb_reg_params)
4510 bb_reg_params = REGPARM;
4514 bb_assume_pass_through();
4515 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
4516 kdb_printf("%s: end bb_reg_params %d bb_memory_params %d\n",
4517 __FUNCTION__, bb_reg_params, bb_memory_params);
4518 if (bb_exit_state) {
4519 kdb_printf("%s: bb_exit_state at " kdb_bfd_vma_fmt0 "\n",
4520 __FUNCTION__, bb_exit_addr);
4521 bb_do_reg_state_print(bb_exit_state);
/* NOTE(review): the function header is not present in this listing; this
 * body is presumably bb_cleanup — confirm against the full source.  It
 * releases all per-function analysis state: the block list, the jmp list
 * (dropping each ref-counted state), the live register state, the saved
 * exit state, and resets all the associated counters to zero.
 */
4531 struct bb_reg_state *state;
4536 debug_kfree(bb_list);
4538 bb_count = bb_max = 0;
/* States hanging off jmp entries are shared; free only on last ref. */
4539 for (i = 0; i < bb_jmp_count; ++i) {
4540 state = bb_jmp_list[i].state;
4541 if (state && --state->ref_count == 0)
4544 debug_kfree(bb_jmp_list);
4546 bb_jmp_count = bb_jmp_max = 0;
4547 debug_kfree(bb_reg_state);
4548 bb_reg_state = NULL;
4549 bb_reg_state_max = 0;
4550 debug_kfree(bb_exit_state);
4551 bb_exit_state = NULL;
4552 bb_reg_params = bb_memory_params = 0;
/* bb_spurious_global_label: test whether func_name is one of the known
 * "spurious" global labels (assembler labels that sit in the middle of
 * another function's code) listed in bb_spurious[].  The return
 * statements fall on lines elided from this listing; presumably nonzero
 * on a match — confirm against the full source.
 */
4557 bb_spurious_global_label(const char *func_name)
4560 for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) {
4561 if (strcmp(bb_spurious[i], func_name) == 0)
4567 /* Given the current actual register contents plus the exit state deduced from
4568 * a basic block analysis of the current function, rollback the actual register
4569 * contents to the values they had on entry to this function.
/* bb_actual_rollback: given the actual register contents (bb_actual) plus
 * the exit state deduced by the basic-block analysis (bb_exit_state), roll
 * the actual contents back to the values they had on entry to the current
 * function.  The entry values are recovered in three steps: (1) derive the
 * original stack pointer (osp) from any register known to map the stack,
 * (2) recover registers whose entry values were copied into other
 * registers, (3) recover registers whose entry values were saved on the
 * stack, validating each address against the activation record's logical
 * stack bounds.  The results are built in `new` and copied over bb_actual
 * at the end.
 * NOTE(review): this listing elides some lines (returns, loop steps);
 * the statements below are the sampled subset.
 */
4573 bb_actual_rollback(const struct kdb_activation_record *ar)
4575 int i, offset_address;
4576 struct bb_memory_contains *c;
4577 enum bb_reg_code reg;
4578 unsigned long address, osp = 0;
4579 struct bb_actual new[ARRAY_SIZE(bb_actual)];
4582 if (!bb_exit_state) {
4583 kdb_printf("%s: no bb_exit_state, cannot rollback\n",
/* Work on a copy of the exit state in the live bb_reg_state buffer. */
4588 memcpy(bb_reg_state, bb_exit_state, bb_reg_state_size(bb_exit_state));
4589 memset(new, 0, sizeof(new));
4591 /* The most important register for obtaining saved state is rsp so get
4592 * its new value first. Prefer rsp if it is valid, then other
4593 * registers. Saved values of rsp in memory are unusable without a
4594 * register that points to memory.
4596 if (!bb_actual_valid(BBRG_RSP)) {
4597 kdb_printf("%s: no starting value for RSP, cannot rollback\n",
4602 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4603 kdb_printf("%s: rsp " kdb_bfd_vma_fmt0,
4604 __FUNCTION__, bb_actual_value(BBRG_RSP));
/* If RSP itself does not map the original stack pointer, scan the other
 * integer registers for one that does and is actually valid. */
4606 if (!bb_is_osp_defined(i)) {
4607 for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
4608 if (bb_is_osp_defined(i) && bb_actual_valid(i))
4612 if (bb_is_osp_defined(i) && bb_actual_valid(i)) {
/* osp = actual value of the mapping register minus its recorded
 * offset from the original stack pointer. */
4613 osp = new[BBRG_RSP - BBRG_RAX].value =
4614 bb_actual_value(i) - bb_reg_code_offset(i);
4615 new[BBRG_RSP - BBRG_RAX].valid = 1;
4616 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4617 kdb_printf(" -> osp " kdb_bfd_vma_fmt0 "\n", osp);
4619 bb_actual_set_valid(BBRG_RSP, 0);
4620 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4621 kdb_printf(" -> undefined\n");
4622 kdb_printf("%s: no ending value for RSP, cannot rollback\n",
4628 /* Now the other registers. First look at register values that have
4629 * been copied to other registers.
4631 for (i = BBRG_RAX; i < BBRG_RAX + KDB_INT_REGISTERS; ++i) {
4632 reg = bb_reg_code_value(i);
4633 if (bb_is_int_reg(reg)) {
/* Register i currently holds the entry value of register `reg`;
 * move the actual contents to reg's slot in `new`. */
4634 new[reg - BBRG_RAX] = bb_actual[i - BBRG_RAX];
4635 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
4636 kdb_printf("%s: %s is in %s ",
4640 if (bb_actual_valid(i))
4641 kdb_printf(" -> " kdb_bfd_vma_fmt0 "\n",
4642 bb_actual_value(i));
4644 kdb_printf("(invalid)\n");
4649 /* Finally register values that have been saved on stack */
4650 for (i = 0, c = bb_reg_state->memory;
4651 i < bb_reg_state->mem_count;
4653 offset_address = c->offset_address;
4655 if (!bb_is_int_reg(reg))
4657 address = osp + offset_address;
/* Reject saved slots that fall outside the logical stack; their
 * contents cannot be trusted. */
4658 if (address < ar->stack.logical_start ||
4659 address >= ar->stack.logical_end) {
4660 new[reg - BBRG_RAX].value = 0;
4661 new[reg - BBRG_RAX].valid = 0;
4662 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4663 kdb_printf("%s: %s -> undefined\n",
4667 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM)) {
4668 kdb_printf("%s: %s -> *(osp",
4671 KDB_DEBUG_BB_OFFSET_PRINTF(offset_address, "", " ");
4672 kdb_printf(kdb_bfd_vma_fmt0, address);
4674 new[reg - BBRG_RAX].value = *(bfd_vma *)address;
4675 new[reg - BBRG_RAX].valid = 1;
4676 if (KDB_DEBUG(BB) | KDB_DEBUG(BB_SUMM))
4677 kdb_printf(") = " kdb_bfd_vma_fmt0 "\n",
4678 new[reg - BBRG_RAX].value);
/* Commit the recovered entry values as the new actual contents. */
4682 memcpy(bb_actual, new, sizeof(bb_actual));
4685 /* Return true if the current function is an interrupt handler */
/* bb_interrupt_handler: return whether the current function is an
 * interrupt handler.  First the function name is checked against the
 * bb_hardware_handlers list; otherwise the instruction at rip is examined:
 * if it is a jmp rel32 (0xe9) or jmp rel8 (0xeb) whose target is one of
 * the common interrupt exit paths (ret_from_intr, common_interrupt,
 * error_entry) the function is treated as a handler.  Returns 0 when the
 * code bytes cannot be read.  The positive-return statements fall on
 * elided lines of this listing.
 */
4688 bb_interrupt_handler(kdb_machreg_t rip)
4690 unsigned long disp8, disp32, target, addr = (unsigned long)rip;
4691 unsigned char code[5];
4694 for (i = 0; i < ARRAY_SIZE(bb_hardware_handlers); ++i)
4695 if (strcmp(bb_func_name, bb_hardware_handlers[i]) == 0)
4698 /* Given the large number of interrupt handlers, it is easiest to look
4699 * at the next instruction and see if it is a jmp to the common exit
/* Fetch the opcode byte plus both possible displacement widths. */
4702 if (kdb_getarea(code, addr) ||
4703 kdb_getword(&disp32, addr+1, 4) ||
4704 kdb_getword(&disp8, addr+1, 1))
4705 return 0; /* not a valid code address */
4706 if (code[0] == 0xe9) {
4707 target = addr + (s32) disp32 + 5; /* jmp disp32 */
4708 if (target == bb_ret_from_intr ||
4709 target == bb_common_interrupt ||
4710 target == bb_error_entry)
4713 if (code[0] == 0xeb) {
4714 target = addr + (s8) disp8 + 2; /* jmp disp8 */
4715 if (target == bb_ret_from_intr ||
4716 target == bb_common_interrupt ||
4717 target == bb_error_entry)
4724 /* Copy argument information that was deduced by the basic block analysis and
4725 * rollback into the kdb stack activation record.
/* bb_arguments: copy the argument information deduced by the basic-block
 * analysis and rollback into the kdb activation record.  Register-passed
 * arguments come from the rolled-back actual register values
 * (bb_param_reg order); remaining arguments are read from successive
 * stack words above the entry RSP.  ar->valid.bits records which argument
 * slots hold trustworthy values.
 */
4729 bb_arguments(struct kdb_activation_record *ar)
4732 enum bb_reg_code reg;
4734 ar->args = bb_reg_params + bb_memory_params;
4735 bitmap_zero(ar->valid.bits, KDBA_MAXARGS);
4736 for (i = 0; i < bb_reg_params; ++i) {
4737 reg = bb_param_reg[i];
4738 if (bb_actual_valid(reg)) {
4739 ar->arg[i] = bb_actual_value(reg);
4740 set_bit(i, ar->valid.bits);
/* Memory arguments require a valid entry stack pointer. */
4743 if (!bb_actual_valid(BBRG_RSP))
4745 rsp = bb_actual_value(BBRG_RSP);
4746 for (i = bb_reg_params; i < ar->args; ++i) {
/* Skip the return address slot, then one word per argument. */
4747 rsp += KDB_WORD_SIZE;
4748 if (kdb_getarea(ar->arg[i], rsp) == 0)
4749 set_bit(i, ar->valid.bits);
4753 /* Given an exit address from a function, decompose the entire function into
4754 * basic blocks and determine the register state at the exit point.
/* kdb_bb: entry point for analysing one function.  Given an exit address
 * inside the function, resolve the enclosing symbol, widen the analysed
 * range past any "spurious" global labels (assembler labels embedded in
 * the middle of real functions, both before and after), and set the
 * per-function globals (bb_func_name, bb_func_start, bb_func_end,
 * bb_exit_addr, bb_mod_name).  The actual pass-1/pass-2 invocations and
 * the return statements fall on lines elided from this listing.
 */
4758 kdb_bb(unsigned long exit)
4760 kdb_symtab_t symtab;
4761 if (!kdbnearsym(exit, &symtab)) {
4762 kdb_printf("%s: address " kdb_bfd_vma_fmt0 " not recognised\n",
4763 __FUNCTION__, exit);
4767 bb_exit_addr = exit;
4768 bb_mod_name = symtab.mod_name;
4769 bb_func_name = symtab.sym_name;
4770 bb_func_start = symtab.sym_start;
4771 bb_func_end = symtab.sym_end;
4772 /* Various global labels exist in the middle of assembler code and have
4773 * a non-standard state. Ignore these labels and use the start of the
4774 * previous label instead.
/* Walk backwards over consecutive spurious labels to the real start. */
4776 while (bb_spurious_global_label(symtab.sym_name)) {
4777 if (!kdbnearsym(symtab.sym_start - 1, &symtab))
4779 bb_func_start = symtab.sym_start;
4781 bb_mod_name = symtab.mod_name;
4782 bb_func_name = symtab.sym_name;
4783 bb_func_start = symtab.sym_start;
4784 /* Ignore spurious labels past this point and use the next non-spurious
4785 * label as the end point.
4787 if (kdbnearsym(bb_func_end, &symtab)) {
4788 while (bb_spurious_global_label(symtab.sym_name)) {
4789 bb_func_end = symtab.sym_end;
4790 if (!kdbnearsym(symtab.sym_end + 1, &symtab))
/* Failure report printed when the analysis gives up mid-function. */
4798 kdb_printf("%s: " kdb_bfd_vma_fmt0
4799 " [%s]%s failed at " kdb_bfd_vma_fmt0 "\n\n",
4801 bb_mod_name, bb_func_name, bb_curr_addr);
/* kdb_bb1: kdb shell command wrapper around the single-function analysis.
 * Parses one address argument, enables the BB debug flag so the analysis
 * traces its work, then restores the flags and the kdbnearsym cache when
 * done.  The call into the analysis itself falls on an elided line of
 * this listing.
 */
4805 kdb_bb1(int argc, const char **argv)
4807 int diag, nextarg = 1;
4809 unsigned long offset;
4811 bb_cleanup(); /* in case previous command was interrupted */
4812 kdba_id_init(&kdb_di);
4814 return KDB_ARGCOUNT;
4815 diag = kdbgetaddrarg(argc, argv, &nextarg, &addr, &offset, NULL);
/* Force BB debug output on for the duration of this command. */
4821 kdb_flags |= KDB_DEBUG_FLAG_BB << KDB_DEBUG_FLAG_SHIFT;
4824 kdb_restore_flags();
4825 kdbnearsym_cleanup();
4829 /* Run a basic block analysis on every function in the base kernel. Used as a
4830 * global sanity check to find errors in the basic block code.
/* kdb_bb_all: global sanity check — run the basic-block analysis over
 * every function in the base kernel text (_stext.._etext).  First prints
 * the relevant build configuration, validates that every bb_special_cases
 * and bb_spurious entry still resolves to a real symbol, then walks
 * kallsyms, skipping known 16-bit real-mode/wakeup symbols, kprobes
 * trampoline text and page-table data embedded in the text segment.
 * Gives up after max_errors failures.
 * NOTE(review): this listing elides lines (loop steps, the per-symbol
 * analysis call, returns); the statements below are the sampled subset.
 */
4834 kdb_bb_all(int argc, const char **argv)
4837 const char *symname;
4839 int i, max_errors = 20;
4840 struct bb_name_state *r;
4841 kdb_printf("%s: build variables:"
4842 " CCVERSION \"" __stringify(CCVERSION) "\""
4843 #ifdef CONFIG_X86_64
4846 #ifdef CONFIG_4KSTACKS
4849 #ifdef CONFIG_PREEMPT
4855 #ifdef CONFIG_FRAME_POINTER
4856 " CONFIG_FRAME_POINTER"
4858 #ifdef CONFIG_TRACE_IRQFLAGS
4859 " CONFIG_TRACE_IRQFLAGS"
4861 #ifdef CONFIG_HIBERNATION
4862 " CONFIG_HIBERNATION"
4864 #ifdef CONFIG_KPROBES
4870 #ifdef CONFIG_MATH_EMULATION
4871 " CONFIG_MATH_EMULATION"
4876 #ifdef CONFIG_DEBUG_INFO
4877 " CONFIG_DEBUG_INFO"
4882 " REGPARM=" __stringify(REGPARM)
4883 "\n\n", __FUNCTION__);
/* Validate the special-case table against the live symbol table. */
4884 for (i = 0, r = bb_special_cases;
4885 i < ARRAY_SIZE(bb_special_cases);
4888 kdb_printf("%s: cannot find special_case name %s\n",
4889 __FUNCTION__, r->name);
4891 for (i = 0; i < ARRAY_SIZE(bb_spurious); ++i) {
4892 if (!kallsyms_lookup_name(bb_spurious[i]))
4893 kdb_printf("%s: cannot find spurious label %s\n",
4894 __FUNCTION__, bb_spurious[i]);
/* Position the kallsyms walk at the start of kernel text. */
4896 while ((symname = kdb_walk_kallsyms(&pos))) {
4897 if (strcmp(symname, "_stext") == 0 ||
4898 strcmp(symname, "stext") == 0)
4902 kdb_printf("%s: cannot find _stext\n", __FUNCTION__);
4905 kdba_id_init(&kdb_di);
4907 while ((symname = kdb_walk_kallsyms(&pos))) {
4908 if (strcmp(symname, "_etext") == 0)
4912 /* x86_64 has some 16 bit functions that appear between stext
4913 * and _etext. Skip them.
4915 if (strcmp(symname, "verify_cpu") == 0 ||
4916 strcmp(symname, "verify_cpu_noamd") == 0 ||
4917 strcmp(symname, "verify_cpu_sse_test") == 0 ||
4918 strcmp(symname, "verify_cpu_no_longmode") == 0 ||
4919 strcmp(symname, "verify_cpu_sse_ok") == 0 ||
4920 strcmp(symname, "mode_seta") == 0 ||
4921 strcmp(symname, "bad_address") == 0 ||
4922 strcmp(symname, "wakeup_code") == 0 ||
4923 strcmp(symname, "wakeup_code_start") == 0 ||
4924 strcmp(symname, "wakeup_start") == 0 ||
4925 strcmp(symname, "wakeup_32_vector") == 0 ||
4926 strcmp(symname, "wakeup_32") == 0 ||
4927 strcmp(symname, "wakeup_long64_vector") == 0 ||
4928 strcmp(symname, "wakeup_long64") == 0 ||
4929 strcmp(symname, "gdta") == 0 ||
4930 strcmp(symname, "idt_48a") == 0 ||
4931 strcmp(symname, "gdt_48a") == 0 ||
4932 strcmp(symname, "bogus_real_magic") == 0 ||
4933 strcmp(symname, "bogus_64_magic") == 0 ||
4934 strcmp(symname, "no_longmode") == 0 ||
4935 strcmp(symname, "mode_set") == 0 ||
4936 strcmp(symname, "mode_seta") == 0 ||
4937 strcmp(symname, "setbada") == 0 ||
4938 strcmp(symname, "check_vesa") == 0 ||
4939 strcmp(symname, "check_vesaa") == 0 ||
4940 strcmp(symname, "_setbada") == 0 ||
4941 strcmp(symname, "wakeup_stack_begin") == 0 ||
4942 strcmp(symname, "wakeup_stack") == 0 ||
4943 strcmp(symname, "wakeup_level4_pgt") == 0 ||
4944 strcmp(symname, "acpi_copy_wakeup_routine") == 0 ||
4945 strcmp(symname, "wakeup_end") == 0 ||
4946 strcmp(symname, "do_suspend_lowlevel_s4bios") == 0 ||
4947 strcmp(symname, "do_suspend_lowlevel") == 0 ||
4948 strcmp(symname, "wakeup_pmode_return") == 0 ||
4949 strcmp(symname, "restore_registers") == 0)
4951 /* __kprobes_text_end contains branches to the middle of code,
4952 * with undefined states.
4954 if (strcmp(symname, "__kprobes_text_end") == 0)
4956 /* Data in the middle of the text segment :( */
4957 if (strcmp(symname, "level2_kernel_pgt") == 0 ||
4958 strcmp(symname, "level3_kernel_pgt") == 0)
4960 if (bb_spurious_global_label(symname))
4962 if ((addr = kallsyms_lookup_name(symname)) == 0)
4964 // kdb_printf("BB " kdb_bfd_vma_fmt0 " %s\n", addr, symname);
4965 bb_cleanup(); /* in case previous command was interrupted */
4966 kdbnearsym_cleanup();
/* Long-running command: keep the NMI watchdog from firing. */
4968 touch_nmi_watchdog();
4970 if (max_errors-- == 0) {
4971 kdb_printf("%s: max_errors reached, giving up\n",
4981 kdbnearsym_cleanup();
4986 *=============================================================================
4988 * Everything above this line is doing basic block analysis, function by
4989 * function. Everything below this line uses the basic block data to do a
4990 * complete backtrace over all functions that are used by a process.
4992 *=============================================================================
4996 /*============================================================================*/
4998 /* Most of the backtrace code and data is common to x86_64 and i386. This */
4999 /* large ifdef contains all of the differences between the two architectures. */
5001 /* Make sure you update the correct section of this ifdef. */
5003 /*============================================================================*/
5010 #ifdef CONFIG_X86_64
5012 #define ARCH_NORMAL_PADDING (16 * 8)
5014 /* x86_64 has multiple alternate stacks, with different sizes and different
5015 * offsets to get the link from one stack to the next. All of the stacks are
5016 * in the per_cpu area: either in the orig_ist or irq_stack_ptr. Debug events
5017 * can even have multiple nested stacks within the single physical stack,
5018 * each nested stack has its own link and some of those links are wrong.
5020 * Consistent it's not!
5022 * Do not assume that these stacks are aligned on their size.
5024 #define INTERRUPT_STACK (N_EXCEPTION_STACKS + 1)
/* kdba_get_stack_info_alternate (x86_64 variant): decide whether addr lies
 * on one of the per-cpu alternate stacks (the IST exception stacks or the
 * irq stack) and, if so, fill in ar->stack with the physical/logical
 * bounds, the link word to the previous stack, and an identifying name.
 * cpu < 0 (presumably — the guard itself is on an elided line) means
 * "search all online cpus"; otherwise only the given cpu is checked.
 * Debug stacks can nest, so the containing nested_size-slice is located
 * before the bounds are recorded.
 * NOTE(review): this listing elides lines (struct field names in the
 * stack_data declaration, loop steps, returns); the statements below are
 * the sampled subset.
 */
5026 kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
5027 struct kdb_activation_record *ar)
5031 unsigned int total_size;
5032 unsigned int nested_size;
5034 } *sdp, stack_data[] = {
/* Per-stack descriptors: name, total size, nested slice size, and the
 * offset of the link to the next stack within a slice. */
5035 [STACKFAULT_STACK - 1] = { "stackfault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5036 [DOUBLEFAULT_STACK - 1] = { "doublefault", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5037 [NMI_STACK - 1] = { "nmi", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5038 [DEBUG_STACK - 1] = { "debug", DEBUG_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5039 [MCE_STACK - 1] = { "machine check", EXCEPTION_STKSZ, EXCEPTION_STKSZ, EXCEPTION_STKSZ - 2*sizeof(void *) },
5040 [INTERRUPT_STACK - 1] = { "interrupt", IRQ_STACK_SIZE, IRQ_STACK_SIZE, IRQ_STACK_SIZE - sizeof(void *) },
5042 unsigned long total_start = 0, total_size, total_end;
5044 extern unsigned long kdba_orig_ist(int, int);
5046 for (sd = 0, sdp = stack_data;
5047 sd < ARRAY_SIZE(stack_data);
5049 total_size = sdp->total_size;
5051 continue; /* in case stack_data[] has any holes */
5053 /* Arbitrary address which can be on any cpu, see if it
5054 * falls within any of the alternate stacks
5057 for_each_online_cpu(c) {
/* The irq stack top comes from irq_stack_ptr; the IST stacks from
 * the per-cpu orig_ist table, indexed by the stack number. */
5058 if (sd == INTERRUPT_STACK - 1)
5059 total_end = (unsigned long)per_cpu(irq_stack_ptr, c);
5061 total_end = per_cpu(orig_ist, c).ist[sd];
5062 total_start = total_end - total_size;
5063 if (addr >= total_start && addr < total_end) {
5072 /* Only check the supplied or found cpu */
5073 if (sd == INTERRUPT_STACK - 1)
5074 total_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
5076 total_end = per_cpu(orig_ist, cpu).ist[sd];
5077 total_start = total_end - total_size;
5078 if (addr >= total_start && addr < total_end) {
5085 /* find which nested stack the address is in */
5086 while (addr > total_start + sdp->nested_size)
5087 total_start += sdp->nested_size;
5088 ar->stack.physical_start = total_start;
5089 ar->stack.physical_end = total_start + sdp->nested_size;
5090 ar->stack.logical_start = total_start;
/* The logical end is where the link to the previous stack lives. */
5091 ar->stack.logical_end = total_start + sdp->next;
5092 ar->stack.next = *(unsigned long *)ar->stack.logical_end;
5093 ar->stack.id = sdp->id;
5095 /* Nasty: when switching to the interrupt stack, the stack state of the
5096 * caller is split over two stacks, the original stack and the
5097 * interrupt stack. One word (the previous frame pointer) is stored on
5098 * the interrupt stack, the rest of the interrupt data is in the old
5099 * frame. To make the interrupted stack state look as though it is
5100 * contiguous, copy the missing word from the interrupt stack to the
5101 * original stack and adjust the new stack pointer accordingly.
5104 if (sd == INTERRUPT_STACK - 1) {
5105 *(unsigned long *)(ar->stack.next - KDB_WORD_SIZE) =
5107 ar->stack.next -= KDB_WORD_SIZE;
5111 /* rip is not in the thread struct for x86_64. We know that the stack value
5112 * was saved in schedule near the label thread_return. Setting rip to
5113 * thread_return lets the stack trace find that we are in schedule and
5114 * correctly decode its prologue.
/* x86_64 variant: the task argument is unused; the address of the
 * thread_return label (bb_thread_return, resolved via kallsyms in
 * kdba_bt_x86_init) is reported for every blocked task. */
5117 static kdb_machreg_t
5118 kdba_bt_stack_rip(const struct task_struct *p)
5120 return bb_thread_return;
5123 #else /* !CONFIG_X86_64 */
/* Bytes of unusable padding at the top of a normal i386 kernel stack;
 * subtracted from physical_end to form logical_end in kdba_get_stack_info. */
5125 #define ARCH_NORMAL_PADDING (19 * 4)
5127 #ifdef CONFIG_4KSTACKS
/* Per-cpu irq/softirq context pointers; resolved by name via
 * kallsyms_lookup_name() in kdba_bt_x86_init (the kernel symbols are not
 * exported to modules). */
5128 static struct thread_info **kdba_hardirq_ctx, **kdba_softirq_ctx;
5129 #endif /* CONFIG_4KSTACKS */
5131 /* On a 4K stack kernel, hardirq_ctx and softirq_ctx are [NR_CPUS] arrays. The
5132 * first element of each per-cpu stack is a struct thread_info.
/* i386 variant: decide whether 'addr' lies on one of the per-cpu 4K irq
 * stacks and, if so, fill in ar->stack.  Without CONFIG_4KSTACKS the body
 * is empty (no alternate stacks exist on i386).
 * NOTE(review): several original lines are elided from this extract. */
5135 kdba_get_stack_info_alternate(kdb_machreg_t addr, int cpu,
5136 struct kdb_activation_record *ar)
5138 #ifdef CONFIG_4KSTACKS
5139 struct thread_info *tinfo;
/* Round the address down to its THREAD_SIZE-aligned stack base; the
 * thread_info sits at the bottom of each stack. */
5140 tinfo = (struct thread_info *)(addr & -THREAD_SIZE);
5142 /* Arbitrary address, see if it falls within any of the irq
/* No cpu supplied: scan the hardirq/softirq contexts of every online cpu. */
5146 for_each_online_cpu(cpu) {
5147 if (tinfo == kdba_hardirq_ctx[cpu] ||
5148 tinfo == kdba_softirq_ctx[cpu]) {
5156 if (tinfo == kdba_hardirq_ctx[cpu] ||
5157 tinfo == kdba_softirq_ctx[cpu]) {
/* Found: the usable (logical) region starts above the embedded
 * thread_info; the link to the interrupted stack is previous_esp. */
5158 ar->stack.physical_start = (kdb_machreg_t)tinfo;
5159 ar->stack.physical_end = ar->stack.physical_start + THREAD_SIZE;
5160 ar->stack.logical_start = ar->stack.physical_start +
5161 sizeof(struct thread_info);
5162 ar->stack.logical_end = ar->stack.physical_end;
5163 ar->stack.next = tinfo->previous_esp;
5164 if (tinfo == kdba_hardirq_ctx[cpu])
5165 ar->stack.id = "hardirq_ctx";
5167 ar->stack.id = "softirq_ctx";
5169 #endif /* CONFIG_4KSTACKS */
5172 /* rip is in the thread struct for i386 */
/* i386 variant of kdba_bt_stack_rip: a blocked task's saved instruction
 * pointer is read straight out of p->thread.ip. */
5174 static kdb_machreg_t
5175 kdba_bt_stack_rip(const struct task_struct *p)
5177 return p->thread.ip;
5180 #endif /* CONFIG_X86_64 */
5182 /* Given an address which claims to be on a stack, an optional cpu number and
5183 * an optional task address, get information about the stack.
5185 * t == NULL, cpu < 0 indicates an arbitrary stack address with no associated
5186 * struct task, the address can be in an alternate stack or any task's normal
5189 * t != NULL, cpu >= 0 indicates a running task, the address can be in an
5190 * alternate stack or that task's normal stack.
5192 * t != NULL, cpu < 0 indicates a blocked task, the address can only be in that
5193 * task's normal stack.
5195 * t == NULL, cpu >= 0 is not a valid combination.
 *
 * On success ar->stack is filled in (physical/logical bounds, next-stack
 * link, id string); on failure ar->stack.physical_start stays 0 (the
 * struct is memset below), which callers use as the "invalid" test.
 * NOTE(review): this extract elides several original lines.
 */
5199 kdba_get_stack_info(kdb_machreg_t rsp, int cpu,
5200 struct kdb_activation_record *ar,
5201 const struct task_struct *t)
5203 struct thread_info *tinfo;
5204 struct task_struct *g, *p;
/* Start from a clean record so physical_start==0 reliably means "not found". */
5205 memset(&ar->stack, 0, sizeof(ar->stack));
5207 kdb_printf("%s: " RSP "=0x%lx cpu=%d task=%p\n",
5208 __FUNCTION__, rsp, cpu, t);
/* Try the alternate (irq/exception) stacks first; they only apply when the
 * address is arbitrary or the task is running on a cpu. */
5209 if (t == NULL || cpu >= 0) {
5210 kdba_get_stack_info_alternate(rsp, cpu, ar);
5211 if (ar->stack.logical_start)
/* Not an alternate stack: round down to the THREAD_SIZE-aligned base of a
 * normal process stack and identify its owning task. */
5214 rsp &= -THREAD_SIZE;
5215 tinfo = (struct thread_info *)rsp;
5217 /* Arbitrary stack address without an associated task, see if
5218 * it falls within any normal process stack, including the idle
5221 kdb_do_each_thread(g, p) {
5222 if (tinfo == task_thread_info(p)) {
5226 } kdb_while_each_thread(g, p);
/* Also check the per-cpu idle tasks, which are not on the thread list. */
5227 for_each_online_cpu(cpu) {
5229 if (tinfo == task_thread_info(p)) {
5236 kdb_printf("%s: found task %p\n", __FUNCTION__, t);
5237 } else if (cpu >= 0) {
/* Running task: cross-check against kdb's record of what runs on this cpu. */
5239 struct kdb_running_process *krp = kdb_running_process + cpu;
5240 if (krp->p != t || tinfo != task_thread_info(t))
5243 kdb_printf("%s: running task %p\n", __FUNCTION__, t);
/* Blocked task: address must be on that task's own stack. */
5246 if (tinfo != task_thread_info(t))
5249 kdb_printf("%s: blocked task %p\n", __FUNCTION__, t);
/* Normal stack layout: thread_info at the bottom, ARCH_NORMAL_PADDING
 * of arch-defined data at the top that is not part of the trace. */
5252 ar->stack.physical_start = rsp;
5253 ar->stack.physical_end = rsp + THREAD_SIZE;
5254 ar->stack.logical_start = rsp + sizeof(struct thread_info);
5255 ar->stack.logical_end = ar->stack.physical_end - ARCH_NORMAL_PADDING;
5257 ar->stack.id = "normal";
/* Debug dump of the resolved stack record (KDB_DEBUG ARA flag). */
5260 if (ar->stack.physical_start && KDB_DEBUG(ARA)) {
5261 kdb_printf("%s: ar->stack\n", __FUNCTION__);
5262 kdb_printf(" physical_start=0x%lx\n", ar->stack.physical_start);
5263 kdb_printf(" physical_end=0x%lx\n", ar->stack.physical_end);
5264 kdb_printf(" logical_start=0x%lx\n", ar->stack.logical_start);
5265 kdb_printf(" logical_end=0x%lx\n", ar->stack.logical_end);
5266 kdb_printf(" next=0x%lx\n", ar->stack.next);
5267 kdb_printf(" id=%s\n", ar->stack.id);
5268 kdb_printf(" set MDCOUNT %ld\n",
5269 (ar->stack.physical_end - ar->stack.physical_start) /
5271 kdb_printf(" mds " kdb_machreg_fmt0 "\n",
5272 ar->stack.physical_start);
/* Print a single backtrace entry: stack pointer, symbolic rip, then up to
 * 'argcount' decoded arguments from ar (each flagged valid/invalid via
 * ar->valid.bits).  Environment variables BTSYMARG and NOSECT tune the
 * output: BTSYMARG additionally symbolizes argument values, NOSECT
 * (presumably) suppresses the section/symbol address lines — the test of
 * nosect is outside this elided view, confirm in full source.
 * NOTE(review): several original lines are elided from this extract. */
5277 bt_print_one(kdb_machreg_t rip, kdb_machreg_t rsp,
5278 const struct kdb_activation_record *ar,
5279 const kdb_symtab_t *symtab, int argcount)
5284 kdbgetintenv("BTSYMARG", &btsymarg);
5285 kdbgetintenv("NOSECT", &nosect);
5287 kdb_printf(kdb_machreg_fmt0, rsp);
5288 kdb_symbol_print(rip, symtab,
5289 KDB_SP_SPACEB|KDB_SP_VALUE);
/* Argument list: clamp to the caller's requested count, print each value
 * or "invalid" when basic-block analysis could not recover it. */
5290 if (argcount && ar->args) {
5291 int i, argc = ar->args;
5293 if (argc > argcount)
5295 for (i = 0; i < argc; i++) {
5298 if (test_bit(i, ar->valid.bits))
5299 kdb_printf("0x%lx", ar->arg[i]);
5301 kdb_printf("invalid");
/* Section and symbol address ranges for the rip's symbol. */
5306 if (symtab->sym_name) {
5310 if (symtab->sec_name && symtab->sec_start)
5311 kdb_printf(" 0x%lx 0x%lx",
5312 symtab->sec_start, symtab->sec_end);
5313 kdb_printf(" 0x%lx 0x%lx\n",
5314 symtab->sym_start, symtab->sym_end);
/* BTSYMARG: resolve each valid argument value to a nearby kernel symbol. */
5317 if (argcount && ar->args && btsymarg) {
5318 int i, argc = ar->args;
5319 kdb_symtab_t arg_symtab;
5320 for (i = 0; i < argc; i++) {
5321 kdb_machreg_t arg = ar->arg[i];
5322 if (test_bit(i, ar->valid.bits) &&
5323 kdbnearsym(arg, &arg_symtab)) {
5324 kdb_printf(" ARG %2d ", i);
5325 kdb_symbol_print(arg, &arg_symtab,
5326 KDB_SP_DEFAULT|KDB_SP_NEWLINE);
/* Switch the backtrace from the current stack to the next one linked via
 * ar->stack.next: update *rsp and the tracked BBRG_RSP value, re-resolve
 * the stack record, and print a separator unless output is suppressed.
 * NOTE(review): several original lines are elided from this extract. */
5333 kdba_bt_new_stack(struct kdb_activation_record *ar, kdb_machreg_t *rsp,
5334 int *count, int *suppress)
5336 /* Nasty: save_args builds a partial pt_regs, with r15 through
5337 * rbx not being filled in. It passes struct pt_regs* to do_IRQ (in
5338 * rdi) but the stack pointer is not adjusted to account for r15
5339 * through rbx. This has two effects :-
5341 * (1) struct pt_regs on an external interrupt actually overlaps with
5342 * the local stack area used by do_IRQ. Not only are r15-rbx
5343 * undefined, the area that claims to hold their values can even
5344 * change as the irq is processed.
5346 * (2) The back stack pointer saved for the new frame is not pointing
5347 * at pt_regs, it is pointing at rbx within the pt_regs passed to
5350 * There is nothing that I can do about (1) but I have to fix (2)
5351 * because kdb backtrace looks for the "start" address of pt_regs as it
5352 * walks back through the stacks. When switching from the interrupt
5353 * stack to another stack, we have to assume that pt_regs has been
5354 * seen and turn off backtrace suppression.
/* Leaving the "interrupt" stack implies a pt_regs frame was crossed. */
5356 int probable_pt_regs = strcmp(ar->stack.id, "interrupt") == 0;
5357 *rsp = ar->stack.next;
5359 kdb_printf("new " RSP "=" kdb_machreg_fmt0 "\n", *rsp);
5360 bb_actual_set_value(BBRG_RSP, *rsp);
/* Re-identify the stack the new rsp lives on (no task/cpu association). */
5361 kdba_get_stack_info(*rsp, -1, ar, NULL);
5362 if (!ar->stack.physical_start) {
5363 kdb_printf("+++ Cannot resolve next stack\n");
5364 } else if (!*suppress) {
5365 kdb_printf(" ======================= <%s>\n",
5369 if (probable_pt_regs)
5377 * addr Address provided to 'bt' command, if any.
5379 * p Pointer to task for 'btp' command.
5383 * zero for success, a kdb diagnostic if error
5387 * Ultimately all the bt* commands come through this routine. If
5388 * old_style is 0 then it uses the basic block analysis to get an accurate
5389 * backtrace with arguments, otherwise it falls back to the old method of
5390 * printing anything on stack that looks like a kernel address.
5392 * Allowing for the stack data pushed by the hardware is tricky. We
5393 * deduce the presence of hardware pushed data by looking for interrupt
5394 * handlers, either by name or by the code that they contain. This
5395 * information must be applied to the next function up the stack, because
5396 * the hardware data is above the saved rip for the interrupted (next)
5399 * To make things worse, the amount of data pushed is arch specific and
5400 * may depend on the rsp for the next function, not the current function.
5401 * The number of bytes pushed by hardware cannot be calculated until we
5402 * are actually processing the stack for the interrupted function and have
5405 * It is also possible for an interrupt to occur in user space and for the
5406 * interrupt handler to also be interrupted. Check the code selector
5407 * whenever the previous function is an interrupt handler and stop
5408 * backtracing if the interrupt was not in kernel space.
/* NOTE(review): this extract elides many original lines (the embedded
 * numbers jump); braces and some statements are missing from view.  Code
 * lines below are untouched; only comments were added. */
5412 kdba_bt_stack(kdb_machreg_t addr, int argcount, const struct task_struct *p,
5415 struct kdb_activation_record ar;
5416 kdb_machreg_t rip = 0, rsp = 0, prev_rsp, cs;
5417 kdb_symtab_t symtab;
5418 int rip_at_rsp = 0, count = 0, btsp = 0, suppress,
5419 interrupt_handler = 0, prev_interrupt_handler = 0, hardware_pushed,
5421 struct pt_regs *regs = NULL;
5423 kdbgetintenv("BTSP", &btsp);
5425 memset(&ar, 0, sizeof(ar));
5427 kdb_printf("Using old style backtrace, unreliable with no arguments\n");
5430 * The caller may have supplied an address at which the stack traceback
5431 * operation should begin. This address is assumed by this code to
5432 * point to a return address on the stack to be traced back.
5434 * Warning: type in the wrong address and you will get garbage in the
/* bt <addr>: read the return address stored at rsp and resolve its stack. */
5439 kdb_getword(&rip, rsp, sizeof(rip));
5442 kdba_get_stack_info(rsp, -1, &ar, NULL);
/* btp on a task that is running on some cpu: use the state kdb saved for
 * that cpu, but only if it is from the current kdb entry (seqno check)
 * and the saved pt_regs pointer is usable. */
5445 struct kdb_running_process *krp =
5446 kdb_running_process + task_cpu(p);
5451 krp->seqno >= kdb_seqno - 1 &&
5452 !KDB_NULL_REGS(regs)) {
5453 /* valid saved state, continue processing */
5456 ("Process did not save state, cannot backtrace\n");
/* Refuse to trace if the interrupted code selector is not kernel CS. */
5460 kdba_getregcontents(XCS, regs, &cs);
5461 if ((cs & 0xffff) != __KERNEL_CS) {
5462 kdb_printf("Stack is not in kernel space, backtrace not available\n");
5465 rip = krp->arch.ARCH_RIP;
5466 rsp = krp->arch.ARCH_RSP;
5467 kdba_get_stack_info(rsp, kdb_process_cpu(p), &ar, p);
5469 /* Not on cpu, assume blocked. Blocked tasks do not
5470 * have pt_regs. p->thread contains some data, alas
5471 * what it contains differs between i386 and x86_64.
5473 rip = kdba_bt_stack_rip(p);
5476 kdba_get_stack_info(rsp, -1, &ar, p);
5479 if (!ar.stack.physical_start) {
5480 kdb_printf(RSP "=0x%lx is not in a valid kernel stack, backtrace not available\n",
/* Seed the basic-block register state: only RSP is known at the start. */
5484 memset(&bb_actual, 0, sizeof(bb_actual));
5485 bb_actual_set_value(BBRG_RSP, rsp);
5486 bb_actual_set_valid(BBRG_RSP, 1);
5488 kdb_printf(RSP "%*s" RIP "%*sFunction (args)\n",
5489 2*KDB_WORD_SIZE, " ",
5490 2*KDB_WORD_SIZE, " ");
5491 if (ar.stack.next && !suppress)
5492 kdb_printf(" ======================= <%s>\n",
5496 /* Run through all the stacks */
5497 while (ar.stack.physical_start) {
5499 rip = *(kdb_machreg_t *)rsp;
5500 /* I wish that gcc was fixed to include a nop
5501 * instruction after ATTRIB_NORET functions. The lack
5502 * of a nop means that the return address points to the
5503 * start of next function, so fudge it to point to one
5506 * No, we cannot just decrement all rip values.
5507 * Sometimes an rip legally points to the start of a
5508 * function, e.g. interrupted code or hand crafted
5512 kdbnearsym(rip, &symtab);
5513 if (rip == symtab.sym_start) {
5516 kdb_printf("\tprev_noret, " RIP
5521 kdbnearsym(rip, &symtab);
/* Old-style trace: print anything that looks like kernel text. */
5523 if (__kernel_text_address(rip) && !suppress) {
5524 bt_print_one(rip, rsp, &ar, &symtab, 0);
/* Reaching the saved pt_regs means the real trace starts here: stop
 * suppressing output from this point on. */
5527 if (rsp == (unsigned long)regs) {
5528 if (ar.stack.next && suppress)
5529 kdb_printf(" ======================= <%s>\n",
5536 if (rsp >= ar.stack.logical_end) {
5539 kdba_bt_new_stack(&ar, &rsp, &count, &suppress);
5544 /* Start each analysis with no dynamic data from the
5545 * previous kdb_bb() run.
/* Track whether the previous frame was an interrupt handler; if so the
 * hardware pushed extra words (and a code selector) above the saved rip. */
5551 prev_interrupt_handler = interrupt_handler;
5552 interrupt_handler = bb_interrupt_handler(rip);
5555 if (prev_interrupt_handler) {
5556 cs = *((kdb_machreg_t *)rsp + 1) & 0xffff;
5558 bb_hardware_pushed_arch(rsp, &ar);
5561 hardware_pushed = 0;
/* Step over the saved return address plus any hardware-pushed data. */
5563 rsp += sizeof(rip) + hardware_pushed;
5565 kdb_printf("%s: " RSP " "
5567 " -> " kdb_machreg_fmt0
5568 " hardware_pushed %d"
5569 " prev_interrupt_handler %d"
5575 prev_interrupt_handler,
5577 if (rsp >= ar.stack.logical_end &&
5579 kdba_bt_new_stack(&ar, &rsp, &count,
5584 bb_actual_set_value(BBRG_RSP, rsp);
/* Replay the function's basic blocks to recover caller registers/args. */
5589 bb_actual_rollback(&ar);
/* Sanity check: an unwind must move rsp towards higher addresses. */
5592 if (bb_actual_value(BBRG_RSP) < rsp) {
5593 kdb_printf("%s: " RSP " is going backwards, "
5594 kdb_machreg_fmt0 " -> "
5595 kdb_machreg_fmt0 "\n",
5598 bb_actual_value(BBRG_RSP));
5604 bt_print_one(rip, prev_rsp, &ar, &symtab, argcount);
5607 /* Functions that terminate the backtrace */
5608 if (strcmp(bb_func_name, "cpu_idle") == 0 ||
5609 strcmp(bb_func_name, "child_rip") == 0)
5611 if (rsp >= ar.stack.logical_end &&
/* Crossing the saved pt_regs while unwinding also ends suppression. */
5614 if (rsp <= (unsigned long)regs &&
5615 bb_actual_value(BBRG_RSP) > (unsigned long)regs) {
5616 if (ar.stack.next && suppress)
5617 kdb_printf(" ======================= <%s>\n",
5622 if (cs != __KERNEL_CS) {
5623 kdb_printf("Reached user space\n");
5626 rsp = bb_actual_value(BBRG_RSP);
5628 prev_noret = bb_noret(bb_func_name);
5635 kdbnearsym_cleanup();
5638 kdb_printf("bt truncated, count limit reached\n");
5640 } else if (suppress) {
5642 ("bt did not find pt_regs - no trace produced. Suggest 'set BTSP 1'\n");
5652 * Do a backtrace starting at a specified stack address. Use this if the
5653 * heuristics get the stack decode wrong.
5656 * addr Address provided to 'bt' command.
5661 * zero for success, a kdb diagnostic if error
5665 * mds %rsp comes in handy when examining the stack to do a manual
/* Entry point for 'bt <addr>': try the accurate (basic-block) trace first,
 * then — per the visible second call with old_style=1 — presumably fall
 * back to the old-style trace; the condition between the two calls is
 * elided from this extract, confirm in full source. */
5669 int kdba_bt_address(kdb_machreg_t addr, int argcount)
5672 kdba_id_init(&kdb_di); /* kdb_bb needs this done once */
5673 ret = kdba_bt_stack(addr, argcount, NULL, 0);
5675 ret = kdba_bt_stack(addr, argcount, NULL, 1);
5682 * Do a backtrace for a specified process.
5685 * p Struct task pointer extracted by 'bt' command.
5690 * zero for success, a kdb diagnostic if error
/* Entry point for 'btp': same two-pass structure as kdba_bt_address, with
 * addr=0 so the start point is derived from the task's saved state. */
5695 int kdba_bt_process(const struct task_struct *p, int argcount)
5698 kdba_id_init(&kdb_di); /* kdb_bb needs this done once */
5699 ret = kdba_bt_stack(0, argcount, p, 0);
5701 ret = kdba_bt_stack(0, argcount, p, 1);
/* Module init: register the kdb debug commands, index the opcode-usage
 * table by first letter, and resolve (via kallsyms, since they are not
 * exported) the addresses of the entry/exit stubs that the backtrace
 * engine must recognize on the stack.
 * NOTE(review): several original lines are elided from this extract. */
5705 static int __init kdba_bt_x86_init(void)
5708 struct bb_name_state *r;
5710 kdb_register_repeat("bb1", kdb_bb1, "<vaddr>", "Analyse one basic block", 0, KDB_REPEAT_NONE);
5711 kdb_register_repeat("bb_all", kdb_bb_all, "", "Backtrace check on all built in functions", 0, KDB_REPEAT_NONE);
5713 /* Split the opcode usage table by the first letter of each set of
5714 * opcodes, for faster mapping of opcode to its operand usage.
5716 for (i = 0; i < ARRAY_SIZE(bb_opcode_usage_all); ++i) {
5717 c = bb_opcode_usage_all[i].opcode[0] - 'a';
/* First entry for this letter records the slice start; size counts it. */
5720 bb_opcode_usage[c].opcode = bb_opcode_usage_all + i;
5722 ++bb_opcode_usage[c].size;
/* A lookup failure leaves the address 0; i386-only symbols (e.g.
 * save_v86_state) are legitimately 0 on x86_64 and vice versa. */
5725 bb_common_interrupt = kallsyms_lookup_name("common_interrupt");
5726 bb_error_entry = kallsyms_lookup_name("error_entry");
5727 bb_ret_from_intr = kallsyms_lookup_name("ret_from_intr");
5728 bb_thread_return = kallsyms_lookup_name("thread_return");
5729 bb_sync_regs = kallsyms_lookup_name("sync_regs");
5730 bb_save_v86_state = kallsyms_lookup_name("save_v86_state");
5731 bb__sched_text_start = kallsyms_lookup_name("__sched_text_start");
5732 bb__sched_text_end = kallsyms_lookup_name("__sched_text_end");
5733 bb_save_args = kallsyms_lookup_name("save_args");
5734 bb_save_rest = kallsyms_lookup_name("save_rest");
5735 bb_save_paranoid = kallsyms_lookup_name("save_paranoid");
/* Resolve every named special case once, up front. */
5736 for (i = 0, r = bb_special_cases;
5737 i < ARRAY_SIZE(bb_special_cases);
5739 r->address = kallsyms_lookup_name(r->name);
5742 #ifdef CONFIG_4KSTACKS
5743 kdba_hardirq_ctx = (struct thread_info **)kallsyms_lookup_name("hardirq_ctx");
5744 kdba_softirq_ctx = (struct thread_info **)kallsyms_lookup_name("softirq_ctx");
5745 #endif /* CONFIG_4KSTACKS */
/* Module exit: unregister the two debug commands added by init. */
5750 static void __exit kdba_bt_x86_exit(void)
5752 kdb_unregister("bb1");
5753 kdb_unregister("bb_all");
5756 module_init(kdba_bt_x86_init)
5757 module_exit(kdba_bt_x86_exit)