2 * Copyright (C) 1999-2003 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * This file implements call frame unwind support for the Linux
7 * kernel. Parsing and processing the unwind information is
8 * time-consuming, so this implementation translates the unwind
9 * descriptors into unwind scripts. These scripts are very simple
10 * (basically a sequence of assignments) and efficient to execute.
11 * They are cached for later re-use. Each script is specific for a
12 * given instruction pointer address and the set of predicate values
13 * that the script depends on (most unwind descriptors are
14 * unconditional and scripts often do not depend on predicates at
15 * all). This code is based on the unwind conventions described in
16 * the "IA-64 Software Conventions and Runtime Architecture" manual.
19 * o updates to the global unwind data (in structure "unw") are serialized
20 * by the unw.lock spinlock
21 * o each unwind script has its own read-write lock; a thread must acquire
22 * a read lock before executing a script and must acquire a write lock
23 * before modifying a script
24 * o if both the unw.lock spinlock and a script's read-write lock must be
25 * acquired, then the read-write lock must be acquired first.
27 #include <linux/bootmem.h>
28 #include <linux/kernel.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
32 #include <asm/unwind.h>
34 #include <asm/delay.h>
36 #include <asm/ptrace.h>
37 #include <asm/ptrace_offsets.h>
39 #include <asm/system.h>
40 #include <asm/uaccess.h>
/* NOTE(review): multiple-evaluation hazard — MIN(a++, b) evaluates `a` twice.
   Acceptable here only because all call sites pass simple expressions. */
45 #define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Script cache holds 2^UNW_LOG_CACHE_SIZE entries; hash table is twice that. */
48 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
49 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
51 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
52 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
54 #define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */
/* Debug printing: routed through kdb_printf() when KDB is configured, plain
   printk() otherwise; compiled away entirely when UNW_DEBUG is off. */
57 static unsigned int unw_debug_level = UNW_DEBUG;
59 # include <linux/kdb.h>
60 # define UNW_DEBUG_ON(n) (unw_debug_level >= n && !KDB_IS_RUNNING())
61 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) kdb_printf(__VA_ARGS__)
62 # else /* !CONFIG_KDB */
63 # define UNW_DEBUG_ON(n) unw_debug_level >= n
64 /* Do not code a printk level, not all debug lines end in newline */
65 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
66 # endif /* CONFIG_KDB */
68 #else /* !UNW_DEBUG */
69 # define UNW_DEBUG_ON(n) 0
70 # define UNW_DPRINT(n, ...)
71 #endif /* UNW_DEBUG */
/* GFP_ATOMIC because these are taken while unwinding, possibly in interrupt
   or spinlock context where sleeping is not allowed. */
79 #define alloc_reg_state() kmalloc(sizeof(struct unw_state_record), GFP_ATOMIC)
80 #define free_reg_state(usr) kfree(usr)
81 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
82 #define free_labeled_state(usr) kfree(usr)
84 typedef unsigned long unw_word;
85 typedef unsigned char unw_hash_index_t;
/* Hand-rolled offsetof(); predates reliance on <stddef.h> offsetof here. */
87 #define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0)
/* Global unwinder state ("unw").  NOTE(review): the listing is sampled — the
   struct's opening declaration and several members are elided here. */
90 spinlock_t lock; /* spinlock for unwind data */
92 /* list of unwind tables (one per load-module) */
93 struct unw_table *tables;
95 /* table of registers that prologues can save (and order in which they're saved): */
96 const unsigned char save_order[8];
98 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
99 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
101 unsigned short lru_head; /* index of least-recently used script */
102 unsigned short lru_tail; /* index of most-recently used script */
104 /* index into unw_frame_info for preserved register i */
105 unsigned short preg_index[UNW_NUM_REGS];
107 /* unwind table for the kernel: */
108 struct unw_table kernel_table;
110 /* unwind table describing the gate page (kernel code that is mapped into user space): */
111 size_t gate_table_size;
112 unsigned long *gate_table;
114 /* hash table that maps instruction pointer to script index: */
115 unsigned short hash[UNW_HASH_SIZE];
118 struct unw_script cache[UNW_CACHE_SIZE];
/* statistics counters (only meaningful when UNW_STATS is enabled): */
121 const char *preg_name[UNW_NUM_REGS];
129 int collision_chain_traversals;
132 unsigned long build_time;
133 unsigned long run_time;
134 unsigned long parse_time;
141 unsigned long init_time;
142 unsigned long unwind_time;
/* static initializer for "unw": */
149 .tables = &unw.kernel_table,
150 .lock = SPIN_LOCK_UNLOCKED,
/* save_order: the order in which a prologue's GR-save slots are assigned
   (per the IA-64 runtime architecture's unwind conventions): */
152 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
153 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
/* preg_index: 8-byte word offset of each preserved register's save-location
   pointer within struct unw_frame_info: */
156 struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
157 struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
158 struct_offset(struct unw_frame_info, bsp_loc)/8,
159 struct_offset(struct unw_frame_info, bspstore_loc)/8,
160 struct_offset(struct unw_frame_info, pfs_loc)/8,
161 struct_offset(struct unw_frame_info, rnat_loc)/8,
162 struct_offset(struct unw_frame_info, psp)/8,
163 struct_offset(struct unw_frame_info, rp_loc)/8,
164 struct_offset(struct unw_frame_info, r4)/8,
165 struct_offset(struct unw_frame_info, r5)/8,
166 struct_offset(struct unw_frame_info, r6)/8,
167 struct_offset(struct unw_frame_info, r7)/8,
168 struct_offset(struct unw_frame_info, unat_loc)/8,
169 struct_offset(struct unw_frame_info, pr_loc)/8,
170 struct_offset(struct unw_frame_info, lc_loc)/8,
171 struct_offset(struct unw_frame_info, fpsr_loc)/8,
172 struct_offset(struct unw_frame_info, b1_loc)/8,
173 struct_offset(struct unw_frame_info, b2_loc)/8,
174 struct_offset(struct unw_frame_info, b3_loc)/8,
175 struct_offset(struct unw_frame_info, b4_loc)/8,
176 struct_offset(struct unw_frame_info, b5_loc)/8,
177 struct_offset(struct unw_frame_info, f2_loc)/8,
178 struct_offset(struct unw_frame_info, f3_loc)/8,
179 struct_offset(struct unw_frame_info, f4_loc)/8,
180 struct_offset(struct unw_frame_info, f5_loc)/8,
181 struct_offset(struct unw_frame_info, fr_loc[16 - 16])/8,
182 struct_offset(struct unw_frame_info, fr_loc[17 - 16])/8,
183 struct_offset(struct unw_frame_info, fr_loc[18 - 16])/8,
184 struct_offset(struct unw_frame_info, fr_loc[19 - 16])/8,
185 struct_offset(struct unw_frame_info, fr_loc[20 - 16])/8,
186 struct_offset(struct unw_frame_info, fr_loc[21 - 16])/8,
187 struct_offset(struct unw_frame_info, fr_loc[22 - 16])/8,
188 struct_offset(struct unw_frame_info, fr_loc[23 - 16])/8,
189 struct_offset(struct unw_frame_info, fr_loc[24 - 16])/8,
190 struct_offset(struct unw_frame_info, fr_loc[25 - 16])/8,
191 struct_offset(struct unw_frame_info, fr_loc[26 - 16])/8,
192 struct_offset(struct unw_frame_info, fr_loc[27 - 16])/8,
193 struct_offset(struct unw_frame_info, fr_loc[28 - 16])/8,
194 struct_offset(struct unw_frame_info, fr_loc[29 - 16])/8,
195 struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
196 struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
/* -1 marks an empty hash bucket (compared against UNW_CACHE_SIZE elsewhere). */
198 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
/* human-readable names for the preserved registers, for debug output: */
201 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
202 "r4", "r5", "r6", "r7",
203 "ar.unat", "pr", "ar.lc", "ar.fpsr",
204 "b1", "b2", "b3", "b4", "b5",
205 "f2", "f3", "f4", "f5",
206 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
207 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
213 /* Unwind accessors. */
216 * Returns offset of rREG in struct pt_regs.
218 static inline unsigned long
219 pt_regs_off (unsigned long reg)
/* Maps a scratch GR number to its byte offset inside struct pt_regs.  The
   pt_regs layout stores r1-r3, r8-r11, r12-r13, r16-r31 in separate runs,
   hence the per-range base offsets.  NOTE(review): the else-if lines for the
   r8/r12/r16 ranges are partially elided in this listing. */
221 unsigned long off =0;
223 if (reg >= 1 && reg <= 3)
224 off = struct_offset(struct pt_regs, r1) + 8*(reg - 1);
226 off = struct_offset(struct pt_regs, r8) + 8*(reg - 8);
228 off = struct_offset(struct pt_regs, r12) + 8*(reg - 12);
230 off = struct_offset(struct pt_regs, r16) + 8*(reg - 16);
/* falls through with off == 0 for registers not represented in pt_regs: */
232 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
236 static inline struct pt_regs *
237 get_scratch_regs (struct unw_frame_info *info)
/* Returns the pt_regs of the interrupted context recorded in info->pt.
   If unwind info never set info->pt, fall back to sp+16 (the ia64 ABI's
   16-byte scratch area above sp) so we at least return something sane. */
240 /* This should not happen with valid unwind info. */
241 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
242 info->pt = info->sp - 16;
244 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
245 return (struct pt_regs *) info->pt;
/* Read or write general register REGNUM (and its NaT bit) of the frame
   described by INFO.  write != 0 stores *val/*nat, otherwise loads them.
   Four cases: r0 (read-only zero), preserved r4-r7, scratch regs living in
   pt_regs, and stacked regs r32+ on the register backing store (RBS).
   NOTE(review): sampled listing — several branch/return lines are elided. */
249 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
251 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
252 struct unw_ireg *ireg;
/* unsigned wrap: rejects regnum < 1 or > 127 in a single compare */
255 if ((unsigned) regnum - 1 >= 127) {
256 if (regnum == 0 && !write) {
257 *val = 0; /* read r0 always returns 0 */
261 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
262 __FUNCTION__, regnum);
267 if (regnum >= 4 && regnum <= 7) {
268 /* access a preserved register */
269 ireg = &info->r4 + (regnum - 4);
272 nat_addr = addr + ireg->nat.off;
273 switch (ireg->nat.type) {
275 /* simulate getf.sig/setf.sig */
278 /* write NaTVal and be done with it */
/* 0/0x1ffe is the FP representation of NaTVal (sign/exp encoding) */
285 if (addr[0] == 0 && addr[1] == 0x1ffe) {
286 /* return NaT and be done with it */
295 nat_addr = &dummy_nat;
/* UNaT-style NaT: bit selected by (addr & 0x1f8)/8, i.e. the 8-byte
   slot number of addr within its 64-byte-aligned group */
299 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
303 nat_addr = ia64_rse_rnat_addr(addr);
304 if ((unsigned long) addr < info->regstk.limit
305 || (unsigned long) addr >= info->regstk.top)
307 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
309 __FUNCTION__, (void *) addr,
/* RNAT slot past the top of the backing store lives in ar.rnat: */
314 if ((unsigned long) nat_addr >= info->regstk.top)
315 nat_addr = &info->sw->ar_rnat;
316 nat_mask = (1UL << ia64_rse_slot_num(addr));
/* register was not saved anywhere: read it from the switch_stack */
320 addr = &info->sw->r4 + (regnum - 4);
321 nat_addr = &info->sw->ar_unat;
322 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
325 /* access a scratch register */
326 pt = get_scratch_regs(info);
327 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
328 if (info->pri_unat_loc)
329 nat_addr = info->pri_unat_loc;
331 nat_addr = &info->sw->ar_unat;
332 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
335 /* access a stacked register */
336 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
337 nat_addr = ia64_rse_rnat_addr(addr);
338 if ((unsigned long) addr < info->regstk.limit
339 || (unsigned long) addr >= info->regstk.top)
341 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
342 "of rbs\n", __FUNCTION__);
345 if ((unsigned long) nat_addr >= info->regstk.top)
346 nat_addr = &info->sw->ar_rnat;
347 nat_mask = (1UL << ia64_rse_slot_num(addr));
/* common tail: set/clear or read the NaT bit, then access the value */
353 *nat_addr |= nat_mask;
355 *nat_addr &= ~nat_mask;
357 if ((*nat_addr & nat_mask) == 0) {
361 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
/* Read or write branch register REGNUM of the frame described by INFO.
   b0/b6/b7 are scratch (live in pt_regs); b1-b5 are preserved (use the
   saved location if the unwind info recorded one, else the switch_stack). */
369 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
374 pt = get_scratch_regs(info);
377 case 0: addr = &pt->b0; break;
378 case 6: addr = &pt->b6; break;
379 case 7: addr = &pt->b7; break;
382 case 1: case 2: case 3: case 4: case 5:
383 addr = *(&info->b1_loc + (regnum - 1));
/* no save location recorded -> fall back to switch_stack copy: */
385 addr = &info->sw->b1 + (regnum - 1);
389 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
390 __FUNCTION__, regnum);
/* Read or write floating-point register REGNUM of the frame in INFO.
   f2-f5 and f16-f31 are preserved (saved location or switch_stack);
   f6-f15 are scratch (pt_regs/switch_stack); f32-f127 come from the
   task's high-FP partition (thread.fph). */
401 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
403 struct ia64_fpreg *addr = 0;
/* unsigned wrap: rejects regnum < 2 or > 127 in a single compare */
406 if ((unsigned) (regnum - 2) >= 126) {
407 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
408 __FUNCTION__, regnum);
412 pt = get_scratch_regs(info);
415 addr = *(&info->f2_loc + (regnum - 2));
417 addr = &info->sw->f2 + (regnum - 2);
418 } else if (regnum <= 15) {
420 addr = &pt->f6 + (regnum - 6);
422 addr = &info->sw->f10 + (regnum - 10);
423 } else if (regnum <= 31) {
424 addr = info->fr_loc[regnum - 16];
426 addr = &info->sw->f16 + (regnum - 16);
428 struct task_struct *t = info->task;
434 addr = t->thread.fph + (regnum - 32);
/* Read or write application register REGNUM (UNW_AR_*) of the frame in INFO.
   Each case prefers the save location recorded by the unwind info and falls
   back to the switch_stack copy.  ar.ec is special: it is a 6-bit field at
   bits 52-57 of the CFM, so it is extracted/merged rather than addressed.
   NOTE(review): the case labels other than BSPSTORE are elided in this
   listing; the addr assignments below pair with their missing labels. */
445 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
450 pt = get_scratch_regs(info);
453 addr = info->bsp_loc;
455 addr = &info->sw->ar_bspstore;
458 case UNW_AR_BSPSTORE:
459 addr = info->bspstore_loc;
461 addr = &info->sw->ar_bspstore;
465 addr = info->pfs_loc;
467 addr = &info->sw->ar_pfs;
471 addr = info->rnat_loc;
473 addr = &info->sw->ar_rnat;
477 addr = info->unat_loc;
479 addr = &info->sw->ar_unat;
485 addr = &info->sw->ar_lc;
/* ar.ec lives in CFM bits 52..57: */
493 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
495 *val = (*info->cfm_loc >> 52) & 0x3f;
499 addr = info->fpsr_loc;
501 addr = &info->sw->ar_fpsr;
513 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
514 __FUNCTION__, regnum);
/* Read or write the predicate registers of the frame in INFO. */
526 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
532 addr = &info->sw->pr;
542 /* Routines to manipulate the state stack. */
/* push(): save a copy of the current register state on sr's state stack
   (used when entering a nested prologue region). */
545 push (struct unw_state_record *sr)
547 struct unw_reg_state *rs;
549 rs = alloc_reg_state();
/* allocation failure is only reported; caller proceeds without the copy */
551 printk(KERN_ERR "unwind: cannot stack reg state!\n");
554 memcpy(rs, &sr->curr, sizeof(*rs));
/* pop(): restore the most recently pushed register state. */
559 pop (struct unw_state_record *sr)
561 struct unw_reg_state *rs = sr->curr.next;
564 printk(KERN_ERR "unwind: stack underflow!\n");
567 memcpy(&sr->curr, rs, sizeof(*rs));
571 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
572 static struct unw_reg_state *
573 dup_state_stack (struct unw_reg_state *rs)
575 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
578 copy = alloc_reg_state();
580 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
583 memcpy(copy, rs, sizeof(*copy));
594 /* Free all stacked register states (but not RS itself). */
596 free_state_stack (struct unw_reg_state *rs)
598 struct unw_reg_state *p, *next;
/* save p->next before freeing p */
600 for (p = rs->next; p != NULL; p = next) {
607 /* Unwind decoder routines */
/* Map an "abreg" encoding from the unwind descriptors to our internal
   unw_register_index.  MEMORY selects which flavor of the primary-UNaT
   register 0x62 refers to (memory vs. GR save form). */
609 static enum unw_register_index __attribute__((const))
610 decode_abreg (unsigned char abreg, int memory)
613 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
614 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
615 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
616 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
617 case 0x60: return UNW_REG_PR;
618 case 0x61: return UNW_REG_PSP;
619 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
620 case 0x63: return UNW_REG_RP;
621 case 0x64: return UNW_REG_BSP;
622 case 0x65: return UNW_REG_BSPSTORE;
623 case 0x66: return UNW_REG_RNAT;
624 case 0x67: return UNW_REG_UNAT;
625 case 0x68: return UNW_REG_FPSR;
626 case 0x69: return UNW_REG_PFS;
627 case 0x6a: return UNW_REG_LC;
631 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
/* set_reg(): record WHERE/WHEN/VAL for a register save, but never override
   a register already marked as saved (when != UNW_WHEN_NEVER). */
636 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
640 if (reg->when == UNW_WHEN_NEVER)
/* alloc_spill_area(): walk [lo,hi] high-to-low and assign a psp-relative
   slot of REGSIZE bytes to every register spilled to its "home" location. */
645 alloc_spill_area (unsigned long *offp, unsigned long regsize,
646 struct unw_reg_info *lo, struct unw_reg_info *hi)
648 struct unw_reg_info *reg;
650 for (reg = hi; reg >= lo; --reg) {
651 if (reg->where == UNW_WHERE_SPILL_HOME) {
652 reg->where = UNW_WHERE_PSPREL;
/* spill_next_when(): set the save time T on the next spill-home register at
   or after *regp (up to LIM) and advance *regp past it. */
660 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
662 struct unw_reg_info *reg;
664 for (reg = *regp; reg <= lim; ++reg) {
665 if (reg->where == UNW_WHERE_SPILL_HOME) {
671 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
/* Finish processing of a prologue region: resolve implicit GR save
   locations, assign save times from the imask, and lay out the memory
   spill area.  NOTE(review): sampled listing — some loop bodies elided. */
675 finish_prologue (struct unw_state_record *sr)
677 struct unw_reg_info *reg;
682 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
683 * for Using Unwind Descriptors", rule 3):
685 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
686 reg = sr->curr.reg + unw.save_order[i];
687 if (reg->where == UNW_WHERE_GR_SAVE) {
688 reg->where = UNW_WHERE_GR;
689 reg->val = sr->gr_save_loc++;
694 * Next, compute when the fp, general, and branch registers get
695 * saved. This must come before alloc_spill_area() because
696 * we need to know which registers are spilled to their home
/* imask holds 2 bits per region slot: 0=none, 1=FR, 2=GR, 3=BR save */
700 unsigned char kind, mask = 0, *cp = sr->imask;
702 static const unsigned char limit[3] = {
703 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
705 struct unw_reg_info *(regs[3]);
707 regs[0] = sr->curr.reg + UNW_REG_F2;
708 regs[1] = sr->curr.reg + UNW_REG_R4;
709 regs[2] = sr->curr.reg + UNW_REG_B1;
711 for (t = 0; t < sr->region_len; ++t) {
/* extract the 2-bit field for slot t (4 slots per imask byte): */
714 kind = (mask >> 2*(3-(t & 3))) & 3;
716 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
717 sr->region_start + t);
721 * Next, lay out the memory stack spill area:
723 if (sr->any_spills) {
724 off = sr->spill_offset;
/* FRs need 16-byte slots; GRs and BRs need 8 bytes each: */
725 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
726 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
727 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
732 * Region header descriptors.
/* Start a new region (body or prologue) of RLEN slots.  MASK/GRSAVE encode
   which of rp/ar.pfs/psp/pr are saved to GRs starting at GRSAVE. */
736 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
737 struct unw_state_record *sr)
741 if (!(sr->in_body || sr->first_region))
743 sr->first_region = 0;
745 /* check if we're done: */
746 if (sr->when_target < sr->region_start + sr->region_len) {
751 region_start = sr->region_start + sr->region_len;
/* discard any pending epilogue state from the previous region: */
753 for (i = 0; i < sr->epilogue_count; ++i)
755 sr->epilogue_count = 0;
756 sr->epilogue_start = UNW_WHEN_NEVER;
758 sr->region_start = region_start;
759 sr->region_len = rlen;
/* mask bits select, in save_order, which registers get a GR save slot: */
765 for (i = 0; i < 4; ++i) {
767 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
768 sr->region_start + sr->region_len - 1, grsave++);
771 sr->gr_save_loc = grsave;
774 sr->spill_offset = 0x10; /* default to psp+16 */
779 * Prologue descriptors.
/* desc_abi(): only abi 0 / context 'i' (interrupt frame) is understood. */
783 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
785 if (abi == 0 && context == 'i') {
786 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
787 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
790 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
791 __FUNCTION__, abi, context);
/* desc_br_gr(): branch regs b1-b5 selected by BRMASK saved to GRs from GR. */
795 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
799 for (i = 0; i < 5; ++i) {
801 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
802 sr->region_start + sr->region_len - 1, gr++);
/* desc_br_mem(): branch regs selected by BRMASK spilled to memory. */
808 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
812 for (i = 0; i < 5; ++i) {
814 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
815 sr->region_start + sr->region_len - 1, 0);
/* desc_frgr_mem(): r4-r7 per GRMASK and f2-f5/f16-f31 per FRMASK spilled. */
823 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
827 for (i = 0; i < 4; ++i) {
828 if ((grmask & 1) != 0) {
829 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
830 sr->region_start + sr->region_len - 1, 0);
835 for (i = 0; i < 20; ++i) {
836 if ((frmask & 1) != 0) {
/* frmask bits 0-3 -> f2-f5, bits 4-19 -> f16-f31: */
837 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
838 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
839 sr->region_start + sr->region_len - 1, 0);
/* desc_fr_mem(): f2-f5 selected by FRMASK spilled to memory. */
847 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
851 for (i = 0; i < 4; ++i) {
852 if ((frmask & 1) != 0) {
853 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
854 sr->region_start + sr->region_len - 1, 0);
/* desc_gr_gr(): r4-r7 selected by GRMASK saved to GRs starting at GR. */
862 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
866 for (i = 0; i < 4; ++i) {
867 if ((grmask & 1) != 0)
868 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
869 sr->region_start + sr->region_len - 1, gr++);
/* desc_gr_mem(): r4-r7 selected by GRMASK spilled to memory. */
875 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
879 for (i = 0; i < 4; ++i) {
880 if ((grmask & 1) != 0) {
881 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
882 sr->region_start + sr->region_len - 1, 0);
/* desc_mem_stack_f(): fixed-size frame — psp = sp + 16*size at time t. */
890 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
892 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
893 sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
/* desc_mem_stack_v(): variable-size frame — only the save time is known. */
897 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
899 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* desc_reg_gr(): register REG saved in general register DST. */
903 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
905 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
/* desc_reg_psprel(): register REG saved at a psp-relative offset. */
909 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
911 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
/* desc_reg_sprel(): register REG saved at an sp-relative offset. */
916 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
918 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
/* desc_rp_br(): the return link lives in branch register DST. */
923 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
925 sr->return_link_reg = dst;
/* desc_reg_when(): only the save time of REGNUM is specified; default the
   location to an implicit GR save slot if none was recorded yet. */
929 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
931 struct unw_reg_info *reg = sr->curr.reg + regnum;
933 if (reg->where == UNW_WHERE_NONE)
934 reg->where = UNW_WHERE_GR_SAVE;
935 reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* desc_spill_base(): spill area starts at psp+16-4*pspoff. */
939 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
941 sr->spill_offset = 0x10 - 4*pspoff;
/* desc_spill_mask(): record the imask pointer; skip past the 2-bits-per-slot
   mask (rounded up to whole bytes) in the descriptor stream. */
944 static inline unsigned char *
945 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
948 return imaskp + (2*sr->region_len + 7)/8;
/* desc_epilogue(): epilogue starts t slots before the region end. */
955 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
957 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
958 sr->epilogue_count = ecount + 1;
/* desc_copy_state(): restore the register state saved under LABEL by a prior
   desc_label_state().  The saved stack is deep-copied so later edits to the
   current state cannot corrupt the labeled snapshot. */
962 desc_copy_state (unw_word label, struct unw_state_record *sr)
964 struct unw_labeled_state *ls;
966 for (ls = sr->labeled_states; ls; ls = ls->next) {
967 if (ls->label == label) {
/* drop the current state stack before overwriting sr->curr: */
968 free_state_stack(&sr->curr);
969 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
970 sr->curr.next = dup_state_stack(ls->saved_state.next);
974 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
/* desc_label_state(): snapshot the current register state under LABEL so a
   later desc_copy_state() can restore it. */
978 desc_label_state (unw_word label, struct unw_state_record *sr)
980 struct unw_labeled_state *ls;
982 ls = alloc_labeled_state();
984 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
988 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
989 ls->saved_state.next = dup_state_stack(sr->curr.next);
991 /* insert into list of labeled states: */
992 ls->next = sr->labeled_states;
993 sr->labeled_states = ls;
997 * General descriptors.
/* desc_is_active(): a predicated descriptor applies only if its time t is
   before the unwind target and its qualifying predicate QP is set.  Records
   the predicate in pr_mask so the generated script is predicate-specific. */
1001 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1003 if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
1006 if ((sr->pr_val & (1UL << qp)) == 0)
1008 sr->pr_mask |= (1UL << qp);
/* desc_restore_p(): predicated "register restored" — forget its save. */
1014 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1016 struct unw_reg_info *r;
1018 if (!desc_is_active(qp, t, sr))
1021 r = sr->curr.reg + decode_abreg(abreg, 0);
1022 r->where = UNW_WHERE_NONE;
1023 r->when = UNW_WHEN_NEVER;
/* desc_spill_reg_p(): predicated spill of ABREG to register YTREG; the
   high bits of x/ytreg select GR vs. BR vs. FR destination. */
1028 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1029 unsigned char ytreg, struct unw_state_record *sr)
1031 enum unw_where where = UNW_WHERE_GR;
1032 struct unw_reg_info *r;
1034 if (!desc_is_active(qp, t, sr))
1038 where = UNW_WHERE_BR;
1039 else if (ytreg & 0x80)
1040 where = UNW_WHERE_FR;
1042 r = sr->curr.reg + decode_abreg(abreg, 0);
1044 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1045 r->val = (ytreg & 0x7f);
/* desc_spill_psprel_p(): predicated spill of ABREG to psp+16-4*pspoff. */
1049 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1050 struct unw_state_record *sr)
1052 struct unw_reg_info *r;
1054 if (!desc_is_active(qp, t, sr))
1057 r = sr->curr.reg + decode_abreg(abreg, 1);
1058 r->where = UNW_WHERE_PSPREL;
1059 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1060 r->val = 0x10 - 4*pspoff;
/* desc_spill_sprel_p(): predicated spill of ABREG to an sp-relative slot. */
1064 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1065 struct unw_state_record *sr)
1067 struct unw_reg_info *r;
1069 if (!desc_is_active(qp, t, sr))
1072 r = sr->curr.reg + decode_abreg(abreg, 1);
1073 r->where = UNW_WHERE_SPREL;
1074 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* Glue macros binding the table-driven decoder in unwind_decoder.c to the
   desc_* handlers above; the `fmt` argument (descriptor format tag) is
   ignored by every handler. */
1078 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1084 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1085 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1087 * prologue descriptors:
1089 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1090 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1091 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1092 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1093 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1094 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1095 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1096 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1097 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1098 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1099 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1100 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1101 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1102 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1103 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1104 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1105 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1106 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1107 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1108 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
/* SPILL_MASK advances the decoder's descriptor pointer past the imask: */
1109 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1113 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1114 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1115 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1117 * general unwind descriptors:
/* unpredicated variants reuse the predicated handlers with qp=0 (p0 is
   always true): */
1119 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1120 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1121 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1122 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1123 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1124 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1125 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1126 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1128 #include "unwind_decoder.c"
1131 /* Unwind scripts. */
/* hash(): multiplicative hash of the bundle address (ip >> 4, since IA-64
   bundles are 16 bytes) into the script hash table. */
1133 static inline unw_hash_index_t
1134 hash (unsigned long ip)
1136 # define magic 0x9e3779b97f4a7c16 /* based on (sqrt(5)/2-1)*2^64 */
1138 return (ip >> 4)*magic >> (64 - UNW_LOG_HASH_SIZE)
1142 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
/* On a hit the script's read lock is intentionally left held — the caller
   releases it after running the script. */
1144 read_lock(&script->lock);
1145 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1146 /* keep the read lock... */
1148 read_unlock(&script->lock);
/* script_lookup(): find a cached script for INFO's ip/predicates.  Tries the
   per-frame hint first, then the hash chain.  Returns with the script's read
   lock held on success, NULL on miss. */
1152 static inline struct unw_script *
1153 script_lookup (struct unw_frame_info *info)
1155 struct unw_script *script = unw.cache + info->hint;
1156 unsigned short index;
1157 unsigned long ip, pr;
1159 if (UNW_DEBUG_ON(0))
1160 return 0; /* Always regenerate scripts in debug mode */
1162 STAT(++unw.stat.cache.lookups);
1167 if (cache_match(script, ip, pr)) {
1168 STAT(++unw.stat.cache.hinted_hits);
1172 index = unw.hash[hash(ip)];
/* empty buckets hold (unsigned short)-1, which is >= UNW_CACHE_SIZE: */
1173 if (index >= UNW_CACHE_SIZE)
1176 script = unw.cache + index;
1178 if (cache_match(script, ip, pr)) {
1179 /* update hint; no locking required as single-word writes are atomic */
1180 STAT(++unw.stat.cache.normal_hits);
1181 unw.cache[info->prev_script].hint = script - unw.cache;
1184 if (script->coll_chain >= UNW_HASH_SIZE)
1186 script = unw.cache + script->coll_chain;
1187 STAT(++unw.stat.cache.collision_chain_traversals);
1192 * On returning, a write lock for the SCRIPT is still being held.
/* script_new(): evict the LRU cache entry, unhash it, rehash it under IP,
   and return it write-locked for the caller to fill in.  Ordering follows
   the file's locking rules: script rwlock acquired before unw.lock is
   retaken.  NOTE(review): sampled listing — bail-out paths are elided. */
1194 static inline struct unw_script *
1195 script_new (unsigned long ip)
1197 struct unw_script *script, *prev, *tmp;
1198 unw_hash_index_t index;
1199 unsigned long flags;
1200 unsigned short head;
1202 STAT(++unw.stat.script.news);
1205 * Can't (easily) use cmpxchg() here because of ABA problem
1206 * that is intrinsic in cmpxchg()...
1208 spin_lock_irqsave(&unw.lock, flags);
1210 head = unw.lru_head;
1211 script = unw.cache + head;
1212 unw.lru_head = script->lru_chain;
/* drop unw.lock (irqs stay off) before taking the script rwlock: */
1214 spin_unlock(&unw.lock);
1217 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1218 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1219 * alternative would be to disable interrupts whenever we hold a read-lock, but
1222 if (!write_trylock(&script->lock))
1225 spin_lock(&unw.lock);
1227 /* re-insert script at the tail of the LRU chain: */
1228 unw.cache[unw.lru_tail].lru_chain = head;
1229 unw.lru_tail = head;
1231 /* remove the old script from the hash table (if it's there): */
1233 index = hash(script->ip);
1234 tmp = unw.cache + unw.hash[index];
1237 if (tmp == script) {
1239 prev->coll_chain = tmp->coll_chain;
1241 unw.hash[index] = tmp->coll_chain;
1245 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1246 /* old script wasn't in the hash-table */
1248 tmp = unw.cache + tmp->coll_chain;
1252 /* enter new script in the hash table */
1254 script->coll_chain = unw.hash[index];
1255 unw.hash[index] = script - unw.cache;
1257 script->ip = ip; /* set new IP while we're holding the locks */
1259 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1261 spin_unlock_irqrestore(&unw.lock, flags);
/* script_finalize(): stamp the predicate mask/value the script depends on.
   The caller's write lock is retained (see comment below). */
1270 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1272 script->pr_mask = sr->pr_mask;
1273 script->pr_val = sr->pr_val;
1275 * We could down-grade our write-lock on script->lock here but
1276 * the rwlock API doesn't offer atomic lock downgrading, so
1277 * we'll just keep the write-lock and release it later when
1278 * we're done using the script.
/* script_emit(): append INSN to SCRIPT, dropping it (with a diagnostic) if
   the script is already at UNW_MAX_SCRIPT_LEN instructions. */
1283 script_emit (struct unw_script *script, struct unw_insn insn)
1285 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1286 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1287 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1290 script->insn[script->count++] = insn;
/* emit_nat_info(): emit the instruction that recovers the NaT bit for
   preserved GR i, choosing the opcode by where the register was saved. */
1294 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1296 struct unw_reg_info *r = sr->curr.reg + i;
1297 enum unw_insn_opcode opc;
1298 struct unw_insn insn;
1299 unsigned long val = 0;
1304 /* register got spilled to a stacked register */
1305 opc = UNW_INSN_SETNAT_TYPE;
1306 val = UNW_NAT_REGSTK;
1308 /* register got spilled to a scratch register */
1309 opc = UNW_INSN_SETNAT_MEMSTK;
1313 opc = UNW_INSN_SETNAT_TYPE;
1318 opc = UNW_INSN_SETNAT_TYPE;
1322 case UNW_WHERE_PSPREL:
1323 case UNW_WHERE_SPREL:
1324 opc = UNW_INSN_SETNAT_MEMSTK;
1328 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1329 __FUNCTION__, r->where);
1333 insn.dst = unw.preg_index[i];
1335 script_emit(script, insn);
/* compile_reg(): translate the save-state of preserved register i into
   script instructions: pick the opcode/operand from where the register was
   saved (another preserved reg, stacked reg, scratch reg in pt_regs, or an
   sp/psp-relative memory slot), emit NaT-recovery info for r4-r7, and for
   PSP add a LOAD so info->psp ends up holding the previous sp *value*.
   NOTE(review): sampled listing — the switch labels and some else-branches
   are elided; the assignments below pair with their missing labels. */
1339 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1341 struct unw_reg_info *r = sr->curr.reg + i;
1342 enum unw_insn_opcode opc;
1343 unsigned long val, rval;
1344 struct unw_insn insn;
/* register not saved, or saved after the unwind target: nothing to do */
1347 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1350 opc = UNW_INSN_MOVE;
1351 val = rval = r->val;
/* only the preserved GRs carry NaT bits that must be tracked: */
1352 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1357 opc = UNW_INSN_MOVE_STACKED;
1359 } else if (rval >= 4 && rval <= 7) {
1360 if (need_nat_info) {
1361 opc = UNW_INSN_MOVE2;
1364 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1366 /* register got spilled to a scratch register */
1367 opc = UNW_INSN_MOVE_SCRATCH;
1368 val = pt_regs_off(rval);
/* saved in an FR: preserved f2-f5/f16-f31 map to preg slots, scratch
   f6+ come from pt_regs; other FRs are not touched by the kernel */
1374 val = unw.preg_index[UNW_REG_F2 + (rval - 1)];
1375 else if (rval >= 16 && rval <= 31)
1376 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1378 opc = UNW_INSN_MOVE_SCRATCH;
1380 val = struct_offset(struct pt_regs, f6) + 16*(rval - 6);
1382 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1383 __FUNCTION__, rval);
/* saved in a BR: b1-b5 are preserved, b0/b6/b7 live in pt_regs */
1388 if (rval >= 1 && rval <= 5)
1389 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1391 opc = UNW_INSN_MOVE_SCRATCH;
1393 val = struct_offset(struct pt_regs, b0);
1395 val = struct_offset(struct pt_regs, b6);
1397 val = struct_offset(struct pt_regs, b7);
1401 case UNW_WHERE_SPREL:
1402 opc = UNW_INSN_ADD_SP;
1405 case UNW_WHERE_PSPREL:
1406 opc = UNW_INSN_ADD_PSP;
1410 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1411 __FUNCTION__, i, r->where);
1415 insn.dst = unw.preg_index[i];
1417 script_emit(script, insn);
1419 emit_nat_info(sr, i, script);
1421 if (i == UNW_REG_PSP) {
1423 * info->psp must contain the _value_ of the previous
1424 * sp, not it's save location. We get this by
1425 * dereferencing the value we just stored in
1428 insn.opc = UNW_INSN_LOAD;
1429 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1430 script_emit(script, insn);
1434 static inline const struct unw_table_entry *
1435 lookup (struct unw_table *table, unsigned long rel_ip)
1437 const struct unw_table_entry *e = 0;
1438 unsigned long lo, hi, mid;
1440 /* do a binary search for right entry: */
1441 for (lo = 0, hi = table->length; lo < hi; ) {
1442 mid = (lo + hi) / 2;
1443 e = &table->array[mid];
1444 if (rel_ip < e->start_offset)
1446 else if (rel_ip >= e->end_offset)
1451 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
/*
 * Build an unwind script that unwinds from state OLD_STATE to the
 * entrypoint of the function that called OLD_STATE.
 */
static inline struct unw_script *
build_script (struct unw_frame_info *info)
	const struct unw_table_entry *e = 0;
	struct unw_script *script = 0;
	struct unw_labeled_state *ls, *next;
	unsigned long ip = info->ip;
	struct unw_state_record sr;
	struct unw_table *table;
	struct unw_reg_info *r;
	struct unw_insn insn;
	STAT(unsigned long start, parse_start;)

	STAT(++unw.stat.script.builds; start = ia64_get_itc());

	/* build state record */
	memset(&sr, 0, sizeof(sr));
	/* default: no register has a save location until a descriptor says so: */
	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
		r->when = UNW_WHEN_NEVER;
	sr.pr_val = info->pr;

	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
	script = script_new(ip);
		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
	/* record this script as the likely successor of the previously-run one: */
	unw.cache[info->prev_script].hint = script - unw.cache;

	/* search the kernels and the modules' unwind tables for IP: */

	STAT(parse_start = ia64_get_itc());

	for (table = unw.tables; table; table = table->next) {
		if (ip >= table->start && ip < table->end) {
			e = lookup(table, ip - table->segment_base);
		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs)  */
		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
			   __FUNCTION__, ip, unw.cache[info->prev_script].ip);
		/* synthesize a minimal state record: return link is in b0 (val 0): */
		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
		sr.curr.reg[UNW_REG_RP].when = -1;
		sr.curr.reg[UNW_REG_RP].val = 0;
		compile_reg(&sr, UNW_REG_RP, script);
		script_finalize(script, &sr);
		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
		STAT(unw.stat.script.build_time += ia64_get_itc() - start);

	/* when_target counts slots: 3 slots per 16-byte bundle, plus the slot index: */
	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
	hdr = *(u64 *) (table->segment_base + e->info_offset);
	dp = (u8 *) (table->segment_base + e->info_offset + 8);
	desc_end = dp + 8*UNW_LENGTH(hdr);

	/* run the unwind descriptors through the state machine: */
	while (!sr.done && dp < desc_end)
		dp = unw_decode(dp, sr.in_body, &sr);

	if (sr.when_target > sr.epilogue_start) {
		/*
		 * sp has been restored and all values on the memory stack below
		 * psp also have been restored.
		 */
		sr.curr.reg[UNW_REG_PSP].val = 0;
		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
			    || r->where == UNW_WHERE_SPREL)
				r->where = UNW_WHERE_NONE;
				r->when = UNW_WHEN_NEVER;

	script->flags = sr.flags;

	/*
	 * If RP didn't get saved, generate entry for the return link
	 */
	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
		sr.curr.reg[UNW_REG_RP].when = -1;
		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
			   __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
			   sr.curr.reg[UNW_REG_RP].val);

	/* dump the resulting state record at debug level 1: */
	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
		   __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
			UNW_DPRINT(1, "  %s <- ", unw.preg_name[r - sr.curr.reg]);
			      case UNW_WHERE_GR:     UNW_DPRINT(1, "r%lu", r->val); break;
			      case UNW_WHERE_FR:     UNW_DPRINT(1, "f%lu", r->val); break;
			      case UNW_WHERE_BR:     UNW_DPRINT(1, "b%lu", r->val); break;
			      case UNW_WHERE_SPREL:  UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
			      case UNW_WHERE_NONE:
				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
			UNW_DPRINT(1, "\t\t%d\n", r->when);

	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);

	/* translate state record into unwinder instructions: */

	/*
	 * First, set psp if we're dealing with a fixed-size frame;
	 * subsequent instructions may depend on this value.
	 */
	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
		/* new psp is sp plus frame size */
		insn.opc = UNW_INSN_ADD;
		insn.dst = struct_offset(struct unw_frame_info, psp)/8;
		insn.val = sr.curr.reg[UNW_REG_PSP].val;	/* frame size */
		script_emit(script, insn);

	/* determine where the primary UNaT is: */
	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
		i = UNW_REG_PRI_UNAT_MEM;
	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
		i = UNW_REG_PRI_UNAT_GR;
	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
		i = UNW_REG_PRI_UNAT_MEM;
		i = UNW_REG_PRI_UNAT_GR;

	compile_reg(&sr, i, script);

	/* compile the remaining preserved registers (BSP onward): */
	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
		compile_reg(&sr, i, script);

	/* free labeled register states & stack: */

	STAT(parse_start = ia64_get_itc());
	for (ls = sr.labeled_states; ls; ls = next) {
		free_state_stack(&ls->saved_state);
		free_labeled_state(ls);
	free_state_stack(&sr.curr);
	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);

	script_finalize(script, &sr);
	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
/*
 * Apply the unwinding actions represented by OPS and update SR to
 * reflect the state that existed upon entry to the function that this
 * unwinder represents.
 */
run_script (struct unw_script *script, struct unw_frame_info *state)
	struct unw_insn *ip, *limit, next_insn;
	unsigned long opc, dst, val, off;
	/* the script addresses STATE's members by word index via this alias: */
	unsigned long *s = (unsigned long *) state;
	STAT(unsigned long start;)

	STAT(++unw.stat.script.runs; start = ia64_get_itc());
	state->flags = script->flags;
	limit = script->insn + script->count;
	while (ip++ < limit) {
		opc = next_insn.opc;
		dst = next_insn.dst;
		val = next_insn.val;
		      case UNW_INSN_MOVE2:
			/* copy the NaT word along with the value word: */
			s[dst+1] = s[val+1];
		      case UNW_INSN_MOVE_SCRATCH:
			/* scratch registers live in pt_regs; VAL is an offset into it: */
				s[dst] = (unsigned long) get_scratch_regs(state) + val;
				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
					   __FUNCTION__, dst, val);
		      case UNW_INSN_MOVE_STACKED:
			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
		      case UNW_INSN_ADD_PSP:
			s[dst] = state->psp + val;
		      case UNW_INSN_ADD_SP:
			s[dst] = state->sp + val;
		      case UNW_INSN_SETNAT_MEMSTK:
			if (!state->pri_unat_loc)
				state->pri_unat_loc = &state->sw->ar_unat;
			/* register off. is a multiple of 8, so the least 3 bits (type) are 0: */
			s[dst+1] = (*state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
		      case UNW_INSN_SETNAT_TYPE:
			/* sanity-check psp before dereferencing it (load case): */
			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
			    || s[val] < TASK_SIZE)
				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
					   __FUNCTION__, s[val]);
			s[dst] = *(unsigned long *) s[val];
	STAT(unw.stat.script.run_time += ia64_get_itc() - start);
	/* lazy initialization of a switch_stack-backed save location: */
	off = unw.sw_off[val];
	s[val] = (unsigned long) state->sw + off;
	if (off >= struct_offset(struct switch_stack, r4)
	    && off <= struct_offset(struct switch_stack, r7))
		/*
		 * We're initializing a general register: init NaT info, too.  Note that
		 * the offset is a multiple of 8 which gives us the 3 bits needed for
		 * the type field.
		 */
		s[val+1] = (struct_offset(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
/*
 * Locate (or build) and run the unwind script for INFO->ip, filling in the
 * save locations of the preserved registers.  Takes the script's read lock
 * (write lock while a freshly-built script is installed).
 */
find_save_locs (struct unw_frame_info *info)
	int have_write_lock = 0;
	struct unw_script *scr;

	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
		/* don't let obviously bad addresses pollute the cache */
		/* FIXME: should really be level 0 but it occurs too often. KAO */
		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);

	/* fast path: script may already be cached for this ip/predicate set: */
	scr = script_lookup(info);
		scr = build_script(info);
			UNW_DPRINT(0,
				   "unwind.%s: failed to locate/build unwind script for ip %lx\n",
				   __FUNCTION__, info->ip);
		/* build_script() returns with the write lock held: */
		have_write_lock = 1;
	info->hint = scr->hint;
	info->prev_script = scr - unw.cache;

	run_script(scr, info);

	if (have_write_lock)
		write_unlock(&scr->lock);
		read_unlock(&scr->lock);
/*
 * Unwind INFO by one frame: restore ip, cfm, bsp, sp and the predicates
 * from the save locations found by the previous find_save_locs(), then
 * locate the save locations for the new (caller's) frame.  Returns
 * negative on failure or when user-space is reached.
 */
unw_unwind (struct unw_frame_info *info)
	unsigned long prev_ip, prev_sp, prev_bsp;
	unsigned long ip, pr, num_regs;
	STAT(unsigned long start, flags;)

	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());

	/* remember current frame so we can detect a failure to make progress: */
	prev_bsp = info->bsp;

	/* restore the ip */
	if (!info->rp_loc) {
		/* FIXME: should really be level 0 but it occurs too often. KAO */
		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
			   __FUNCTION__, info->ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
	ip = info->ip = *info->rp_loc;
	if (ip < GATE_ADDR + PAGE_SIZE) {
		/*
		 * We don't have unwind info for the gate page, so we consider that part
		 * of user-space for the purpose of unwinding.
		 */
		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* restore the cfm: */
	if (!info->pfs_loc) {
		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
	info->cfm_loc = info->pfs_loc;

	/* restore the bsp: */
	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
		info->pt = info->sp + 16;
		if ((pr & (1UL << pNonSys)) != 0)
			num_regs = *info->cfm_loc & 0x7f;		/* size of frame */
			(unsigned long *) (info->pt + struct_offset(struct pt_regs, ar_pfs));
		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
		num_regs = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* restore the sp: */
	info->sp = info->psp;
	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* no progress -> bail out rather than loop forever: */
	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* as we unwind, the saved ar.unat becomes the primary unat: */
	info->pri_unat_loc = info->unat_loc;

	/* finally, restore the predicates: */
	unw_get_pr(info, &info->pr);

	retval = find_save_locs(info);
	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
/*
 * Keep unwinding until the frame whose return pointer lies in user space
 * (or in the gate page) is reached.  Returns negative if no such frame
 * can be found.
 */
unw_unwind_to_user (struct unw_frame_info *info)
	while (unw_unwind(info) >= 0) {
		if (unw_get_rp(info, &ip) < 0) {
			unw_get_ip(info, &ip);
			UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
		/*
		 * We don't have unwind info for the gate page, so we consider that part
		 * of user-space for the purpose of unwinding.
		 */
		if (ip < GATE_ADDR + PAGE_SIZE)
	unw_get_ip(info, &ip);
	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
/*
 * Common initialization for an unwind frame: compute the bounds of the
 * task T's register backing store and memory stack, zero INFO, and point
 * it at the frame described by SW/STKTOP.
 */
init_frame_info (struct unw_frame_info *info, struct task_struct *t,
		 struct switch_stack *sw, unsigned long stktop)
	unsigned long rbslimit, rbstop, stklimit;
	STAT(unsigned long start, flags;)

	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());

	/*
	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
	 * don't want to do that because it would be slow as each preserved register would
	 * have to be processed.  Instead, what we do here is zero out the frame info and
	 * start the unwind process at the function that created the switch_stack frame.
	 * When a preserved value in switch_stack needs to be accessed, run_script() will
	 * initialize the appropriate pointer on demand.
	 */
	memset(info, 0, sizeof(*info));

	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
	rbstop   = sw->ar_bspstore;
	/* clamp a bspstore that points outside the task's stack area: */
	if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)

	stklimit = (unsigned long) t + IA64_STK_OFFSET;
	if (stktop <= rbstop)

	info->regstk.limit = rbslimit;
	info->regstk.top   = rbstop;
	info->memstk.limit = stklimit;
	info->memstk.top   = stktop;
	info->sp = info->psp = stktop;

	UNW_DPRINT(3, "unwind.%s:\n"
		   "  rbs = [0x%lx-0x%lx)\n"
		   "  stk = [0x%lx-0x%lx)\n"
		   __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
		   info->pr, (unsigned long) info->sw, info->sp);
	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
/*
 * Initialize INFO for unwinding from an interruption frame: the machine
 * state is described by PT (pt_regs at interruption) and SW (the kernel's
 * switch_stack).  The frame size is taken from cr.ifs (size-of-frame).
 */
unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
			    struct pt_regs *pt, struct switch_stack *sw)
	init_frame_info(info, t, sw, pt->r12);
	info->cfm_loc = &pt->cr_ifs;
	info->unat_loc = &pt->ar_unat;
	info->pfs_loc = &pt->ar_pfs;
	sof = *info->cfm_loc & 0x7f;	/* size of frame */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
	/* cr.iip plus the slot number from psr.ri gives the precise ip: */
	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
	info->pt = (unsigned long) pt;
	UNW_DPRINT(3, "unwind.%s:\n"
		   __FUNCTION__, info->bsp, sof, info->ip);
	find_save_locs(info);
/*
 * Initialize INFO for unwinding starting at the function that created the
 * switch_stack frame SW.  The frame's locals count is taken from ar.pfs.
 */
unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
	/* memory stack top is just below the switch_stack (16-byte scratch area): */
	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
	info->cfm_loc = &sw->ar_pfs;
	sol = (*info->cfm_loc >> 7) & 0x7f;	/* size of locals */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
	UNW_DPRINT(3, "unwind.%s:\n"
		   __FUNCTION__, info->bsp, sol, info->ip);
	find_save_locs(info);
/*
 * Initialize INFO for unwinding a blocked (not currently running) task T.
 * The switch_stack was saved just above the kernel stack pointer at the
 * last context switch.
 */
unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);

	UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
	unw_init_frame_info(info, t, sw);
/*
 * Fill in TABLE's bookkeeping for the unwind entries in
 * [table_start, table_end): covered ip range, entry array and length.
 * Assumes the entry array is sorted and non-empty (the caller checks).
 */
init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
		   unsigned long gp, const void *table_start, const void *table_end)
	const struct unw_table_entry *start = table_start, *end = table_end;

	table->segment_base = segment_base;
	/* covered ip range runs from the first entry's start to the last entry's end: */
	table->start = segment_base + start[0].start_offset;
	table->end = segment_base + end[-1].end_offset;
	table->array = start;
	table->length = end - start;
/*
 * Register a new unwind table (e.g., for a loaded module) covering the
 * entries in [table_start, table_end).  Returns an opaque handle for
 * unw_remove_unwind_table(), or NULL-ish on failure.
 */
unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
		      const void *table_start, const void *table_end)
	const struct unw_table_entry *start = table_start, *end = table_end;
	struct unw_table *table;
	unsigned long flags;

	if (end - start <= 0) {
		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",

	table = kmalloc(sizeof(*table), GFP_USER);

	init_unwind_table(table, name, segment_base, gp, table_start, table_end);

	spin_lock_irqsave(&unw.lock, flags);
		/* keep kernel unwind table at the front (it's searched most commonly): */
		table->next = unw.tables->next;
		unw.tables->next = table;
	spin_unlock_irqrestore(&unw.lock, flags);
2035 unw_remove_unwind_table (void *handle)
2037 struct unw_table *table, *prev;
2038 struct unw_script *tmp;
2039 unsigned long flags;
2043 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2049 if (table == &unw.kernel_table) {
2050 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2051 "no-can-do!\n", __FUNCTION__);
2055 spin_lock_irqsave(&unw.lock, flags);
2057 /* first, delete the table: */
2059 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2060 if (prev->next == table)
2063 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2064 __FUNCTION__, (void *) table);
2065 spin_unlock_irqrestore(&unw.lock, flags);
2068 prev->next = table->next;
2070 spin_unlock_irqrestore(&unw.lock, flags);
2072 /* next, remove hash table entries for this table */
2074 for (index = 0; index <= UNW_HASH_SIZE; ++index) {
2075 tmp = unw.cache + unw.hash[index];
2076 if (unw.hash[index] >= UNW_CACHE_SIZE
2077 || tmp->ip < table->start || tmp->ip >= table->end)
2080 write_lock(&tmp->lock);
2082 if (tmp->ip >= table->start && tmp->ip < table->end) {
2083 unw.hash[index] = tmp->coll_chain;
2087 write_unlock(&tmp->lock);
/*
 * Build a user-visible copy of the unwind data covering the gate section,
 * rebased to GATE_ADDR.  The result (unw.gate_table) is what
 * sys_getunwind() hands out to user space.
 */
unw_create_gate_table (void)
	extern char __start_gate_section[], __stop_gate_section[];
	unsigned long *lp, start, end, segbase = unw.kernel_table.segment_base;
	const struct unw_table_entry *entry, *first, *unw_table_end;
	extern int ia64_unw_end;
	size_t info_size, size;

	start = (unsigned long) __start_gate_section - segbase;
	end   = (unsigned long) __stop_gate_section - segbase;
	unw_table_end = (struct unw_table_entry *) &ia64_unw_end;

	first = lookup(&unw.kernel_table, start);

	/* first pass: compute the total size (table entries + unwind info blobs): */
	for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry)
		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
	size += 8;	/* reserve space for "end of table" marker */

	unw.gate_table = alloc_bootmem(size);
	if (!unw.gate_table) {
		unw.gate_table_size = 0;
		printk(KERN_ERR "unwind: unable to create unwind data for gate page!\n");
	unw.gate_table_size = size;

	/* second pass: entries grow from the front, info blobs from the back: */
	lp = unw.gate_table;
	info = (char *) unw.gate_table + size;

	for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry, lp += 3) {
		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
		memcpy(info, (char *) segbase + entry->info_offset, info_size);

		lp[0] = entry->start_offset - start + GATE_ADDR;	/* start */
		lp[1] = entry->end_offset - start + GATE_ADDR;		/* end */
		lp[2] = info - (char *) unw.gate_table;			/* info */
	*lp = 0;	/* end-of-table marker */
2139 extern int ia64_unw_start, ia64_unw_end, __gp;
2140 extern void unw_hash_index_t_is_too_narrow (void);
2143 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2144 unw_hash_index_t_is_too_narrow();
2146 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
2147 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2148 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_UNAT);
2149 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2150 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
2151 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2152 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2153 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2154 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2155 unw.sw_off[unw.preg_index[i]] = off;
2156 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2157 unw.sw_off[unw.preg_index[i]] = off;
2158 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2159 unw.sw_off[unw.preg_index[i]] = off;
2160 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2161 unw.sw_off[unw.preg_index[i]] = off;
2163 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2165 unw.cache[i].lru_chain = (i - 1);
2166 unw.cache[i].coll_chain = -1;
2167 unw.cache[i].lock = RW_LOCK_UNLOCKED;
2169 unw.lru_head = UNW_CACHE_SIZE - 1;
2172 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp,
2173 &ia64_unw_start, &ia64_unw_end);
2177 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2178 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2179 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2182 * The first portion of the unwind data contains an unwind table and rest contains the
2183 * associated unwind info (in no particular order). The unwind table consists of a table
2184 * of entries of the form:
2186 * u64 start; (64-bit address of start of function)
 * 	u64 end;	(64-bit address of end of function)
2188 * u64 info; (BUF-relative offset to unwind info)
2190 * The end of the unwind table is indicated by an entry with a START address of zero.
2192 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2193 * on the format of the unwind info.
2196 * EFAULT BUF points outside your accessible address space.
/*
 * System call: copy the gate page's unwind data to the user buffer BUF
 * (when BUF is non-NULL and BUF_SIZE is large enough) and return the size
 * of that data either way, so callers can size their buffer.
 */
sys_getunwind (void *buf, size_t buf_size)
	if (buf && buf_size >= unw.gate_table_size)
		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
	return unw.gate_table_size;