2 * Copyright (C) 1999-2003 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * This file implements call frame unwind support for the Linux
7 * kernel. Parsing and processing the unwind information is
8 * time-consuming, so this implementation translates the unwind
9 * descriptors into unwind scripts. These scripts are very simple
10 * (basically a sequence of assignments) and efficient to execute.
11 * They are cached for later re-use. Each script is specific for a
12 * given instruction pointer address and the set of predicate values
13 * that the script depends on (most unwind descriptors are
14 * unconditional and scripts often do not depend on predicates at
15 * all). This code is based on the unwind conventions described in
16 * the "IA-64 Software Conventions and Runtime Architecture" manual.
19 * o updates to the global unwind data (in structure "unw") are serialized
20 * by the unw.lock spinlock
21 * o each unwind script has its own read-write lock; a thread must acquire
22 * a read lock before executing a script and must acquire a write lock
23 * before modifying a script
24 * o if both the unw.lock spinlock and a script's read-write lock must be
25 * acquired, then the read-write lock must be acquired first.
27 #include <linux/bootmem.h>
28 #include <linux/kernel.h>
29 #include <linux/sched.h>
30 #include <linux/slab.h>
32 #include <asm/unwind.h>
34 #include <asm/delay.h>
36 #include <asm/ptrace.h>
37 #include <asm/ptrace_offsets.h>
39 #include <asm/system.h>
40 #include <asm/uaccess.h>
/*
 * Tunables and helper macros for the unwind-script cache, debug output, and
 * state-record allocation.
 * NOTE(review): this extract appears to have lines sampled out -- e.g. the
 * opening "#ifdef UNW_DEBUG"/"#ifdef CONFIG_KDB" lines matching the #else/#endif
 * below are not visible here.
 */
/* NOTE(review): evaluates both arguments twice -- do not pass expressions with side effects. */
45 #define MIN(a,b) ((a) < (b) ? (a) : (b))
48 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
49 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
/* hash table is twice the cache size so chains stay short */
51 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
52 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
54 #define UNW_STATS 0 /* WARNING: this disables interrupts for long time-spans!! */
57 static unsigned int unw_debug_level = UNW_DEBUG;
59 # include <linux/kdb.h>
60 # define UNW_DEBUG_ON(n) (unw_debug_level >= n && !KDB_IS_RUNNING())
/* NOTE(review): bare `if` expansion -- an `else` after UNW_DPRINT(...) in caller code
 * would bind to this hidden `if` (dangling-else); a do { } while (0) wrapper would be safer. */
61 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) kdb_printf(__VA_ARGS__)
62 # else /* !CONFIG_KDB */
/* NOTE(review): expansion is unparenthesized -- fragile inside larger expressions. */
63 # define UNW_DEBUG_ON(n) unw_debug_level >= n
64 /* Do not code a printk level, not all debug lines end in newline */
65 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
66 # endif /* CONFIG_KDB */
68 #else /* !UNW_DEBUG */
69 # define UNW_DEBUG_ON(n) 0
70 # define UNW_DPRINT(n, ...)
71 #endif /* UNW_DEBUG */
/* GFP_ATOMIC: these are called while holding locks / possibly in interrupt context. */
79 #define alloc_reg_state() kmalloc(sizeof(struct unw_state_record), GFP_ATOMIC)
80 #define free_reg_state(usr) kfree(usr)
81 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
82 #define free_labeled_state(usr) kfree(usr)
84 typedef unsigned long unw_word;
85 typedef unsigned char unw_hash_index_t;
/* hand-rolled offsetof() equivalent (NULL-pointer arithmetic idiom) */
87 #define struct_offset(str,fld) ((char *)&((str *)NULL)->fld - (char *) 0)
90 spinlock_t lock; /* spinlock for unwind data */
92 /* list of unwind tables (one per load-module) */
93 struct unw_table *tables;
95 /* table of registers that prologues can save (and order in which they're saved): */
96 const unsigned char save_order[8];
98 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
99 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
101 unsigned short lru_head; /* index of least-recently used script */
102 unsigned short lru_tail; /* index of most-recently used script */
104 /* index into unw_frame_info for preserved register i */
105 unsigned short preg_index[UNW_NUM_REGS];
107 /* unwind table for the kernel: */
108 struct unw_table kernel_table;
110 /* unwind table describing the gate page (kernel code that is mapped into user space): */
111 size_t gate_table_size;
112 unsigned long *gate_table;
114 /* hash table that maps instruction pointer to script index: */
115 unsigned short hash[UNW_HASH_SIZE];
118 struct unw_script cache[UNW_CACHE_SIZE];
121 const char *preg_name[UNW_NUM_REGS];
129 int collision_chain_traversals;
132 unsigned long build_time;
133 unsigned long run_time;
134 unsigned long parse_time;
141 unsigned long init_time;
142 unsigned long unwind_time;
149 .tables = &unw.kernel_table,
150 .lock = SPIN_LOCK_UNLOCKED,
152 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
153 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
156 struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
157 struct_offset(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
158 struct_offset(struct unw_frame_info, bsp_loc)/8,
159 struct_offset(struct unw_frame_info, bspstore_loc)/8,
160 struct_offset(struct unw_frame_info, pfs_loc)/8,
161 struct_offset(struct unw_frame_info, rnat_loc)/8,
162 struct_offset(struct unw_frame_info, psp)/8,
163 struct_offset(struct unw_frame_info, rp_loc)/8,
164 struct_offset(struct unw_frame_info, r4)/8,
165 struct_offset(struct unw_frame_info, r5)/8,
166 struct_offset(struct unw_frame_info, r6)/8,
167 struct_offset(struct unw_frame_info, r7)/8,
168 struct_offset(struct unw_frame_info, unat_loc)/8,
169 struct_offset(struct unw_frame_info, pr_loc)/8,
170 struct_offset(struct unw_frame_info, lc_loc)/8,
171 struct_offset(struct unw_frame_info, fpsr_loc)/8,
172 struct_offset(struct unw_frame_info, b1_loc)/8,
173 struct_offset(struct unw_frame_info, b2_loc)/8,
174 struct_offset(struct unw_frame_info, b3_loc)/8,
175 struct_offset(struct unw_frame_info, b4_loc)/8,
176 struct_offset(struct unw_frame_info, b5_loc)/8,
177 struct_offset(struct unw_frame_info, f2_loc)/8,
178 struct_offset(struct unw_frame_info, f3_loc)/8,
179 struct_offset(struct unw_frame_info, f4_loc)/8,
180 struct_offset(struct unw_frame_info, f5_loc)/8,
181 struct_offset(struct unw_frame_info, fr_loc[16 - 16])/8,
182 struct_offset(struct unw_frame_info, fr_loc[17 - 16])/8,
183 struct_offset(struct unw_frame_info, fr_loc[18 - 16])/8,
184 struct_offset(struct unw_frame_info, fr_loc[19 - 16])/8,
185 struct_offset(struct unw_frame_info, fr_loc[20 - 16])/8,
186 struct_offset(struct unw_frame_info, fr_loc[21 - 16])/8,
187 struct_offset(struct unw_frame_info, fr_loc[22 - 16])/8,
188 struct_offset(struct unw_frame_info, fr_loc[23 - 16])/8,
189 struct_offset(struct unw_frame_info, fr_loc[24 - 16])/8,
190 struct_offset(struct unw_frame_info, fr_loc[25 - 16])/8,
191 struct_offset(struct unw_frame_info, fr_loc[26 - 16])/8,
192 struct_offset(struct unw_frame_info, fr_loc[27 - 16])/8,
193 struct_offset(struct unw_frame_info, fr_loc[28 - 16])/8,
194 struct_offset(struct unw_frame_info, fr_loc[29 - 16])/8,
195 struct_offset(struct unw_frame_info, fr_loc[30 - 16])/8,
196 struct_offset(struct unw_frame_info, fr_loc[31 - 16])/8,
198 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
201 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
202 "r4", "r5", "r6", "r7",
203 "ar.unat", "pr", "ar.lc", "ar.fpsr",
204 "b1", "b2", "b3", "b4", "b5",
205 "f2", "f3", "f4", "f5",
206 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
207 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
213 /* Unwind accessors. */
216 * Returns offset of rREG in struct pt_regs.
/*
 * Map a scratch general register number (r1-r31 scratch ranges) to its byte
 * offset within struct pt_regs; logs and (presumably) returns 0 for registers
 * not saved in pt_regs.
 * NOTE(review): this extract is missing lines (else-if keywords, braces and the
 * return statement are not visible here).
 */
218 static inline unsigned long
219 pt_regs_off (unsigned long reg)
221 unsigned long off =0;
223 if (reg >= 1 && reg <= 3)
224 off = struct_offset(struct pt_regs, r1) + 8*(reg - 1);
226 off = struct_offset(struct pt_regs, r8) + 8*(reg - 8);
228 off = struct_offset(struct pt_regs, r12) + 8*(reg - 12);
230 off = struct_offset(struct pt_regs, r16) + 8*(reg - 16);
232 UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
/*
 * Return the pt_regs area holding the scratch registers for the current frame,
 * recovering info->pt from psp/sp when it is unset (bad unwind info).
 * NOTE(review): extract is missing lines (e.g. the `if (!info->pt)` guard and
 * the `else` before line 245 are not visible here).
 */
236 static inline struct pt_regs *
237 get_scratch_regs (struct unw_frame_info *info)
240 /* This should not happen with valid unwind info. */
241 UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
242 if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
243 info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
245 info->pt = info->sp - 16;
247 UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
248 return (struct pt_regs *) info->pt;
/*
 * Read or write general register `regnum` (value and NaT bit) in the frame
 * described by `info`.  Handles four classes: preserved r4-r7 (wherever the
 * unwind state says they live), scratch registers (in pt_regs), stacked
 * registers r32+ (on the RSE backing store), and r0 (read-only zero).
 * NOTE(review): extract is missing many lines (case labels, braces, returns);
 * comments below describe only what is visible.
 */
252 unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
254 unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
255 struct unw_ireg *ireg;
/* regnum outside 1..127: only r0 reads are legal */
258 if ((unsigned) regnum - 1 >= 127) {
259 if (regnum == 0 && !write) {
260 *val = 0; /* read r0 always returns 0 */
264 UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
265 __FUNCTION__, regnum);
270 if (regnum >= 4 && regnum <= 7) {
271 /* access a preserved register */
272 ireg = &info->r4 + (regnum - 4);
275 nat_addr = addr + ireg->nat.off;
276 switch (ireg->nat.type) {
278 /* simulate getf.sig/setf.sig */
281 /* write NaTVal and be done with it */
/* 0/0x1ffe appears to be the NaTVal encoding checked here -- TODO confirm */
288 if (addr[0] == 0 && addr[1] == 0x1ffe) {
289 /* return NaT and be done with it */
298 nat_addr = &dummy_nat;
/* UNaT-style bit: selected by bits 3..8 of the save address */
302 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
306 nat_addr = ia64_rse_rnat_addr(addr);
307 if ((unsigned long) addr < info->regstk.limit
308 || (unsigned long) addr >= info->regstk.top)
310 UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
312 __FUNCTION__, (void *) addr,
/* RNaT collection slot may lie above the backing store in use; fall back to switch_stack */
317 if ((unsigned long) nat_addr >= info->regstk.top)
318 nat_addr = &info->sw->ar_rnat;
319 nat_mask = (1UL << ia64_rse_slot_num(addr));
323 addr = &info->sw->r4 + (regnum - 4);
324 nat_addr = &info->sw->ar_unat;
325 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
328 /* access a scratch register */
329 pt = get_scratch_regs(info);
330 addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
331 if (info->pri_unat_loc)
332 nat_addr = info->pri_unat_loc;
334 nat_addr = &info->sw->ar_unat;
335 nat_mask = (1UL << ((long) addr & 0x1f8)/8);
338 /* access a stacked register */
339 addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
340 nat_addr = ia64_rse_rnat_addr(addr);
341 if ((unsigned long) addr < info->regstk.limit
342 || (unsigned long) addr >= info->regstk.top)
344 UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
345 "of rbs\n", __FUNCTION__);
348 if ((unsigned long) nat_addr >= info->regstk.top)
349 nat_addr = &info->sw->ar_rnat;
350 nat_mask = (1UL << ia64_rse_slot_num(addr));
/* write path: set or clear the NaT bit as requested */
356 *nat_addr |= nat_mask;
358 *nat_addr &= ~nat_mask;
360 if ((*nat_addr & nat_mask) == 0) {
364 *val = 0; /* if register is a NaT, *addr may contain kernel data! */
/*
 * Read or write branch register b0-b7: b0/b6/b7 are scratch (pt_regs),
 * b1-b5 are preserved (saved location if known, else switch_stack).
 * NOTE(review): extract is missing lines (switch header, read/write tail).
 */
372 unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
377 pt = get_scratch_regs(info);
380 case 0: addr = &pt->b0; break;
381 case 6: addr = &pt->b6; break;
382 case 7: addr = &pt->b7; break;
385 case 1: case 2: case 3: case 4: case 5:
386 addr = *(&info->b1_loc + (regnum - 1));
388 addr = &info->sw->b1 + (regnum - 1);
392 UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
393 __FUNCTION__, regnum);
/*
 * Read or write floating-point register f2-f127: preserved f2-f5 and f16-f31
 * from saved locations/switch_stack, scratch f6-f15 from pt_regs/switch_stack,
 * f32+ from the task's high-FP partition (thread.fph).
 * NOTE(review): extract is missing lines (guards, else branches, read/write tail).
 */
404 unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
406 struct ia64_fpreg *addr = 0;
/* only f2-f127 are accessible through this interface */
409 if ((unsigned) (regnum - 2) >= 126) {
410 UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
411 __FUNCTION__, regnum);
415 pt = get_scratch_regs(info);
418 addr = *(&info->f2_loc + (regnum - 2));
420 addr = &info->sw->f2 + (regnum - 2);
421 } else if (regnum <= 15) {
423 addr = &pt->f6 + (regnum - 6);
425 addr = &info->sw->f10 + (regnum - 10);
426 } else if (regnum <= 31) {
427 addr = info->fr_loc[regnum - 16];
429 addr = &info->sw->f16 + (regnum - 16);
431 struct task_struct *t = info->task;
437 addr = t->thread.fph + (regnum - 32);
/*
 * Read or write an application register (ar.bsp, ar.bspstore, ar.pfs, ar.rnat,
 * ar.unat, ar.lc, ar.ec, ar.fpsr, ...): use the saved location recorded in the
 * frame info when available, otherwise fall back to switch_stack; ar.ec is
 * packed into bits 52-57 of the saved CFM.
 * NOTE(review): extract is missing lines (case labels, read/write tail).
 */
448 unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
453 pt = get_scratch_regs(info);
456 addr = info->bsp_loc;
458 addr = &info->sw->ar_bspstore;
461 case UNW_AR_BSPSTORE:
462 addr = info->bspstore_loc;
464 addr = &info->sw->ar_bspstore;
468 addr = info->pfs_loc;
470 addr = &info->sw->ar_pfs;
474 addr = info->rnat_loc;
476 addr = &info->sw->ar_rnat;
480 addr = info->unat_loc;
482 addr = &info->sw->ar_unat;
488 addr = &info->sw->ar_lc;
/* ar.ec lives in CFM bits 52..57 */
496 (*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
498 *val = (*info->cfm_loc >> 52) & 0x3f;
502 addr = info->fpsr_loc;
504 addr = &info->sw->ar_fpsr;
516 UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
517 __FUNCTION__, regnum);
/*
 * Read or write the predicate registers: uses the saved location when known,
 * else the switch_stack copy.
 * NOTE(review): extract is missing almost the entire body.
 */
529 unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
535 addr = &info->sw->pr;
545 /* Routines to manipulate the state stack. */
/*
 * push(): save a copy of the current register state (sr->curr) onto the state
 * stack so a later pop() can restore it; allocation failure is logged
 * (GFP_ATOMIC allocation -- see alloc_reg_state()).
 * NOTE(review): extract is missing lines (NULL check, list linkage).
 */
548 push (struct unw_state_record *sr)
550 struct unw_reg_state *rs;
552 rs = alloc_reg_state();
554 printk(KERN_ERR "unwind: cannot stack reg state!\n");
557 memcpy(rs, &sr->curr, sizeof(*rs));
/*
 * pop(): restore sr->curr from the top of the state stack; underflow is
 * logged.  NOTE(review): the free of the popped node is not visible here.
 */
562 pop (struct unw_state_record *sr)
564 struct unw_reg_state *rs = sr->curr.next;
567 printk(KERN_ERR "unwind: stack underflow!\n");
570 memcpy(&sr->curr, rs, sizeof(*rs));
574 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
/* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
575 static struct unw_reg_state *
576 dup_state_stack (struct unw_reg_state *rs)
578 struct unw_reg_state *copy, *prev = NULL, *first = NULL;
/* NOTE(review): loop header and list-relinking lines are not visible in this extract */
581 copy = alloc_reg_state();
583 printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
586 memcpy(copy, rs, sizeof(*copy));
/* Free all stacked register states (but not RS itself). */
599 free_state_stack (struct unw_reg_state *rs)
601 struct unw_reg_state *p, *next;
/* `next` is captured before freeing p (loop body not visible here) */
603 for (p = rs->next; p != NULL; p = next) {
610 /* Unwind decoder routines */
/*
 * Translate an abreg encoding from an unwind descriptor (X-format records)
 * into the unwinder's internal register index; `memory` selects which flavor
 * of the primary-UNaT pseudo-register 0x62 maps to.  Encodings follow the
 * IA-64 runtime architecture descriptor tables.
 * NOTE(review): switch header and the default/return tail are not visible.
 */
612 static enum unw_register_index __attribute__((const))
613 decode_abreg (unsigned char abreg, int memory)
616 case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
617 case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
618 case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
619 case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
620 case 0x60: return UNW_REG_PR;
621 case 0x61: return UNW_REG_PSP;
622 case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
623 case 0x63: return UNW_REG_RP;
624 case 0x64: return UNW_REG_BSP;
625 case 0x65: return UNW_REG_BSPSTORE;
626 case 0x66: return UNW_REG_RNAT;
627 case 0x67: return UNW_REG_UNAT;
628 case 0x68: return UNW_REG_FPSR;
629 case 0x69: return UNW_REG_PFS;
630 case 0x6a: return UNW_REG_LC;
634 UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
/*
 * set_reg(): record where/when/with-what-value a register gets saved, unless
 * it was already marked as never saved (first save wins -- body mostly not
 * visible in this extract).
 */
639 set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
643 if (reg->when == UNW_WHEN_NEVER)
/*
 * alloc_spill_area(): walk registers from hi down to lo and assign a psp-relative
 * slot (advancing *offp by regsize) to each register spilled to its home location.
 */
648 alloc_spill_area (unsigned long *offp, unsigned long regsize,
649 struct unw_reg_info *lo, struct unw_reg_info *hi)
651 struct unw_reg_info *reg;
653 for (reg = hi; reg >= lo; --reg) {
654 if (reg->where == UNW_WHERE_SPILL_HOME) {
655 reg->where = UNW_WHERE_PSPREL;
/*
 * spill_next_when(): find the next register (scanning *regp..lim) spilled to its
 * home location and stamp it with save-time t; logs if none remains.
 */
663 spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
665 struct unw_reg_info *reg;
667 for (reg = *regp; reg <= lim; ++reg) {
668 if (reg->where == UNW_WHERE_SPILL_HOME) {
674 UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
/*
 * Post-process a prologue region: resolve implicit GR save locations in the
 * architectural save order, walk the imask to stamp save times for fp/gr/br
 * registers, then lay out the memory-stack spill area.
 * NOTE(review): extract is missing lines (imask guards, braces).
 */
678 finish_prologue (struct unw_state_record *sr)
680 struct unw_reg_info *reg;
685 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
686 * for Using Unwind Descriptors", rule 3):
688 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
689 reg = sr->curr.reg + unw.save_order[i];
690 if (reg->where == UNW_WHERE_GR_SAVE) {
691 reg->where = UNW_WHERE_GR;
692 reg->val = sr->gr_save_loc++;
697 * Next, compute when the fp, general, and branch registers get
698 * saved. This must come before alloc_spill_area() because
699 * we need to know which registers are spilled to their home
703 unsigned char kind, mask = 0, *cp = sr->imask;
705 static const unsigned char limit[3] = {
706 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
708 struct unw_reg_info *(regs[3]);
710 regs[0] = sr->curr.reg + UNW_REG_F2;
711 regs[1] = sr->curr.reg + UNW_REG_R4;
712 regs[2] = sr->curr.reg + UNW_REG_B1;
714 for (t = 0; t < sr->region_len; ++t) {
/* two bits per instruction slot: 0=none, 1=fr, 2=gr, 3=br */
717 kind = (mask >> 2*(3-(t & 3))) & 3;
719 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1],
720 sr->region_start + t);
724 * Next, lay out the memory stack spill area:
726 if (sr->any_spills) {
727 off = sr->spill_offset;
728 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
729 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
730 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
735 * Region header descriptors.
/*
 * Start a new prologue or body region: finish the previous prologue, reset
 * per-region state (epilogue info, grsave bookkeeping), and pre-record the
 * rp/pfs/psp/pr saves indicated by `mask` into `grsave` and following regs.
 * NOTE(review): extract is missing lines (early-out, mask test inside the loop).
 */
739 desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
740 struct unw_state_record *sr)
744 if (!(sr->in_body || sr->first_region))
746 sr->first_region = 0;
748 /* check if we're done: */
749 if (sr->when_target < sr->region_start + sr->region_len) {
754 region_start = sr->region_start + sr->region_len;
756 for (i = 0; i < sr->epilogue_count; ++i)
758 sr->epilogue_count = 0;
759 sr->epilogue_start = UNW_WHEN_NEVER;
761 sr->region_start = region_start;
762 sr->region_len = rlen;
/* mask bits select which of rp/pfs/psp/pr (unw.save_order[0..3]) get GR saves */
768 for (i = 0; i < 4; ++i) {
770 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
771 sr->region_start + sr->region_len - 1, grsave++);
774 sr->gr_save_loc = grsave;
777 sr->spill_offset = 0x10; /* default to psp+16 */
782 * Prologue descriptors.
/*
 * desc_abi(): handle a P7 unwabi descriptor; only abi 0 / context 'i'
 * (interrupt frame) is recognized, everything else is logged and ignored.
 * NOTE(review): the format string at orig line 793 reads "unwind%s:" -- the
 * '.' before %s looks missing (cannot be fixed in a comment-only edit).
 */
786 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
788 if (abi == 0 && context == 'i') {
789 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
790 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
793 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
794 __FUNCTION__, abi, context);
/* desc_br_gr(): branch regs b1-b5 (per brmask bit) saved to GRs starting at `gr`. */
798 desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
802 for (i = 0; i < 5; ++i) {
804 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
805 sr->region_start + sr->region_len - 1, gr++);
/* desc_br_mem(): branch regs b1-b5 (per brmask bit) spilled to memory. */
811 desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
815 for (i = 0; i < 5; ++i) {
817 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
818 sr->region_start + sr->region_len - 1, 0);
/* desc_frgr_mem(): r4-r7 (grmask) and f2-f5/f16-f31 (frmask) spilled to memory. */
826 desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
830 for (i = 0; i < 4; ++i) {
831 if ((grmask & 1) != 0) {
832 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
833 sr->region_start + sr->region_len - 1, 0);
838 for (i = 0; i < 20; ++i) {
839 if ((frmask & 1) != 0) {
/* frmask bits 0-3 -> f2-f5, bits 4-19 -> f16-f31 */
840 int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
841 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
842 sr->region_start + sr->region_len - 1, 0);
/* desc_fr_mem(): f2-f5 (per frmask bit) spilled to memory. */
850 desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
854 for (i = 0; i < 4; ++i) {
855 if ((frmask & 1) != 0) {
856 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
857 sr->region_start + sr->region_len - 1, 0);
/* desc_gr_gr(): r4-r7 (per grmask bit) saved to GRs starting at `gr`. */
865 desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
869 for (i = 0; i < 4; ++i) {
870 if ((grmask & 1) != 0)
871 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
872 sr->region_start + sr->region_len - 1, gr++);
/* desc_gr_mem(): r4-r7 (per grmask bit) spilled to memory. */
878 desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
882 for (i = 0; i < 4; ++i) {
883 if ((grmask & 1) != 0) {
884 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
885 sr->region_start + sr->region_len - 1, 0);
/* desc_mem_stack_f(): fixed-size memory stack frame of 16*size bytes, allocated at time t. */
893 desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
895 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
896 sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
/* desc_mem_stack_v(): variable-size frame; only the PSP save time is recorded. */
900 desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
902 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* desc_reg_gr(): register `reg` is saved to general register `dst`. */
906 desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
908 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
/* desc_reg_psprel(): register `reg` is saved psp-relative (offset continuation not visible). */
912 desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
914 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
/* desc_reg_sprel(): register `reg` is saved sp-relative (offset continuation not visible). */
919 desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
921 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
/* desc_rp_br(): the return pointer lives in branch register `dst`. */
926 desc_rp_br (unsigned char dst, struct unw_state_record *sr)
928 sr->return_link_reg = dst;
/* desc_reg_when(): record save time t for `regnum`; default its `where` to GR_SAVE. */
932 desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
934 struct unw_reg_info *reg = sr->curr.reg + regnum;
936 if (reg->where == UNW_WHERE_NONE)
937 reg->where = UNW_WHERE_GR_SAVE;
938 reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* desc_spill_base(): spill area starts at psp+16-4*pspoff (pspoff in words). */
942 desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
944 sr->spill_offset = 0x10 - 4*pspoff;
/* desc_spill_mask(): record imask pointer and skip past it (2 bits per slot). */
947 static inline unsigned char *
948 desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
951 return imaskp + (2*sr->region_len + 7)/8;
/* desc_epilogue(): epilogue starts t slots before region end; count is ecount+1. */
958 desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
960 sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
961 sr->epilogue_count = ecount + 1;
/*
 * desc_copy_state(): restore the state previously saved under `label` --
 * replaces sr->curr (freeing its stacked states) with a copy of the labeled
 * state, duplicating the labeled state's stack so it can be reused.
 */
965 desc_copy_state (unw_word label, struct unw_state_record *sr)
967 struct unw_labeled_state *ls;
969 for (ls = sr->labeled_states; ls; ls = ls->next) {
970 if (ls->label == label) {
971 free_state_stack(&sr->curr);
972 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
973 sr->curr.next = dup_state_stack(ls->saved_state.next);
977 printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
/*
 * desc_label_state(): snapshot the current state (including a duplicate of its
 * state stack) under `label` for a later copy_state.
 * NOTE(review): NULL check after allocation and the ls->label assignment are
 * not visible in this extract.
 */
981 desc_label_state (unw_word label, struct unw_state_record *sr)
983 struct unw_labeled_state *ls;
985 ls = alloc_labeled_state();
987 printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
991 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
992 ls->saved_state.next = dup_state_stack(sr->curr.next);
994 /* insert into list of labeled states: */
995 ls->next = sr->labeled_states;
996 sr->labeled_states = ls;
1000 * General descriptors.
/*
 * desc_is_active(): decide whether a predicated descriptor applies -- the save
 * time must not be past the unwind target, and predicate qp must be set in
 * pr_val; any consulted predicate is added to pr_mask (script dependency).
 * NOTE(review): returns are not visible in this extract.
 */
1004 desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
1006 if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
1009 if ((sr->pr_val & (1UL << qp)) == 0)
1011 sr->pr_mask |= (1UL << qp);
/* desc_restore_p(): predicated restore -- mark the register as no longer saved. */
1017 desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
1019 struct unw_reg_info *r;
1021 if (!desc_is_active(qp, t, sr))
1024 r = sr->curr.reg + decode_abreg(abreg, 0);
1025 r->where = UNW_WHERE_NONE;
1026 r->when = UNW_WHEN_NEVER;
/*
 * desc_spill_reg_p(): predicated spill to another register -- destination kind
 * (GR/BR/FR) is encoded in x and bit 7 of ytreg, register number in ytreg&0x7f.
 */
1031 desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
1032 unsigned char ytreg, struct unw_state_record *sr)
1034 enum unw_where where = UNW_WHERE_GR;
1035 struct unw_reg_info *r;
1037 if (!desc_is_active(qp, t, sr))
1041 where = UNW_WHERE_BR;
1042 else if (ytreg & 0x80)
1043 where = UNW_WHERE_FR;
1045 r = sr->curr.reg + decode_abreg(abreg, 0);
1047 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1048 r->val = (ytreg & 0x7f);
/* desc_spill_psprel_p(): predicated spill to psp+16-4*pspoff. */
1052 desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
1053 struct unw_state_record *sr)
1055 struct unw_reg_info *r;
1057 if (!desc_is_active(qp, t, sr))
1060 r = sr->curr.reg + decode_abreg(abreg, 1);
1061 r->where = UNW_WHERE_PSPREL;
1062 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1063 r->val = 0x10 - 4*pspoff;
/* desc_spill_sprel_p(): predicated spill sp-relative (val assignment not visible). */
1067 desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
1068 struct unw_state_record *sr)
1070 struct unw_reg_info *r;
1072 if (!desc_is_active(qp, t, sr))
1075 r = sr->curr.reg + decode_abreg(abreg, 1);
1076 r->where = UNW_WHERE_SPREL;
1077 r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
/*
 * Hooks for the generic descriptor decoder: the textually-included
 * unwind_decoder.c parses the raw descriptor bytes and invokes these
 * UNW_DEC_* macros, which forward to the desc_* handlers above.
 * NOTE(review): line 1081 ends in a continuation backslash whose next line is
 * not visible in this extract.
 */
1081 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1087 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1088 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1090 * prologue descriptors:
1092 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1093 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1094 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1095 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1096 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1097 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1098 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1099 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1100 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1101 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1102 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1103 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1104 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1105 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1106 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1107 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1108 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1109 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1110 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1111 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1112 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1116 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1117 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1118 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1120 * general unwind descriptors:
1122 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1123 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1124 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1125 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1126 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1127 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1128 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1129 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1131 #include "unwind_decoder.c"
1134 /* Unwind scripts. */
/*
 * hash(): map an instruction pointer to a hash-table index using Fibonacci
 * (multiplicative) hashing; ip>>4 because IA-64 bundles are 16 bytes.
 */
1136 static inline unw_hash_index_t
1137 hash (unsigned long ip)
1139 # define magic 0x9e3779b97f4a7c16 /* based on (sqrt(5)/2-1)*2^64 */
1141 return (ip >> 4)*magic >> (64 - UNW_LOG_HASH_SIZE);
/*
 * cache_match(): check whether a cached script matches (ip, relevant predicate
 * bits).  On a hit the read lock on script->lock is deliberately retained for
 * the caller; on a miss it is dropped.
 */
1145 cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
1147 read_lock(&script->lock);
1148 if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
1149 /* keep the read lock... */
1151 read_unlock(&script->lock);
/*
 * Find a cached unwind script for the frame's ip/predicates: first try the
 * per-frame hint, then the hash chain; on a hit the matching script's read
 * lock is held (see cache_match()) and the previous script's hint is updated.
 * NOTE(review): extract is missing lines (returns, loop header of the chain walk).
 */
1155 static inline struct unw_script *
1156 script_lookup (struct unw_frame_info *info)
1158 struct unw_script *script = unw.cache + info->hint;
1159 unsigned short index;
1160 unsigned long ip, pr;
1162 if (UNW_DEBUG_ON(0))
1163 return 0; /* Always regenerate scripts in debug mode */
1165 STAT(++unw.stat.cache.lookups);
1170 if (cache_match(script, ip, pr)) {
1171 STAT(++unw.stat.cache.hinted_hits);
1175 index = unw.hash[hash(ip)];
/* hash slots are initialized to -1 (>= UNW_CACHE_SIZE) == empty */
1176 if (index >= UNW_CACHE_SIZE)
1179 script = unw.cache + index;
1181 if (cache_match(script, ip, pr)) {
1182 /* update hint; no locking required as single-word writes are atomic */
1183 STAT(++unw.stat.cache.normal_hits);
1184 unw.cache[info->prev_script].hint = script - unw.cache;
1187 if (script->coll_chain >= UNW_HASH_SIZE)
1189 script = unw.cache + script->coll_chain;
1190 STAT(++unw.stat.cache.collision_chain_traversals);
1195 * On returning, a write lock for the SCRIPT is still being held.
/*
 * Allocate a script cache entry for `ip` by recycling the LRU script:
 * takes unw.lock, pops the LRU head, write-trylocks the victim (bailing out if
 * that fails, to avoid self-deadlock against an interrupted reader), re-inserts
 * it at the LRU tail, unhashes its old ip and hashes the new one.  On success
 * the returned script's write lock is still held by the caller.
 * NOTE(review): extract is missing lines (early-out when trylock fails, chain
 * walk braces, return).
 */
1197 static inline struct unw_script *
1198 script_new (unsigned long ip)
1200 struct unw_script *script, *prev, *tmp;
1201 unw_hash_index_t index;
1202 unsigned long flags;
1203 unsigned short head;
1205 STAT(++unw.stat.script.news);
1208 * Can't (easily) use cmpxchg() here because of ABA problem
1209 * that is intrinsic in cmpxchg()...
1211 spin_lock_irqsave(&unw.lock, flags);
1213 head = unw.lru_head;
1214 script = unw.cache + head;
1215 unw.lru_head = script->lru_chain;
/* drop unw.lock (interrupts stay off) before trying the script's write lock */
1217 spin_unlock(&unw.lock);
1220 * We'd deadlock here if we interrupted a thread that is holding a read lock on
1221 * script->lock. Thus, if the write_trylock() fails, we simply bail out. The
1222 * alternative would be to disable interrupts whenever we hold a read-lock, but
1225 if (!write_trylock(&script->lock))
1228 spin_lock(&unw.lock);
1230 /* re-insert script at the tail of the LRU chain: */
1231 unw.cache[unw.lru_tail].lru_chain = head;
1232 unw.lru_tail = head;
1234 /* remove the old script from the hash table (if it's there): */
1236 index = hash(script->ip);
1237 tmp = unw.cache + unw.hash[index];
1240 if (tmp == script) {
1242 prev->coll_chain = tmp->coll_chain;
1244 unw.hash[index] = tmp->coll_chain;
1248 if (tmp->coll_chain >= UNW_CACHE_SIZE)
1249 /* old script wasn't in the hash-table */
1251 tmp = unw.cache + tmp->coll_chain;
1255 /* enter new script in the hash table */
1257 script->coll_chain = unw.hash[index];
1258 unw.hash[index] = script - unw.cache;
1260 script->ip = ip; /* set new IP while we're holding the locks */
1262 STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);
1264 spin_unlock_irqrestore(&unw.lock, flags);
/*
 * script_finalize(): record the predicate mask/values the finished script
 * depends on; the caller's write lock on the script is intentionally retained.
 */
1273 script_finalize (struct unw_script *script, struct unw_state_record *sr)
1275 script->pr_mask = sr->pr_mask;
1276 script->pr_val = sr->pr_val;
1278 * We could down-grade our write-lock on script->lock here but
1279 * the rwlock API doesn't offer atomic lock downgrading, so
1280 * we'll just keep the write-lock and release it later when
1281 * we're done using the script.
/*
 * script_emit(): append one instruction to the script, logging and dropping it
 * if the script is already at UNW_MAX_SCRIPT_LEN.
 */
1286 script_emit (struct unw_script *script, struct unw_insn insn)
1288 if (script->count >= UNW_MAX_SCRIPT_LEN) {
1289 UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
1290 __FUNCTION__, UNW_MAX_SCRIPT_LEN);
1293 script->insn[script->count++] = insn;
/*
 * Emit the script instruction that recovers the NaT bit for preserved register
 * i, choosing the opcode by where the register was saved (stacked register,
 * scratch/pt_regs, memory-stack location, ...).
 * NOTE(review): extract is missing the switch header, several case labels and
 * the insn field setup beyond `dst`.
 */
1297 emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
1299 struct unw_reg_info *r = sr->curr.reg + i;
1300 enum unw_insn_opcode opc;
1301 struct unw_insn insn;
1302 unsigned long val = 0;
1307 /* register got spilled to a stacked register */
1308 opc = UNW_INSN_SETNAT_TYPE;
1309 val = UNW_NAT_REGSTK;
1311 /* register got spilled to a scratch register */
1312 opc = UNW_INSN_SETNAT_MEMSTK;
1316 opc = UNW_INSN_SETNAT_TYPE;
1321 opc = UNW_INSN_SETNAT_TYPE;
1325 case UNW_WHERE_PSPREL:
1326 case UNW_WHERE_SPREL:
1327 opc = UNW_INSN_SETNAT_MEMSTK;
1331 UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
1332 __FUNCTION__, r->where);
1336 insn.dst = unw.preg_index[i];
1338 script_emit(script, insn);
/*
 * Translate the unwind state for register i into script instructions: pick an
 * opcode/operand from the register's save location (another preserved reg,
 * stacked reg, scratch reg in pt_regs, sp- or psp-relative memory), emit NaT
 * recovery for r4-r7, and for PSP add a LOAD so the script yields the previous
 * sp *value* rather than its save address.
 * NOTE(review): extract is missing many lines (switch headers, early return,
 * several case labels); comments describe only what is visible.  The format
 * string at orig line 1413 reads "unwind%s:" -- '.' looks missing.
 */
1342 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1344 struct unw_reg_info *r = sr->curr.reg + i;
1345 enum unw_insn_opcode opc;
1346 unsigned long val, rval;
1347 struct unw_insn insn;
/* registers never saved, or saved after the target ip, need no instruction */
1350 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1353 opc = UNW_INSN_MOVE;
1354 val = rval = r->val;
1355 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1360 opc = UNW_INSN_MOVE_STACKED;
1362 } else if (rval >= 4 && rval <= 7) {
1363 if (need_nat_info) {
1364 opc = UNW_INSN_MOVE2;
1367 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1369 /* register got spilled to a scratch register */
1370 opc = UNW_INSN_MOVE_SCRATCH;
1371 val = pt_regs_off(rval);
1377 val = unw.preg_index[UNW_REG_F2 + (rval - 1)];
1378 else if (rval >= 16 && rval <= 31)
1379 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1381 opc = UNW_INSN_MOVE_SCRATCH;
1383 val = struct_offset(struct pt_regs, f6) + 16*(rval - 6);
1385 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1386 __FUNCTION__, rval);
1391 if (rval >= 1 && rval <= 5)
1392 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1394 opc = UNW_INSN_MOVE_SCRATCH;
1396 val = struct_offset(struct pt_regs, b0);
1398 val = struct_offset(struct pt_regs, b6);
1400 val = struct_offset(struct pt_regs, b7);
1404 case UNW_WHERE_SPREL:
1405 opc = UNW_INSN_ADD_SP;
1408 case UNW_WHERE_PSPREL:
1409 opc = UNW_INSN_ADD_PSP;
1413 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1414 __FUNCTION__, i, r->where);
1418 insn.dst = unw.preg_index[i];
1420 script_emit(script, insn);
1422 emit_nat_info(sr, i, script);
1424 if (i == UNW_REG_PSP) {
1426 * info->psp must contain the _value_ of the previous
1427 * sp, not its save location. We get this by
1428 * dereferencing the value we just stored in
1431 insn.opc = UNW_INSN_LOAD;
1432 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1433 script_emit(script, insn);
/*
 * Binary-search TABLE for the unwind entry covering the
 * segment-relative address REL_IP.  Entries are sorted by
 * start_offset; an entry covers [start_offset, end_offset).
 * NOTE(review): the branch bodies (lo/hi updates) and the return
 * statements are not visible in this extract; presumably NULL is
 * returned when REL_IP falls outside every entry -- confirm against
 * the final range check below.
 */
1437 static inline const struct unw_table_entry *
1438 lookup (struct unw_table *table, unsigned long rel_ip)
1440 const struct unw_table_entry *e = 0;
1441 unsigned long lo, hi, mid;
1443 /* do a binary search for right entry: */
1444 for (lo = 0, hi = table->length; lo < hi; ) {
1445 mid = (lo + hi) / 2;
1446 e = &table->array[mid];
1447 if (rel_ip < e->start_offset)
1449 else if (rel_ip >= e->end_offset)
/* final check: the entry the search landed on must actually cover rel_ip */
1454 if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
1460 * Build an unwind script that unwinds from state OLD_STATE to the
1461 * entrypoint of the function that called OLD_STATE.
/*
 * Steps: (1) initialize a state record, (2) allocate a fresh cache
 * script for info->ip, (3) find the unwind-table entry for ip and run
 * the unwind descriptors through unw_decode() to fill in the state
 * record, (4) compile the state record into script instructions, and
 * (5) finalize the script.  Falls back to a default "leaf procedure"
 * script when no unwind info exists for ip.
 * NOTE(review): this extract is missing lines (braces, 'return'
 * statements, parts of expressions such as the continuation of the
 * when_target computation at 1520).
 */
1463 static inline struct unw_script *
1464 build_script (struct unw_frame_info *info)
1466 const struct unw_table_entry *e = 0;
1467 struct unw_script *script = 0;
1468 struct unw_labeled_state *ls, *next;
1469 unsigned long ip = info->ip;
1470 struct unw_state_record sr;
1471 struct unw_table *table;
1472 struct unw_reg_info *r;
1473 struct unw_insn insn;
1477 STAT(unsigned long start, parse_start;)
1479 STAT(++unw.stat.script.builds; start = ia64_get_itc());
1481 /* build state record */
1482 memset(&sr, 0, sizeof(sr));
/* mark every preserved register as "never saved" until a descriptor says otherwise */
1483 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1484 r->when = UNW_WHEN_NEVER;
1485 sr.pr_val = info->pr;
1487 UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
1488 script = script_new(ip);
1490 UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
1491 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
/* remember the new script as a lookup hint for the previous frame's cache slot */
1494 unw.cache[info->prev_script].hint = script - unw.cache;
1496 /* search the kernels and the modules' unwind tables for IP: */
1498 STAT(parse_start = ia64_get_itc());
1500 for (table = unw.tables; table; table = table->next) {
1501 if (ip >= table->start && ip < table->end) {
1502 e = lookup(table, ip - table->segment_base);
1507 /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
1508 UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
1509 __FUNCTION__, ip, unw.cache[info->prev_script].ip);
1510 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1511 sr.curr.reg[UNW_REG_RP].when = -1;
1512 sr.curr.reg[UNW_REG_RP].val = 0;
1513 compile_reg(&sr, UNW_REG_RP, script);
1514 script_finalize(script, &sr);
1515 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1516 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
/* when_target: instruction slot index of ip within the function (3 slots per 16-byte bundle) */
1520 sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
/* unwind info blob: 8-byte header followed by 8*UNW_LENGTH(hdr) bytes of descriptors */
1522 hdr = *(u64 *) (table->segment_base + e->info_offset);
1523 dp = (u8 *) (table->segment_base + e->info_offset + 8);
1524 desc_end = dp + 8*UNW_LENGTH(hdr);
1526 while (!sr.done && dp < desc_end)
1527 dp = unw_decode(dp, sr.in_body, &sr);
/* target is inside the epilogue: undo sp/psp-relative save locations */
1529 if (sr.when_target > sr.epilogue_start) {
1531 * sp has been restored and all values on the memory stack below
1532 * psp also have been restored.
1534 sr.curr.reg[UNW_REG_PSP].val = 0;
1535 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
1536 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
1537 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
1538 if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
1539 || r->where == UNW_WHERE_SPREL)
1542 r->where = UNW_WHERE_NONE;
1543 r->when = UNW_WHEN_NEVER;
1547 script->flags = sr.flags;
1550 * If RP didn't get saved, generate entry for the return link
1553 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
1554 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
1555 sr.curr.reg[UNW_REG_RP].when = -1;
1556 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
1557 UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
1558 __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
1559 sr.curr.reg[UNW_REG_RP].val);
/* debug dump of the fully-parsed state record */
1563 UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
1564 __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
1565 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
1566 if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
1567 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
1569 case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
1570 case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
1571 case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
1572 case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
1573 case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
1574 case UNW_WHERE_NONE:
1575 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
1579 UNW_DPRINT(1, "BADWHERE(%d)", r->where);
1582 UNW_DPRINT(1, "\t\t%d\n", r->when);
1587 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1589 /* translate state record into unwinder instructions: */
1592 * First, set psp if we're dealing with a fixed-size frame;
1593 * subsequent instructions may depend on this value.
1595 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
1596 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
1597 && sr.curr.reg[UNW_REG_PSP].val != 0) {
1598 /* new psp is sp plus frame size */
1599 insn.opc = UNW_INSN_ADD;
1600 insn.dst = struct_offset(struct unw_frame_info, psp)/8;
1601 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
1602 script_emit(script, insn);
1605 /* determine where the primary UNaT is: */
1606 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when
1607 i = UNW_REG_PRI_UNAT_MEM;
1608 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
1609 i = UNW_REG_PRI_UNAT_GR;
1610 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
1611 i = UNW_REG_PRI_UNAT_MEM;
1613 i = UNW_REG_PRI_UNAT_GR;
1615 compile_reg(&sr, i, script);
/* compile everything else (BSP onward) in register-index order */
1617 for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
1618 compile_reg(&sr, i, script);
1620 /* free labeled register states & stack: */
1622 STAT(parse_start = ia64_get_itc());
1623 for (ls = sr.labeled_states; ls; ls = next) {
1625 free_state_stack(&ls->saved_state);
1626 free_labeled_state(ls);
1628 free_state_stack(&sr.curr);
1629 STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
1631 script_finalize(script, &sr);
1632 STAT(unw.stat.script.build_time += ia64_get_itc() - start);
1637 * Apply the unwinding actions represented by OPS and update SR to
1638 * reflect the state that existed upon entry to the function that this
1639 * unwinder represents.
/*
 * Interpreter for the compiled unwind script: STATE is treated as a
 * flat array of unsigned longs (s[]) so that each instruction's dst/val
 * can index save-location slots directly.  The tail (offsets 1732+)
 * appears to be the lazy-initialization path that points an untouched
 * slot at its switch_stack location on first use.
 * NOTE(review): lines are missing from this extract (switch header,
 * break statements, the lazy_init label, several case bodies).
 */
1642 run_script (struct unw_script *script, struct unw_frame_info *state)
1644 struct unw_insn *ip, *limit, next_insn;
1645 unsigned long opc, dst, val, off;
/* view the frame-info struct as an array of 8-byte slots */
1646 unsigned long *s = (unsigned long *) state;
1647 STAT(unsigned long start;)
1649 STAT(++unw.stat.script.runs; start = ia64_get_itc());
1650 state->flags = script->flags;
1652 limit = script->insn + script->count;
1655 while (ip++ < limit) {
1656 opc = next_insn.opc;
1657 dst = next_insn.dst;
1658 val = next_insn.val;
/* MOVE2: copy a slot pair (value + NaT info) */
1667 case UNW_INSN_MOVE2:
1670 s[dst+1] = s[val+1];
/* MOVE_SCRATCH: save location is inside the pt_regs of the interrupted frame */
1680 case UNW_INSN_MOVE_SCRATCH:
1682 s[dst] = (unsigned long) get_scratch_regs(state) + val;
1685 UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
1686 __FUNCTION__, dst, val);
/* MOVE_STACKED: value lives on the register backing store, relative to bsp */
1690 case UNW_INSN_MOVE_STACKED:
1691 s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,
1695 case UNW_INSN_ADD_PSP:
1696 s[dst] = state->psp + val;
1699 case UNW_INSN_ADD_SP:
1700 s[dst] = state->sp + val;
1703 case UNW_INSN_SETNAT_MEMSTK:
1704 if (!state->pri_unat_loc)
1705 state->pri_unat_loc = &state->sw->ar_unat;
1706 /* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
1707 s[dst+1] = (*state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;
1710 case UNW_INSN_SETNAT_TYPE:
/* LOAD: validate the address before dereferencing (alignment, impl. va bits, kernel range) */
1716 if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
1717 || s[val] < TASK_SIZE)
1719 UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
1720 __FUNCTION__, s[val]);
1724 s[dst] = *(unsigned long *) s[val];
1728 STAT(unw.stat.script.run_time += ia64_get_itc() - start);
/* lazy-init path: point slot VAL at its location within the switch_stack */
1732 off = unw.sw_off[val];
1733 s[val] = (unsigned long) state->sw + off;
1734 if (off >= struct_offset(struct switch_stack, r4)
1735 && off <= struct_offset(struct switch_stack, r7))
1737 * We're initializing a general register: init NaT info, too. Note that
1738 * the offset is a multiple of 8 which gives us the 3 bits needed for
1741 s[val+1] = (struct_offset(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
/*
 * Locate (or build) the unwind script for info->ip and run it to fill
 * in the save locations in INFO.  Takes the script's read lock for
 * execution; build_script() presumably returns with the write lock
 * held (hence have_write_lock) -- confirm against script_new().
 * Obviously-invalid ips (misaligned, unimplemented va bits, below
 * TASK_SIZE) are rejected before touching the cache.
 */
1746 find_save_locs (struct unw_frame_info *info)
1748 int have_write_lock = 0;
1749 struct unw_script *scr;
1751 if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
1752 /* don't let obviously bad addresses pollute the cache */
1753 /* FIXME: should really be level 0 but it occurs too often. KAO */
1754 UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);
/* fast path: cached script; slow path: compile a new one */
1759 scr = script_lookup(info);
1761 scr = build_script(info);
1764 "unwind.%s: failed to locate/build unwind script for ip %lx\n",
1765 __FUNCTION__, info->ip);
1768 have_write_lock = 1;
/* remember cache linkage for the next lookup's hint chain */
1770 info->hint = scr->hint;
1771 info->prev_script = scr - unw.cache;
1773 run_script(scr, info);
1775 if (have_write_lock)
1776 write_unlock(&scr->lock);
1778 read_unlock(&scr->lock);
/*
 * Unwind INFO by one frame: restore ip (from the return link), cfm
 * (from ar.pfs), bsp (by skipping the callee's registers on the
 * backing store) and sp (from psp), then recompute the save locations
 * for the new frame via find_save_locs().  Sanity-checks each restored
 * value against the task's register/memory stack bounds and bails out
 * when unwinding makes no progress (ip/sp/bsp unchanged).
 * NOTE(review): missing lines include the 'return -1' statements after
 * the error DPRINTs and parts of the interrupt-frame handling.
 */
1783 unw_unwind (struct unw_frame_info *info)
1785 unsigned long prev_ip, prev_sp, prev_bsp;
1786 unsigned long ip, pr, num_regs;
1787 STAT(unsigned long start, flags;)
1790 STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());
1794 prev_bsp = info->bsp;
1796 /* restore the ip */
1797 if (!info->rp_loc) {
1798 /* FIXME: should really be level 0 but it occurs too often. KAO */
1799 UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
1800 __FUNCTION__, info->ip);
1801 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1804 ip = info->ip = *info->rp_loc;
1805 if (ip < GATE_ADDR + PAGE_SIZE) {
1807 * We don't have unwind info for the gate page, so we consider that part
1808 * of user-space for the purpose of unwinding.
1810 UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
1811 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1815 /* restore the cfm: */
1816 if (!info->pfs_loc) {
1817 UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
1818 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1821 info->cfm_loc = info->pfs_loc;
1823 /* restore the bsp: */
/* for an interrupt frame, the whole frame (sof) was flushed; otherwise only locals (sol) */
1826 if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
1827 info->pt = info->sp + 16;
1828 if ((pr & (1UL << pNonSys)) != 0)
1829 num_regs = *info->cfm_loc & 0x7f; /* size of frame */
1831 (unsigned long *) (info->pt + struct_offset(struct pt_regs, ar_pfs));
1832 UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
1834 num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
1835 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
1836 if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
1837 UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
1838 __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
1839 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1843 /* restore the sp: */
1844 info->sp = info->psp;
1845 if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
1846 UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
1847 __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
1848 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
/* no progress => stop, to avoid looping forever on a corrupt stack */
1852 if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
1853 UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
1855 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
1859 /* as we unwind, the saved ar.unat becomes the primary unat: */
1860 info->pri_unat_loc = info->unat_loc;
1862 /* finally, restore the predicates: */
1863 unw_get_pr(info, &info->pr);
1865 retval = find_save_locs(info);
1866 STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
/*
 * Repeatedly unwind INFO until the return pointer points into the
 * gate page / user space (then presumably return success) or until
 * unwinding fails.  The gate page has no unwind info, so any ip below
 * GATE_ADDR + PAGE_SIZE is treated as user space.
 */
1871 unw_unwind_to_user (struct unw_frame_info *info)
1875 while (unw_unwind(info) >= 0) {
1876 if (unw_get_rp(info, &ip) < 0) {
1877 unw_get_ip(info, &ip);
1878 UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
1883 * We don't have unwind info for the gate page, so we consider that part
1884 * of user-space for the purpose of unwinding.
1886 if (ip < GATE_ADDR + PAGE_SIZE)
/* fell out of the loop: report where we got stuck */
1889 unw_get_ip(info, &ip);
1890 UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
/*
 * Common initializer for an unwind session over task T: zero the frame
 * info, record the bounds of the register backing store
 * ([rbslimit, rbstop)) and the memory stack ([stktop, stklimit)), and
 * seed sp/psp from STKTOP.  Deliberately does NOT parse the
 * switch_stack frame itself -- see the comment at 1904 below.
 */
1895 init_frame_info (struct unw_frame_info *info, struct task_struct *t,
1896 struct switch_stack *sw, unsigned long stktop)
1898 unsigned long rbslimit, rbstop, stklimit;
1899 STAT(unsigned long start, flags;)
1901 STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());
1904 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
1905 * don't want to do that because it would be slow as each preserved register would
1906 * have to be processed. Instead, what we do here is zero out the frame info and
1907 * start the unwind process at the function that created the switch_stack frame.
1908 * When a preserved value in switch_stack needs to be accessed, run_script() will
1909 * initialize the appropriate pointer on demand.
1911 memset(info, 0, sizeof(*info));
1913 rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
1914 rbstop = sw->ar_bspstore;
/* clamp a bogus bspstore that lies outside the task's stack area */
1915 if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)
1918 stklimit = (unsigned long) t + IA64_STK_OFFSET;
/* memory stack grows down toward the register backing store; clamp overlap */
1919 if (stktop <= rbstop)
1922 info->regstk.limit = rbslimit;
1923 info->regstk.top = rbstop;
1924 info->memstk.limit = stklimit;
1925 info->memstk.top = stktop;
1928 info->sp = info->psp = stktop;
1930 UNW_DPRINT(3, "unwind.%s:\n"
1932 " rbs = [0x%lx-0x%lx)\n"
1933 " stk = [0x%lx-0x%lx)\n"
1937 __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
1938 info->pr, (unsigned long) info->sw, info->sp);
1939 STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
/*
 * Start an unwind session at an interruption point: save locations
 * come straight from the pt_regs (cr_ifs, ar_unat, ar_pfs), bsp is
 * rewound past the full frame (sof = size-of-frame from cfm), and ip
 * is cr_iip adjusted by the restart instruction slot (psr.ri).
 */
1943 unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
1944 struct pt_regs *pt, struct switch_stack *sw)
1948 init_frame_info(info, t, sw, pt->r12);
1949 info->cfm_loc = &pt->cr_ifs;
1950 info->unat_loc = &pt->ar_unat;
1951 info->pfs_loc = &pt->ar_pfs;
1952 sof = *info->cfm_loc & 0x7f;
1953 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
1954 info->ip = pt->cr_iip + ia64_psr(pt)->ri;
1955 info->pt = (unsigned long) pt;
1956 UNW_DPRINT(3, "unwind.%s:\n"
1960 __FUNCTION__, info->bsp, sof, info->ip);
1961 find_save_locs(info);
/*
 * Start an unwind session at a switch_stack frame: cfm comes from the
 * saved ar.pfs and bsp is rewound past the locals only (sol =
 * size-of-locals, bits 7..13 of cfm).  The memory-stack top is just
 * above the switch_stack, minus the 16-byte scratch area.
 */
1965 unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
1969 init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
1970 info->cfm_loc = &sw->ar_pfs;
1971 sol = (*info->cfm_loc >> 7) & 0x7f;
1972 info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
1974 UNW_DPRINT(3, "unwind.%s:\n"
1978 __FUNCTION__, info->bsp, sol, info->ip);
1979 find_save_locs(info);
/*
 * Start an unwind session for a blocked (switched-out) task: its
 * switch_stack sits just above the saved kernel stack pointer
 * (thread.ksp + 16 skips the 16-byte scratch area).
 */
1983 unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
1985 struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);
1987 UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
1988 unw_init_frame_info(info, t, sw);
/*
 * Fill in TABLE from a raw array of unw_table_entry records in
 * [table_start, table_end): covered ip range is derived from the first
 * entry's start and the last entry's end, both segment-relative.
 * Assumes the entry array is sorted and non-empty (callers check).
 */
1992 init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
1993 unsigned long gp, const void *table_start, const void *table_end)
1995 const struct unw_table_entry *start = table_start, *end = table_end;
1998 table->segment_base = segment_base;
2000 table->start = segment_base + start[0].start_offset;
2001 table->end = segment_base + end[-1].end_offset;
2002 table->array = start;
2003 table->length = end - start;
/*
 * Register a new unwind table (e.g. for a loaded module).  Allocates
 * and initializes a struct unw_table and links it into unw.tables
 * behind the kernel table, under unw.lock.  Presumably returns the
 * table pointer as an opaque handle for unw_remove_unwind_table() --
 * the return statements are not visible in this extract.
 */
2007 unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
2008 const void *table_start, const void *table_end)
2010 const struct unw_table_entry *start = table_start, *end = table_end;
2011 struct unw_table *table;
2012 unsigned long flags;
2014 if (end - start <= 0) {
2015 UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",
2020 table = kmalloc(sizeof(*table), GFP_USER);
2024 init_unwind_table(table, name, segment_base, gp, table_start, table_end);
2026 spin_lock_irqsave(&unw.lock, flags);
2028 /* keep kernel unwind table at the front (it's searched most commonly): */
2029 table->next = unw.tables->next;
2030 unw.tables->next = table;
2032 spin_unlock_irqrestore(&unw.lock, flags);
/*
 * Unregister an unwind table previously returned by
 * unw_add_unwind_table().  Refuses to remove the built-in kernel
 * table.  First unlinks the table from unw.tables under unw.lock,
 * then purges any cached scripts whose ip falls inside the table's
 * range from the hash chains (re-checking under each script's write
 * lock).  NOTE(review): lines are missing from this extract (the
 * NULL-handle check body, 'return' statements, the kfree of the
 * table); only the visible off-by-one was changed.
 */
2038 unw_remove_unwind_table (void *handle)
2040 struct unw_table *table, *prev;
2041 struct unw_script *tmp;
2042 unsigned long flags;
2046 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2052 if (table == &unw.kernel_table) {
2053 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2054 "no-can-do!\n", __FUNCTION__);
2058 spin_lock_irqsave(&unw.lock, flags);
2060 /* first, delete the table: */
2062 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2063 if (prev->next == table)
2066 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2067 __FUNCTION__, (void *) table);
2068 spin_unlock_irqrestore(&unw.lock, flags);
2071 prev->next = table->next;
2073 spin_unlock_irqrestore(&unw.lock, flags);
2075 /* next, remove hash table entries for this table */
/* BUGFIX: was 'index <= UNW_HASH_SIZE' -- unw.hash has UNW_HASH_SIZE
 * entries, so <= read one element past the end of the array. */
2077 for (index = 0; index < UNW_HASH_SIZE; ++index) {
2078 tmp = unw.cache + unw.hash[index];
/* skip empty buckets and scripts outside the removed table's ip range */
2079 if (unw.hash[index] >= UNW_CACHE_SIZE
2080 || tmp->ip < table->start || tmp->ip >= table->end)
2083 write_lock(&tmp->lock);
/* re-check under the write lock before dropping the script from the chain */
2085 if (tmp->ip >= table->start && tmp->ip < table->end) {
2086 unw.hash[index] = tmp->coll_chain;
2090 write_unlock(&tmp->lock);
/*
 * Build a user-exportable copy of the unwind data covering the gate
 * page (the kernel/user transition code).  Two passes over the kernel
 * table entries that fall inside the gate section: the first sums the
 * required size (3 u64s per table entry + each entry's unwind-info
 * blob + end marker), the second fills in the table with GATE_ADDR-
 * relative start/end addresses and buffer-relative info offsets.
 * The result is served to user space by sys_getunwind().
 * NOTE(review): the second-pass placement of 'info' (it is decremented
 * from the end of the buffer in the original layout) is not fully
 * visible in this extract.
 */
2097 unw_create_gate_table (void)
2099 extern char __start_gate_section[], __stop_gate_section[];
2100 unsigned long *lp, start, end, segbase = unw.kernel_table.segment_base;
2101 const struct unw_table_entry *entry, *first, *unw_table_end;
2102 extern int ia64_unw_end;
2103 size_t info_size, size;
2106 start = (unsigned long) __start_gate_section - segbase;
2107 end = (unsigned long) __stop_gate_section - segbase;
2108 unw_table_end = (struct unw_table_entry *) &ia64_unw_end;
2110 first = lookup(&unw.kernel_table, start);
/* first pass: size the buffer */
2112 for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry)
2113 size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2114 size += 8; /* reserve space for "end of table" marker */
2116 unw.gate_table = alloc_bootmem(size);
2117 if (!unw.gate_table) {
2118 unw.gate_table_size = 0;
2119 printk(KERN_ERR "unwind: unable to create unwind data for gate page!\n");
2122 unw.gate_table_size = size;
2124 lp = unw.gate_table;
2125 info = (char *) unw.gate_table + size;
/* second pass: emit table entries and copy the unwind-info blobs */
2127 for (entry = first; entry < unw_table_end && entry->start_offset < end; ++entry, lp += 3) {
2128 info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
2130 memcpy(info, (char *) segbase + entry->info_offset, info_size);
2132 lp[0] = entry->start_offset - start + GATE_ADDR; /* start */
2133 lp[1] = entry->end_offset - start + GATE_ADDR; /* end */
2134 lp[2] = info - (char *) unw.gate_table; /* info */
2136 *lp = 0; /* end-of-table marker */
/*
 * One-time unwinder initialization (body of unw_init(); the function
 * header is not visible in this extract): populate the sw_off[] table
 * that maps each preserved-register slot to its offset within
 * struct switch_stack (used by run_script()'s lazy-init path), link
 * the script cache into an LRU chain, and register the kernel's
 * unwind table.
 */
2142 extern int ia64_unw_start, ia64_unw_end, __gp;
2143 extern void unw_hash_index_t_is_too_narrow (void);
/* compile/link-time check: unw_hash_index_t must be able to index the whole hash */
2146 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2147 unw_hash_index_t_is_too_narrow();
2149 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
2150 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
/* BUGFIX: was SW(AR_UNAT) -- the PFS slot must point at the saved
 * ar.pfs, not ar.unat (which UNW_REG_UNAT below already maps). */
2151 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_PFS);
2152 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2153 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
2154 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2155 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2156 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
/* preserved r4-r7 / b1-b5: 8-byte slots; f2-f5 / f16-f31: 16-byte slots */
2157 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2158 unw.sw_off[unw.preg_index[i]] = off;
2159 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2160 unw.sw_off[unw.preg_index[i]] = off;
2161 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2162 unw.sw_off[unw.preg_index[i]] = off;
2163 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2164 unw.sw_off[unw.preg_index[i]] = off;
/* chain all cache entries into the LRU list; no collisions yet */
2166 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2168 unw.cache[i].lru_chain = (i - 1);
2169 unw.cache[i].coll_chain = -1;
2170 unw.cache[i].lock = RW_LOCK_UNLOCKED;
2172 unw.lru_head = UNW_CACHE_SIZE - 1;
2175 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp,
2176 &ia64_unw_start, &ia64_unw_end);
2180 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2181 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2182 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2185 * The first portion of the unwind data contains an unwind table and rest contains the
2186 * associated unwind info (in no particular order). The unwind table consists of a table
2187 * of entries of the form:
2189 * u64 start; (64-bit address of start of function)
2190 * u64 end; (64-bit address of end of function)
2191 * u64 info; (BUF-relative offset to unwind info)
2193 * The end of the unwind table is indicated by an entry with a START address of zero.
2195 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2196 * on the format of the unwind info.
2199 * EFAULT BUF points outside your accessible address space.
/*
 * System call: copy the gate-page unwind data (built by
 * unw_create_gate_table()) into the user buffer BUF if it is large
 * enough, and return the total size of that data either way.
 * NOTE(review): the -EFAULT return for a failed copy_to_user() is
 * implied by the != 0 check but its line is not visible here.
 */
2202 sys_getunwind (void *buf, size_t buf_size)
2204 if (buf && buf_size >= unw.gate_table_size)
2205 if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
2207 return unw.gate_table_size;