2 * Copyright (C) 1999-2003 Hewlett-Packard Co
3 * David Mosberger-Tang <davidm@hpl.hp.com>
4 * Copyright (C) 2003 Fenghua Yu <fenghua.yu@intel.com>
5 * - Change pt_regs_off() to make it less dependant on pt_regs structure.
8 * This file implements call frame unwind support for the Linux
9 * kernel. Parsing and processing the unwind information is
10 * time-consuming, so this implementation translates the unwind
11 * descriptors into unwind scripts. These scripts are very simple
12 * (basically a sequence of assignments) and efficient to execute.
13 * They are cached for later re-use. Each script is specific for a
14 * given instruction pointer address and the set of predicate values
15 * that the script depends on (most unwind descriptors are
16 * unconditional and scripts often do not depend on predicates at
17 * all). This code is based on the unwind conventions described in
18 * the "IA-64 Software Conventions and Runtime Architecture" manual.
21 * o updates to the global unwind data (in structure "unw") are serialized
22 * by the unw.lock spinlock
23 * o each unwind script has its own read-write lock; a thread must acquire
24 * a read lock before executing a script and must acquire a write lock
25 * before modifying a script
26 * o if both the unw.lock spinlock and a script's read-write lock must be
27 * acquired, then the read-write lock must be acquired first.
29 #include <linux/bootmem.h>
30 #include <linux/elf.h>
31 #include <linux/kernel.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
35 #include <asm/unwind.h>
37 #include <asm/delay.h>
39 #include <asm/ptrace.h>
40 #include <asm/ptrace_offsets.h>
42 #include <asm/system.h>
43 #include <asm/uaccess.h>
/*
 * MIN(): statement-expression form evaluates each argument exactly once,
 * avoiding the double-evaluation hazard of the naive ternary macro
 * (e.g. MIN(i++, j) would otherwise increment i twice).  All current
 * call sites pass side-effect-free arguments, so behaviour is unchanged.
 */
#define MIN(a,b)	({ __typeof__(a) _min_a = (a);		\
			   __typeof__(b) _min_b = (b);		\
			   _min_a < _min_b ? _min_a : _min_b; })
51 #define UNW_LOG_CACHE_SIZE 7 /* each unw_script is ~256 bytes in size */
52 #define UNW_CACHE_SIZE (1 << UNW_LOG_CACHE_SIZE)
54 #define UNW_LOG_HASH_SIZE (UNW_LOG_CACHE_SIZE + 1)
55 #define UNW_HASH_SIZE (1 << UNW_LOG_HASH_SIZE)
57 #define UNW_STATS 0 /* WARNING: this disabled interrupts for long time-spans!! */
60 static unsigned int unw_debug_level = UNW_DEBUG;
62 # include <linux/kdb.h>
63 # define UNW_DEBUG_ON(n) (unw_debug_level >= n && !KDB_IS_RUNNING())
64 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) kdb_printf(__VA_ARGS__)
65 # else /* !CONFIG_KDB */
66 # define UNW_DEBUG_ON(n) unw_debug_level >= n
67 /* Do not code a printk level, not all debug lines end in newline */
68 # define UNW_DPRINT(n, ...) if (UNW_DEBUG_ON(n)) printk(__VA_ARGS__)
69 # endif /* CONFIG_KDB */
71 #else /* !UNW_DEBUG */
72 # define UNW_DEBUG_ON(n) 0
73 # define UNW_DPRINT(n, ...)
74 #endif /* UNW_DEBUG */
82 #define alloc_reg_state() kmalloc(sizeof(struct unw_reg_state), GFP_ATOMIC)
83 #define free_reg_state(usr) kfree(usr)
84 #define alloc_labeled_state() kmalloc(sizeof(struct unw_labeled_state), GFP_ATOMIC)
85 #define free_labeled_state(usr) kfree(usr)
87 typedef unsigned long unw_word;
88 typedef unsigned char unw_hash_index_t;
91 spinlock_t lock; /* spinlock for unwind data */
93 /* list of unwind tables (one per load-module) */
94 struct unw_table *tables;
96 /* table of registers that prologues can save (and order in which they're saved): */
97 const unsigned char save_order[8];
99 /* maps a preserved register index (preg_index) to corresponding switch_stack offset: */
100 unsigned short sw_off[sizeof(struct unw_frame_info) / 8];
	unsigned short lru_head;	/* index of least-recently used script */
103 unsigned short lru_tail; /* index of most-recently used script */
105 /* index into unw_frame_info for preserved register i */
106 unsigned short preg_index[UNW_NUM_REGS];
108 short pt_regs_offsets[32];
110 /* unwind table for the kernel: */
111 struct unw_table kernel_table;
113 /* unwind table describing the gate page (kernel code that is mapped into user space): */
114 size_t gate_table_size;
115 unsigned long *gate_table;
117 /* hash table that maps instruction pointer to script index: */
118 unsigned short hash[UNW_HASH_SIZE];
121 struct unw_script cache[UNW_CACHE_SIZE];
124 const char *preg_name[UNW_NUM_REGS];
132 int collision_chain_traversals;
135 unsigned long build_time;
136 unsigned long run_time;
137 unsigned long parse_time;
144 unsigned long init_time;
145 unsigned long unwind_time;
152 .tables = &unw.kernel_table,
153 .lock = SPIN_LOCK_UNLOCKED,
155 UNW_REG_RP, UNW_REG_PFS, UNW_REG_PSP, UNW_REG_PR,
156 UNW_REG_UNAT, UNW_REG_LC, UNW_REG_FPSR, UNW_REG_PRI_UNAT_GR
159 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_GR */
160 offsetof(struct unw_frame_info, pri_unat_loc)/8, /* PRI_UNAT_MEM */
161 offsetof(struct unw_frame_info, bsp_loc)/8,
162 offsetof(struct unw_frame_info, bspstore_loc)/8,
163 offsetof(struct unw_frame_info, pfs_loc)/8,
164 offsetof(struct unw_frame_info, rnat_loc)/8,
165 offsetof(struct unw_frame_info, psp)/8,
166 offsetof(struct unw_frame_info, rp_loc)/8,
167 offsetof(struct unw_frame_info, r4)/8,
168 offsetof(struct unw_frame_info, r5)/8,
169 offsetof(struct unw_frame_info, r6)/8,
170 offsetof(struct unw_frame_info, r7)/8,
171 offsetof(struct unw_frame_info, unat_loc)/8,
172 offsetof(struct unw_frame_info, pr_loc)/8,
173 offsetof(struct unw_frame_info, lc_loc)/8,
174 offsetof(struct unw_frame_info, fpsr_loc)/8,
175 offsetof(struct unw_frame_info, b1_loc)/8,
176 offsetof(struct unw_frame_info, b2_loc)/8,
177 offsetof(struct unw_frame_info, b3_loc)/8,
178 offsetof(struct unw_frame_info, b4_loc)/8,
179 offsetof(struct unw_frame_info, b5_loc)/8,
180 offsetof(struct unw_frame_info, f2_loc)/8,
181 offsetof(struct unw_frame_info, f3_loc)/8,
182 offsetof(struct unw_frame_info, f4_loc)/8,
183 offsetof(struct unw_frame_info, f5_loc)/8,
184 offsetof(struct unw_frame_info, fr_loc[16 - 16])/8,
185 offsetof(struct unw_frame_info, fr_loc[17 - 16])/8,
186 offsetof(struct unw_frame_info, fr_loc[18 - 16])/8,
187 offsetof(struct unw_frame_info, fr_loc[19 - 16])/8,
188 offsetof(struct unw_frame_info, fr_loc[20 - 16])/8,
189 offsetof(struct unw_frame_info, fr_loc[21 - 16])/8,
190 offsetof(struct unw_frame_info, fr_loc[22 - 16])/8,
191 offsetof(struct unw_frame_info, fr_loc[23 - 16])/8,
192 offsetof(struct unw_frame_info, fr_loc[24 - 16])/8,
193 offsetof(struct unw_frame_info, fr_loc[25 - 16])/8,
194 offsetof(struct unw_frame_info, fr_loc[26 - 16])/8,
195 offsetof(struct unw_frame_info, fr_loc[27 - 16])/8,
196 offsetof(struct unw_frame_info, fr_loc[28 - 16])/8,
197 offsetof(struct unw_frame_info, fr_loc[29 - 16])/8,
198 offsetof(struct unw_frame_info, fr_loc[30 - 16])/8,
199 offsetof(struct unw_frame_info, fr_loc[31 - 16])/8,
203 offsetof(struct pt_regs, r1),
204 offsetof(struct pt_regs, r2),
205 offsetof(struct pt_regs, r3),
206 [4] = -1, [5] = -1, [6] = -1, [7] = -1,
207 offsetof(struct pt_regs, r8),
208 offsetof(struct pt_regs, r9),
209 offsetof(struct pt_regs, r10),
210 offsetof(struct pt_regs, r11),
211 offsetof(struct pt_regs, r12),
212 offsetof(struct pt_regs, r13),
213 offsetof(struct pt_regs, r14),
214 offsetof(struct pt_regs, r15),
215 offsetof(struct pt_regs, r16),
216 offsetof(struct pt_regs, r17),
217 offsetof(struct pt_regs, r18),
218 offsetof(struct pt_regs, r19),
219 offsetof(struct pt_regs, r20),
220 offsetof(struct pt_regs, r21),
221 offsetof(struct pt_regs, r22),
222 offsetof(struct pt_regs, r23),
223 offsetof(struct pt_regs, r24),
224 offsetof(struct pt_regs, r25),
225 offsetof(struct pt_regs, r26),
226 offsetof(struct pt_regs, r27),
227 offsetof(struct pt_regs, r28),
228 offsetof(struct pt_regs, r29),
229 offsetof(struct pt_regs, r30),
230 offsetof(struct pt_regs, r31),
232 .hash = { [0 ... UNW_HASH_SIZE - 1] = -1 },
235 "pri_unat_gr", "pri_unat_mem", "bsp", "bspstore", "ar.pfs", "ar.rnat", "psp", "rp",
236 "r4", "r5", "r6", "r7",
237 "ar.unat", "pr", "ar.lc", "ar.fpsr",
238 "b1", "b2", "b3", "b4", "b5",
239 "f2", "f3", "f4", "f5",
240 "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
241 "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
246 /* Unwind accessors. */
249 * Returns offset of rREG in struct pt_regs.
/*
 * Map scratch register number REG to its byte offset within struct
 * pt_regs, via the unw.pt_regs_offsets[] table.  Entries of -1 mark
 * registers (r4-r7) that have no pt_regs slot.
 */
static inline unsigned long
pt_regs_off (unsigned long reg)
	if (reg < ARRAY_SIZE(unw.pt_regs_offsets))
		off = unw.pt_regs_offsets[reg];
	/* out-of-range registers are reported as bad scratch registers */
	UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg);
	return (unsigned long) off;
/*
 * Return the pt_regs of the frame described by INFO, recomputing
 * info->pt from psp (interrupt frames) or sp when the recorded value
 * is unusable.
 */
static inline struct pt_regs *
get_scratch_regs (struct unw_frame_info *info)
	/* This should not happen with valid unwind info.  */
	UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__);
	if (info->flags & UNW_FLAG_INTERRUPT_FRAME)
		/* interrupt frame: pt_regs sits immediately below psp */
		info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1);
	/* NOTE(review): non-interrupt case — pt_regs assumed 16 bytes below sp */
		info->pt = info->sp - 16;
	UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt);
	return (struct pt_regs *) info->pt;
/*
 * Read (write==0) or write (write!=0) general register REGNUM of the
 * frame described by INFO, together with its NaT bit (*nat).  r0 reads
 * as zero, r4-r7 are preserved registers, r32 and up live on the
 * register backing store, and everything else is a scratch register
 * found in pt_regs.
 */
unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char *nat, int write)
	unsigned long *addr, *nat_addr, nat_mask = 0, dummy_nat;
	struct unw_ireg *ireg;

	/* reject anything outside r0..r127; r0 is read-only zero */
	if ((unsigned) regnum - 1 >= 127) {
		if (regnum == 0 && !write) {
			*val = 0;	/* read r0 always returns 0 */
		UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n",
			   __FUNCTION__, regnum);

	if (regnum >= 4 && regnum <= 7) {
		/* access a preserved register */
		ireg = &info->r4 + (regnum - 4);
		nat_addr = addr + ireg->nat.off;
		switch (ireg->nat.type) {
			/* simulate getf.sig/setf.sig */
			/* write NaTVal and be done with it */
			if (addr[0] == 0 && addr[1] == 0x1ffe) {
				/* return NaT and be done with it */
		nat_addr = &dummy_nat;
		/* NaT bit index derived from bits 3..8 of the save address */
		nat_mask = (1UL << ((long) addr & 0x1f8)/8);
		nat_addr = ia64_rse_rnat_addr(addr);
		if ((unsigned long) addr < info->regstk.limit
		    || (unsigned long) addr >= info->regstk.top)
			UNW_DPRINT(0, "unwind.%s: %p outside of regstk "
				   __FUNCTION__, (void *) addr,
		/* rnat slot above the backing store top lives in switch_stack */
		if ((unsigned long) nat_addr >= info->regstk.top)
			nat_addr = &info->sw->ar_rnat;
		nat_mask = (1UL << ia64_rse_slot_num(addr));
		addr = &info->sw->r4 + (regnum - 4);
		nat_addr = &info->sw->ar_unat;
		nat_mask = (1UL << ((long) addr & 0x1f8)/8);
		/* access a scratch register */
		pt = get_scratch_regs(info);
		addr = (unsigned long *) ((unsigned long)pt + pt_regs_off(regnum));
		if (info->pri_unat_loc)
			nat_addr = info->pri_unat_loc;
			nat_addr = &info->sw->ar_unat;
		nat_mask = (1UL << ((long) addr & 0x1f8)/8);
		/* access a stacked register */
		addr = ia64_rse_skip_regs((unsigned long *) info->bsp, regnum - 32);
		nat_addr = ia64_rse_rnat_addr(addr);
		if ((unsigned long) addr < info->regstk.limit
		    || (unsigned long) addr >= info->regstk.top)
			UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside "
				   "of rbs\n", __FUNCTION__);
		if ((unsigned long) nat_addr >= info->regstk.top)
			nat_addr = &info->sw->ar_rnat;
		nat_mask = (1UL << ia64_rse_slot_num(addr));
	/* finally, set or query the NaT bit for the register */
		*nat_addr |= nat_mask;
		*nat_addr &= ~nat_mask;
	if ((*nat_addr & nat_mask) == 0) {
		*val = 0;	/* if register is a NaT, *addr may contain kernel data! */
/*
 * Read or write branch register REGNUM: b0/b6/b7 are scratch and live
 * in pt_regs; b1-b5 are preserved and come from their saved location
 * or, failing that, the switch_stack.
 */
unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
	      case 0: pt = get_scratch_regs(info); addr = &pt->b0; break;
	      case 6: pt = get_scratch_regs(info); addr = &pt->b6; break;
	      case 7: pt = get_scratch_regs(info); addr = &pt->b7; break;

	      case 1: case 2: case 3: case 4: case 5:
		addr = *(&info->b1_loc + (regnum - 1));
			/* fall back to the value saved in switch_stack */
			addr = &info->sw->b1 + (regnum - 1);

		UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n",
			   __FUNCTION__, regnum);
/*
 * Read or write floating-point register REGNUM: f2-f5 and f16-f31 are
 * preserved (saved location, else switch_stack), f6-f11 are scratch
 * (pt_regs), f12-f15 come from switch_stack, and f32+ come from the
 * task's fph (high floating-point) area.
 */
unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, int write)
	struct ia64_fpreg *addr = 0;

	/* only f2..f127 exist */
	if ((unsigned) (regnum - 2) >= 126) {
		UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n",
			   __FUNCTION__, regnum);

		addr = *(&info->f2_loc + (regnum - 2));
			addr = &info->sw->f2 + (regnum - 2);
	} else if (regnum <= 15) {
		pt = get_scratch_regs(info);
		addr = &pt->f6  + (regnum - 6);
			addr = &info->sw->f12 + (regnum - 12);
	} else if (regnum <= 31) {
		addr = info->fr_loc[regnum - 16];
			addr = &info->sw->f16 + (regnum - 16);
		struct task_struct *t = info->task;
		addr = t->thread.fph + (regnum - 32);
/*
 * Read or write application register REGNUM.  Saved locations recorded
 * during unwinding are used when available, with the switch_stack as
 * the fallback; the EC field is kept in bits 52-57 of the word pointed
 * to by cfm_loc, and the remaining scratch ARs live in pt_regs.
 */
unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int write)
		addr = info->bsp_loc;
			addr = &info->sw->ar_bspstore;

	      case UNW_AR_BSPSTORE:
		addr = info->bspstore_loc;
			addr = &info->sw->ar_bspstore;

		addr = info->pfs_loc;
			addr = &info->sw->ar_pfs;

		addr = info->rnat_loc;
			addr = &info->sw->ar_rnat;

		addr = info->unat_loc;
			addr = &info->sw->ar_unat;

			addr = &info->sw->ar_lc;

		/* EC is bits 52-57 of the CFM word: */
			(*info->cfm_loc & ~(0x3fUL << 52)) | ((*val & 0x3f) << 52);
		*val = (*info->cfm_loc >> 52) & 0x3f;

		addr = info->fpsr_loc;
			addr = &info->sw->ar_fpsr;

		pt = get_scratch_regs(info);
		pt = get_scratch_regs(info);
		pt = get_scratch_regs(info);
		pt = get_scratch_regs(info);

		UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n",
			   __FUNCTION__, regnum);
/* Read or write the predicate registers; falls back to switch_stack's pr. */
unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write)
		addr = &info->sw->pr;
585 /* Routines to manipulate the state stack. */
/* Push a copy of the current register state onto SR's state stack. */
push (struct unw_state_record *sr)
	struct unw_reg_state *rs;

	rs = alloc_reg_state();
		/* allocation failure: state cannot be stacked */
		printk(KERN_ERR "unwind: cannot stack reg state!\n");
	memcpy(rs, &sr->curr, sizeof(*rs));
/* Pop the top of SR's state stack back into sr->curr. */
pop (struct unw_state_record *sr)
	struct unw_reg_state *rs = sr->curr.next;

		printk(KERN_ERR "unwind: stack underflow!\n");
	memcpy(&sr->curr, rs, sizeof(*rs));
614 /* Make a copy of the state stack. Non-recursive to avoid stack overflows. */
static struct unw_reg_state *
dup_state_stack (struct unw_reg_state *rs)
	struct unw_reg_state *copy, *prev = NULL, *first = NULL;

		copy = alloc_reg_state();
			/* out of memory: the copy is truncated here */
			printk(KERN_ERR "unwind.dup_state_stack: out of memory\n");
		memcpy(copy, rs, sizeof(*copy));
637 /* Free all stacked register states (but not RS itself). */
free_state_stack (struct unw_reg_state *rs)
	struct unw_reg_state *p, *next;

	/* walk the chain hanging off rs->next; RS itself is not freed */
	for (p = rs->next; p != NULL; p = next) {
650 /* Unwind decoder routines */
/*
 * Translate an "abreg" register encoding (as used in unwind
 * descriptors) into the corresponding UNW_REG_* index.  MEMORY selects
 * the memory flavour of the primary UNaT for encoding 0x62.
 */
static enum unw_register_index __attribute__((const))
decode_abreg (unsigned char abreg, int memory)
	      case 0x04 ... 0x07: return UNW_REG_R4 + (abreg - 0x04);
	      case 0x22 ... 0x25: return UNW_REG_F2 + (abreg - 0x22);
	      case 0x30 ... 0x3f: return UNW_REG_F16 + (abreg - 0x30);
	      case 0x41 ... 0x45: return UNW_REG_B1 + (abreg - 0x41);
	      case 0x60: return UNW_REG_PR;
	      case 0x61: return UNW_REG_PSP;
	      case 0x62: return memory ? UNW_REG_PRI_UNAT_MEM : UNW_REG_PRI_UNAT_GR;
	      case 0x63: return UNW_REG_RP;
	      case 0x64: return UNW_REG_BSP;
	      case 0x65: return UNW_REG_BSPSTORE;
	      case 0x66: return UNW_REG_RNAT;
	      case 0x67: return UNW_REG_UNAT;
	      case 0x68: return UNW_REG_FPSR;
	      case 0x69: return UNW_REG_PFS;
	      case 0x6a: return UNW_REG_LC;
	UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg);
/* Record save location WHERE/VAL for REG, setting its save time to WHEN. */
set_reg (struct unw_reg_info *reg, enum unw_where where, int when, unsigned long val)
	/* only update the save time while it is still unset (UNW_WHEN_NEVER) */
	if (reg->when == UNW_WHEN_NEVER)
/*
 * Walk registers HI down to LO and convert those whose save location
 * is their spill home into psp-relative slots of REGSIZE bytes,
 * allocating from *OFFP.
 */
alloc_spill_area (unsigned long *offp, unsigned long regsize,
		  struct unw_reg_info *lo, struct unw_reg_info *hi)
	struct unw_reg_info *reg;

	for (reg = hi; reg >= lo; --reg) {
		if (reg->where == UNW_WHERE_SPILL_HOME) {
			reg->where = UNW_WHERE_PSPREL;
/*
 * Scan from *REGP up to LIM for the next register whose save location
 * is its spill home and record T as its save time; complains when no
 * such register remains.
 */
spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word t)
	struct unw_reg_info *reg;

	for (reg = *regp; reg <= lim; ++reg) {
		if (reg->where == UNW_WHERE_SPILL_HOME) {
	UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__);
718 finish_prologue (struct unw_state_record *sr)
720 struct unw_reg_info *reg;
725 * First, resolve implicit register save locations (see Section "11.4.2.3 Rules
726 * for Using Unwind Descriptors", rule 3):
728 for (i = 0; i < (int) ARRAY_SIZE(unw.save_order); ++i) {
729 reg = sr->curr.reg + unw.save_order[i];
730 if (reg->where == UNW_WHERE_GR_SAVE) {
731 reg->where = UNW_WHERE_GR;
732 reg->val = sr->gr_save_loc++;
737 * Next, compute when the fp, general, and branch registers get
738 * saved. This must come before alloc_spill_area() because
739 * we need to know which registers are spilled to their home
743 unsigned char kind, mask = 0, *cp = sr->imask;
745 static const unsigned char limit[3] = {
746 UNW_REG_F31, UNW_REG_R7, UNW_REG_B5
748 struct unw_reg_info *(regs[3]);
750 regs[0] = sr->curr.reg + UNW_REG_F2;
751 regs[1] = sr->curr.reg + UNW_REG_R4;
752 regs[2] = sr->curr.reg + UNW_REG_B1;
754 for (t = 0; t < sr->region_len; ++t) {
757 kind = (mask >> 2*(3-(t & 3))) & 3;
759 spill_next_when(®s[kind - 1], sr->curr.reg + limit[kind - 1],
760 sr->region_start + t);
764 * Next, lay out the memory stack spill area:
766 if (sr->any_spills) {
767 off = sr->spill_offset;
768 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31);
769 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5);
770 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7);
775 * Region header descriptors.
/*
 * Start a new prologue (body==0) or body (body!=0) region of RLEN
 * instructions.  MASK/GRSAVE describe which of rp/pfs/psp/pr are saved
 * to general registers starting at GRSAVE.
 */
desc_prologue (int body, unw_word rlen, unsigned char mask, unsigned char grsave,
	       struct unw_state_record *sr)
	if (!(sr->in_body || sr->first_region))
	sr->first_region = 0;

	/* check if we're done: */
	if (sr->when_target < sr->region_start + sr->region_len) {

	region_start = sr->region_start + sr->region_len;

	/* flush any pending epilogue state before the new region starts */
	for (i = 0; i < sr->epilogue_count; ++i)
	sr->epilogue_count = 0;
	sr->epilogue_start = UNW_WHEN_NEVER;

	sr->region_start = region_start;
	sr->region_len = rlen;

	/* mask bits select rp/pfs/psp/pr (in unw.save_order order) */
	for (i = 0; i < 4; ++i) {
			set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR,
				sr->region_start + sr->region_len - 1, grsave++);
	sr->gr_save_loc = grsave;
	sr->spill_offset = 0x10;	/* default to psp+16 */
822 * Prologue descriptors.
826 desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr)
828 if (abi == 3 && context == 'i') {
829 sr->flags |= UNW_FLAG_INTERRUPT_FRAME;
830 UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__);
833 UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n",
834 __FUNCTION__, abi, context);
/* br_gr descriptor: branch registers selected by BRMASK are saved in
 * consecutive general registers starting at GR. */
desc_br_gr (unsigned char brmask, unsigned char gr, struct unw_state_record *sr)
	for (i = 0; i < 5; ++i) {
			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR,
				sr->region_start + sr->region_len - 1, gr++);
/* br_mem descriptor: branch registers selected by BRMASK are spilled
 * to their memory spill homes. */
desc_br_mem (unsigned char brmask, struct unw_state_record *sr)
	for (i = 0; i < 5; ++i) {
			set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
/*
 * frgr_mem descriptor: GRMASK selects r4-r7 and FRMASK selects f2-f5
 * and f16-f31 to be spilled to their memory spill homes.
 */
desc_frgr_mem (unsigned char grmask, unw_word frmask, struct unw_state_record *sr)
	for (i = 0; i < 4; ++i) {
		if ((grmask & 1) != 0) {
			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
	for (i = 0; i < 20; ++i) {
		if ((frmask & 1) != 0) {
			/* frmask bits 0-3 map to f2-f5, bits 4-19 to f16-f31 */
			int base = (i < 4) ? UNW_REG_F2 : UNW_REG_F16 - 4;
			set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
/* fr_mem descriptor: fp registers f2-f5 selected by FRMASK are spilled
 * to their memory spill homes. */
desc_fr_mem (unsigned char frmask, struct unw_state_record *sr)
	for (i = 0; i < 4; ++i) {
		if ((frmask & 1) != 0) {
			set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
/* gr_gr descriptor: r4-r7 selected by GRMASK are saved in consecutive
 * general registers starting at GR. */
desc_gr_gr (unsigned char grmask, unsigned char gr, struct unw_state_record *sr)
	for (i = 0; i < 4; ++i) {
		if ((grmask & 1) != 0)
			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR,
				sr->region_start + sr->region_len - 1, gr++);
/* gr_mem descriptor: r4-r7 selected by GRMASK are spilled to their
 * memory spill homes. */
desc_gr_mem (unsigned char grmask, struct unw_state_record *sr)
	for (i = 0; i < 4; ++i) {
		if ((grmask & 1) != 0) {
			set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME,
				sr->region_start + sr->region_len - 1, 0);
/* mem_stack_f descriptor: fixed-size frame of 16*SIZE bytes, allocated
 * at time T (clamped to the region). */
desc_mem_stack_f (unw_word t, unw_word size, struct unw_state_record *sr)
	set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE,
		sr->region_start + MIN((int)t, sr->region_len - 1), 16*size);
/* mem_stack_v descriptor: variable-size frame; only the time at which
 * psp is saved is recorded. */
desc_mem_stack_v (unw_word t, struct unw_state_record *sr)
	sr->curr.reg[UNW_REG_PSP].when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* reg_gr descriptor: REG is saved in general register DST at the end
 * of the prologue. */
desc_reg_gr (unsigned char reg, unsigned char dst, struct unw_state_record *sr)
	set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst);
/* reg_psprel descriptor: REG is saved at a psp-relative offset. */
desc_reg_psprel (unsigned char reg, unw_word pspoff, struct unw_state_record *sr)
	set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1,
/* reg_sprel descriptor: REG is saved at an sp-relative offset. */
desc_reg_sprel (unsigned char reg, unw_word spoff, struct unw_state_record *sr)
	set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1,
/* rp_br descriptor: the return link is kept in branch register DST. */
desc_rp_br (unsigned char dst, struct unw_state_record *sr)
	sr->return_link_reg = dst;
/* reg_when descriptor: REGNUM is saved at time T (clamped to region);
 * the save location defaults to an implicit GR save. */
desc_reg_when (unsigned char regnum, unw_word t, struct unw_state_record *sr)
	struct unw_reg_info *reg = sr->curr.reg + regnum;

	if (reg->where == UNW_WHERE_NONE)
		reg->where = UNW_WHERE_GR_SAVE;
	reg->when = sr->region_start + MIN((int)t, sr->region_len - 1);
/* spill_base descriptor: the spill area starts PSPOFF words below psp+16
 * (pspoff is in 4-byte units, encoded negatively). */
desc_spill_base (unw_word pspoff, struct unw_state_record *sr)
	sr->spill_offset = 0x10 - 4*pspoff;
/* spill_mask descriptor: returns a pointer past the imask, which packs
 * 2 bits per instruction of the region, rounded up to whole bytes. */
static inline unsigned char *
desc_spill_mask (unsigned char *imaskp, struct unw_state_record *sr)
	return imaskp + (2*sr->region_len + 7)/8;
/* epilogue descriptor: the epilogue starts T instructions before the
 * region's end and covers ECOUNT+1 nested prologues. */
desc_epilogue (unw_word t, unw_word ecount, struct unw_state_record *sr)
	sr->epilogue_start = sr->region_start + sr->region_len - 1 - t;
	sr->epilogue_count = ecount + 1;
/*
 * copy_state descriptor: restore the state previously saved under
 * LABEL, replacing the current state and duplicating the labeled
 * state's stacked register states.
 */
desc_copy_state (unw_word label, struct unw_state_record *sr)
	struct unw_labeled_state *ls;

	for (ls = sr->labeled_states; ls; ls = ls->next) {
		if (ls->label == label) {
			free_state_stack(&sr->curr);
			memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr));
			sr->curr.next = dup_state_stack(ls->saved_state.next);
	printk(KERN_ERR "unwind: failed to find state labeled 0x%lx\n", label);
/*
 * label_state descriptor: snapshot the current state under LABEL so
 * that a later copy_state descriptor can restore it.
 */
desc_label_state (unw_word label, struct unw_state_record *sr)
	struct unw_labeled_state *ls;

	ls = alloc_labeled_state();
		printk(KERN_ERR "unwind.desc_label_state(): out of memory\n");
	memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state));
	ls->saved_state.next = dup_state_stack(sr->curr.next);

	/* insert into list of labeled states: */
	ls->next = sr->labeled_states;
	sr->labeled_states = ls;
1040 * General descriptors.
/*
 * Return whether a predicated descriptor applies: inactive when the
 * target instruction precedes time T, or when qualifying predicate QP
 * is clear; an active predicate is recorded in the script's pr_mask.
 */
desc_is_active (unsigned char qp, unw_word t, struct unw_state_record *sr)
	if (sr->when_target <= sr->region_start + MIN((int)t, sr->region_len - 1))
	if ((sr->pr_val & (1UL << qp)) == 0)
	sr->pr_mask |= (1UL << qp);	/* script now depends on this predicate */
/* Predicated restore: mark ABREG as no longer saved anywhere. */
desc_restore_p (unsigned char qp, unw_word t, unsigned char abreg, struct unw_state_record *sr)
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))

	r = sr->curr.reg + decode_abreg(abreg, 0);
	r->where = UNW_WHERE_NONE;
	r->when = UNW_WHEN_NEVER;
/*
 * Predicated spill of ABREG to another register at time T: the target
 * is a branch register (x set), fp register (ytreg bit 7 set) or,
 * by default, a general register; the register number is ytreg & 0x7f.
 */
desc_spill_reg_p (unsigned char qp, unw_word t, unsigned char abreg, unsigned char x,
		  unsigned char ytreg, struct unw_state_record *sr)
	enum unw_where where = UNW_WHERE_GR;
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))

		where = UNW_WHERE_BR;
	else if (ytreg & 0x80)
		where = UNW_WHERE_FR;

	r = sr->curr.reg + decode_abreg(abreg, 0);
	r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
	r->val = (ytreg & 0x7f);
/* Predicated spill of ABREG to a psp-relative location at time T. */
desc_spill_psprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word pspoff,
		     struct unw_state_record *sr)
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))

	r = sr->curr.reg + decode_abreg(abreg, 1);
	r->where = UNW_WHERE_PSPREL;
	r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
	r->val = 0x10 - 4*pspoff;	/* pspoff is in words below psp+16 */
/* Predicated spill of ABREG to an sp-relative location at time T. */
desc_spill_sprel_p (unsigned char qp, unw_word t, unsigned char abreg, unw_word spoff,
		    struct unw_state_record *sr)
	struct unw_reg_info *r;

	if (!desc_is_active(qp, t, sr))

	r = sr->curr.reg + decode_abreg(abreg, 1);
	r->where = UNW_WHERE_SPREL;
	r->when = sr->region_start + MIN((int)t, sr->region_len - 1);
1121 #define UNW_DEC_BAD_CODE(code) printk(KERN_ERR "unwind: unknown code 0x%02x\n", \
1127 #define UNW_DEC_PROLOGUE_GR(fmt,r,m,gr,arg) desc_prologue(0,r,m,gr,arg)
1128 #define UNW_DEC_PROLOGUE(fmt,b,r,arg) desc_prologue(b,r,0,32,arg)
1130 * prologue descriptors:
1132 #define UNW_DEC_ABI(fmt,a,c,arg) desc_abi(a,c,arg)
1133 #define UNW_DEC_BR_GR(fmt,b,g,arg) desc_br_gr(b,g,arg)
1134 #define UNW_DEC_BR_MEM(fmt,b,arg) desc_br_mem(b,arg)
1135 #define UNW_DEC_FRGR_MEM(fmt,g,f,arg) desc_frgr_mem(g,f,arg)
1136 #define UNW_DEC_FR_MEM(fmt,f,arg) desc_fr_mem(f,arg)
1137 #define UNW_DEC_GR_GR(fmt,m,g,arg) desc_gr_gr(m,g,arg)
1138 #define UNW_DEC_GR_MEM(fmt,m,arg) desc_gr_mem(m,arg)
1139 #define UNW_DEC_MEM_STACK_F(fmt,t,s,arg) desc_mem_stack_f(t,s,arg)
1140 #define UNW_DEC_MEM_STACK_V(fmt,t,arg) desc_mem_stack_v(t,arg)
1141 #define UNW_DEC_REG_GR(fmt,r,d,arg) desc_reg_gr(r,d,arg)
1142 #define UNW_DEC_REG_PSPREL(fmt,r,o,arg) desc_reg_psprel(r,o,arg)
1143 #define UNW_DEC_REG_SPREL(fmt,r,o,arg) desc_reg_sprel(r,o,arg)
1144 #define UNW_DEC_REG_WHEN(fmt,r,t,arg) desc_reg_when(r,t,arg)
1145 #define UNW_DEC_PRIUNAT_WHEN_GR(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_GR,t,arg)
1146 #define UNW_DEC_PRIUNAT_WHEN_MEM(fmt,t,arg) desc_reg_when(UNW_REG_PRI_UNAT_MEM,t,arg)
1147 #define UNW_DEC_PRIUNAT_GR(fmt,r,arg) desc_reg_gr(UNW_REG_PRI_UNAT_GR,r,arg)
1148 #define UNW_DEC_PRIUNAT_PSPREL(fmt,o,arg) desc_reg_psprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1149 #define UNW_DEC_PRIUNAT_SPREL(fmt,o,arg) desc_reg_sprel(UNW_REG_PRI_UNAT_MEM,o,arg)
1150 #define UNW_DEC_RP_BR(fmt,d,arg) desc_rp_br(d,arg)
1151 #define UNW_DEC_SPILL_BASE(fmt,o,arg) desc_spill_base(o,arg)
1152 #define UNW_DEC_SPILL_MASK(fmt,m,arg) (m = desc_spill_mask(m,arg))
1156 #define UNW_DEC_EPILOGUE(fmt,t,c,arg) desc_epilogue(t,c,arg)
1157 #define UNW_DEC_COPY_STATE(fmt,l,arg) desc_copy_state(l,arg)
1158 #define UNW_DEC_LABEL_STATE(fmt,l,arg) desc_label_state(l,arg)
1160 * general unwind descriptors:
1162 #define UNW_DEC_SPILL_REG_P(f,p,t,a,x,y,arg) desc_spill_reg_p(p,t,a,x,y,arg)
1163 #define UNW_DEC_SPILL_REG(f,t,a,x,y,arg) desc_spill_reg_p(0,t,a,x,y,arg)
1164 #define UNW_DEC_SPILL_PSPREL_P(f,p,t,a,o,arg) desc_spill_psprel_p(p,t,a,o,arg)
1165 #define UNW_DEC_SPILL_PSPREL(f,t,a,o,arg) desc_spill_psprel_p(0,t,a,o,arg)
1166 #define UNW_DEC_SPILL_SPREL_P(f,p,t,a,o,arg) desc_spill_sprel_p(p,t,a,o,arg)
1167 #define UNW_DEC_SPILL_SPREL(f,t,a,o,arg) desc_spill_sprel_p(0,t,a,o,arg)
1168 #define UNW_DEC_RESTORE_P(f,p,t,a,arg) desc_restore_p(p,t,a,arg)
1169 #define UNW_DEC_RESTORE(f,t,a,arg) desc_restore_p(0,t,a,arg)
1171 #include "unwind_decoder.c"
1174 /* Unwind scripts. */
/*
 * Hash an instruction pointer into the script hash table
 * (multiplicative hashing; IP is shifted right by 4 first, presumably
 * because IA-64 bundles are 16 bytes — the low bits carry no entropy).
 */
static inline unw_hash_index_t
hash (unsigned long ip)
#	define magic	0x9e3779b97f4a7c16	/* based on (sqrt(5)/2-1)*2^64 */
	return (ip >> 4)*magic >> (64 - UNW_LOG_HASH_SIZE);
/*
 * Check whether SCRIPT matches (IP, PR).  On a match the script's read
 * lock is retained for the caller; otherwise it is dropped here.
 */
cache_match (struct unw_script *script, unsigned long ip, unsigned long pr)
	read_lock(&script->lock);
	/* only the predicate bits the script depends on need to agree */
	if (ip == script->ip && ((pr ^ script->pr_val) & script->pr_mask) == 0)
		/* keep the read lock... */
	read_unlock(&script->lock);
/*
 * Look up a cached unwind script for the frame in INFO: try the hinted
 * script first, then walk the hash bucket's collision chain.  On a hit
 * the script is returned with its read lock held (see cache_match()).
 */
static inline struct unw_script *
script_lookup (struct unw_frame_info *info)
	struct unw_script *script = unw.cache + info->hint;
	unsigned short index;
	unsigned long ip, pr;

	if (UNW_DEBUG_ON(0))
		return 0;	/* Always regenerate scripts in debug mode */

	STAT(++unw.stat.cache.lookups);

	/* fast path: hinted script matches */
	if (cache_match(script, ip, pr)) {
		STAT(++unw.stat.cache.hinted_hits);

	index = unw.hash[hash(ip)];
	if (index >= UNW_CACHE_SIZE)

	script = unw.cache + index;
	if (cache_match(script, ip, pr)) {
		/* update hint; no locking required as single-word writes are atomic */
		STAT(++unw.stat.cache.normal_hits);
		unw.cache[info->prev_script].hint = script - unw.cache;
	if (script->coll_chain >= UNW_HASH_SIZE)
	script = unw.cache + script->coll_chain;
	STAT(++unw.stat.cache.collision_chain_traversals);
1235 * On returning, a write lock for the SCRIPT is still being held.
/*
 * Allocate a fresh script for IP by recycling the least-recently-used
 * cache entry.  On success the script has been re-hashed under IP and
 * is returned with its write lock held; failure to get the write lock
 * (a reader may have been interrupted while holding the read lock)
 * simply bails out instead of deadlocking.
 */
static inline struct unw_script *
script_new (unsigned long ip)
	struct unw_script *script, *prev, *tmp;
	unw_hash_index_t index;
	unsigned long flags;
	unsigned short head;

	STAT(++unw.stat.script.news);

	/*
	 * Can't (easily) use cmpxchg() here because of ABA problem
	 * that is intrinsic in cmpxchg()...
	 */
	spin_lock_irqsave(&unw.lock, flags);
	head = unw.lru_head;
	script = unw.cache + head;
	unw.lru_head = script->lru_chain;
	spin_unlock(&unw.lock);

	/*
	 * We'd deadlock here if we interrupted a thread that is holding a read lock on
	 * script->lock.  Thus, if the write_trylock() fails, we simply bail out.  The
	 * alternative would be to disable interrupts whenever we hold a read-lock, but
	 */
	if (!write_trylock(&script->lock))

	spin_lock(&unw.lock);

	/* re-insert script at the tail of the LRU chain: */
	unw.cache[unw.lru_tail].lru_chain = head;
	unw.lru_tail = head;

	/* remove the old script from the hash table (if it's there): */
	index = hash(script->ip);
	tmp = unw.cache + unw.hash[index];
	if (tmp == script) {
		prev->coll_chain = tmp->coll_chain;
		unw.hash[index] = tmp->coll_chain;
	if (tmp->coll_chain >= UNW_CACHE_SIZE)
		/* old script wasn't in the hash-table */
	tmp = unw.cache + tmp->coll_chain;

	/* enter new script in the hash table */
	script->coll_chain = unw.hash[index];
	unw.hash[index] = script - unw.cache;

	script->ip = ip;	/* set new IP while we're holding the locks */

	STAT(if (script->coll_chain < UNW_CACHE_SIZE) ++unw.stat.script.collisions);

	spin_unlock_irqrestore(&unw.lock, flags);
/* Seal SCRIPT: record the predicate mask/values it depends on. */
script_finalize (struct unw_script *script, struct unw_state_record *sr)
	script->pr_mask = sr->pr_mask;
	script->pr_val = sr->pr_val;
	/*
	 * We could down-grade our write-lock on script->lock here but
	 * the rwlock API doesn't offer atomic lock downgrading, so
	 * we'll just keep the write-lock and release it later when
	 * we're done using the script.
	 */
/* Append INSN to SCRIPT, refusing to overflow the fixed-size script. */
script_emit (struct unw_script *script, struct unw_insn insn)
	if (script->count >= UNW_MAX_SCRIPT_LEN) {
		UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n",
			   __FUNCTION__, UNW_MAX_SCRIPT_LEN);
	script->insn[script->count++] = insn;
/*
 * Emit the script instruction that recovers the NaT bit of preserved
 * register I, choosing the opcode according to where the register was
 * saved (stacked register, scratch register, or memory stack).
 */
emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script)
	struct unw_reg_info *r = sr->curr.reg + i;
	enum unw_insn_opcode opc;
	struct unw_insn insn;
	unsigned long val = 0;

		/* register got spilled to a stacked register */
		opc = UNW_INSN_SETNAT_TYPE;
		val = UNW_NAT_REGSTK;
		/* register got spilled to a scratch register */
		opc = UNW_INSN_SETNAT_MEMSTK;
		opc = UNW_INSN_SETNAT_TYPE;
		opc = UNW_INSN_SETNAT_TYPE;
	      case UNW_WHERE_PSPREL:
	      case UNW_WHERE_SPREL:
		opc = UNW_INSN_SETNAT_MEMSTK;
		UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n",
			   __FUNCTION__, r->where);

	insn.dst = unw.preg_index[i];
	script_emit(script, insn);
1382 compile_reg (struct unw_state_record *sr, int i, struct unw_script *script)
1384 struct unw_reg_info *r = sr->curr.reg + i;
1385 enum unw_insn_opcode opc;
1386 unsigned long val, rval;
1387 struct unw_insn insn;
1390 if (r->where == UNW_WHERE_NONE || r->when >= sr->when_target)
1393 opc = UNW_INSN_MOVE;
1394 val = rval = r->val;
1395 need_nat_info = (i >= UNW_REG_R4 && i <= UNW_REG_R7);
1400 opc = UNW_INSN_MOVE_STACKED;
1402 } else if (rval >= 4 && rval <= 7) {
1403 if (need_nat_info) {
1404 opc = UNW_INSN_MOVE2;
1407 val = unw.preg_index[UNW_REG_R4 + (rval - 4)];
1409 /* register got spilled to a scratch register */
1410 opc = UNW_INSN_MOVE_SCRATCH;
1411 val = pt_regs_off(rval);
1417 val = unw.preg_index[UNW_REG_F2 + (rval - 1)];
1418 else if (rval >= 16 && rval <= 31)
1419 val = unw.preg_index[UNW_REG_F16 + (rval - 16)];
1421 opc = UNW_INSN_MOVE_SCRATCH;
1423 val = offsetof(struct pt_regs, f6) + 16*(rval - 6);
1425 UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n",
1426 __FUNCTION__, rval);
1431 if (rval >= 1 && rval <= 5)
1432 val = unw.preg_index[UNW_REG_B1 + (rval - 1)];
1434 opc = UNW_INSN_MOVE_SCRATCH;
1436 val = offsetof(struct pt_regs, b0);
1438 val = offsetof(struct pt_regs, b6);
1440 val = offsetof(struct pt_regs, b7);
1444 case UNW_WHERE_SPREL:
1445 opc = UNW_INSN_ADD_SP;
1448 case UNW_WHERE_PSPREL:
1449 opc = UNW_INSN_ADD_PSP;
1453 UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n",
1454 __FUNCTION__, i, r->where);
1458 insn.dst = unw.preg_index[i];
1460 script_emit(script, insn);
1462 emit_nat_info(sr, i, script);
1464 if (i == UNW_REG_PSP) {
1466 * info->psp must contain the _value_ of the previous
1467 * sp, not it's save location. We get this by
1468 * dereferencing the value we just stored in
1471 insn.opc = UNW_INSN_LOAD;
1472 insn.dst = insn.val = unw.preg_index[UNW_REG_PSP];
1473 script_emit(script, insn);
/*
 * Binary-search TABLE for the unwind entry covering REL_IP (an
 * instruction pointer relative to the table's segment base).
 */
static inline const struct unw_table_entry *
lookup (struct unw_table *table, unsigned long rel_ip)
	const struct unw_table_entry *e = 0;
	unsigned long lo, hi, mid;

	/* do a binary search for right entry: */
	for (lo = 0, hi = table->length; lo < hi; ) {
		mid = (lo + hi) / 2;
		e = &table->array[mid];
		if (rel_ip < e->start_offset)
		else if (rel_ip >= e->end_offset)
	/* reject the final candidate if rel_ip lies outside its [start,end) range */
	if (rel_ip < e->start_offset || rel_ip >= e->end_offset)
/*
 * Build an unwind script for the frame described by INFO: parse the
 * unwind descriptors covering info->ip into a state record, then
 * translate that record into a cached script of simple instructions.
 * Returns the new script (still write-locked) or NULL on failure.
 */
static inline struct unw_script *
build_script (struct unw_frame_info *info)
	const struct unw_table_entry *e = 0;
	struct unw_script *script = 0;
	struct unw_labeled_state *ls, *next;
	unsigned long ip = info->ip;
	struct unw_state_record sr;
	struct unw_table *table;
	struct unw_reg_info *r;
	struct unw_insn insn;
	STAT(unsigned long start, parse_start;)

	STAT(++unw.stat.script.builds; start = ia64_get_itc());

	/* build state record */
	memset(&sr, 0, sizeof(sr));
	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
		r->when = UNW_WHEN_NEVER;
	sr.pr_val = info->pr;

	UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip);
	script = script_new(ip);
		UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__);
		STAT(unw.stat.script.build_time += ia64_get_itc() - start);
	/* let the previous script hint at this one to speed up the next lookup */
	unw.cache[info->prev_script].hint = script - unw.cache;

	/* search the kernels and the modules' unwind tables for IP: */

	STAT(parse_start = ia64_get_itc());

	for (table = unw.tables; table; table = table->next) {
		if (ip >= table->start && ip < table->end) {
			e = lookup(table, ip - table->segment_base);

		/* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */
		UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n",
			   __FUNCTION__, ip, unw.cache[info->prev_script].ip);
		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
		sr.curr.reg[UNW_REG_RP].when = -1;
		sr.curr.reg[UNW_REG_RP].val = 0;
		compile_reg(&sr, UNW_REG_RP, script);
		script_finalize(script, &sr);
		STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);
		STAT(unw.stat.script.build_time += ia64_get_itc() - start);

	/* when_target = instruction-slot distance from function entry to ip */
	sr.when_target = (3*((ip & ~0xfUL) - (table->segment_base + e->start_offset))/16
	hdr = *(u64 *) (table->segment_base + e->info_offset);
	dp = (u8 *) (table->segment_base + e->info_offset + 8);
	desc_end = dp + 8*UNW_LENGTH(hdr);

	/* run the unwind-descriptor decoder over the region's info block */
	while (!sr.done && dp < desc_end)
		dp = unw_decode(dp, sr.in_body, &sr);

	if (sr.when_target > sr.epilogue_start) {
		/*
		 * sp has been restored and all values on the memory stack below
		 * psp also have been restored.
		 */
		sr.curr.reg[UNW_REG_PSP].val = 0;
		sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE;
		sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER;
		for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r)
			if ((r->where == UNW_WHERE_PSPREL && r->val <= 0x10)
			    || r->where == UNW_WHERE_SPREL)
				r->where = UNW_WHERE_NONE;
				r->when = UNW_WHEN_NEVER;

	script->flags = sr.flags;

	/*
	 * If RP didn't get saved, generate entry for the return link
	 */
	if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) {
		sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR;
		sr.curr.reg[UNW_REG_RP].when = -1;
		sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg;
		UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n",
			   __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where,
			   sr.curr.reg[UNW_REG_RP].val);

	/* debug dump of the final state record: */
	UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n",
		   __FUNCTION__, table->segment_base + e->start_offset, sr.when_target);
	for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) {
		if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) {
			UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]);
			      case UNW_WHERE_GR: UNW_DPRINT(1, "r%lu", r->val); break;
			      case UNW_WHERE_FR: UNW_DPRINT(1, "f%lu", r->val); break;
			      case UNW_WHERE_BR: UNW_DPRINT(1, "b%lu", r->val); break;
			      case UNW_WHERE_SPREL: UNW_DPRINT(1, "[sp+0x%lx]", r->val); break;
			      case UNW_WHERE_PSPREL: UNW_DPRINT(1, "[psp+0x%lx]", r->val); break;
			      case UNW_WHERE_NONE:
				UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val);
				UNW_DPRINT(1, "BADWHERE(%d)", r->where);
			UNW_DPRINT(1, "\t\t%d\n", r->when);

	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);

	/* translate state record into unwinder instructions: */

	/*
	 * First, set psp if we're dealing with a fixed-size frame;
	 * subsequent instructions may depend on this value.
	 */
	if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when
	    && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE)
	    && sr.curr.reg[UNW_REG_PSP].val != 0) {
		/* new psp is sp plus frame size */
		insn.opc = UNW_INSN_ADD;
		insn.dst = offsetof(struct unw_frame_info, psp)/8;
		insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */
		script_emit(script, insn);

	/* determine where the primary UNaT is: */
	if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
		i = UNW_REG_PRI_UNAT_MEM;
	else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when)
		i = UNW_REG_PRI_UNAT_GR;
	else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when)
		i = UNW_REG_PRI_UNAT_MEM;
		i = UNW_REG_PRI_UNAT_GR;

	compile_reg(&sr, i, script);

	for (i = UNW_REG_BSP; i < UNW_NUM_REGS; ++i)
		compile_reg(&sr, i, script);

	/* free labeled register states & stack: */

	STAT(parse_start = ia64_get_itc());
	for (ls = sr.labeled_states; ls; ls = next) {
		free_state_stack(&ls->saved_state);
		free_labeled_state(ls);
	free_state_stack(&sr.curr);
	STAT(unw.stat.script.parse_time += ia64_get_itc() - parse_start);

	script_finalize(script, &sr);
	STAT(unw.stat.script.build_time += ia64_get_itc() - start);
/*
 * Execute SCRIPT against STATE: each script instruction computes the
 * save location (and possibly the NaT info) of one preserved register
 * in the unw_frame_info, which is addressed here as an array S of
 * unsigned longs.
 */
run_script (struct unw_script *script, struct unw_frame_info *state)
	struct unw_insn *ip, *limit, next_insn;
	unsigned long opc, dst, val, off;
	unsigned long *s = (unsigned long *) state;
	STAT(unsigned long start;)

	STAT(++unw.stat.script.runs; start = ia64_get_itc());
	state->flags = script->flags;
	limit = script->insn + script->count;

	/* fetch-ahead loop: next_insn holds the instruction for this iteration */
	while (ip++ < limit) {
		opc = next_insn.opc;
		dst = next_insn.dst;
		val = next_insn.val;

		      case UNW_INSN_MOVE2:
			/* move a register's save location *and* its NaT companion word */
			s[dst+1] = s[val+1];

		      case UNW_INSN_MOVE_SCRATCH:
			/* scratch registers are found in the pt_regs frame */
				s[dst] = (unsigned long) get_scratch_regs(state) + val;
				UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n",
					   __FUNCTION__, dst, val);

		      case UNW_INSN_MOVE_STACKED:
			s[dst] = (unsigned long) ia64_rse_skip_regs((unsigned long *)state->bsp,

		      case UNW_INSN_ADD_PSP:
			s[dst] = state->psp + val;

		      case UNW_INSN_ADD_SP:
			s[dst] = state->sp + val;

		      case UNW_INSN_SETNAT_MEMSTK:
			if (!state->pri_unat_loc)
				state->pri_unat_loc = &state->sw->ar_unat;
			/* register off. is a multiple of 8, so the least 3 bits (type) are 0 */
			s[dst+1] = (*state->pri_unat_loc - s[dst]) | UNW_NAT_MEMSTK;

		      case UNW_INSN_SETNAT_TYPE:

			/* sanity-check the pointer before dereferencing it */
			if ((s[val] & (local_cpu_data->unimpl_va_mask | 0x7)) != 0
			    || s[val] < TASK_SIZE)
				UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n",
					   __FUNCTION__, s[val]);
			s[dst] = *(unsigned long *) s[val];

	STAT(unw.stat.script.run_time += ia64_get_itc() - start);

	/* lazily point a save location at the value preserved in switch_stack: */
	off = unw.sw_off[val];
	s[val] = (unsigned long) state->sw + off;
	if (off >= offsetof(struct switch_stack, r4) && off <= offsetof(struct switch_stack, r7))
		/*
		 * We're initializing a general register: init NaT info, too. Note that
		 * the offset is a multiple of 8 which gives us the 3 bits needed for
		 */
		s[val+1] = (offsetof(struct switch_stack, ar_unat) - off) | UNW_NAT_MEMSTK;
/*
 * Locate (or build) the unwind script for info->ip, run it to fill in
 * this frame's save locations, and update the cache hints for next time.
 */
find_save_locs (struct unw_frame_info *info)
	int have_write_lock = 0;
	struct unw_script *scr;

	if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) {
		/* don't let obviously bad addresses pollute the cache */
		/* FIXME: should really be level 0 but it occurs too often. KAO */
		UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip);

	scr = script_lookup(info);
		/* not cached: build it (build_script returns holding the write lock) */
		scr = build_script(info);
			"unwind.%s: failed to locate/build unwind script for ip %lx\n",
			__FUNCTION__, info->ip);
		have_write_lock = 1;
	info->hint = scr->hint;
	info->prev_script = scr - unw.cache;

	run_script(scr, info);

	/* release whichever lock we ended up holding on the script */
	if (have_write_lock)
		write_unlock(&scr->lock);
		read_unlock(&scr->lock);
/*
 * Unwind INFO up by one frame: restore ip, cfm, bsp, sp and the
 * predicates from the current frame's save locations, then locate the
 * caller's save locations via find_save_locs().
 */
unw_unwind (struct unw_frame_info *info)
	unsigned long prev_ip, prev_sp, prev_bsp;
	unsigned long ip, pr, num_regs;
	STAT(unsigned long start, flags;)

	STAT(local_irq_save(flags); ++unw.stat.api.unwinds; start = ia64_get_itc());

	prev_bsp = info->bsp;

	/* restore the ip */
	if (!info->rp_loc) {
		/* FIXME: should really be level 0 but it occurs too often. KAO */
		UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n",
			   __FUNCTION__, info->ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
	ip = info->ip = *info->rp_loc;
	if (ip < GATE_ADDR) {
		UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* restore the cfm: */
	if (!info->pfs_loc) {
		UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
	info->cfm_loc = info->pfs_loc;

	/* restore the bsp: */
	if ((info->flags & UNW_FLAG_INTERRUPT_FRAME)) {
		/* the pt_regs frame sits 16 bytes above sp in an interrupt frame */
		info->pt = info->sp + 16;
		if ((pr & (1UL << pNonSys)) != 0)
			num_regs = *info->cfm_loc & 0x7f; /* size of frame */
			(unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs));
		UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt);
		num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs);
	if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) {
		UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* restore the sp: */
	info->sp = info->psp;
	if (info->sp < info->memstk.top || info->sp > info->memstk.limit) {
		UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n",
			   __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit);
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* guard against infinite loops when the unwind info makes no progress */
	if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) {
		UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n",
		STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));

	/* as we unwind, the saved ar.unat becomes the primary unat: */
	info->pri_unat_loc = info->unat_loc;

	/* finally, restore the predicates: */
	unw_get_pr(info, &info->pr);

	retval = find_save_locs(info);
	STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags));
/*
 * Keep unwinding INFO until a user-level return address is reached,
 * emitting diagnostics if the return pointer cannot be read or
 * user-level is never reached.
 */
unw_unwind_to_user (struct unw_frame_info *info)
	while (unw_unwind(info) >= 0) {
		if (unw_get_rp(info, &ip) < 0) {
			unw_get_ip(info, &ip);
			UNW_DPRINT(0, "unwind.%s: failed to read return pointer (ip=0x%lx)\n",
	unw_get_ip(info, &ip);
	UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", __FUNCTION__, ip);
/*
 * Common initialization for a new unwind session over task T: compute
 * the valid register-backing-store and memory-stack ranges and seed
 * INFO so that unwinding starts at the function which created SW.
 */
init_frame_info (struct unw_frame_info *info, struct task_struct *t,
		 struct switch_stack *sw, unsigned long stktop)
	unsigned long rbslimit, rbstop, stklimit;
	STAT(unsigned long start, flags;)

	STAT(local_irq_save(flags); ++unw.stat.api.inits; start = ia64_get_itc());

	/*
	 * Subtle stuff here: we _could_ unwind through the switch_stack frame but we
	 * don't want to do that because it would be slow as each preserved register would
	 * have to be processed. Instead, what we do here is zero out the frame info and
	 * start the unwind process at the function that created the switch_stack frame.
	 * When a preserved value in switch_stack needs to be accessed, run_script() will
	 * initialize the appropriate pointer on demand.
	 */
	memset(info, 0, sizeof(*info));

	rbslimit = (unsigned long) t + IA64_RBS_OFFSET;
	rbstop = sw->ar_bspstore;
	/* clamp an ar.bspstore that points outside the task's stack area */
	if (rbstop - (unsigned long) t >= IA64_STK_OFFSET)

	stklimit = (unsigned long) t + IA64_STK_OFFSET;
	if (stktop <= rbstop)

	info->regstk.limit = rbslimit;
	info->regstk.top = rbstop;
	info->memstk.limit = stklimit;
	info->memstk.top = stktop;

	info->sp = info->psp = stktop;

	UNW_DPRINT(3, "unwind.%s:\n"
		   " rbs = [0x%lx-0x%lx)\n"
		   " stk = [0x%lx-0x%lx)\n"
		   __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit,
		   info->pr, (unsigned long) info->sw, info->sp);
	STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags));
/*
 * Begin unwinding from an interruption: PT supplies the cfm, unat and
 * pfs save locations as well as the interrupted instruction pointer.
 */
unw_init_from_interruption (struct unw_frame_info *info, struct task_struct *t,
			    struct pt_regs *pt, struct switch_stack *sw)
	init_frame_info(info, t, sw, pt->r12);
	info->cfm_loc = &pt->cr_ifs;
	info->unat_loc = &pt->ar_unat;
	info->pfs_loc = &pt->ar_pfs;
	sof = *info->cfm_loc & 0x7f;	/* low 7 bits of cfm = size of frame */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sof);
	/* cr_iip plus the restart-instruction slot number gives the precise ip */
	info->ip = pt->cr_iip + ia64_psr(pt)->ri;
	info->pt = (unsigned long) pt;
	UNW_DPRINT(3, "unwind.%s:\n"
		   __FUNCTION__, info->bsp, sof, info->ip);
	find_save_locs(info);
/*
 * Begin unwinding task T from the switch_stack frame SW; ar.pfs in the
 * switch_stack serves as the initial cfm.
 */
unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct switch_stack *sw)
	init_frame_info(info, t, sw, (unsigned long) (sw + 1) - 16);
	info->cfm_loc = &sw->ar_pfs;
	sol = (*info->cfm_loc >> 7) & 0x7f;	/* bits 7-13 of cfm = size of locals */
	info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->regstk.top, -sol);
	UNW_DPRINT(3, "unwind.%s:\n"
		   __FUNCTION__, info->bsp, sol, info->ip);
	find_save_locs(info);
/*
 * Convenience wrapper: locate the blocked task's switch_stack (just
 * above the kernel stack pointer saved at context-switch time) and
 * start unwinding there.
 */
unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t)
	struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16);

	UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__);
	unw_init_frame_info(info, t, sw);
/*
 * Fill in TABLE from the sorted array of unwind entries in
 * [table_start, table_end); the covered code range is derived from the
 * first entry's start and the last entry's end.
 */
init_unwind_table (struct unw_table *table, const char *name, unsigned long segment_base,
		   unsigned long gp, const void *table_start, const void *table_end)
	const struct unw_table_entry *start = table_start, *end = table_end;

	table->segment_base = segment_base;
	table->start = segment_base + start[0].start_offset;
	table->end = segment_base + end[-1].end_offset;
	table->array = start;
	table->length = end - start;
/*
 * Register an unwind table for newly loaded code (e.g. a module) and
 * return a handle for unw_remove_unwind_table(). Empty tables are
 * rejected with a diagnostic.
 */
unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned long gp,
		      const void *table_start, const void *table_end)
	const struct unw_table_entry *start = table_start, *end = table_end;
	struct unw_table *table;
	unsigned long flags;

	if (end - start <= 0) {
		UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n",

	table = kmalloc(sizeof(*table), GFP_USER);

	init_unwind_table(table, name, segment_base, gp, table_start, table_end);

	spin_lock_irqsave(&unw.lock, flags);

	/* keep kernel unwind table at the front (it's searched most commonly): */
	table->next = unw.tables->next;
	unw.tables->next = table;

	spin_unlock_irqrestore(&unw.lock, flags);
2069 unw_remove_unwind_table (void *handle)
2071 struct unw_table *table, *prev;
2072 struct unw_script *tmp;
2073 unsigned long flags;
2077 UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n",
2083 if (table == &unw.kernel_table) {
2084 UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a "
2085 "no-can-do!\n", __FUNCTION__);
2089 spin_lock_irqsave(&unw.lock, flags);
2091 /* first, delete the table: */
2093 for (prev = (struct unw_table *) &unw.tables; prev; prev = prev->next)
2094 if (prev->next == table)
2097 UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n",
2098 __FUNCTION__, (void *) table);
2099 spin_unlock_irqrestore(&unw.lock, flags);
2102 prev->next = table->next;
2104 spin_unlock_irqrestore(&unw.lock, flags);
2106 /* next, remove hash table entries for this table */
2108 for (index = 0; index <= UNW_HASH_SIZE; ++index) {
2109 tmp = unw.cache + unw.hash[index];
2110 if (unw.hash[index] >= UNW_CACHE_SIZE
2111 || tmp->ip < table->start || tmp->ip >= table->end)
2114 write_lock(&tmp->lock);
2116 if (tmp->ip >= table->start && tmp->ip < table->end) {
2117 unw.hash[index] = tmp->coll_chain;
2121 write_unlock(&tmp->lock);
/*
 * Copy the gate DSO's unwind table and unwind info into a kernel
 * buffer in the layout served by sys_getunwind(): per-entry
 * (start, end, info-offset) triples followed by the info blocks,
 * terminated by a zero start address.
 */
create_gate_table (void)
	const struct unw_table_entry *entry, *start, *end;
	unsigned long *lp, segbase = GATE_ADDR;
	size_t info_size, size;
	Elf64_Phdr *punw = NULL, *phdr = (Elf64_Phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);

	/* find the gate DSO's PT_IA_64_UNWIND program header */
	for (i = 0; i < GATE_EHDR->e_phnum; ++i, ++phdr)
		if (phdr->p_type == PT_IA_64_UNWIND) {

		printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__);

	start = (const struct unw_table_entry *) punw->p_vaddr;
	end = (struct unw_table_entry *) ((char *) start + punw->p_memsz);

	unw_add_unwind_table("linux-gate.so", segbase, 0, start, end);

	/* first pass: compute total size of the table plus all info blocks */
	for (entry = start; entry < end; ++entry)
		size += 3*8 + 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
	size += 8; /* reserve space for "end of table" marker */

	unw.gate_table = kmalloc(size, GFP_KERNEL);
	if (!unw.gate_table) {
		unw.gate_table_size = 0;
		printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__);
	unw.gate_table_size = size;

	/* second pass: table triples from the front, info blocks from the back */
	lp = unw.gate_table;
	info = (char *) unw.gate_table + size;

	for (entry = start; entry < end; ++entry, lp += 3) {
		info_size = 8 + 8*UNW_LENGTH(*(u64 *) (segbase + entry->info_offset));
		memcpy(info, (char *) segbase + entry->info_offset, info_size);

		lp[0] = segbase + entry->start_offset; /* start */
		lp[1] = segbase + entry->end_offset; /* end */
		lp[2] = info - (char *) unw.gate_table; /* info */
	*lp = 0; /* end-of-table marker */
2181 __initcall(create_gate_table);
2186 extern int ia64_unw_start, ia64_unw_end, __gp;
2187 extern void unw_hash_index_t_is_too_narrow (void);
2190 if (8*sizeof(unw_hash_index_t) < UNW_LOG_HASH_SIZE)
2191 unw_hash_index_t_is_too_narrow();
2193 unw.sw_off[unw.preg_index[UNW_REG_PRI_UNAT_GR]] = SW(AR_UNAT);
2194 unw.sw_off[unw.preg_index[UNW_REG_BSPSTORE]] = SW(AR_BSPSTORE);
2195 unw.sw_off[unw.preg_index[UNW_REG_PFS]] = SW(AR_UNAT);
2196 unw.sw_off[unw.preg_index[UNW_REG_RP]] = SW(B0);
2197 unw.sw_off[unw.preg_index[UNW_REG_UNAT]] = SW(AR_UNAT);
2198 unw.sw_off[unw.preg_index[UNW_REG_PR]] = SW(PR);
2199 unw.sw_off[unw.preg_index[UNW_REG_LC]] = SW(AR_LC);
2200 unw.sw_off[unw.preg_index[UNW_REG_FPSR]] = SW(AR_FPSR);
2201 for (i = UNW_REG_R4, off = SW(R4); i <= UNW_REG_R7; ++i, off += 8)
2202 unw.sw_off[unw.preg_index[i]] = off;
2203 for (i = UNW_REG_B1, off = SW(B1); i <= UNW_REG_B5; ++i, off += 8)
2204 unw.sw_off[unw.preg_index[i]] = off;
2205 for (i = UNW_REG_F2, off = SW(F2); i <= UNW_REG_F5; ++i, off += 16)
2206 unw.sw_off[unw.preg_index[i]] = off;
2207 for (i = UNW_REG_F16, off = SW(F16); i <= UNW_REG_F31; ++i, off += 16)
2208 unw.sw_off[unw.preg_index[i]] = off;
2210 for (i = 0; i < UNW_CACHE_SIZE; ++i) {
2212 unw.cache[i].lru_chain = (i - 1);
2213 unw.cache[i].coll_chain = -1;
2214 unw.cache[i].lock = RW_LOCK_UNLOCKED;
2216 unw.lru_head = UNW_CACHE_SIZE - 1;
2219 init_unwind_table(&unw.kernel_table, "kernel", KERNEL_START, (unsigned long) &__gp,
2220 &ia64_unw_start, &ia64_unw_end);
2224 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2226 * This system call has been deprecated. The new and improved way to get
2227 * at the kernel's unwind info is via the gate DSO. The address of the
2228 * ELF header for this DSO is passed to user-level via AT_SYSINFO_EHDR.
2230 * DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED DEPRECATED
2232 * This system call copies the unwind data into the buffer pointed to by BUF and returns
2233 * the size of the unwind data. If BUF_SIZE is smaller than the size of the unwind data
2234 * or if BUF is NULL, nothing is copied, but the system call still returns the size of the
2237 * The first portion of the unwind data contains an unwind table and rest contains the
2238 * associated unwind info (in no particular order). The unwind table consists of a table
2239 * of entries of the form:
2241 * u64 start; (64-bit address of start of function)
 * u64 end; (64-bit address of end of function)
2243 * u64 info; (BUF-relative offset to unwind info)
2245 * The end of the unwind table is indicated by an entry with a START address of zero.
2247 * Please see the IA-64 Software Conventions and Runtime Architecture manual for details
2248 * on the format of the unwind info.
2251 * EFAULT BUF points outside your accessible address space.
/*
 * Copy the gate-page unwind data into BUF when it is large enough;
 * in all cases the return value is the size of the unwind data.
 */
sys_getunwind (void *buf, size_t buf_size)
	if (buf && buf_size >= unw.gate_table_size)
		if (copy_to_user(buf, unw.gate_table, unw.gate_table_size) != 0)
	return unw.gate_table_size;