/*
 * Kernel support for the ptrace() and syscall tracing interfaces.
 *
 * Copyright (C) 1999-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Derived from the x86 and Alpha versions.  Most of the code in here
 * could actually be factored into a common set of routines.
 */
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp_lock.h>
#include <linux/user.h>
#include <linux/security.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace_offsets.h>
#include <asm/rse.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#ifdef CONFIG_PERFMON
#include <asm/perfmon.h>
#endif
/*
 * Bits in the PSR that we allow ptrace() to change:
 *	be, up, ac, mfl, mfh (the user mask; five bits total)
 *	db (debug breakpoint fault; one bit)
 *	id (instruction debug fault disable; one bit)
 *	dd (data debug fault disable; one bit)
 *	ri (restart instruction; two bits)
 *	is (instruction set; one bit)
 */
#define IPSR_WRITE_MASK \
	(IA64_PSR_UM | IA64_PSR_DB | IA64_PSR_IS | IA64_PSR_ID | IA64_PSR_DD | IA64_PSR_RI)
#define IPSR_READ_MASK	IPSR_WRITE_MASK
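
/*
 * Illustrative sketch (not part of the original source): a PTRACE_POKEUSR to
 * PT_CR_IPSR only affects the bits in IPSR_WRITE_MASK; all other PSR bits are
 * preserved.  The masked update performed by access_uarea() below amounts to:
 *
 *	unsigned long old_ipsr = pt->cr_ipsr;
 *	unsigned long new_ipsr = (user_val & IPSR_WRITE_MASK)
 *				 | (old_ipsr & ~IPSR_WRITE_MASK);
 *
 * where "user_val" stands for the hypothetical value supplied by the tracer.
 */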
#define PTRACE_DEBUG	1

#if PTRACE_DEBUG
# define dprintk(format...)	printk(format)
# define inline
#else
# define dprintk(format...)
#endif
/*
 * Collect the NaT bits for r1-r31 from scratch_unat and return a NaT
 * bitset where bit i is set iff the NaT bit of register i is set.
 */
unsigned long
ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat)
{
#	define GET_BITS(first, last, unat)					\
	({									\
		unsigned long bit = ia64_unat_pos(&pt->r##first);		\
		unsigned long mask = ((1UL << (last - first + 1)) - 1) << first; \
		(ia64_rotl(unat, first) >> bit) & mask;				\
	})
	unsigned long val;

	val  = GET_BITS( 1,  3, scratch_unat);
	val |= GET_BITS(12, 15, scratch_unat);
	val |= GET_BITS( 8, 11, scratch_unat);
	val |= GET_BITS(16, 31, scratch_unat);
	return val;

#	undef GET_BITS
}
/*
 * Set the NaT bits for the scratch registers according to NAT and
 * return the resulting unat (assuming the scratch registers are
 * stored in PT).
 */
unsigned long
ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat)
{
	unsigned long scratch_unat;

#	define PUT_BITS(first, last, nat)					\
	({									\
		unsigned long bit = ia64_unat_pos(&pt->r##first);		\
		unsigned long mask = ((1UL << (last - first + 1)) - 1) << bit;	\
		(ia64_rotr(nat, first) << bit) & mask;				\
	})

	scratch_unat  = PUT_BITS( 1,  3, nat);
	scratch_unat |= PUT_BITS(12, 15, nat);
	scratch_unat |= PUT_BITS( 8, 11, nat);
	scratch_unat |= PUT_BITS(16, 31, nat);
	return scratch_unat;

#	undef PUT_BITS
}
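
/*
 * Illustrative sketch (not part of the original source): the two helpers
 * above are inverses over the scratch registers (r1-r3, r8-r31), so
 * converting a NaT bitset into scratch-unat format and back preserves it:
 *
 *	unsigned long nat = (1UL << 9) | (1UL << 20);	(hypothetical NaT bits)
 *	unsigned long unat = ia64_put_scratch_nat_bits(pt, nat);
 *
 *	BUG_ON(ia64_get_scratch_nat_bits(pt, unat) != nat);	(round trip)
 */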
#define IA64_MLX_TEMPLATE	0x2
#define IA64_MOVL_OPCODE	6

void
ia64_increment_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri + 1;

	if (ri > 2) {
		ri = 0;
		regs->cr_iip += 16;
	} else if (ri == 2) {
		get_user(w0, (char *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 0;
			regs->cr_iip += 16;
		}
	}
	ia64_psr(regs)->ri = ri;
}
void
ia64_decrement_ip (struct pt_regs *regs)
{
	unsigned long w0, ri = ia64_psr(regs)->ri - 1;

	if (ia64_psr(regs)->ri == 0) {
		regs->cr_iip -= 16;
		ri = 2;
		get_user(w0, (char *) regs->cr_iip + 0);
		if (((w0 >> 1) & 0xf) == IA64_MLX_TEMPLATE) {
			/*
			 * rfi'ing to slot 2 of an MLX bundle causes
			 * an illegal operation fault.  We don't want
			 * that to happen...
			 */
			ri = 1;
		}
	}
	ia64_psr(regs)->ri = ri;
}
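
/*
 * Illustrative note (not part of the original source): an IA-64 instruction
 * address is a 16-byte-aligned bundle address (cr_iip) plus a slot number
 * 0-2 kept in psr.ri, which is why the helpers above move cr_iip in units of
 * 16 only when the slot number wraps.  A hypothetical "flat" IP for display
 * purposes could be formed as:
 *
 *	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
 */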
/*
 * This routine is used to read the rnat bits that are stored on the kernel backing store.
 * Since, in general, the alignment of the user and kernel are different, this is not
 * completely trivial.  In essence, we need to construct the user RNAT based on up to two
 * kernel RNAT values and/or the RNAT value saved in the child's pt_regs.
 *
 * user rbs
 *
 * +--------+ <-- lowest address
 * | slot62 |
 * +--------+
 * | rnat0  |
 * +--------+
 * | slot00 | \
 * +--------+ |
 * | slot01 | > child_regs->ar_rnat
 * +--------+ |
 * | slot02 | /				kernel rbs
 * +--------+				+--------+
 *	    <- child_regs->ar_bspstore	| slot61 | <-- krbs
 * +- - - - +				+--------+
 * | slot62 |				| slot62 |
 * +- - - - +				+--------+
 * | rnat0  |				| rnat1  |
 * +- - - - +				+--------+
 * | slot00 | \				| slot00 |
 * +- - - - + |				+--------+
 * | slot01 | > child_stack->ar_rnat	| slot01 |
 * +- - - - + |				+--------+
 * | slot02 | /				| slot02 |
 * +- - - - +				+--------+
 *				  <--- child_stack->ar_bspstore
 *
 * The way to think of this code is as follows: bit 0 in the user rnat corresponds to some
 * bit N (0 <= N <= 62) in one of the kernel rnat values.  The kernel rnat value holding
 * this bit is stored in variable rnat0.  rnat1 is loaded with the kernel rnat value that
 * forms the upper bits of the user rnat value.
 *
 * Boundary cases:
 *
 *  o when reading the rnat "below" the first rnat slot on the kernel backing store,
 *    rnat0/rnat1 are set to 0 and the low order bits are merged in from pt->ar_rnat.
 *
 *  o when reading the rnat "above" the last rnat slot on the kernel backing store,
 *    rnat0/rnat1 gets its value from sw->ar_rnat.
 */
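
/*
 * Worked example (illustrative, not part of the original source): suppose
 * slot 0 of the user rnat maps to bit 10 of the kernel rnat, i.e. shift = 10.
 * Then bits 10..62 of rnat0 supply user rnat bits 0..52, bits 0..9 of rnat1
 * supply user rnat bits 53..62, and the merge below reduces to:
 *
 *	urnat = (rnat1 << 53) | (rnat0 >> 10);
 */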
static unsigned long
get_rnat (struct pt_regs *pt, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr)
{
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, umask = 0UL;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs;

	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;
	/*
	 * First, figure out which bit number slot 0 in user-land maps to in the kernel
	 * rnat.  Do this by figuring out how many register slots we're beyond the user's
	 * backingstore and then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		umask = ((1UL << ia64_rse_slot_num(ubspstore)) - 1);
		urnat = (pt->ar_rnat & umask);
	}
	if (rnat0_kaddr >= kbsp) {
		rnat0 = sw->ar_rnat;
	} else if (rnat0_kaddr > krbs) {
		rnat0 = *rnat0_kaddr;
	}
	if (rnat1_kaddr >= kbsp) {
		rnat1 = sw->ar_rnat;
	} else if (rnat1_kaddr > krbs) {
		rnat1 = *rnat1_kaddr;
	}
	urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & ~umask;
	return urnat;
}
/*
 * The reverse of get_rnat.
 */
static void
put_rnat (struct pt_regs *pt, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat)
{
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, umask = 0, mask, m;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift, slot, ndirty;
	long num_regs, nbits;

	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
	nbits = ndirty % 63;

	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;
	/*
	 * First, figure out which bit number slot 0 in user-land maps to in the kernel
	 * rnat.  Do this by figuring out how many register slots we're beyond the user's
	 * backingstore and then computing the equivalent address in kernel space.
	 */
	num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	printk("%s: ubspstore=%p urnat_addr=%p\n", __FUNCTION__, ubspstore, urnat_addr);
	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		slot = ia64_rse_slot_num(ubspstore);
		umask = ((1UL << slot) - 1);
		pt->ar_rnat = (pt->ar_rnat & ~umask) | (urnat & umask);
		nbits -= slot;
		if (nbits <= 0)
			return;
	}
	mask = (1UL << nbits) - 1;
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored, so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	m = mask << shift;
	printk("%s: rnat0=%016lx, m=%016lx, rnat0_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat0, m, rnat0_kaddr, kbsp);
	if (rnat0_kaddr >= kbsp) {
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat0 & m);
	} else if (rnat0_kaddr > krbs) {
		*rnat0_kaddr = ((*rnat0_kaddr & ~m) | (rnat0 & m));
	}

	rnat1 = (urnat >> (63 - shift));
	m = mask >> (63 - shift);
	printk("%s: rnat1=%016lx, m=%016lx, rnat1_kaddr=%p kbsp=%p\n", __FUNCTION__, rnat1, m, rnat1_kaddr, kbsp);
	if (rnat1_kaddr >= kbsp) {
		sw->ar_rnat = (sw->ar_rnat & ~m) | (rnat1 & m);
	} else if (rnat1_kaddr > krbs) {
		*rnat1_kaddr = ((*rnat1_kaddr & ~m) | (rnat1 & m));
	}
}
/*
 * Read a word from the user-level backing store of task CHILD.  ADDR is the user-level
 * address to read the word from, VAL a pointer to the return value, and USER_BSP gives
 * the end of the user-level backing store (i.e., it's the address that would be in ar.bsp
 * after the user executed a "cover" instruction).
 *
 * This routine takes care of accessing the kernel register backing store for those
 * registers that got spilled there.  It also takes care of calculating the appropriate
 * RNaT collection words.
 */
long
ia64_peek (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end,
	   unsigned long addr, long *val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end, *rnat_addr;
	struct pt_regs *child_regs;
	size_t copied;
	long ret;

	urbs_end = (long *) user_rbs_end;
	laddr = (unsigned long *) addr;
	child_regs = ia64_task_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(urbs_end)) {
		/*
		 * Attempt to read the RBS in an area that's actually on the kernel RBS =>
		 * read the corresponding bits in the kernel RBS.
		 */
		rnat_addr = ia64_rse_rnat_addr(laddr);
		ret = get_rnat(child_regs, child_stack, krbs, rnat_addr);

		if (laddr == rnat_addr) {
			/* return NaT collection word itself */
			*val = ret;
			return 0;
		}

		if (((1UL << ia64_rse_slot_num(laddr)) & ret) != 0) {
			/*
			 * It is implementation dependent whether the data portion of a
			 * NaT value gets saved on a st8.spill or RSE spill (e.g., see
			 * EAS 2.6, 4.4.4.6 Register Spill and Fill).  To get consistent
			 * behavior across all possible IA-64 implementations, we return
			 * zero in this case.
			 */
			*val = 0;
			return 0;
		}

		if (laddr < urbs_end) {
			/* the desired word is on the kernel RBS and is not a NaT */
			regnum = ia64_rse_num_regs(bspstore, laddr);
			*val = *ia64_rse_skip_regs(krbs, regnum);
			return 0;
		}
	}
	copied = access_process_vm(child, addr, &ret, sizeof(ret), 0);
	if (copied != sizeof(ret))
		return -EIO;
	*val = ret;
	return 0;
}
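
/*
 * Usage sketch (illustrative, not part of the original source): reading one
 * word from the traced child's user-level backing store from kernel code:
 *
 *	long val;
 *	unsigned long urbs_end = ia64_get_user_rbs_end(child, ia64_task_regs(child), NULL);
 *
 *	if (ia64_peek(child, child_stack, urbs_end, addr, &val) == 0)
 *		printk("rbs word at %lx = %lx\n", addr, val);
 *
 * This is exactly how the PTRACE_PEEKTEXT/PTRACE_PEEKDATA cases of
 * sys_ptrace() below obtain their result.
 */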
long
ia64_poke (struct task_struct *child, struct switch_stack *child_stack, unsigned long user_rbs_end,
	   unsigned long addr, long val)
{
	unsigned long *bspstore, *krbs, regnum, *laddr, *urbs_end = (long *) user_rbs_end;
	struct pt_regs *child_regs;

	laddr = (unsigned long *) addr;
	child_regs = ia64_task_regs(child);
	bspstore = (unsigned long *) child_regs->ar_bspstore;
	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	if (laddr >= bspstore && laddr <= ia64_rse_rnat_addr(urbs_end)) {
		/*
		 * Attempt to write the RBS in an area that's actually on the kernel RBS
		 * => write the corresponding bits in the kernel RBS.
		 */
		if (ia64_rse_is_rnat_slot(laddr))
			put_rnat(child_regs, child_stack, krbs, laddr, val);
		else {
			if (laddr < urbs_end) {
				regnum = ia64_rse_num_regs(bspstore, laddr);
				*ia64_rse_skip_regs(krbs, regnum) = val;
			}
		}
	} else if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val)) {
		return -EIO;
	}
	return 0;
}
/*
 * Calculate the address of the end of the user-level register backing store.  This is the
 * address that would have been stored in ar.bsp if the user had executed a "cover"
 * instruction right before entering the kernel.  If CFMP is not NULL, it is used to
 * return the "current frame mask" that was active at the time the kernel was entered.
 */
unsigned long
ia64_get_user_rbs_end (struct task_struct *child, struct pt_regs *pt, unsigned long *cfmp)
{
	unsigned long *krbs, *bspstore, cfm;
	struct unw_frame_info info;
	long ndirty;

	krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
	bspstore = (unsigned long *) pt->ar_bspstore;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));
	cfm = pt->cr_ifs & ~(1UL << 63);

	if ((long) pt->cr_ifs >= 0) {
		/*
		 * If bit 63 of cr.ifs is cleared, the kernel was entered via a system
		 * call and we need to recover the CFM that existed on entry to the
		 * kernel by unwinding the kernel stack.
		 */
		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) == 0) {
			unw_get_cfm(&info, &cfm);
			ndirty += (cfm & 0x7f);
		}
	}
	if (cfmp)
		*cfmp = cfm;
	return (unsigned long) ia64_rse_skip_regs(bspstore, ndirty);
}
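
/*
 * Illustrative note (not part of the original source): together with the CFM
 * returned through CFMP, a caller can recover the ar.bsp value that was live
 * at the point of kernel entry by skipping back over the current frame:
 *
 *	unsigned long cfm, bsp_end, bsp_at_entry;
 *
 *	bsp_end = ia64_get_user_rbs_end(child, pt, &cfm);
 *	bsp_at_entry = (unsigned long) ia64_rse_skip_regs((unsigned long *) bsp_end,
 *							  -(cfm & 0x7f));
 *
 * (cfm & 0x7f) is CFM.sof, the size of the current register frame.  The
 * PT_AR_BSP comment in access_uarea() below relies on the same identity.
 */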
/*
 * Synchronize (i.e., write) the RSE backing store living in kernel space to the VM of the
 * CHILD task.  SW and PT are the pointers to the switch_stack and pt_regs structures,
 * respectively.  USER_RBS_END is the user-level address at which the backing store ends.
 */
long
ia64_sync_user_rbs (struct task_struct *child, struct switch_stack *sw,
		    unsigned long user_rbs_start, unsigned long user_rbs_end)
{
	unsigned long addr, val;
	long ret;

	/* now copy word for word from kernel rbs to user rbs: */
	for (addr = user_rbs_start; addr < user_rbs_end; addr += 8) {
		ret = ia64_peek(child, sw, user_rbs_end, addr, &val);
		if (ret < 0)
			return ret;
		if (access_process_vm(child, addr, &val, sizeof(val), 1) != sizeof(val))
			return -EIO;
	}
	return 0;
}
/*
 * Simulate user-level "flushrs".  Note: we can't just add pt->loadrs>>16 to
 * pt->ar_bspstore because the kernel backing store and the user-level backing store may
 * have different alignments (and therefore a different number of intervening rnat slots).
 */
static void
user_flushrs (struct task_struct *task, struct pt_regs *pt)
{
	unsigned long *krbs;
	long ndirty;

	krbs = (unsigned long *) task + IA64_RBS_OFFSET/8;
	ndirty = ia64_rse_num_regs(krbs, krbs + (pt->loadrs >> 19));

	pt->ar_bspstore = (unsigned long) ia64_rse_skip_regs((unsigned long *) pt->ar_bspstore,
							     ndirty);
	pt->loadrs = 0;
}
static inline void
sync_user_rbs_one_thread (struct task_struct *p, int make_writable)
{
	struct switch_stack *sw;
	unsigned long urbs_end;
	struct pt_regs *pt;

	sw = (struct switch_stack *) (p->thread.ksp + 16);
	pt = ia64_task_regs(p);
	urbs_end = ia64_get_user_rbs_end(p, pt, NULL);
	ia64_sync_user_rbs(p, sw, pt->ar_bspstore, urbs_end);
	if (make_writable)
		user_flushrs(p, pt);
}
struct task_list {
	struct task_list *next;
	struct task_struct *task;
};

#ifdef CONFIG_SMP

static inline void
collect_task (struct task_list **listp, struct task_struct *p, int make_writable)
{
	struct task_list *e;

	e = kmalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		/* oops, can't collect more: finish at least what we collected so far... */
		sync_user_rbs_one_thread(p, make_writable);
	else {
		get_task_struct(p);
		e->task = p;
		e->next = *listp;
		*listp = e;
	}
}

static inline struct task_list *
finish_task (struct task_list *list, int make_writable)
{
	struct task_list *next = list->next;

	sync_user_rbs_one_thread(list->task, make_writable);
	put_task_struct(list->task);
	kfree(list);
	return next;
}

#else
# define collect_task(list, p, make_writable)	sync_user_rbs_one_thread(p, make_writable)
# define finish_task(list, make_writable)	(NULL)
#endif
/*
 * Synchronize the RSE backing store of CHILD and all tasks that share the address space
 * with it.  CHILD_URBS_END is the address of the end of the register backing store of
 * CHILD.  If MAKE_WRITABLE is set, a user-level "flushrs" is simulated such that the VM
 * can be written via ptrace() and the tasks will pick up the newly written values.  It
 * would be OK to unconditionally simulate a "flushrs", but this would be more intrusive
 * than strictly necessary (e.g., it would make it impossible to obtain the original value
 * of some of the registers).
 */
static void
threads_sync_user_rbs (struct task_struct *child, unsigned long child_urbs_end, int make_writable)
{
	struct switch_stack *sw;
	struct task_struct *g, *p;
	struct mm_struct *mm;
	struct pt_regs *pt;
	long multi_threaded;

	task_lock(child);
	{
		mm = child->mm;
		multi_threaded = mm && (atomic_read(&mm->mm_users) > 1);
	}
	task_unlock(child);

	if (!multi_threaded) {
		sw = (struct switch_stack *) (child->thread.ksp + 16);
		pt = ia64_task_regs(child);
		ia64_sync_user_rbs(child, sw, pt->ar_bspstore, child_urbs_end);
		if (make_writable)
			user_flushrs(child, pt);
	} else {
		/*
		 * Note: we can't call ia64_sync_user_rbs() while holding the
		 * tasklist_lock because that may cause a dead-lock: ia64_sync_user_rbs()
		 * may indirectly call tlb_flush_all(), which triggers an IPI.
		 * Furthermore, tasklist_lock is acquired by fork() with interrupts
		 * disabled, so with the right timing, the IPI never completes, hence
		 * tasklist_lock never gets released, hence fork() never completes...
		 */
		struct task_list *list = NULL;

		read_lock(&tasklist_lock);
		{
			do_each_thread(g, p) {
				if (p->mm == mm && p->state != TASK_RUNNING)
					collect_task(&list, p, make_writable);
			} while_each_thread(g, p);
		}
		read_unlock(&tasklist_lock);

		while (list)
			list = finish_task(list, make_writable);
	}
	child->thread.flags |= IA64_THREAD_KRBS_SYNCED;	/* set the flag in the child thread only */
}
/*
 * Write f32-f127 back to task->thread.fph if it has been modified.
 */
inline void
ia64_flush_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
#ifdef CONFIG_SMP
	struct task_struct *fpu_owner = current;
#else
	struct task_struct *fpu_owner = ia64_get_fpu_owner();
#endif

	if (task == fpu_owner && psr->mfh) {
		psr->mfh = 0;
		ia64_save_fpu(&task->thread.fph[0]);
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		task->thread.last_fph_cpu = smp_processor_id();
	}
}
/*
 * Sync the fph state of the task so that it can be manipulated
 * through thread.fph.  If necessary, f32-f127 are written back to
 * thread.fph or, if the fph state hasn't been used before, thread.fph
 * is cleared to zeroes.  Also, access to f32-f127 is disabled to
 * ensure that the task picks up the state from thread.fph when it
 * executes again.
 */
void
ia64_sync_fph (struct task_struct *task)
{
	struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));

	ia64_flush_fph(task);
	if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		task->thread.last_fph_cpu = -1;	/* force reload */
		memset(&task->thread.fph, 0, sizeof(task->thread.fph));
	}
	if (ia64_get_fpu_owner() == task)
		ia64_set_fpu_owner(0);
	psr->dfh = 1;
}
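
/*
 * Illustrative note (not part of the original source): because f32-f127 are
 * lazily managed, a tracer never reads them from the live registers.  The
 * accessors below first flush (for reads) or sync (for writes) and then work
 * on child->thread.fph directly, e.g. for a read:
 *
 *	ia64_flush_fph(child);
 *	if (child->thread.flags & IA64_THREAD_FPH_VALID)
 *		memcpy(buf, &child->thread.fph, sizeof(child->thread.fph));
 *
 * where "buf" is a hypothetical kernel-space destination buffer.
 */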
static int
access_fr (struct unw_frame_info *info, int regnum, int hi, unsigned long *data, int write_access)
{
	struct ia64_fpreg fpval;
	int ret;

	ret = unw_get_fr(info, regnum, &fpval);
	if (ret < 0)
		return ret;

	if (write_access) {
		fpval.u.bits[hi] = *data;
		ret = unw_set_fr(info, regnum, fpval);
	} else
		*data = fpval.u.bits[hi];
	return ret;
}
static int
access_uarea (struct task_struct *child, unsigned long addr, unsigned long *data, int write_access)
{
	unsigned long *ptr, regnum, urbs_end, rnat_addr;
	struct switch_stack *sw;
	struct unw_frame_info info;
	struct pt_regs *pt;

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	if ((addr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address 0x%lx\n", addr);
		return -1;
	}

	if (addr < PT_F127 + 16) {
		/* accessing fph */
		if (write_access)
			ia64_sync_fph(child);
		else
			ia64_flush_fph(child);
		ptr = (unsigned long *) ((unsigned long) &child->thread.fph + addr);
	} else if (addr >= PT_F10 && addr < PT_F15 + 16) {
		/* scratch registers untouched by kernel (saved in switch_stack) */
		ptr = (unsigned long *) ((long) sw + addr - PT_NAT_BITS);
	} else if (addr < PT_AR_LC + 8) {
		/* preserved state: */
		unsigned long nat_bits, scratch_unat, dummy = 0;
		struct unw_frame_info info;
		char nat = 0;
		int ret;

		unw_init_from_blocked_task(&info, child);
		if (unw_unwind_to_user(&info) < 0)
			return -1;

		switch (addr) {
		      case PT_NAT_BITS:
			if (write_access) {
				nat_bits = *data;
				scratch_unat = ia64_put_scratch_nat_bits(pt, nat_bits);
				if (unw_set_ar(&info, UNW_AR_UNAT, scratch_unat) < 0) {
					dprintk("ptrace: failed to set ar.unat\n");
					return -1;
				}
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					unw_set_gr(&info, regnum, dummy, (nat_bits >> regnum) & 1);
				}
			} else {
				if (unw_get_ar(&info, UNW_AR_UNAT, &scratch_unat) < 0) {
					dprintk("ptrace: failed to read ar.unat\n");
					return -1;
				}
				nat_bits = ia64_get_scratch_nat_bits(pt, scratch_unat);
				for (regnum = 4; regnum <= 7; ++regnum) {
					unw_get_gr(&info, regnum, &dummy, &nat);
					nat_bits |= (nat != 0) << regnum;
				}
				*data = nat_bits;
			}
			return 0;

		      case PT_R4: case PT_R5: case PT_R6: case PT_R7:
			if (write_access) {
				/* read NaT bit first: */
				unsigned long dummy;

				ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat);
				if (ret < 0)
					return ret;
			}
			return unw_access_gr(&info, (addr - PT_R4)/8 + 4, data, &nat,
					     write_access);

		      case PT_B1: case PT_B2: case PT_B3: case PT_B4: case PT_B5:
			return unw_access_br(&info, (addr - PT_B1)/8 + 1, data, write_access);

		      case PT_AR_EC:
			return unw_access_ar(&info, UNW_AR_EC, data, write_access);

		      case PT_AR_LC:
			return unw_access_ar(&info, UNW_AR_LC, data, write_access);

		      default:
			if (addr >= PT_F2 && addr < PT_F5 + 16)
				return access_fr(&info, (addr - PT_F2)/16 + 2, (addr & 8) != 0,
						 data, write_access);
			else if (addr >= PT_F16 && addr < PT_F31 + 16)
				return access_fr(&info, (addr - PT_F16)/16 + 16, (addr & 8) != 0,
						 data, write_access);
			else {
				dprintk("ptrace: rejecting access to register address 0x%lx\n",
					addr);
				return -1;
			}
		}
	} else if (addr < PT_F9+16) {
		/* scratch state */
		switch (addr) {
		      case PT_AR_BSP:
			/*
			 * By convention, we use PT_AR_BSP to refer to the end of the user-level
			 * backing store.  Use ia64_rse_skip_regs(PT_AR_BSP, -CFM.sof) to get
			 * the real value of ar.bsp at the time the kernel was entered.
			 */
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			if (write_access) {
				if (*data != urbs_end) {
					if (ia64_sync_user_rbs(child, sw,
							       pt->ar_bspstore, urbs_end) < 0)
						return -1;
					/* simulate user-level write of ar.bsp: */
					pt->loadrs = 0;
					pt->ar_bspstore = *data;
				}
			} else
				*data = urbs_end;
			return 0;

		      case PT_CFM:
			if ((long) pt->cr_ifs < 0) {
				if (write_access)
					pt->cr_ifs = ((pt->cr_ifs & ~0x3fffffffffUL)
						      | (*data & 0x3fffffffffUL));
				else
					*data = pt->cr_ifs & 0x3fffffffffUL;
			} else {
				/* kernel was entered through a system call */
				unsigned long cfm;

				unw_init_from_blocked_task(&info, child);
				if (unw_unwind_to_user(&info) < 0)
					return -1;

				unw_get_cfm(&info, &cfm);
				if (write_access)
					unw_set_cfm(&info, ((cfm & ~0x3fffffffffU)
							    | (*data & 0x3fffffffffUL)));
				else
					*data = cfm;
			}
			return 0;

		      case PT_CR_IPSR:
			if (write_access)
				pt->cr_ipsr = ((*data & IPSR_WRITE_MASK)
					       | (pt->cr_ipsr & ~IPSR_WRITE_MASK));
			else
				*data = (pt->cr_ipsr & IPSR_READ_MASK);
			return 0;

		      case PT_AR_RNAT:
			urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
			rnat_addr = (long) ia64_rse_rnat_addr((long *) urbs_end);
			if (write_access)
				return ia64_poke(child, sw, urbs_end, rnat_addr, *data);
			else
				return ia64_peek(child, sw, urbs_end, rnat_addr, data);

		      case PT_R1: case PT_R2: case PT_R3:
		      case PT_R8: case PT_R9: case PT_R10: case PT_R11:
		      case PT_R12: case PT_R13: case PT_R14: case PT_R15:
		      case PT_R16: case PT_R17: case PT_R18: case PT_R19:
		      case PT_R20: case PT_R21: case PT_R22: case PT_R23:
		      case PT_R24: case PT_R25: case PT_R26: case PT_R27:
		      case PT_R28: case PT_R29: case PT_R30: case PT_R31:
		      case PT_B0: case PT_B6: case PT_B7:
		      case PT_F6: case PT_F6+8: case PT_F7: case PT_F7+8:
		      case PT_F8: case PT_F8+8: case PT_F9: case PT_F9+8:
		      case PT_AR_BSPSTORE:
		      case PT_AR_RSC: case PT_AR_UNAT: case PT_AR_PFS:
		      case PT_AR_CCV: case PT_AR_FPSR: case PT_CR_IIP: case PT_PR:
			/* scratch register */
			ptr = (unsigned long *) ((long) pt + addr - PT_CR_IPSR);
			break;

		      default:
			/* disallow accessing anything else... */
			dprintk("ptrace: rejecting access to register address 0x%lx\n",
				addr);
			return -1;
		}
	} else {
		/* access debug registers */

		if (addr >= PT_IBR) {
			regnum = (addr - PT_IBR) >> 3;
			ptr = &child->thread.ibr[0];
		} else {
			regnum = (addr - PT_DBR) >> 3;
			ptr = &child->thread.dbr[0];
		}

		if (regnum >= 8) {
			dprintk("ptrace: rejecting access to register address 0x%lx\n", addr);
			return -1;
		}
#ifdef CONFIG_PERFMON
		/*
		 * Check if debug registers are used by perfmon. This test must be done
		 * once we know that we can do the operation, i.e. the arguments are all
		 * valid, but before we start modifying the state.
		 *
		 * Perfmon needs to keep a count of how many processes are trying to
		 * modify the debug registers for system wide monitoring sessions.
		 *
		 * We also include read access here, because a read may cause the
		 * PMU-installed debug register state (dbr[], ibr[]) to be reset. The two
		 * arrays are also used by perfmon, but we do not use
		 * IA64_THREAD_DBG_VALID. The registers are restored by the PMU context
		 * switch code.
		 */
		if (pfm_use_debug_registers(child)) return -1;
#endif

		if (!(child->thread.flags & IA64_THREAD_DBG_VALID)) {
			child->thread.flags |= IA64_THREAD_DBG_VALID;
			memset(child->thread.dbr, 0, sizeof(child->thread.dbr));
			memset(child->thread.ibr, 0, sizeof(child->thread.ibr));
		}

		ptr += regnum;

		if (write_access)
			/* don't let the user set kernel-level breakpoints... */
			*ptr = *data & ~(7UL << 56);
		else
			*data = *ptr;
		return 0;
	}
	if (write_access)
		*ptr = *data;
	else
		*data = *ptr;
	return 0;
}
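
/*
 * Tracer-side sketch (illustrative, not part of the original source): the
 * offsets accepted by access_uarea() are the PT_* constants from
 * <asm/ptrace_offsets.h>, used with PTRACE_PEEKUSR/PTRACE_POKEUSR, e.g.:
 *
 *	errno = 0;
 *	long iip = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IIP, 0);
 *	long psr = ptrace(PTRACE_PEEKUSER, pid, PT_CR_IPSR, 0);
 *	if (errno == 0)
 *		printf("child stopped at %lx, psr=%lx\n", iip, psr);
 *
 * (PTRACE_PEEKUSER is the glibc spelling of PTRACE_PEEKUSR.)
 */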
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs *ppr)
{
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval;
	struct unw_frame_info info;
	char nat = 0;
	int i;

	retval = verify_area(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs));
	if (retval != 0) {
		return -EIO;
	}

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	retval = 0;

	/* control regs */

	retval |= __put_user(pt->cr_iip, &ppr->cr_iip);
	retval |= access_uarea(child, PT_CR_IPSR, &ppr->cr_ipsr, 0);

	/* app regs */

	retval |= __put_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __put_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __put_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __put_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __put_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __put_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= access_uarea(child, PT_AR_EC, &ppr->ar[PT_AUR_EC], 0);
	retval |= access_uarea(child, PT_AR_LC, &ppr->ar[PT_AUR_LC], 0);
	retval |= access_uarea(child, PT_AR_RNAT, &ppr->ar[PT_AUR_RNAT], 0);
	retval |= access_uarea(child, PT_AR_BSP, &ppr->ar[PT_AUR_BSP], 0);
	retval |= access_uarea(child, PT_CFM, &ppr->cfm, 0);

	/* gr1-gr3 */

	retval |= __copy_to_user(&ppr->gr[1], &pt->r1, sizeof(long) * 3);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		retval |= unw_access_gr(&info, i, &ppr->gr[i], &nat, 0);
	}

	/* gr8-gr11 */

	retval |= __copy_to_user(&ppr->gr[8], &pt->r8, sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_to_user(&ppr->gr[12], &pt->r12, sizeof(long) * 4);

	/* gr16-gr31 */

	retval |= __copy_to_user(&ppr->gr[16], &pt->r16, sizeof(long) * 16);

	/* b0 */

	retval |= __put_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= unw_access_br(&info, i, &ppr->br[i], 0);
	}

	/* b6-b7 */

	retval |= __put_user(pt->b6, &ppr->br[6]);
	retval |= __put_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 0);
		retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
	}

	/* fr6-fr9 */

	retval |= __copy_to_user(&ppr->fr[6], &pt->f6, sizeof(struct ia64_fpreg) * 4);

	/* fp scratch regs(10-15) */

	retval |= __copy_to_user(&ppr->fr[10], &sw->f10, sizeof(struct ia64_fpreg) * 6);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 0);
		retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 0);
	}

	/* fph */

	ia64_flush_fph(child);
	retval |= __copy_to_user(&ppr->fr[32], &child->thread.fph, sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __put_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= access_uarea(child, PT_NAT_BITS, &ppr->nat, 0);

	ret = retval ? -EIO : 0;
	return ret;
}
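
/*
 * Tracer-side sketch (illustrative, not part of the original source): on
 * ia64, PTRACE_GETREGS takes a struct pt_all_user_regs pointer in the "data"
 * argument, as seen in sys_ptrace() below:
 *
 *	struct pt_all_user_regs pur;
 *
 *	if (ptrace(PTRACE_GETREGS, pid, 0, (unsigned long) &pur) == 0)
 *		printf("iip=%lx pr=%lx\n", pur.cr_iip, pur.pr);
 */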
static long
ptrace_setregs (struct task_struct *child, struct pt_all_user_regs *ppr)
{
	struct switch_stack *sw;
	struct pt_regs *pt;
	long ret, retval;
	struct unw_frame_info info;
	char nat = 0;
	int i;

	retval = verify_area(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs));
	if (retval != 0) {
		return -EIO;
	}

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);
	unw_init_from_blocked_task(&info, child);
	if (unw_unwind_to_user(&info) < 0) {
		return -EIO;
	}

	if (((unsigned long) ppr & 0x7) != 0) {
		dprintk("ptrace: unaligned register address %p\n", ppr);
		return -EIO;
	}

	retval = 0;

	/* control regs */

	retval |= __get_user(pt->cr_iip, &ppr->cr_iip);
	retval |= access_uarea(child, PT_CR_IPSR, &ppr->cr_ipsr, 1);

	/* app regs */

	retval |= __get_user(pt->ar_pfs, &ppr->ar[PT_AUR_PFS]);
	retval |= __get_user(pt->ar_rsc, &ppr->ar[PT_AUR_RSC]);
	retval |= __get_user(pt->ar_bspstore, &ppr->ar[PT_AUR_BSPSTORE]);
	retval |= __get_user(pt->ar_unat, &ppr->ar[PT_AUR_UNAT]);
	retval |= __get_user(pt->ar_ccv, &ppr->ar[PT_AUR_CCV]);
	retval |= __get_user(pt->ar_fpsr, &ppr->ar[PT_AUR_FPSR]);

	retval |= access_uarea(child, PT_AR_EC, &ppr->ar[PT_AUR_EC], 1);
	retval |= access_uarea(child, PT_AR_LC, &ppr->ar[PT_AUR_LC], 1);
	retval |= access_uarea(child, PT_AR_RNAT, &ppr->ar[PT_AUR_RNAT], 1);
	retval |= access_uarea(child, PT_AR_BSP, &ppr->ar[PT_AUR_BSP], 1);
	retval |= access_uarea(child, PT_CFM, &ppr->cfm, 1);

	/* gr1-gr3 */

	retval |= __copy_from_user(&pt->r1, &ppr->gr[1], sizeof(long) * 3);

	/* gr4-gr7 */

	for (i = 4; i < 8; i++) {
		long ret = unw_get_gr(&info, i, &ppr->gr[i], &nat);
		if (ret < 0)
			return ret;

		retval |= unw_access_gr(&info, i, &ppr->gr[i], &nat, 1);
	}

	/* gr8-gr11 */

	retval |= __copy_from_user(&pt->r8, &ppr->gr[8], sizeof(long) * 4);

	/* gr12-gr15 */

	retval |= __copy_from_user(&pt->r12, &ppr->gr[12], sizeof(long) * 4);

	/* gr16-gr31 */

	retval |= __copy_from_user(&pt->r16, &ppr->gr[16], sizeof(long) * 16);

	/* b0 */

	retval |= __get_user(pt->b0, &ppr->br[0]);

	/* b1-b5 */

	for (i = 1; i < 6; i++) {
		retval |= unw_access_br(&info, i, &ppr->br[i], 1);
	}

	/* b6-b7 */

	retval |= __get_user(pt->b6, &ppr->br[6]);
	retval |= __get_user(pt->b7, &ppr->br[7]);

	/* fr2-fr5 */

	for (i = 2; i < 6; i++) {
		retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 1);
		retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
	}

	/* fr6-fr9 */

	retval |= __copy_from_user(&pt->f6, &ppr->fr[6], sizeof(ppr->fr[6]) * 4);

	/* fp scratch regs(10-15) */

	retval |= __copy_from_user(&sw->f10, &ppr->fr[10], sizeof(ppr->fr[10]) * 6);

	/* fr16-fr31 */

	for (i = 16; i < 32; i++) {
		retval |= access_fr(&info, i, 0, (unsigned long *) &ppr->fr[i], 1);
		retval |= access_fr(&info, i, 1, (unsigned long *) &ppr->fr[i] + 1, 1);
	}

	/* fph */

	ia64_sync_fph(child);
	retval |= __copy_from_user(&child->thread.fph, &ppr->fr[32], sizeof(ppr->fr[32]) * 96);

	/* preds */

	retval |= __get_user(pt->pr, &ppr->pr);

	/* nat bits */

	retval |= access_uarea(child, PT_NAT_BITS, &ppr->nat, 1);

	ret = retval ? -EIO : 0;
	return ret;
}
/*
 * Called by kernel/ptrace.c when detaching.
 *
 * Make sure the single step bit is not set.
 */
void
ptrace_disable (struct task_struct *child)
{
	struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));

	/* make sure the single step/taken-branch trap bits are not set: */
	child_psr->ss = 0;
	child_psr->tb = 0;

	/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
	child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;
}
asmlinkage long
sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data,
	    long arg4, long arg5, long arg6, long arg7, long stack)
{
	struct pt_regs *pt, *regs = (struct pt_regs *) &stack;
	unsigned long urbs_end;
	struct task_struct *child;
	struct switch_stack *sw;
	long ret;

	lock_kernel();
	ret = -EPERM;
	if (request == PTRACE_TRACEME) {
		/* are we already being traced? */
		if (current->ptrace & PT_PTRACED)
			goto out;
		ret = security_ptrace(current->parent, current);
		if (ret)
			goto out;
		current->ptrace |= PT_PTRACED;
		ret = 0;
		goto out;
	}

	ret = -ESRCH;
	read_lock(&tasklist_lock);
	{
		child = find_task_by_pid(pid);
		if (child)
			get_task_struct(child);
	}
	read_unlock(&tasklist_lock);
	if (!child)
		goto out;
	ret = -EPERM;
	if (pid == 1)		/* no messing around with init! */
		goto out_tsk;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_tsk;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_tsk;

	pt = ia64_task_regs(child);
	sw = (struct switch_stack *) (child->thread.ksp + 16);

	switch (request) {
	      case PTRACE_PEEKTEXT:
	      case PTRACE_PEEKDATA:		/* read word at location addr */
		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);

		if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED))
			threads_sync_user_rbs(child, urbs_end, 0);

		ret = ia64_peek(child, sw, urbs_end, addr, &data);
		if (ret == 0) {
			ret = data;
			regs->r8 = 0;	/* ensure "ret" is not mistaken as an error code */
		}
		goto out_tsk;

	      case PTRACE_POKETEXT:
	      case PTRACE_POKEDATA:		/* write the word at location addr */
		urbs_end = ia64_get_user_rbs_end(child, pt, NULL);
		if (!(child->thread.flags & IA64_THREAD_KRBS_SYNCED))
			threads_sync_user_rbs(child, urbs_end, 1);

		ret = ia64_poke(child, sw, urbs_end, addr, data);
		goto out_tsk;

	      case PTRACE_PEEKUSR:		/* read the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 0) < 0) {
			ret = -EIO;
			goto out_tsk;
		}
		ret = data;
		regs->r8 = 0;	/* ensure "ret" is not mistaken as an error code */
		goto out_tsk;

	      case PTRACE_POKEUSR:		/* write the word at addr in the USER area */
		if (access_uarea(child, addr, &data, 1) < 0) {
			ret = -EIO;
			goto out_tsk;
		}
		ret = 0;
		goto out_tsk;

	      case PTRACE_OLD_GETSIGINFO:	/* for backwards-compatibility */
		ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data);
		goto out_tsk;

	      case PTRACE_OLD_SETSIGINFO:	/* for backwards-compatibility */
		ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data);
		goto out_tsk;

	      case PTRACE_SYSCALL:		/* continue and stop at next (return from) syscall */
	      case PTRACE_CONT:			/* restart after signal. */
		ret = -EIO;
		if (data > _NSIG)
			goto out_tsk;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;

		/* make sure the single step/taken-branch trap bits are not set: */
		ia64_psr(pt)->ss = 0;
		ia64_psr(pt)->tb = 0;

		/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
		child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;

		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_KILL:
		/*
		 * Make the child exit.  Best I can do is send it a
		 * SIGKILL.  Perhaps it should be put in the status
		 * that it wants to exit.
		 */
		if (child->state == TASK_ZOMBIE)	/* already dead */
			goto out_tsk;
		child->exit_code = SIGKILL;

		/* make sure the single step/taken-branch trap bits are not set: */
		ia64_psr(pt)->ss = 0;
		ia64_psr(pt)->tb = 0;

		/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
		child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;

		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_SINGLESTEP:		/* let child execute for one instruction */
	      case PTRACE_SINGLEBLOCK:
		ret = -EIO;
		if (data > _NSIG)
			goto out_tsk;

		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		if (request == PTRACE_SINGLESTEP) {
			ia64_psr(pt)->ss = 1;
		} else {
			ia64_psr(pt)->tb = 1;
		}
		child->exit_code = data;

		/* Turn off flag indicating that the KRBS is sync'd with child's VM: */
		child->thread.flags &= ~IA64_THREAD_KRBS_SYNCED;

		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		goto out_tsk;

	      case PTRACE_DETACH:		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		goto out_tsk;

	      case PTRACE_GETREGS:
		ret = ptrace_getregs(child, (struct pt_all_user_regs *) data);
		goto out_tsk;

	      case PTRACE_SETREGS:
		ret = ptrace_setregs(child, (struct pt_all_user_regs *) data);
		goto out_tsk;

	      default:
		ret = ptrace_request(child, request, addr, data);
		goto out_tsk;
	}
  out_tsk:
	put_task_struct(child);
  out:
	unlock_kernel();
	return ret;
}
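
/*
 * Tracer-side sketch (illustrative, not part of the original source): the
 * ia64-specific PTRACE_SINGLEBLOCK above arms psr.tb, so the child stops at
 * the next taken branch instead of after a single instruction:
 *
 *	ptrace(PTRACE_SINGLEBLOCK, pid, 0, 0);	(run to the next taken branch)
 *	waitpid(pid, &status, 0);		(child stops with SIGTRAP)
 *
 * whereas PTRACE_SINGLESTEP arms psr.ss and stops after one instruction.
 */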
asmlinkage void
syscall_trace (void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/*
	 * The 0x80 provides a way for the tracing parent to distinguish between a syscall
	 * stop and SIGTRAP delivery.
	 */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));

	/*
	 * This isn't the same as continuing with a signal, but it will do for normal use.
	 * strace only continues with a signal if the stopping signal is not SIGTRAP.
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
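
/*
 * Tracer-side sketch (illustrative, not part of the original source): with
 * PTRACE_O_TRACESYSGOOD set (which sets PT_TRACESYSGOOD), a parent can tell
 * the syscall stops reported above apart from ordinary SIGTRAPs:
 *
 *	waitpid(pid, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == (SIGTRAP | 0x80))
 *		handle_syscall_stop(pid);	(hypothetical helper)
 *	else if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
 *		handle_trap(pid);		(hypothetical helper)
 */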