get_rnat (struct pt_regs *pt, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr)
{
	/*
	 * Assemble the RNaT word the user would see at URNAT_ADDR from the
	 * kernel-side copies: the kernel register backing store (KRBS),
	 * sw->ar_rnat (for slots not yet flushed to the kernel rbs), and
	 * pt->ar_rnat (for slots below the user's bspstore).
	 *
	 * kmask selects the bits held on the kernel side; ~kmask selects
	 * the low bits that live in pt->ar_rnat.  kmask stays ~0UL unless
	 * the slot straddles the user's bspstore.
	 */
	unsigned long rnat0 = 0, rnat1 = 0, urnat = 0, *slot0_kaddr, kmask = ~0UL;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs;

	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	/* bit position of user slot 0 within the kernel rnat word, and the
	   two kernel rnat words its 63 bits can span: */
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be merged in from pt->ar_rnat */
		kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
		urnat = (pt->ar_rnat & ~kmask);
	}
	/* rnat0 and rnat1 must be fetched independently: either word may
	   still be sitting in sw->ar_rnat rather than the kernel rbs.  */
	if (rnat0_kaddr >= kbsp) {
		rnat0 = sw->ar_rnat;
	} else if (rnat0_kaddr > krbs) {
		rnat0 = *rnat0_kaddr;
	}
	if (rnat1_kaddr >= kbsp) {
		rnat1 = sw->ar_rnat;
	} else if (rnat1_kaddr > krbs) {
		rnat1 = *rnat1_kaddr;
	}
	urnat |= ((rnat1 << (63 - shift)) | (rnat0 >> shift)) & kmask;
	return urnat;
}
put_rnat (struct pt_regs *pt, struct switch_stack *sw,
	  unsigned long *krbs, unsigned long *urnat_addr, unsigned long urnat)
{
	/*
	 * Scatter the user-visible RNaT word URNAT for slot URNAT_ADDR back
	 * into the kernel-side copies: the low ~kmask bits go into
	 * pt->ar_rnat, the remaining bits into the kernel register backing
	 * store (or sw->ar_rnat for slots not yet flushed to the rbs).
	 *
	 * Inverse of get_rnat(); kmask has the same meaning there.
	 */
	unsigned long rnat0 = 0, rnat1 = 0, *slot0_kaddr, kmask = ~0UL, mask;
	unsigned long *kbsp, *ubspstore, *rnat0_kaddr, *rnat1_kaddr, shift;
	long num_regs;

	kbsp = (unsigned long *) sw->ar_bspstore;
	ubspstore = (unsigned long *) pt->ar_bspstore;
	/*
	 * First, figure out which bit number slot 0 in user-land maps
	 * to in the kernel rnat.  Do this by figuring out how many
	 * register slots we're beyond the user's backingstore and
	 * then computing the equivalent address in kernel space.
	 */
	num_regs = (long) ia64_rse_num_regs(ubspstore, urnat_addr + 1);
	slot0_kaddr = ia64_rse_skip_regs(krbs, num_regs);
	/* bit position of user slot 0 within the kernel rnat word, and the
	   two kernel rnat words its 63 bits can span: */
	shift = ia64_rse_slot_num(slot0_kaddr);
	rnat1_kaddr = ia64_rse_rnat_addr(slot0_kaddr);
	rnat0_kaddr = rnat1_kaddr - 64;

	if (ubspstore + 63 > urnat_addr) {
		/* some bits need to be placed in pt->ar_rnat: */
		kmask = ~((1UL << ia64_rse_slot_num(ubspstore)) - 1);
		pt->ar_rnat = (pt->ar_rnat & kmask) | (urnat & ~kmask);
	}
	/*
	 * Note: Section 11.1 of the EAS guarantees that bit 63 of an
	 * rnat slot is ignored. so we don't have to clear it here.
	 */
	rnat0 = (urnat << shift);
	mask = ~0UL << shift;
	if (rnat0_kaddr >= kbsp) {
		sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat0 & mask);
	} else if (rnat0_kaddr > krbs) {
		*rnat0_kaddr = ((*rnat0_kaddr & ~mask) | (rnat0 & mask));
	}

	rnat1 = (urnat >> (63 - shift));
	mask = ~0UL >> (63 - shift);
	if (rnat1_kaddr >= kbsp) {
		sw->ar_rnat = (sw->ar_rnat & ~mask) | (rnat1 & mask);
	} else if (rnat1_kaddr > krbs) {
		*rnat1_kaddr = ((*rnat1_kaddr & ~mask) | (rnat1 & mask));
	}
}
psr->mfh = 0;
ia64_save_fpu(&task->thread.fph[0]);
task->thread.flags |= IA64_THREAD_FPH_VALID;
- task->thread.last_fph_cpu = smp_processor_id();
}
}
ia64_flush_fph(task);
if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
task->thread.flags |= IA64_THREAD_FPH_VALID;
- task->thread.last_fph_cpu = -1; /* force reload */
memset(&task->thread.fph, 0, sizeof(task->thread.fph));
}
+#ifndef CONFIG_SMP
if (ia64_get_fpu_owner() == task)
ia64_set_fpu_owner(0);
+#endif
psr->dfh = 1;
}
case PT_R4: case PT_R5: case PT_R6: case PT_R7:
if (write_access) {
/* read NaT bit first: */
- unsigned long dummy;
-
- ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, &dummy, &nat);
+ ret = unw_get_gr(&info, (addr - PT_R4)/8 + 4, data, &nat);
if (ret < 0)
return ret;
}