* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/config.h>
-#include <asm/offsets.h>
+#include <asm/asm-offsets.h>
/* we have the following possibilities to act on an interruption:
* - handle in assembly and use shadowed registers only
* - save registers to kernel stack and handle in assembly or C */
+#include <asm/psw.h>
+#include <asm/cache.h> /* for L1_CACHE_SHIFT */
#include <asm/assembly.h> /* for LDREG/STREG defines */
#include <asm/pgtable.h>
-#include <asm/psw.h>
#include <asm/signal.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
-#ifdef __LP64__
-#define FRAME_SIZE 128
-#define CMPIB cmpib,*
-#define CMPB cmpb,*
+#include <linux/linkage.h>
+#ifdef CONFIG_64BIT
.level 2.0w
#else
-#define FRAME_SIZE 64
-#define CMPIB cmpib,
-#define CMPB cmpb,
-
.level 2.0
#endif
/* Switch to virtual mapping, trashing only %r1 */
.macro virt_map
- rsm PSW_SM_Q,%r0
- tovirt_r1 %r29
- mfsp %sr7, %r1
- or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
- mtsp %r1, %sr3
+ /* pcxt_ssm_bug */
+ rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation" */
mtsp %r0, %sr4
mtsp %r0, %sr5
+ mfsp %sr7, %r1
+ or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
+ mtsp %r1, %sr3
+ tovirt_r1 %r29
+ load32 KERNEL_PSW, %r1
+
+ rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
mtsp %r0, %sr6
mtsp %r0, %sr7
- ldil L%KERNEL_PSW, %r1
- ldo R%KERNEL_PSW(%r1), %r1
- mtctl %r1, %cr22
- mtctl %r0, %cr17
- mtctl %r0, %cr17
- ldil L%4f, %r1
- ldo R%4f(%r1), %r1
- mtctl %r1, %cr18
+ mtctl %r0, %cr17 /* Clear IIASQ tail */
+ mtctl %r0, %cr17 /* Clear IIASQ head */
+ mtctl %r1, %ipsw
+ load32 4f, %r1
+ mtctl %r1, %cr18 /* Set IIAOQ tail */
ldo 4(%r1), %r1
- mtctl %r1, %cr18
+ mtctl %r1, %cr18 /* Set IIAOQ head */
rfir
nop
4:
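+
+ /* In short: since the PSW cannot be written directly, virt_map
+ * loads KERNEL_PSW into %ipsw, points the interruption address
+ * queues (%cr17/%cr18) at local label 4, and "returns" with rfir,
+ * resuming at 4: with the new virtual-mode PSW in force. */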
* The "get_stack" macros are responsible for determining the
* kernel stack value.
*
- * For Faults:
* If sr7 == 0
*     Already using a kernel stack, so call the
*     get_stack_use_r30 macro to push a pt_regs structure
*     on the stack, and store registers there.
* else
*     Need to set up a kernel stack, so call the
*     get_stack_use_cr30 macro to set up a pointer
*     to the pt_regs structure contained within the
*     task pointer pointed to by cr30. Set the stack
*     pointer to point to the end of the task structure.
*
- * For Interrupts:
- * If sr7 == 0
- * Already using a kernel stack, check to see if r30
- * is already pointing to the per processor interrupt
- * stack. If it is, call the get_stack_use_r30 macro
- * to push a pt_regs structure on the stack, and store
- * registers there. Otherwise, call get_stack_use_cr31
- * to get a pointer to the base of the interrupt stack
- * and push a pt_regs structure on that stack.
- * else
- * Need to set up a kernel stack, so call the
- * get_stack_use_cr30 macro to set up a pointer
- * to the pt_regs structure contained within the
- * task pointer pointed to by cr30. Set the stack
- * pointer to point to the end of the task structure.
- * N.B: We don't use the interrupt stack for the
- * first interrupt from userland, because signals/
- * resched's are processed when returning to userland,
- * and we can sleep in those cases.
- *
* Note that we use shadowed registers for temps until
* we can save %r26 and %r29. %r26 is used to preserve
* %r8 (a shadowed register) which temporarily contained
mfctl %cr30, %r1
tophys %r1,%r9
- LDREG 0(%r9), %r1 /* thread_info -> task_struct */
+ LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
tophys %r1,%r9
ldo TASK_REGS(%r9),%r9
STREG %r30, PT_GR30(%r9)
/* HPMC handler */
.macro hpmc code
nop /* must be a NOP, will be patched later */
- ldil L%PA(os_hpmc), %r3
- ldo R%PA(os_hpmc)(%r3), %r3
+ load32 PA(os_hpmc), %r3
bv,n 0(%r3)
nop
.word 0 /* checksum (will be patched) */
/* Register definitions for tlb miss handler macros */
- va = r8 /* virtual address for which the trap occured */
- spc = r24 /* space for which the trap occured */
+ va = r8 /* virtual address for which the trap occurred */
+ spc = r24 /* space for which the trap occurred */
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* itlb miss interruption handler (parisc 1.1 - 32 bit)
.macro itlb_20 code
mfctl %pcsq, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b itlb_miss_20w
#else
b itlb_miss_20
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* naitlb miss interruption handler (parisc 1.1 - 32 bit)
- *
- * Note: naitlb misses will be treated
- * as an ordinary itlb miss for now.
- * However, note that naitlb misses
- * have the faulting address in the
- * IOR/ISR.
*/
.macro naitlb_11 code
mfctl %isr,spc
- b itlb_miss_11
+ b naitlb_miss_11
mfctl %ior,va
- /* FIXME: If user causes a naitlb miss, the priv level may not be in
- * lower bits of va, where the itlb miss handler is expecting them
- */
.align 32
.endm
/*
* naitlb miss interruption handler (parisc 2.0)
- *
- * Note: naitlb misses will be treated
- * as an ordinary itlb miss for now.
- * However, note that naitlb misses
- * have the faulting address in the
- * IOR/ISR.
*/
.macro naitlb_20 code
mfctl %isr,spc
-#ifdef __LP64__
- b itlb_miss_20w
+#ifdef CONFIG_64BIT
+ b naitlb_miss_20w
#else
- b itlb_miss_20
+ b naitlb_miss_20
#endif
mfctl %ior,va
- /* FIXME: If user causes a naitlb miss, the priv level may not be in
- * lower bits of va, where the itlb miss handler is expecting them
- */
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* dtlb miss interruption handler (parisc 1.1 - 32 bit)
*/
.macro dtlb_20 code
mfctl %isr, spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b dtlb_miss_20w
#else
b dtlb_miss_20
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/* nadtlb miss interruption handler (parisc 1.1 - 32 bit) */
.macro nadtlb_11 code
.macro nadtlb_20 code
mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b nadtlb_miss_20w
#else
b nadtlb_miss_20
.align 32
.endm
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
/*
* dirty bit trap interruption handler (parisc 1.1 - 32 bit)
*/
.macro dbit_20 code
mfctl %isr,spc
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
b dbit_trap_20w
#else
b dbit_trap_20
.align 32
.endm
+ /* In LP64, the space register contains part of the upper 32 bits of
+ * the faulting address. We have to extract this and place it in the va,
+ * zeroing the corresponding bits in the space register */
+ .macro space_adjust spc,va,tmp
+#ifdef CONFIG_64BIT
+ extrd,u \spc,63,SPACEID_SHIFT,\tmp
+ depd %r0,63,SPACEID_SHIFT,\spc
+ depd \tmp,31,SPACEID_SHIFT,\va
+#endif
+ .endm
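+ /* A worked sketch (using the historical value SPACEID_SHIFT == 7,
+ * as in the hard-coded sequences this macro replaces): roughly
+ * tmp = spc & 0x7f; spc &= ~0x7f; va |= tmp << 32;
+ * i.e. the low SPACEID_SHIFT bits of the space id move up into
+ * the virtual address just above bit 31. */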
+
+ .import swapper_pg_dir,code
+
+ /* Get the pgd. For faults on space zero (kernel space), this
+ * is simply swapper_pg_dir. For user space faults, the
+ * pgd is stored in %cr25 */
+ .macro get_pgd spc,reg
+ ldil L%PA(swapper_pg_dir),\reg
+ ldo R%PA(swapper_pg_dir)(\reg),\reg
+ or,COND(=) %r0,\spc,%r0
+ mfctl %cr25,\reg
+ .endm
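+ /* The or,COND(=) above is a branch-free select; in effect
+ * reg = (spc == 0) ? PA(swapper_pg_dir) : %cr25,
+ * since the mfctl is nullified when spc is zero. */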
+
+ /*
+ space_check(spc,tmp,fault)
+
+ spc - The space we saw the fault with.
+ tmp - The place to store the current space.
+ fault - Function to call on failure.
+
+ Only allow faults on different spaces from the
+ currently active one if we're the kernel
+
+ */
+ .macro space_check spc,tmp,fault
+ mfsp %sr7,\tmp
+ or,COND(<>) %r0,\spc,%r0 /* user may execute gateway page
+ * as kernel, so defeat the space
+ * check if it is */
+ copy \spc,\tmp
+ or,COND(=) %r0,\tmp,%r0 /* nullify if executing as kernel */
+ cmpb,COND(<>),n \tmp,\spc,\fault
+ .endm
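+ /* A rough C-ish sketch of the macro above:
+ * if (spc != 0 && %sr7 != 0 && %sr7 != spc) goto fault;
+ * i.e. a cross-space fault is tolerated only when either the
+ * faulting space or the current space is the kernel's (zero). */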
+
+ /* Look up a PTE in a 2-Level scheme (faulting at each
+ * level if the entry isn't present).
+ *
+ * NOTE: we use ldw even for LP64, since the short pointers
+ * can address up to 1TB
+ */
+ .macro L2_ptep pmd,pte,index,va,fault
+#if PT_NLEVELS == 3
+ extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
+#else
+ extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+#endif
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ copy %r0,\pte
+ ldw,s \index(\pmd),\pmd
+ bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
+ dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
+ copy \pmd,%r9
+ SHLREG %r9,PxD_VALUE_SHIFT,\pmd
+ extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
+ dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
+ shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd
+ LDREG %r0(\pmd),\pte /* pmd is now pte */
+ bb,>=,n \pte,_PAGE_PRESENT_BIT,\fault
+ .endm
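+ /* A rough C-ish sketch of the walk above (names informal):
+ * entry = *(u32 *)(table + index * 4); (ldw,s scales the index)
+ * if (!(entry & PxD_PRESENT)) goto fault;
+ * base = (entry & ~PxD_FLAGS) << PxD_VALUE_SHIFT; (short ptr -> phys)
+ * pte = *(pte_t *)(base + (pte_index(va) << BITS_PER_PTE_ENTRY));
+ * if (!(pte & _PAGE_PRESENT)) goto fault; */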
+
+ /* Look up PTE in a 3-Level scheme.
+ *
+ * Here we implement a Hybrid L2/L3 scheme: we allocate the
+ * first pmd adjacent to the pgd. This means that we can
+ * subtract a constant offset to get to it. The pmd and pgd
+ * sizes are arranged so that a single pmd covers 4GB (giving
+ * a full LP64 process access to 8TB) so our lookups are
+ * effectively L2 for the first 4GB of the kernel (i.e. for
+ * all ILP32 processes and all the kernel for machines with
+ * under 4GB of memory) */
+ .macro L3_ptep pgd,pte,index,va,fault
+#if PT_NLEVELS == 3 /* we might have a 2-Level scheme, e.g. with 16kb page size */
+ extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
+ copy %r0,\pte
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ ldw,s \index(\pgd),\pgd
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ bb,>=,n \pgd,_PxD_PRESENT_BIT,\fault
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ shld \pgd,PxD_VALUE_SHIFT,\index
+ extrd,u,*= \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ copy \index,\pgd
+ extrd,u,*<> \va,63-ASM_PGDIR_SHIFT,64-ASM_PGDIR_SHIFT,%r0
+ ldo ASM_PGD_PMD_OFFSET(\pgd),\pgd
+#endif
+ L2_ptep \pgd,\pte,\index,\va,\fault
+ .endm
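+ /* Note on the nullification dance above: each extrd,u,*= (or ,*<>)
+ * re-tests the va bits above ASM_PGDIR_SHIFT, giving a branch-free
+ * if (va >> ASM_PGDIR_SHIFT) pgd = pmd address from the pgd entry;
+ * else pgd += ASM_PGD_PMD_OFFSET; (first 4GB: the adjacent pmd)
+ * before falling through to L2_ptep. */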
+
+ /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
+ * don't needlessly dirty the cache line if it was already set */
+ .macro update_ptep ptep,pte,tmp,tmp1
+ ldi _PAGE_ACCESSED,\tmp1
+ or \tmp1,\pte,\tmp
+ and,COND(<>) \tmp1,\pte,%r0
+ STREG \tmp,0(\ptep)
+ .endm
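+ /* Roughly:
+ * if (!(pte & _PAGE_ACCESSED)) *ptep = pte | _PAGE_ACCESSED;
+ * the and,COND(<>) nullifies the store when the bit is already set,
+ * so a hot PTE's cache line is never dirtied. */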
+
+ /* Set the dirty bit (and accessed bit). No need to be
+ * clever, this is only used from the dirty fault */
+ .macro update_dirty ptep,pte,tmp
+ ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
+ or \tmp,\pte,\pte
+ STREG \pte,0(\ptep)
+ .endm
+
+ /* bitshift difference from a PFN (based on kernel's PAGE_SIZE)
+ * to a CPU TLB 4k PFN (4k => 12 bits to shift) */
+ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ .macro convert_for_tlb_insert20 pte
+ extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+ 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+ .endm
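+ /* Worked example (assuming a 16kB kernel page size): PAGE_SHIFT is
+ * 14, so PAGE_ADD_SHIFT == 2 and the extrd,u keeps two extra low
+ * bits, leaving the PFN in the 4k units the CPU TLB expects; the
+ * depdi then stamps _PAGE_SIZE_ENCODING_DEFAULT into the low bits.
+ * With 4kB pages the adjustment vanishes. */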
+
+ /* Convert the pte and prot to tlb insertion values. How
+ * this happens is quite subtle, read below */
+ .macro make_insert_tlb spc,pte,prot
+ space_to_prot \spc \prot /* create prot id from space */
+ /* The following is the real subtlety. This is depositing
+ * T <-> _PAGE_REFTRAP
+ * D <-> _PAGE_DIRTY
+ * B <-> _PAGE_DMB (memory break)
+ *
+ * Then incredible subtlety: The access rights are
+ * _PAGE_GATEWAY _PAGE_EXEC _PAGE_READ
+ * See 3-14 of the parisc 2.0 manual
+ *
+ * Finally, _PAGE_READ goes in the top bit of PL1 (so we
+ * trigger an access rights trap in user space if the user
+ * tries to read an unreadable page). */
+ depd \pte,8,7,\prot
+
+ /* PAGE_USER indicates the page can be read with user privileges,
+ * so deposit X1|11 to PL1|PL2 (remember the upper bit of PL1
+ * contains _PAGE_READ). */
+ extrd,u,*= \pte,_PAGE_USER_BIT+32,1,%r0
+ depdi 7,11,3,\prot
+ /* If we're a gateway page, drop PL2 back to zero for promotion
+ * to kernel privilege (so we can execute the page as kernel).
+ * Any privilege promotion page always denies read and write */
+ extrd,u,*= \pte,_PAGE_GATEWAY_BIT+32,1,%r0
+ depd %r0,11,2,\prot /* If Gateway, Set PL2 to 0 */
+
+ /* Enforce uncacheable pages.
+ * This should ONLY be used for MMIO on PA 2.0 machines.
+ * Memory/DMA is cache coherent on all PA2.0 machines we support
+ * (that means T-class is NOT supported) and the memory controllers
+ * on most of those machines only handle cache transactions.
+ */
+ extrd,u,*= \pte,_PAGE_NO_CACHE_BIT+32,1,%r0
+ depdi 1,12,1,\prot
+
+ /* Drop prot bits and convert to page addr for iitlbt and idtlbt */
+ convert_for_tlb_insert20 \pte
+ .endm
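+ /* Summary of the prot word assembled above (per the in-line
+ * comments; see the parisc 2.0 manual for the exact bit layout):
+ * a protection id derived from the space register, T/D/B taken
+ * from _PAGE_REFTRAP/_PAGE_DIRTY/_PAGE_DMB, access rights from
+ * _PAGE_GATEWAY/_PAGE_EXEC/_PAGE_READ, PL1/PL2 opened up for
+ * _PAGE_USER pages (PL2 forced back to 0 for gateway pages), and
+ * the uncacheable bit set for _PAGE_NO_CACHE mappings. */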
+
+ /* Identical macro to make_insert_tlb above, except it
+ * makes the tlb entry for the differently formatted pa11
+ * insertion instructions */
+ .macro make_insert_tlb_11 spc,pte,prot
+ zdep \spc,30,15,\prot
+ dep \pte,8,7,\prot
+ extru,= \pte,_PAGE_NO_CACHE_BIT,1,%r0
+ depi 1,12,1,\prot
+ extru,= \pte,_PAGE_USER_BIT,1,%r0
+ depi 7,11,3,\prot /* Set for user space (1 rsvd for read) */
+ extru,= \pte,_PAGE_GATEWAY_BIT,1,%r0
+ depi 0,11,2,\prot /* If Gateway, Set PL2 to 0 */
+
+ /* Get rid of prot bits and convert to page addr for iitlba */
+
+ depi 0,31,ASM_PFN_PTE_SHIFT,\pte
+ SHRREG \pte,(ASM_PFN_PTE_SHIFT-(31-26)),\pte
+ .endm
+
+ /* This is for ILP32 PA2.0 only. The TLB insertion needs
+ * to extend into I/O space if the address is 0xfXXXXXXX,
+ * so we extend the f's into the top word of the pte in
+ * this case */
+ .macro f_extend pte,tmp
+ extrd,s \pte,42,4,\tmp
+ addi,<> 1,\tmp,%r0
+ extrd,s \pte,63,25,\pte
+ .endm
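+ /* In effect (a rough sketch): the extrd,s grabs the 4 bits holding
+ * the top nibble of the 32-bit page address; addi,<> then nullifies
+ * the final sign-extension unless that nibble is 0xf, so only
+ * f-space (I/O) addresses get their f's replicated into the upper
+ * word. */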
+
+ /* The alias region is an 8MB-aligned, 16MB region used to clear
+ * and copy user pages at addresses congruent with the user
+ * virtual address.
+ *
+ * To use the alias page, you set %r26 up with the "to" TLB
+ * entry (identifying the physical page) and %r23 up with
+ * the "from" TLB entry (or nothing if only a "to" entry is
+ * needed, as for clear_user_page_asm) */
+ .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
+ cmpib,COND(<>),n 0,\spc,\fault
+ ldil L%(TMPALIAS_MAP_START),\tmp
+#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
+ /* on LP64, ldil will sign-extend into the upper 32 bits,
+ * which is behaviour we don't want */
+ depdi 0,31,32,\tmp
+#endif
+ copy \va,\tmp1
+ depi 0,31,23,\tmp1
+ cmpb,COND(<>),n \tmp,\tmp1,\fault
+ mfctl %cr19,\tmp /* iir */
+ /* get the opcode (first six bits) into \tmp */
+ extrw,u \tmp,5,6,\tmp
+ /*
+ * Only setting the T bit prevents data cache movein
+ * Setting access rights to zero prevents instruction cache movein
+ *
+ * Note subtlety here: _PAGE_GATEWAY, _PAGE_EXEC and _PAGE_WRITE go
+ * to type field and _PAGE_READ goes to top bit of PL1
+ */
+ ldi (_PAGE_REFTRAP|_PAGE_READ|_PAGE_WRITE),\prot
+ /*
+ * so if the opcode is one (i.e. this is a memory management
+ * instruction) nullify the next load so \prot is only T.
+ * Otherwise this is a normal data operation
+ */
+ cmpiclr,= 0x01,\tmp,%r0
+ ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
+#ifdef CONFIG_64BIT
+ depd,z \prot,8,7,\prot
+#else
+ depw,z \prot,8,7,\prot
+#endif
+ /*
+ * OK, it is in the temp alias region, check whether "from" or "to".
+ * Check "subtle" note in pacache.S re: r23/r26.
+ */
+#ifdef CONFIG_64BIT
+ extrd,u,*= \va,41,1,%r0
+#else
+ extrw,u,= \va,9,1,%r0
+#endif
+ or,COND(tr) %r23,%r0,\pte
+ or %r26,%r0,\pte
+ .endm
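+ /* A rough C-ish sketch of do_alias (constants informal):
+ * if (spc != 0) goto fault; (alias region is kernel-only)
+ * if ((va & ~0x7fffff) != TMPALIAS_MAP_START) goto fault;
+ * prot = (opcode(iir) == 0x01) ? T|R|W (flush insn: T blocks movein)
+ * : DIRTY|R|W; (normal data access)
+ * pte = (selector bit of va) ? %r23 ("from") : %r26 ("to"); */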
+
+
/*
* Align fault_vector_20 on 4K boundary so that both
* fault_vector_11 and fault_vector_20 are on the
* same page. This is only necessary as long as we
* write protect the kernel text, which we may stop
* doing once we use large page translations to cover
* the static part of the kernel address space.
*/
- .export fault_vector_20
-
.text
- .align 4096
+ .align PAGE_SIZE
-fault_vector_20:
+ENTRY(fault_vector_20)
/* First vector is invalid (0) */
.ascii "cows can fly"
.byte 0
def 13
def 14
dtlb_20 15
-#if 0
naitlb_20 16
-#else
- def 16
-#endif
nadtlb_20 17
def 18
def 19
def 29
def 30
def 31
+END(fault_vector_20)
-#ifndef __LP64__
+#ifndef CONFIG_64BIT
- .export fault_vector_11
-
.align 2048
-fault_vector_11:
+ENTRY(fault_vector_11)
/* First vector is invalid (0) */
.ascii "cows can fly"
.byte 0
def 13
def 14
dtlb_11 15
-#if 0
naitlb_11 16
-#else
- def 16
-#endif
nadtlb_11 17
def 18
def 19
def 29
def 30
def 31
+END(fault_vector_11)
#endif
+ /* Fault vector is separately protected and *must* be on its own page */
+ .align PAGE_SIZE
+ENTRY(end_fault_vector)
.import handle_interruption,code
- .import handle_real_interruption,code
.import do_cpu_irq_mask,code
- .import parisc_stopkernel,code
/*
* r26 = function to be called
#define CLONE_VM 0x100 /* Must agree with <linux/sched.h> */
#define CLONE_UNTRACED 0x00800000
- .export __kernel_thread, code
.import do_fork
-__kernel_thread:
+ENTRY(__kernel_thread)
STREG %r2, -RP_OFFSET(%r30)
copy %r30, %r1
ldo PT_SZ_ALGN(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
/* Yo, function pointers in wide mode are little structs... -PB */
ldd 24(%r26), %r2
STREG %r2, PT_GR27(%r1) /* Store child's %dp */
ldd 16(%r26), %r26
- STREG %r22, PT_GR22(%r1) /* Store childs %dp */
+ STREG %r22, PT_GR22(%r1) /* save r22 (arg5) */
copy %r0, %r22 /* user_tid */
#endif
STREG %r26, PT_GR26(%r1) /* Store function & argument for child */
ldil L%CLONE_UNTRACED, %r26
ldo CLONE_VM(%r26), %r26 /* Force CLONE_VM since only init_mm */
or %r26, %r24, %r26 /* will have kernel mappings. */
- copy %r0, %r25 /* stack_start */
+ ldi 1, %r25 /* stack_start, signals kernel thread */
stw %r0, -52(%r30) /* user_tid */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl do_fork, %r2
+ BL do_fork, %r2
copy %r1, %r24 /* pt_regs */
/* Parent Returns here */
LDREG -PT_SZ_ALGN-RP_OFFSET(%r30), %r2
ldo -PT_SZ_ALGN(%r30), %r30
bv %r0(%r2)
- ldw TASK_PID(%r28), %r28
+ nop
+ENDPROC(__kernel_thread)
/*
* Child Returns here
*
* copy_thread moved args from temp save area set up above
* into task save area.
*/
- .export ret_from_kernel_thread
-ret_from_kernel_thread:
+ENTRY(ret_from_kernel_thread)
-#if CONFIG_PREEMPT || CONFIG_SMP
/* Call schedule_tail first though */
- bl schedule_tail, %r2
+ BL schedule_tail, %r2
nop
-#endif
- LDREG -THREAD_SZ_ALGN(%r30), %r1
+ LDREG TI_TASK-THREAD_SZ_ALGN(%r30), %r1
LDREG TASK_PT_GR25(%r1), %r26
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
LDREG TASK_PT_GR27(%r1), %r27
LDREG TASK_PT_GR22(%r1), %r22
#endif
ble 0(%sr7, %r1)
copy %r31, %r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
loadgp /* Thread could have been in a module */
#endif
+#ifndef CONFIG_64BIT
b sys_exit
+#else
+ load32 sys_exit, %r1
+ bv %r0(%r1)
+#endif
ldi 0, %r26
+ENDPROC(ret_from_kernel_thread)
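+
+ /* Flow note: __kernel_thread stores the function and argument in
+ * the child's pt_regs (PT_GR26/PT_GR25) and calls do_fork with
+ * CLONE_VM|CLONE_UNTRACED forced; the child resumes above, reloads
+ * them from its task structure, ble's to the function, and falls
+ * into sys_exit when it returns. */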
.import sys_execve, code
- .export __execve, code
-__execve:
+ENTRY(__execve)
copy %r2, %r15
copy %r30, %r16
ldo PT_SZ_ALGN(%r30), %r30
STREG %r26, PT_GR26(%r16)
STREG %r25, PT_GR25(%r16)
STREG %r24, PT_GR24(%r16)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl sys_execve, %r2
+ BL sys_execve, %r2
copy %r16, %r26
cmpib,=,n 0,%r28,intr_return /* forward */
copy %r16, %r30
bv %r0(%r2)
nop
+ENDPROC(__execve)
- .align 4
/*
* struct task_struct *_switch_to(struct task_struct *prev,
* struct task_struct *next)
*
* switch kernel stacks and return prev */
- .export _switch_to, code
-_switch_to:
+ENTRY(_switch_to)
STREG %r2, -RP_OFFSET(%r30)
+ callee_save_float
callee_save
- ldil L%_switch_to_ret, %r2
- ldo R%_switch_to_ret(%r2), %r2
+ load32 _switch_to_ret, %r2
STREG %r2, TASK_PT_KPC(%r26)
LDREG TASK_PT_KPC(%r25), %r2
STREG %r30, TASK_PT_KSP(%r26)
LDREG TASK_PT_KSP(%r25), %r30
-#ifdef __LP64__
- LDREG 8(%r25), %r25
-#else
- LDREG 4(%r25), %r25
-#endif
+ LDREG TASK_THREAD_INFO(%r25), %r25
bv %r0(%r2)
mtctl %r25,%cr30
_switch_to_ret:
mtctl %r0, %cr0 /* Needed for single stepping */
callee_rest
+ callee_rest_float
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
copy %r26, %r28
+ENDPROC(_switch_to)
/*
* Common rfi return path for interruptions, kernel execve, and
* this way, then we will need to copy %sr3 into PT_SR[3..7], and
* adjust IASQ[0..1].
*
- * Note that the following code uses a "relied upon translation".
- * See the parisc ACD for details. The ssm is necessary due to a
- * PCXT bug.
*/
- .align 4096
+ .align PAGE_SIZE
- .export syscall_exit_rfi
-syscall_exit_rfi:
+ENTRY(syscall_exit_rfi)
mfctl %cr30,%r16
- LDREG 0(%r16), %r16 /* thread_info -> task_struct */
+ LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */
ldo TASK_REGS(%r16),%r16
/* Force iaoq to userspace, as the user has had access to our current
* context via sigcontext. Also filter the PSW for the same reason.
depi 3,31,2,%r19
STREG %r19,PT_IAOQ1(%r16)
LDREG PT_PSW(%r16),%r19
- ldil L%USER_PSW_MASK,%r1
- ldo R%USER_PSW_MASK(%r1),%r1
-#ifdef __LP64__
- ldil L%USER_PSW_HI_MASK,%r20
- ldo R%USER_PSW_HI_MASK(%r20),%r20
+ load32 USER_PSW_MASK,%r1
+#ifdef CONFIG_64BIT
+ load32 USER_PSW_HI_MASK,%r20
depd %r20,31,32,%r1
#endif
and %r19,%r1,%r19 /* Mask out bits that user shouldn't play with */
- ldil L%USER_PSW,%r1
- ldo R%USER_PSW(%r1),%r1
+ load32 USER_PSW,%r1
or %r19,%r1,%r19 /* Make sure default USER_PSW bits are set */
STREG %r19,PT_PSW(%r16)
* (we don't store them in the sigcontext), so set them
* to "proper" values now (otherwise we'll wind up restoring
* whatever was last stored in the task structure, which might
- * be inconsistant if an interrupt occured while on the gateway
- * page) Note that we may be "trashing" values the user put in
- * them, but we don't support the the user changing them.
+ * be inconsistent if an interrupt occurred while on the gateway
+ * page). Note that we may be "trashing" values the user put in
+ * them, but we don't support the user changing them.
*/
STREG %r0,PT_SR2(%r16)
STREG %r19,PT_SR7(%r16)
intr_return:
+ /* NOTE: Need to enable interrupts in case we schedule. */
ssm PSW_SM_I, %r0
- /* Check for software interrupts */
-
- .import irq_stat,data
-
- ldil L%irq_stat,%r19
- ldo R%irq_stat(%r19),%r19
-#ifdef CONFIG_SMP
- mfctl %cr30,%r1
- ldw TI_CPU(%r1),%r1 /* get cpu # - int */
- /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) amount
- ** irq_stat[] is defined using ____cacheline_aligned.
- */
-#ifdef __LP64__
- shld %r1, 6, %r20
-#else
- shlw %r1, 5, %r20
-#endif
- add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
-#endif /* CONFIG_SMP */
-
- LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
- cmpib,<>,n 0,%r20,intr_do_softirq /* forward */
-
intr_check_resched:
/* check for reschedule */
LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_NEED_RESCHED */
bb,<,n %r19,31-TIF_NEED_RESCHED,intr_do_resched /* forward */
+ .import do_notify_resume,code
intr_check_sig:
/* As above */
mfctl %cr30,%r1
- LDREG TI_FLAGS(%r1),%r19 /* sched.h: TIF_SIGPENDING */
- bb,<,n %r19, 31-TIF_SIGPENDING, intr_do_signal /* forward */
+ LDREG TI_FLAGS(%r1),%r19
+ ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20
+ and,COND(<>) %r19, %r20, %r0
+ b,n intr_restore /* skip past if we've nothing to do */
+
+ /* This check is critical to having LWS
+ * working. The IASQ is zero on the gateway
+ * page and we cannot deliver any signals until
+ * we get off the gateway page.
+ *
+ * Only do signals if we are returning to user space
+ */
+ LDREG PT_IASQ0(%r16), %r20
+ cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+ LDREG PT_IASQ1(%r16), %r20
+ cmpib,COND(=),n 0,%r20,intr_restore /* backward */
+
+ copy %r0, %r25 /* long in_syscall = 0 */
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ BL do_notify_resume,%r2
+ copy %r16, %r26 /* struct pt_regs *regs */
+
+ b,n intr_check_sig
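+
+ /* A rough C-ish sketch of the checks above:
+ * if (!(ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK |
+ * _TIF_NOTIFY_RESUME))) goto intr_restore;
+ * if (regs->iasq[0] == 0 || regs->iasq[1] == 0)
+ * goto intr_restore; (gateway page or kernel space)
+ * do_notify_resume(regs, 0); goto intr_check_sig;
+ */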
intr_restore:
copy %r16,%r29
ldo PT_FR31(%r29),%r1
rest_fp %r1
rest_general %r29
- ssm 0,%r0
- nop
- nop
- nop
- nop
- nop
- nop
- nop
+
+ /* inverse of virt_map */
+ pcxt_ssm_bug
+ rsm PSW_SM_QUIET,%r0 /* prepare for rfi */
tophys_r1 %r29
- rsm (PSW_SM_Q|PSW_SM_P|PSW_SM_D|PSW_SM_I),%r0
+
+ /* Restore space id's and special cr's from PT_REGS
+ * structure pointed to by r29
+ */
rest_specials %r29
+
+ /* IMPORTANT: rest_stack restores r29 last (we are using it)!
+ * It also restores r1 and r30.
+ */
rest_stack
+
rfi
nop
- nop
- nop
- nop
- nop
- nop
- nop
- nop
- .import do_softirq,code
-intr_do_softirq:
- bl do_softirq,%r2
-#ifdef __LP64__
- ldo -16(%r30),%r29 /* Reference param save area */
-#else
- nop
-#endif
- b intr_check_resched
- nop
+#ifndef CONFIG_PREEMPT
+# define intr_do_preempt intr_restore
+#endif /* !CONFIG_PREEMPT */
.import schedule,code
intr_do_resched:
- /* Only do reschedule if we are returning to user space */
+ /* Only call schedule on return to userspace. If we're returning
+ * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise
+ * we jump back to intr_restore.
+ */
LDREG PT_IASQ0(%r16), %r20
- CMPIB= 0,%r20,intr_restore /* backward */
+ cmpib,COND(=) 0, %r20, intr_do_preempt
nop
LDREG PT_IASQ1(%r16), %r20
- CMPIB= 0,%r20,intr_restore /* backward */
+ cmpib,COND(=) 0, %r20, intr_do_preempt
nop
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldil L%intr_check_sig, %r2
+#ifndef CONFIG_64BIT
b schedule
+#else
+ load32 schedule, %r20
+ bv %r0(%r20)
+#endif
ldo R%intr_check_sig(%r2), %r2
-
- .import do_signal,code
-intr_do_signal:
- /* Only do signals if we are returning to user space */
- LDREG PT_IASQ0(%r16), %r20
- CMPIB= 0,%r20,intr_restore /* backward */
- nop
- LDREG PT_IASQ1(%r16), %r20
- CMPIB= 0,%r20,intr_restore /* backward */
+ /* preempt the current task on returning to kernel
+ * mode from an interrupt, iff need_resched is set,
+ * and preempt_count is 0. Otherwise, we continue on
+ * our merry way back to the current running task.
+ */
+#ifdef CONFIG_PREEMPT
+ .import preempt_schedule_irq,code
+intr_do_preempt:
+ rsm PSW_SM_I, %r0 /* disable interrupts */
+
+ /* current_thread_info()->preempt_count */
+ mfctl %cr30, %r1
+ LDREG TI_PRE_COUNT(%r1), %r19
+ cmpib,COND(<>) 0, %r19, intr_restore /* if preempt_count > 0 */
+ nop /* prev insn branched backwards */
+
+ /* check if we interrupted a critical path */
+ LDREG PT_PSW(%r16), %r20
+ bb,<,n %r20, 31 - PSW_SM_I, intr_restore
nop
- copy %r0, %r24 /* unsigned long in_syscall */
- copy %r16, %r25 /* struct pt_regs *regs */
-#ifdef __LP64__
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
-
-#warning TAUSQ FIXME - review 2.5 signal return path changes
-
- bl do_signal,%r2
- copy %r0, %r26 /* sigset_t *oldset = NULL */
-
- b intr_restore
+ BL preempt_schedule_irq, %r2
nop
+ b,n intr_restore /* ssm PSW_SM_I done by intr_restore */
+#endif /* CONFIG_PREEMPT */
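+
+ /* A rough C-ish sketch of the intr_do_preempt path above:
+ * local_irq_disable();
+ * if (preempt_count() != 0) goto intr_restore;
+ * if (we interrupted a critical path, per the saved PSW)
+ * goto intr_restore;
+ * preempt_schedule_irq();
+ * goto intr_restore;
+ */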
+
/*
* External interrupts.
*/
intr_extint:
- CMPIB=,n 0,%r16,1f
+ cmpib,COND(=),n 0,%r16,1f
+
get_stack_use_cr30
- b,n 3f
+ b,n 2f
1:
-#if 0 /* Interrupt Stack support not working yet! */
- mfctl %cr31,%r1
- copy %r30,%r17
- /* FIXME! depi below has hardcoded idea of interrupt stack size (32k)*/
-#ifdef __LP64__
- depdi 0,63,15,%r17
-#else
- depi 0,31,15,%r17
-#endif
- CMPB=,n %r1,%r17,2f
- get_stack_use_cr31
- b,n 3f
-#endif
-2:
get_stack_use_r30
-
-3:
+2:
save_specials %r29
virt_map
save_general %r29
ldil L%intr_return, %r2
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
b do_cpu_irq_mask
ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */
+ENDPROC(syscall_exit_rfi)
/* Generic interruptions (illegal insn, unaligned, page fault, etc) */
- .export intr_save, code /* for os_hpmc */
-
-intr_save:
+ENTRY(intr_save) /* for os_hpmc */
mfsp %sr7,%r16
- CMPIB=,n 0,%r16,1f
+ cmpib,COND(=),n 0,%r16,1f
get_stack_use_cr30
b 2f
copy %r8,%r26
* adjust isr/ior below.
*/
- CMPIB=,n 6,%r26,skip_save_ior
+ cmpib,COND(=),n 6,%r26,skip_save_ior
- /* save_specials left ipsw value in r8 for us to test */
mfctl %cr20, %r16 /* isr */
+ nop /* serialize mfctl on PA 2.0 to avoid 4 cycle penalty */
mfctl %cr21, %r17 /* ior */
-#ifdef __LP64__
+
+#ifdef CONFIG_64BIT
/*
* If the interrupted code was running with W bit off (32 bit),
* clear the b bits (bits 0 & 1) in the ior.
+ * save_specials left ipsw value in r8 for us to test.
*/
extrd,u,*<> %r8,PSW_W_BIT,1,%r0
depdi 0,1,2,%r17
*/
/* adjust isr/ior. */
-
- extrd,u %r16,63,7,%r1 /* get high bits from isr for ior */
- depd %r1,31,7,%r17 /* deposit them into ior */
- depdi 0,63,7,%r16 /* clear them from isr */
+ extrd,u %r16,63,SPACEID_SHIFT,%r1 /* get high bits from isr for ior */
+ depd %r1,31,SPACEID_SHIFT,%r17 /* deposit them into ior */
+ depdi 0,63,SPACEID_SHIFT,%r16 /* clear them from isr */
#endif
STREG %r16, PT_ISR(%r29)
STREG %r17, PT_IOR(%r29)
loadgp
copy %r29, %r25 /* arg1 is pt_regs */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- ldil L%intr_restore, %r2
+ ldil L%intr_check_sig, %r2
copy %r25, %r16 /* save pt_regs */
b handle_interruption
- ldo R%intr_restore(%r2), %r2
+ ldo R%intr_check_sig(%r2), %r2
+ENDPROC(intr_save)
/*
*/
t0 = r1 /* temporary register 0 */
- va = r8 /* virtual address for which the trap occured */
+ va = r8 /* virtual address for which the trap occurred */
t1 = r9 /* temporary register 1 */
pte = r16 /* pte/phys page # */
prot = r17 /* prot bits */
- spc = r24 /* space for which the trap occured */
+ spc = r24 /* space for which the trap occurred */
ptp = r25 /* page directory/page table pointer */
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
dtlb_miss_20w:
- extrd,u spc,63,7,t1 /* adjust va */
- depd t1,31,7,va /* adjust va */
- depdi 0,63,7,spc /* adjust space */
- mfctl %cr25,ptp /* Assume user space miss */
- or,*<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extrd,u va,33,9,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,*<>,n t0,spc,dtlb_fault /* forward */
-
- /* First level page table lookup */
-
- ldd,s t1(ptp),ptp
- extrd,u va,42,9,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
- depdi 0,63,12,ptp /* clear prot bits */
-
- /* Second level page table lookup */
-
- ldd,s t0(ptp),ptp
- extrd,u va,51,9,t0 /* get third-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
- depdi 0,63,12,ptp /* clear prot bits */
-
- /* Third level page table lookup */
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dtlb_fault
- shladd t0,3,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldd 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dtlb_check_alias_20w
+ L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
- /* Check whether the "accessed" bit was set, otherwise do so */
+ update_ptep ptp,pte,t0,t1
- or t1,pte,t0 /* t0 has R bit set */
- and,*<> t1,pte,%r0 /* test and nullify if already set */
- std t0,0(ptp) /* write back pte */
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
-
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
+ make_insert_tlb spc,pte,prot
+
idtlbt pte,prot
rfir
nop
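+
+ /* Note: each rewritten miss handler now follows the same recipe:
+ * space_adjust (64-bit only) -> get_pgd -> space_check ->
+ * L3_ptep/L2_ptep -> update_ptep -> make_insert_tlb{,_11} ->
+ * TLB insert -> rfir, replacing the hand-expanded page-table
+ * walks deleted by this patch. */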
dtlb_check_alias_20w:
-
- /* Check to see if fault is in the temporary alias region */
-
- cmpib,*<>,n 0,spc,dtlb_fault /* forward */
- ldil L%(TMPALIAS_MAP_START),t0
- copy va,t1
- depdi 0,63,23,t1
- cmpb,*<>,n t0,t1,dtlb_fault /* forward */
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
- depd,z prot,8,7,prot
-
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
-
- extrd,u,*= va,41,1,r0
- or,*tr %r23,%r0,pte /* If "from" use "from" page */
- or,* %r26,%r0,pte /* else "to", use "to" page */
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault
idtlbt pte,prot
nop
nadtlb_miss_20w:
- extrd,u spc,63,7,t1 /* adjust va */
- depd t1,31,7,va /* adjust va */
- depdi 0,63,7,spc /* adjust space */
- mfctl %cr25,ptp /* Assume user space miss */
- or,*<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extrd,u va,33,9,t1 /* Get pgd index */
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,nadtlb_fault
- mfsp %sr7,t0 /* Get current space */
- or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,*<>,n t0,spc,nadtlb_fault /* forward */
+ L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
- /* First level page table lookup */
+ update_ptep ptp,pte,t0,t1
- ldd,s t1(ptp),ptp
- extrd,u va,42,9,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
- depdi 0,63,12,ptp /* clear prot bits */
+ make_insert_tlb spc,pte,prot
- /* Second level page table lookup */
-
- ldd,s t0(ptp),ptp
- extrd,u va,51,9,t0 /* get third-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
- depdi 0,63,12,ptp /* clear prot bits */
-
- /* Third level page table lookup */
-
- shladd t0,3,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldd 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_20w
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
-
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
idtlbt pte,prot
rfir
nop
-nadtlb_check_flush_20w:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
+nadtlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
- /* Insert a "flush only" translation */
-
- depdi,z 7,7,3,prot
- depdi 1,10,1,prot
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
idtlbt pte,prot
rfir
#else
dtlb_miss_11:
- mfctl %cr25,ptp /* Assume user space miss */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,dtlb_fault /* forward */
-
- /* First level page table lookup */
-
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_11
- depi 0,31,12,ptp /* clear prot bits */
+ get_pgd spc,ptp
- /* Second level page table lookup */
+ space_check spc,t0,dtlb_fault
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dtlb_check_alias_11
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
- /* Check whether the "accessed" bit was set, otherwise do so */
+ update_ptep ptp,pte,t0,t1
- or t1,pte,t0 /* t0 has R bit set */
- and,<> t1,pte,%r0 /* test and nullify if already set */
- stw t0,0(ptp) /* write back pte */
-
- zdep spc,30,15,prot /* create prot id from space */
- dep pte,8,7,prot /* add in prot bits from pte */
-
- extru,= pte,_PAGE_NO_CACHE_BIT,1,r0
- depi 1,12,1,prot
- extru,= pte,_PAGE_USER_BIT,1,r0
- depi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extru,= pte,_PAGE_GATEWAY_BIT,1,r0
- depi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlba */
-
- depi 0,31,12,pte
- extru pte,24,25,pte
+ make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
nop
dtlb_check_alias_11:
-
- /* Check to see if fault is in the temporary alias region */
-
- cmpib,<>,n 0,spc,dtlb_fault /* forward */
- ldil L%(TMPALIAS_MAP_START),t0
- copy va,t1
- depwi 0,31,23,t1
- cmpb,<>,n t0,t1,dtlb_fault /* forward */
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
- depw,z prot,8,7,prot
-
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
-
- extrw,u,= va,9,1,r0
- or,tr %r23,%r0,pte /* If "from" use "from" page */
- or %r26,%r0,pte /* else "to", use "to" page */
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault
idtlba pte,(va)
idtlbp prot,(va)
nop
nadtlb_miss_11:
- mfctl %cr25,ptp /* Assume user space miss */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,nadtlb_fault /* forward */
+ get_pgd spc,ptp
- /* First level page table lookup */
+ space_check spc,t0,nadtlb_fault
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
- depi 0,31,12,ptp /* clear prot bits */
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
- /* Second level page table lookup */
+ update_ptep ptp,pte,t0,t1
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_11
+ make_insert_tlb_11 spc,pte,prot
- zdep spc,30,15,prot /* create prot id from space */
- dep pte,8,7,prot /* add in prot bits from pte */
-
- extru,= pte,_PAGE_NO_CACHE_BIT,1,r0
- depi 1,12,1,prot
- extru,= pte,_PAGE_USER_BIT,1,r0
- depi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extru,= pte,_PAGE_GATEWAY_BIT,1,r0
- depi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlba */
-
- depi 0,31,12,pte
- extru pte,24,25,pte
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
rfir
nop
-nadtlb_check_flush_11:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
- /* Insert a "flush only" translation */
-
- zdepi 7,7,3,prot
- depi 1,10,1,prot
-
- /* Get rid of prot bits and convert to page addr for idtlba */
-
- depi 0,31,12,pte
- extru pte,24,25,pte
+nadtlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
- mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
- mtsp spc,%sr1
-
- idtlba pte,(%sr1,va)
- idtlbp prot,(%sr1,va)
-
- mtsp t0, %sr1 /* Restore sr1 */
+ idtlba pte,(va)
+ idtlbp prot,(va)
rfir
nop
dtlb_miss_20:
- mfctl %cr25,ptp /* Assume user space miss */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,dtlb_fault /* forward */
-
- /* First level page table lookup */
-
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dtlb_check_alias_20
- depi 0,31,12,ptp /* clear prot bits */
-
- /* Second level page table lookup */
-
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dtlb_check_alias_20
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dtlb_fault
- /* Check whether the "accessed" bit was set, otherwise do so */
+ L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
- or t1,pte,t0 /* t0 has R bit set */
- and,<> t1,pte,%r0 /* test and nullify if already set */
- stw t0,0(ptp) /* write back pte */
+ update_ptep ptp,pte,t0,t1
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
+ make_insert_tlb spc,pte,prot
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
+ f_extend pte,t0
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- extrd,s pte,35,4,t0
- depdi 0,63,12,pte /* clear lower 12 bits */
- addi,= 1,t0,0
- extrd,u,*tr pte,56,25,pte
- extrd,s pte,56,25,pte /* bit 31:8 >> 8 */
idtlbt pte,prot
rfir
nop
dtlb_check_alias_20:
-
- /* Check to see if fault is in the temporary alias region */
-
- cmpib,<>,n 0,spc,dtlb_fault /* forward */
- ldil L%(TMPALIAS_MAP_START),t0
- copy va,t1
- depwi 0,31,23,t1
- cmpb,<>,n t0,t1,dtlb_fault /* forward */
- ldi (_PAGE_DIRTY|_PAGE_WRITE|_PAGE_READ),prot
- depd,z prot,8,7,prot
-
- /*
- * OK, it is in the temp alias region, check whether "from" or "to".
- * Check "subtle" note in pacache.S re: r23/r26.
- */
-
- extrw,u,= va,9,1,r0
- or,tr %r23,%r0,pte /* If "from" use "from" page */
- or %r26,%r0,pte /* else "to", use "to" page */
-
+ do_alias spc,t0,t1,va,pte,prot,dtlb_fault
+
idtlbt pte,prot
rfir
nop
nadtlb_miss_20:
- mfctl %cr25,ptp /* Assume user space miss */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,nadtlb_fault /* forward */
-
- /* First level page table lookup */
+ get_pgd spc,ptp
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,nadtlb_emulate
- depi 0,31,12,ptp /* clear prot bits */
+ space_check spc,t0,nadtlb_fault
- /* Second level page table lookup */
+ L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,nadtlb_check_flush_20
+ update_ptep ptp,pte,t0,t1
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
+ make_insert_tlb spc,pte,prot
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- extrd,s pte,35,4,t0
- depdi 0,63,12,pte /* clear lower 12 bits */
- addi,= 1,t0,0
- extrd,u,*tr pte,56,25,pte
- extrd,s pte,56,25,pte /* bit 31:8 >> 8 */
+ f_extend pte,t0
+
idtlbt pte,prot
rfir
nop
-nadtlb_check_flush_20:
- bb,>=,n pte,_PAGE_FLUSH_BIT,nadtlb_emulate
-
- /* Insert a "flush only" translation */
-
- depdi,z 7,7,3,prot
- depdi 1,10,1,prot
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
+nadtlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
- depdi 0,63,12,pte
- extrd,u pte,56,32,pte
idtlbt pte,prot
rfir
nop
+
#endif
nadtlb_emulate:
* of the instruction. Since we don't insert a translation
* we can get a lot of faults during a flush loop, so it makes
* sense to try to do it here with minimum overhead. We only
- * emulate fdc,fic & pdc instructions whose base and index
- * registers are not shadowed. We defer everything else to the
- * "slow" path.
+ * emulate fdc,fic,pdc,probew,prober instructions whose base
+ * and index registers are not shadowed. We defer everything
+ * else to the "slow" path.
*/
mfctl %cr19,%r9 /* Get iir */
+
+ /* PA 2.0 Arch Ref. Book pg 382 has a good description of the insn bits.
+ Checks for fdc,fdce,pdc,"fic,4f",prober,probeir,probew,probeiw */
+
+ /* Checks for fdc,fdce,pdc,"fic,4f" only */
ldi 0x280,%r16
and %r9,%r16,%r17
- cmpb,<>,n %r16,%r17,nadtlb_fault /* Not fdc,fic,pdc */
+ cmpb,<>,n %r16,%r17,nadtlb_probe_check
bb,>=,n %r9,26,nadtlb_nullify /* m bit not set, just nullify */
- b,l get_register,%r25
+ BL get_register,%r25
extrw,u %r9,15,5,%r8 /* Get index register # */
- CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
copy %r1,%r24
- b,l get_register,%r25
+ BL get_register,%r25
extrw,u %r9,10,5,%r8 /* Get base register # */
- CMPIB=,n -1,%r1,nadtlb_fault /* have to use slow path */
- b,l set_register,%r25
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
+ BL set_register,%r25
add,l %r1,%r24,%r1 /* doesn't affect c/b bits */
nadtlb_nullify:
- mfctl %cr22,%r8 /* Get ipsw */
+ mfctl %ipsw,%r8
ldil L%PSW_N,%r9
or %r8,%r9,%r8 /* Set PSW_N */
- mtctl %r8,%cr22
+ mtctl %r8,%ipsw
rfir
nop
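+
+ /* In effect, nadtlb_nullify skips the trapping instruction: setting
+ * PSW_N in the saved IPSW makes the rfir resume at the faulting
+ * insn with the nullify bit set, so it completes as a no-op. */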
-#ifdef __LP64__
+ /*
+ When there is no translation for the probe address, then we
+ must nullify the insn and return zero in the target register.
+ This will indicate to the calling code that it does not have
+ write/read privileges to this address.
+
+ This should technically work for prober and probew in PA 1.1,
+ and also probe,r and probe,w in PA 2.0
+
+ WARNING: USE ONLY NON-SHADOW REGISTERS WITH PROBE INSN!
+ THE SLOW-PATH EMULATION HAS NOT BEEN WRITTEN YET.
+
+ */
+nadtlb_probe_check:
+ ldi 0x80,%r16
+ and %r9,%r16,%r17
+ cmpb,<>,n %r16,%r17,nadtlb_fault /* Must be probe,[rw]*/
+ BL get_register,%r25 /* Find the target register */
+ extrw,u %r9,31,5,%r8 /* Get target register */
+ cmpib,COND(=),n -1,%r1,nadtlb_fault /* have to use slow path */
+ BL set_register,%r25
+ copy %r0,%r1 /* Write zero to target register */
+ b nadtlb_nullify /* Nullify return insn */
+ nop
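+
+ /* A rough C-ish sketch of the probe fixup above:
+ * if ((iir & 0x80) != 0x80) goto nadtlb_fault; (not a probe,[rw])
+ * t = iir & 0x1f; (target register field)
+ * if (register t is shadowed) goto nadtlb_fault; (slow path missing)
+ * gr[t] = 0; (report "no access")
+ * then nullify the probe itself via nadtlb_nullify and rfir. */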
+
+
+#ifdef CONFIG_64BIT
itlb_miss_20w:
/*
* I miss is a little different, since we allow users to fault
* on the gateway page which is in the kernel address space.
*/
- extrd,u spc,63,7,t1 /* adjust va */
- depd t1,31,7,va /* adjust va */
- depdi 0,63,7,spc /* adjust space */
- cmpib,*= 0,spc,itlb_miss_kernel_20w
- extrd,u va,33,9,t1 /* Get pgd index */
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,itlb_fault
- mfctl %cr25,ptp /* load user pgd */
+ L3_ptep ptp,pte,t0,va,itlb_fault
- mfsp %sr7,t0 /* Get current space */
- or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,*<>,n t0,spc,itlb_fault /* forward */
+ update_ptep ptp,pte,t0,t1
- /* First level page table lookup */
+ make_insert_tlb spc,pte,prot
+
+ iitlbt pte,prot
-itlb_miss_common_20w:
- ldd,s t1(ptp),ptp
- extrd,u va,42,9,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,itlb_fault
- depdi 0,63,12,ptp /* clear prot bits */
+ rfir
+ nop
- /* Second level page table lookup */
+naitlb_miss_20w:
- ldd,s t0(ptp),ptp
- extrd,u va,51,9,t0 /* get third-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,itlb_fault
- depdi 0,63,12,ptp /* clear prot bits */
+ /*
+ * I miss is a little different, since we allow users to fault
+ * on the gateway page which is in the kernel address space.
+ */
- /* Third level page table lookup */
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,naitlb_fault
- shladd t0,3,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldd 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,itlb_fault
+ L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
- /* Check whether the "accessed" bit was set, otherwise do so */
+ update_ptep ptp,pte,t0,t1
- or t1,pte,t0 /* t0 has R bit set */
- and,*<> t1,pte,%r0 /* test and nullify if already set */
- std t0,0(ptp) /* write back pte */
+ make_insert_tlb spc,pte,prot
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
+ iitlbt pte,prot
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
+ rfir
+ nop
- /* Get rid of prot bits and convert to page addr for iitlbt */
+naitlb_check_alias_20w:
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault
- depdi 0,63,12,pte
- extrd,u pte,56,32,pte
- iitlbt pte,prot
+ iitlbt pte,prot
rfir
nop
-itlb_miss_kernel_20w:
- b itlb_miss_common_20w
- mfctl %cr24,ptp /* Load kernel pgd */
#else
itlb_miss_11:
+ get_pgd spc,ptp
- /*
- * I miss is a little different, since we allow users to fault
- * on the gateway page which is in the kernel address space.
- */
-
- cmpib,= 0,spc,itlb_miss_kernel_11
- extru va,9,10,t1 /* Get pgd index */
+ space_check spc,t0,itlb_fault
- mfctl %cr25,ptp /* load user pgd */
+ L2_ptep ptp,pte,t0,va,itlb_fault
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,itlb_fault /* forward */
+ update_ptep ptp,pte,t0,t1
- /* First level page table lookup */
+ make_insert_tlb_11 spc,pte,prot
-itlb_miss_common_11:
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,itlb_fault
- depi 0,31,12,ptp /* clear prot bits */
+ mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
+ mtsp spc,%sr1
- /* Second level page table lookup */
+ iitlba pte,(%sr1,va)
+ iitlbp prot,(%sr1,va)
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,itlb_fault
+ mtsp t0, %sr1 /* Restore sr1 */
- /* Check whether the "accessed" bit was set, otherwise do so */
+ rfir
+ nop
- or t1,pte,t0 /* t0 has R bit set */
- and,<> t1,pte,%r0 /* test and nullify if already set */
- stw t0,0(ptp) /* write back pte */
+naitlb_miss_11:
+ get_pgd spc,ptp
- zdep spc,30,15,prot /* create prot id from space */
- dep pte,8,7,prot /* add in prot bits from pte */
+ space_check spc,t0,naitlb_fault
- extru,= pte,_PAGE_NO_CACHE_BIT,1,r0
- depi 1,12,1,prot
- extru,= pte,_PAGE_USER_BIT,1,r0
- depi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extru,= pte,_PAGE_GATEWAY_BIT,1,r0
- depi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
- /* Get rid of prot bits and convert to page addr for iitlba */
+ update_ptep ptp,pte,t0,t1
- depi 0,31,12,pte
- extru pte,24,25,pte
+ make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t0 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
rfir
nop
-itlb_miss_kernel_11:
- b itlb_miss_common_11
- mfctl %cr24,ptp /* Load kernel pgd */
+naitlb_check_alias_11:
+ do_alias spc,t0,t1,va,pte,prot,itlb_fault
+
+ iitlba pte,(%sr0, va)
+ iitlbp prot,(%sr0, va)
+
+ rfir
+ nop
+
itlb_miss_20:
+ get_pgd spc,ptp
- /*
- * I miss is a little different, since we allow users to fault
- * on the gateway page which is in the kernel address space.
- */
+ space_check spc,t0,itlb_fault
- cmpib,= 0,spc,itlb_miss_kernel_20
- extru va,9,10,t1 /* Get pgd index */
+ L2_ptep ptp,pte,t0,va,itlb_fault
- mfctl %cr25,ptp /* load user pgd */
+ update_ptep ptp,pte,t0,t1
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,itlb_fault /* forward */
+ make_insert_tlb spc,pte,prot
- /* First level page table lookup */
+ f_extend pte,t0
-itlb_miss_common_20:
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,itlb_fault
- depi 0,31,12,ptp /* clear prot bits */
+ iitlbt pte,prot
- /* Second level page table lookup */
+ rfir
+ nop
- sh2addl t0,ptp,ptp
- ldi _PAGE_ACCESSED,t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,itlb_fault
+naitlb_miss_20:
+ get_pgd spc,ptp
- /* Check whether the "accessed" bit was set, otherwise do so */
+ space_check spc,t0,naitlb_fault
- or t1,pte,t0 /* t0 has R bit set */
- and,<> t1,pte,%r0 /* test and nullify if already set */
- stw t0,0(ptp) /* write back pte */
+ L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
+ update_ptep ptp,pte,t0,t1
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
+ make_insert_tlb spc,pte,prot
- /* Get rid of prot bits and convert to page addr for iitlbt */
+ f_extend pte,t0
- extrd,s pte,35,4,t0
- depdi 0,63,12,pte /* clear lower 12 bits */
- addi,= 1,t0,0
- extrd,u,*tr pte,56,25,pte
- extrd,s pte,56,25,pte /* bit 31:8 >> 8 */
iitlbt pte,prot
rfir
nop
+naitlb_check_alias_20:
+ do_alias spc,t0,t1,va,pte,prot,naitlb_fault
-itlb_miss_kernel_20:
- b itlb_miss_common_20
- mfctl %cr24,ptp /* Load kernel pgd */
-#endif
-
-#ifdef __LP64__
-
-dbit_trap_20w:
- extrd,u spc,63,7,t1 /* adjust va */
- depd t1,31,7,va /* adjust va */
- depdi 0,1,2,va /* adjust va */
- depdi 0,63,7,spc /* adjust space */
- mfctl %cr25,ptp /* Assume user space miss */
- or,*<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extrd,u va,33,9,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,*= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,*<>,n t0,spc,dbit_fault /* forward */
+ iitlbt pte,prot
- /* First level page table lookup */
+ rfir
+ nop
- ldd,s t1(ptp),ptp
- extrd,u va,42,9,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dbit_fault
- depdi 0,63,12,ptp /* clear prot bits */
+#endif
- /* Second level page table lookup */
+#ifdef CONFIG_64BIT
- ldd,s t0(ptp),ptp
- extrd,u va,51,9,t0 /* get third-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dbit_fault
- depdi 0,63,12,ptp /* clear prot bits */
+dbit_trap_20w:
+ space_adjust spc,va,t0
+ get_pgd spc,ptp
+ space_check spc,t0,dbit_fault
- /* Third level page table lookup */
+ L3_ptep ptp,pte,t0,va,dbit_fault
- shladd t0,3,ptp,ptp
#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nolock_20w
- ldil L%PA(pa_dbit_lock),t0
- ldo R%PA(pa_dbit_lock)(t0),t0
+ cmpib,COND(=),n 0,spc,dbit_nolock_20w
+ load32 PA(pa_dbit_lock),t0
dbit_spin_20w:
- ldcw 0(t0),t1
- cmpib,= 0,t1,dbit_spin_20w
+ LDCW 0(t0),t1
+ cmpib,COND(=) 0,t1,dbit_spin_20w
nop
dbit_nolock_20w:
#endif
- ldi (_PAGE_ACCESSED|_PAGE_DIRTY),t1
- ldd 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dbit_fault
-
- /* Set Accessed and Dirty bits in the pte */
+ update_dirty ptp,pte,t1
- or t1,pte,pte
- std pte,0(ptp) /* write back pte */
-
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
-
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlbt */
-
- depdi 0,63,12,pte
- extrd,u pte,56,52,pte
+ make_insert_tlb spc,pte,prot
+
idtlbt pte,prot
#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nounlock_20w
+ cmpib,COND(=),n 0,spc,dbit_nounlock_20w
ldi 1,t1
stw t1,0(t0)
#else
dbit_trap_11:
- mfctl %cr25,ptp /* Assume user space trap */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
-
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,dbit_fault /* forward */
- /* First level page table lookup */
+ get_pgd spc,ptp
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dbit_fault
- depi 0,31,12,ptp /* clear prot bits */
+ space_check spc,t0,dbit_fault
- /* Second level page table lookup */
+ L2_ptep ptp,pte,t0,va,dbit_fault
- sh2addl t0,ptp,ptp
#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nolock_11
- ldil L%PA(pa_dbit_lock),t0
- ldo R%PA(pa_dbit_lock)(t0),t0
+ cmpib,COND(=),n 0,spc,dbit_nolock_11
+ load32 PA(pa_dbit_lock),t0
dbit_spin_11:
- ldcw 0(t0),t1
+ LDCW 0(t0),t1
cmpib,= 0,t1,dbit_spin_11
nop
dbit_nolock_11:
#endif
- ldi (_PAGE_ACCESSED|_PAGE_DIRTY),t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dbit_fault
-
- /* Set Accessed and Dirty bits in the pte */
+ update_dirty ptp,pte,t1
- or t1,pte,pte
- stw pte,0(ptp) /* write back pte */
-
- zdep spc,30,15,prot /* create prot id from space */
- dep pte,8,7,prot /* add in prot bits from pte */
-
- extru,= pte,_PAGE_NO_CACHE_BIT,1,r0
- depi 1,12,1,prot
- extru,= pte,_PAGE_USER_BIT,1,r0
- depi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extru,= pte,_PAGE_GATEWAY_BIT,1,r0
- depi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- /* Get rid of prot bits and convert to page addr for idtlba */
-
- depi 0,31,12,pte
- extru pte,24,25,pte
+ make_insert_tlb_11 spc,pte,prot
mfsp %sr1,t1 /* Save sr1 so we can use it in tlb inserts */
mtsp spc,%sr1
mtsp t1, %sr1 /* Restore sr1 */
#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nounlock_11
+ cmpib,COND(=),n 0,spc,dbit_nounlock_11
ldi 1,t1
stw t1,0(t0)
nop
dbit_trap_20:
- mfctl %cr25,ptp /* Assume user space trap */
- or,<> %r0,spc,%r0 /* If it is user space, nullify */
- mfctl %cr24,ptp /* Load kernel pgd instead */
- extru va,9,10,t1 /* Get pgd index */
+ get_pgd spc,ptp
- mfsp %sr7,t0 /* Get current space */
- or,= %r0,t0,%r0 /* If kernel, nullify following test */
- cmpb,<>,n t0,spc,dbit_fault /* forward */
+ space_check spc,t0,dbit_fault
- /* First level page table lookup */
+ L2_ptep ptp,pte,t0,va,dbit_fault
- ldwx,s t1(ptp),ptp
- extru va,19,10,t0 /* get second-level index */
- bb,>=,n ptp,_PAGE_PRESENT_BIT,dbit_fault
- depi 0,31,12,ptp /* clear prot bits */
-
- /* Second level page table lookup */
-
- sh2addl t0,ptp,ptp
#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nolock_20
- ldil L%PA(pa_dbit_lock),t0
- ldo R%PA(pa_dbit_lock)(t0),t0
+ cmpib,COND(=),n 0,spc,dbit_nolock_20
+ load32 PA(pa_dbit_lock),t0
dbit_spin_20:
- ldcw 0(t0),t1
+ LDCW 0(t0),t1
cmpib,= 0,t1,dbit_spin_20
nop
dbit_nolock_20:
#endif
- ldi (_PAGE_ACCESSED|_PAGE_DIRTY),t1
- ldw 0(ptp),pte
- bb,>=,n pte,_PAGE_PRESENT_BIT,dbit_fault
-
- /* Set Accessed and Dirty bits in the pte */
+ update_dirty ptp,pte,t1
- or t1,pte,pte
- stw pte,0(ptp) /* write back pte */
+ make_insert_tlb spc,pte,prot
- space_to_prot spc prot /* create prot id from space */
- depd pte,8,7,prot /* add in prot bits from pte */
-
- extrd,u,*= pte,_PAGE_USER_BIT+32,1,r0
- depdi 7,11,3,prot /* Set for user space (1 rsvd for read) */
- extrd,u,*= pte,_PAGE_GATEWAY_BIT+32,1,r0
- depdi 0,11,2,prot /* If Gateway, Set PL2 to 0 */
-
- extrd,s pte,35,4,t0
- depdi 0,63,12,pte /* clear lower 12 bits */
- addi,= 1,t0,0
- extrd,u,*tr pte,56,25,pte
- extrd,s pte,56,25,pte /* bit 31:8 >> 8 */
+ f_extend pte,t1
+
idtlbt pte,prot
#ifdef CONFIG_SMP
- CMPIB=,n 0,spc,dbit_nounlock_20
+ cmpib,COND(=),n 0,spc,dbit_nounlock_20
ldi 1,t1
stw t1,0(t0)
b intr_save
ldi 17,%r8
+naitlb_fault:
+ b intr_save
+ ldi 16,%r8
+
dtlb_fault:
b intr_save
ldi 15,%r8
LDREG PT_GR18(\regs),%r18
.endm
- .export sys_fork_wrapper
- .export child_return
-sys_fork_wrapper:
- mfctl %cr30,%r1 /* get pt regs */
- LDREG 0(%r1),%r1
+ENTRY(sys_fork_wrapper)
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1
ldo TASK_REGS(%r1),%r1
reg_save %r1
mfctl %cr27, %r3
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
LDREG PT_GR30(%r1),%r25
copy %r1,%r24
- bl sys_clone,%r2
+ BL sys_clone,%r2
ldi SIGCHLD,%r26
LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
wrapper_exit:
ldo -FRAME_SIZE(%r30),%r30 /* get the stackframe */
- mfctl %cr30,%r1 /* get pt regs */
- LDREG 0(%r1),%r1
- ldo TASK_REGS(%r1),%r1
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
LDREG PT_CR27(%r1), %r3
mtctl %r3, %cr27
ldi __NR_fork,%r20
bv %r0(%r2)
STREG %r20,PT_GR20(%r1)
+ENDPROC(sys_fork_wrapper)
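
These wrappers all replace the old mfctl %cr30 / LDREG 0(%r1) pair with one load relative to %r30: the parisc kernel stack grows upward from a THREAD_SZ_ALGN-aligned block whose base holds thread_info, and one FRAME_SIZE frame has already been pushed, so the task pointer sits at a fixed negative offset from sp. As address arithmetic, with the asm-offsets.c constants passed in rather than assumed:

	struct task_struct;			/* opaque for the sketch */

	static struct task_struct *task_from_sp(unsigned long sp,
						unsigned long thread_sz_algn,
						unsigned long frame_size,
						unsigned long ti_task)
	{
		/* LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 */
		unsigned long ti = sp - frame_size - thread_sz_algn;

		return *(struct task_struct **)(ti + ti_task);
	}

child_return below subtracts FRAME_SIZE twice for the same reason: it runs while the extra frame pushed around the schedule_tail call is still on the stack.
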
/* Set the return value for the child */
-child_return:
-#if CONFIG_SMP || CONFIG_PREEMPT
- bl schedule_tail, %r2
+ENTRY(child_return)
+ BL schedule_tail, %r2
nop
-#endif
- mfctl %cr30,%r2
- LDREG 0(%r2),%r2
- LDREG TASK_PT_GR19(%r2),%r2
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE-FRAME_SIZE(%r30), %r1
+ LDREG TASK_PT_GR19(%r1),%r2
b wrapper_exit
copy %r0,%r28
+ENDPROC(child_return)
-
- .export sys_clone_wrapper
-sys_clone_wrapper:
- mfctl %cr30,%r1 /* get pt regs */
- LDREG 0(%r1),%r1
- ldo TASK_REGS(%r1),%r1
+
+ENTRY(sys_clone_wrapper)
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
reg_save %r1
mfctl %cr27, %r3
STREG %r3, PT_CR27(%r1)
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
+ /* WARNING - Clobbers r19 and r21, userspace must save these! */
STREG %r2,PT_GR19(%r1) /* save for child */
STREG %r30,PT_GR21(%r1)
- bl sys_clone,%r2
+ BL sys_clone,%r2
copy %r1,%r24
b wrapper_exit
LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
+ENDPROC(sys_clone_wrapper)
- .export sys_vfork_wrapper
-sys_vfork_wrapper:
- mfctl %cr30,%r1 /* get pt regs */
- LDREG 0(%r1),%r1
- ldo TASK_REGS(%r1),%r1
+
+ENTRY(sys_vfork_wrapper)
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
reg_save %r1
mfctl %cr27, %r3
STREG %r3, PT_CR27(%r1)
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
STREG %r2,PT_GR19(%r1) /* save for child */
STREG %r30,PT_GR21(%r1)
- bl sys_vfork,%r2
+ BL sys_vfork,%r2
copy %r1,%r26
b wrapper_exit
LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2
+ENDPROC(sys_vfork_wrapper)
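
Each wrapper also brackets its C call with the same pair: STREG %r2,-RP_OFFSET(%r30) before growing the frame and LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2 afterwards. On the upward-growing stack these address the same slot, which two lines of C make obvious:

	#include <assert.h>

	static void rp_slot_identity(unsigned long sp, unsigned long rp_off,
				     unsigned long frame_size)
	{
		unsigned long saved = sp - rp_off;	/* STREG %r2,-RP_OFFSET(%r30) */
		unsigned long grown = sp + frame_size;	/* ldo FRAME_SIZE(%r30),%r30 */

		/* LDREG -RP_OFFSET-FRAME_SIZE(%r30),%r2 reads the same word */
		assert(grown - rp_off - frame_size == saved);
	}
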
.macro execve_wrapper execve
- mfctl %cr30,%r1 /* get pt regs */
- LDREG 0(%r1),%r1
- ldo TASK_REGS(%r1),%r1
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
/*
* Do we need to save/restore r3-r18 here?
STREG %r2,-RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30),%r30
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl \execve,%r2
+ BL \execve,%r2
copy %r1,%arg0
ldo -FRAME_SIZE(%r30),%r30
nop
.endm
- .export sys_execve_wrapper
.import sys_execve
-
-sys_execve_wrapper:
+ENTRY(sys_execve_wrapper)
execve_wrapper sys_execve
+ENDPROC(sys_execve_wrapper)
-#ifdef __LP64__
- .export sys32_execve_wrapper
+#ifdef CONFIG_64BIT
.import sys32_execve
-
-sys32_execve_wrapper:
+ENTRY(sys32_execve_wrapper)
execve_wrapper sys32_execve
+ENDPROC(sys32_execve_wrapper)
#endif
- .export sys_rt_sigreturn_wrapper
-sys_rt_sigreturn_wrapper:
- mfctl %cr30,%r26 /* get pt regs */
- LDREG 0(%r26),%r26
- ldo TASK_REGS(%r26),%r26
+ENTRY(sys_rt_sigreturn_wrapper)
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26
+ ldo TASK_REGS(%r26),%r26 /* get pt regs */
/* Don't save regs, we are going to restore them from sigcontext. */
STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
- bl sys_rt_sigreturn,%r2
+ BL sys_rt_sigreturn,%r2
ldo -16(%r30),%r29 /* Reference param save area */
#else
- bl sys_rt_sigreturn,%r2
+ BL sys_rt_sigreturn,%r2
ldo FRAME_SIZE(%r30), %r30
#endif
LDREG -RP_OFFSET(%r30), %r2
/* FIXME: I think we need to restore a few more things here. */
- mfctl %cr30,%r1 /* get pt regs */
- LDREG 0(%r1),%r1
- ldo TASK_REGS(%r1),%r1
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r1 /* get pt regs */
reg_restore %r1
/* If the signal was received while the process was blocked on a
*/
bv %r0(%r2)
LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */
+ENDPROC(sys_rt_sigreturn_wrapper)
- .export sys_sigaltstack_wrapper
-sys_sigaltstack_wrapper:
+ENTRY(sys_sigaltstack_wrapper)
/* Get the user stack pointer */
- mfctl %cr30,%r24
- LDREG 0(%r24),%r24
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1),%r24 /* get pt regs */
LDREG TASK_PT_GR30(%r24),%r24
STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
ldo FRAME_SIZE(%r30), %r30
- bl do_sigaltstack,%r2
+ BL do_sigaltstack,%r2
ldo -16(%r30),%r29 /* Reference param save area */
#else
- bl do_sigaltstack,%r2
+ BL do_sigaltstack,%r2
ldo FRAME_SIZE(%r30), %r30
#endif
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
nop
+ENDPROC(sys_sigaltstack_wrapper)
-#ifdef __LP64__
- .export sys32_sigaltstack_wrapper
-sys32_sigaltstack_wrapper:
+#ifdef CONFIG_64BIT
+ENTRY(sys32_sigaltstack_wrapper)
/* Get the user stack pointer */
- mfctl %cr30,%r24
- LDREG 0(%r24),%r24
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r24
LDREG TASK_PT_GR30(%r24),%r24
STREG %r2, -RP_OFFSET(%r30)
ldo FRAME_SIZE(%r30), %r30
- bl do_sigaltstack32,%r2
+ BL do_sigaltstack32,%r2
ldo -16(%r30),%r29 /* Reference param save area */
ldo -FRAME_SIZE(%r30), %r30
LDREG -RP_OFFSET(%r30), %r2
bv %r0(%r2)
nop
+ENDPROC(sys32_sigaltstack_wrapper)
#endif
- .export sys_rt_sigsuspend_wrapper
-sys_rt_sigsuspend_wrapper:
- mfctl %cr30,%r24 /* get pt regs */
- LDREG 0(%r24),%r24
- ldo TASK_REGS(%r24),%r24
- reg_save %r24
-
- STREG %r2, -RP_OFFSET(%r30)
-#ifdef __LP64__
- ldo FRAME_SIZE(%r30), %r30
- bl sys_rt_sigsuspend,%r2
- ldo -16(%r30),%r29 /* Reference param save area */
-#else
- bl sys_rt_sigsuspend,%r2
- ldo FRAME_SIZE(%r30), %r30
-#endif
-
- ldo -FRAME_SIZE(%r30), %r30
- LDREG -RP_OFFSET(%r30), %r2
-
- mfctl %cr30,%r1 /* get pt regs */
- LDREG 0(%r1),%r1
- ldo TASK_REGS(%r1),%r1
- reg_restore %r1
-
- bv %r0(%r2)
- nop
-
- .export syscall_exit
-syscall_exit:
+ENTRY(syscall_exit)
/* NOTE: HP-UX syscalls also come through here
- after hpux_syscall_exit fixes up return
- values. */
+ * after hpux_syscall_exit fixes up return
+ * values. */
+
/* NOTE: Not all syscalls exit this way. rt_sigreturn will exit
* via syscall_exit_rfi if the signal was received while the process
* was running.
/* save return value now */
mfctl %cr30, %r1
- LDREG 0(%r1),%r1
+ LDREG TI_TASK(%r1),%r1
STREG %r28,TASK_PT_GR28(%r1)
- /* Save other hpux returns if personality is PER_HPUX */
-
+#ifdef CONFIG_HPUX
/* <linux/personality.h> cannot be easily included */
#define PER_HPUX 0x10
- LDREG TASK_PERSONALITY(%r1),%r19
-#warning the ldo+CMPIB could probably be done better but 0x10 is outside of range of CMPIB
+ ldw TASK_PERSONALITY(%r1),%r19
+
+ /* We can't use "CMPIB<> PER_HPUX" since "im5" field is sign extended */
ldo -PER_HPUX(%r19), %r19
- CMPIB<>,n 0,%r19,1f
+ cmpib,COND(<>),n 0,%r19,1f
+
+ /* Save other hpux returns if personality is PER_HPUX */
STREG %r22,TASK_PT_GR22(%r1)
STREG %r29,TASK_PT_GR29(%r1)
1:
+#endif /* CONFIG_HPUX */
+
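The new comment is the whole story: cmpib encodes its immediate in a sign-extended 5-bit field (im5, range -16..15), and PER_HPUX is 0x10 = 16, one past the top. Folding the constant into an ldo first reduces the test to a compare against zero:

	#define PER_HPUX 0x10		/* from <linux/personality.h> */

	static int personality_is_hpux(long r19)
	{
		r19 -= PER_HPUX;	/* ldo -PER_HPUX(%r19),%r19 */
		return r19 == 0;	/* cmpib,COND(<>),n 0,%r19,1f skips the stores */
	}
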
/* Seems to me that dp could be wrong here, if the syscall involved
* calling a module, and nothing got round to restoring dp on return.
*/
loadgp
-syscall_check_bh:
- /* Check for software interrupts */
- .import irq_stat,data
- ldil L%irq_stat,%r19
- ldo R%irq_stat(%r19),%r19
-#ifdef CONFIG_SMP
- /* sched.h: int processor */
- /* %r26 is used as scratch register to index into irq_stat[] */
- ldw TI_CPU-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 /* cpu # */
- /* shift left ____cacheline_aligned (aka L1_CACHE_BYTES) bits */
-#ifdef __LP64__
- shld %r26, 6, %r20
-#else
- shlw %r26, 5, %r20
-#endif
- add %r19,%r20,%r19 /* now have &irq_stat[smp_processor_id()] */
-#endif /* CONFIG_SMP */
- LDREG IRQSTAT_SIRQ_PEND(%r19),%r20 /* hardirq.h: unsigned long */
- cmpib,<>,n 0,%r20,syscall_do_softirq /* forward */
-syscall_check_resched:
- /* check for reschedule */
- LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
- bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
-syscall_check_sig:
- /* These should be the same effect, but which is faster? */
-#if 1
- mfctl %cr30,%r1
-#else
- ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get thread info ptr */
-#endif
- /* check for pending signals */
- LDREG TI_FLAGS(%r1),%r19
- bb,<,n %r19, 31-TIF_SIGPENDING, syscall_do_signal /* forward */
-syscall_restore:
- mfctl %cr30,%r1
- LDREG TI_FLAGS(%r1), %r19 /* Are we being ptraced? */
- bb,< %r19, 31-TIF_SYSCALL_TRACE,syscall_restore_rfi
- LDREG 0(%r1),%r1 /* delay slot! */
+syscall_check_resched:
+ /* check for reschedule */
+ LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 /* long */
+ bb,<,n %r19, 31-TIF_NEED_RESCHED, syscall_do_resched /* forward */
+
+ .import do_signal,code
+syscall_check_sig:
+ LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
+ ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26
+ and,COND(<>) %r19, %r26, %r0
+ b,n syscall_restore /* skip past if we've nothing to do */
+
+syscall_do_signal:
+ /* Save callee-save registers (for sigcontext).
+ * FIXME: After this point the process structure should be
+ * consistent with all the relevant state of the process
+ * before the syscall. We need to verify this.
+ */
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1), %r26 /* struct pt_regs *regs */
+ reg_save %r26
+
+#ifdef CONFIG_64BIT
+ ldo -16(%r30),%r29 /* Reference param save area */
+#endif
+
+ BL do_notify_resume,%r2
+ ldi 1, %r25 /* long in_syscall = 1 */
+
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+ ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
+ reg_restore %r20
+
+ b,n syscall_check_sig
+
+syscall_restore:
+ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1
+
+ /* Are we being ptraced? */
+ ldw TASK_FLAGS(%r1),%r19
+ ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2
+ and,COND(=) %r19,%r2,%r0
+ b,n syscall_restore_rfi
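
Regrouped, the new exit path reads as a pair of check-call loops that only fall into syscall_restore once nothing is pending. A pseudo-C outline, with the two kernel entry points declared only so the sketch is self-describing and the flag masks taken as parameters:

	struct pt_regs;
	extern void schedule(void);
	extern void do_notify_resume(struct pt_regs *regs, long in_syscall);

	static void syscall_exit_work(volatile unsigned long *ti_flags,
				      unsigned long resched_mask,
				      unsigned long sig_mask,
				      struct pt_regs *regs)
	{
		while (*ti_flags & resched_mask)	/* syscall_check_resched */
			schedule();			/* syscall_do_resched */

		while (*ti_flags & sig_mask)		/* syscall_check_sig */
			do_notify_resume(regs, 1);	/* ldi 1,%r25: in_syscall */

		/* fall through to syscall_restore */
	}

The loops mirror the branches exactly: schedule() returns to syscall_check_resched and do_notify_resume to syscall_check_sig, with sig_mask standing for (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK).
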
ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */
rest_fp %r19
LDREG TASK_PT_GR29(%r1),%r29
LDREG TASK_PT_GR31(%r1),%r31 /* restore syscall rp */
+ /* NOTE: We use rsm/ssm pair to make this operation atomic */
+ LDREG TASK_PT_GR30(%r1),%r1 /* Get user sp */
rsm PSW_SM_I, %r0
- LDREG TASK_PT_GR30(%r1),%r30 /* restore user sp */
- mfsp %sr3,%r1 /* Get users space id */
+ copy %r1,%r30 /* Restore user sp */
+ mfsp %sr3,%r1 /* Get user space id */
mtsp %r1,%sr7 /* Restore sr7 */
ssm PSW_SM_I, %r0
+
+ /* Set sr2 to zero for userspace syscalls to work. */
+ mtsp %r0,%sr2
mtsp %r1,%sr4 /* Restore sr4 */
mtsp %r1,%sr5 /* Restore sr5 */
mtsp %r1,%sr6 /* Restore sr6 */
depi 3,31,2,%r31 /* ensure return to user mode. */
-#ifdef __LP64__
- /* Since we are returning to a 32 bit user process, we always
- * clear the W bit. This means that the be (and mtsp) gets
- * executed in narrow mode, but that is OK, since we are
- * returning to a 32 bit process. When we support 64 bit processes
- * we won't clear the W bit, so the be will run in wide mode.
- */
-
- be 0(%sr3,%r31) /* return to user space */
+#ifdef CONFIG_64BIT
+ /* decide whether to reset the wide mode bit
+ *
+ * For a syscall, the W bit is stored in the lowest bit
+ * of sp. Extract it and reset W if it is zero */
+ extrd,u,*<> %r30,63,1,%r1
rsm PSW_SM_W, %r0
-#else
- be,n 0(%sr3,%r31) /* return to user space */
+ /* now reset the lowest bit of sp if it was set */
+ xor %r30,%r1,%r30
#endif
+ be,n 0(%sr3,%r31) /* return to user space */
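
On the 64-bit path the W (wide) bit rides in bit 0 of the saved user sp, a slot that is otherwise always zero because parisc user stacks are 64-byte aligned. The nullifying extract means PSW_W is cleared only for narrow processes, after which the flag bit is stripped from sp:

	#define PSW_W_FLAG (1u << 3)	/* placeholder, not the psw.h value */

	static unsigned long pop_wide_bit(unsigned long sp, unsigned int *psw)
	{
		unsigned long w = sp & 1;	/* extrd,u,*<> %r30,63,1,%r1 */

		if (!w)
			*psw &= ~PSW_W_FLAG;	/* rsm PSW_SM_W, nullified if w */
		return sp ^ w;			/* xor %r30,%r1,%r30 */
	}
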
/* We have to return via an RFI, so that PSW T and R bits can be set
* appropriately.
* the most efficient way of doing things, but it works.
*/
syscall_restore_rfi:
- LDREG TASK_PTRACE(%r1), %r19
ldo -1(%r0),%r2 /* Set recovery cntr to -1 */
mtctl %r2,%cr0 /* for immediate trap */
LDREG TASK_PT_PSW(%r1),%r2 /* Get old PSW */
ldi 0x0b,%r20 /* Create new PSW */
depi -1,13,1,%r20 /* C, Q, D, and I bits */
- bb,>=,n %r19,15,try_tbit /* PT_SINGLESTEP */
+
+ /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are
+ * set in thread_info.h and converted to PA bitmap
+ * numbers in asm-offsets.c */
+
+ /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */
+ extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0
depi -1,27,1,%r20 /* R bit */
-try_tbit:
- bb,>=,n %r19,14,psw_setup /* PT_BLOCKSTEP, see ptrace.c */
+
+ /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */
+ extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0
depi -1,7,1,%r20 /* T bit */
-psw_setup:
+
STREG %r20,TASK_PT_PSW(%r1)
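
The stepping paths now key off thread_info flags and arm the PSW directly: R (PA bit 27) enables the recovery counter so the next instruction traps, and T (PA bit 7) traps taken branches. With PA numbering counting bit 0 from the MSB of the 32-bit word, the two deposits map to:

	static unsigned int arm_step_bits(unsigned int psw, unsigned long flags,
					  unsigned long tif_single,
					  unsigned long tif_block)
	{
		if (flags & tif_single)
			psw |= 1u << (31 - 27);	/* depi -1,27,1,%r20: R bit */
		if (flags & tif_block)
			psw |= 1u << (31 - 7);	/* depi -1,7,1,%r20: T bit */
		return psw;
	}
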
/* Always store space registers, since sr3 can be changed (e.g. fork) */
bb,< %r2,30,pt_regs_ok /* Branch if D set */
ldo TASK_REGS(%r1),%r25
reg_save %r25 /* Save r3 to r18 */
+
+ /* Save the current sr */
mfsp %sr0,%r2
STREG %r2,TASK_PT_SR0(%r1)
+
+ /* Save the scratch sr */
mfsp %sr1,%r2
STREG %r2,TASK_PT_SR1(%r1)
- mfsp %sr2,%r2
- STREG %r2,TASK_PT_SR2(%r1)
+
+ /* sr2 should be set to zero for userspace syscalls */
+ STREG %r0,TASK_PT_SR2(%r1)
+
pt_regs_ok:
LDREG TASK_PT_GR31(%r1),%r2
depi 3,31,2,%r2 /* ensure return to user mode. */
b intr_restore
nop
- .import do_softirq,code
-syscall_do_softirq:
- bl do_softirq,%r2
- nop
- b syscall_check_resched
- ssm PSW_SM_I, %r0 /* do_softirq returns with I bit off */
-
.import schedule,code
syscall_do_resched:
- bl schedule,%r2
-#ifdef __LP64__
+ BL schedule,%r2
+#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#else
nop
#endif
- b syscall_check_bh /* if resched, we start over again */
+ b syscall_check_resched /* if resched, we start over again */
nop
+ENDPROC(syscall_exit)
- .import do_signal,code
-syscall_do_signal:
- /* Save callee-save registers (for sigcontext).
- FIXME: After this point the process structure should be
- consistent with all the relevant state of the process
- before the syscall. We need to verify this. */
- LDREG 0(%r1),%r1
- ldo TASK_REGS(%r1), %r25 /* struct pt_regs *regs */
- reg_save %r25
- ldi 1, %r24 /* unsigned long in_syscall */
-
-#ifdef __LP64__
- ldo -16(%r30),%r29 /* Reference param save area */
-#endif
-#warning TAUSQ FIXME, this is wrong
- bl do_signal,%r2
- copy %r0, %r26 /* sigset_t *oldset = NULL */
-
- mfctl %cr30,%r1 /* reload task ptr */
- LDREG 0(%r1),%r1
- ldo TASK_REGS(%r1), %r20 /* reload pt_regs */
- reg_restore %r20
- b,n syscall_restore
+#ifdef CONFIG_FUNCTION_TRACER
+ .import ftrace_function_trampoline,code
+ENTRY(_mcount)
+ copy %r3, %arg2
+ b ftrace_function_trampoline
+ nop
+ENDPROC(_mcount)
+
+ENTRY(return_to_handler)
+ load32 return_trampoline, %rp
+ copy %ret0, %arg0
+ copy %ret1, %arg1
+ b ftrace_return_to_handler
+ nop
+return_trampoline:
+ copy %ret0, %rp
+ copy %r23, %ret0
+ copy %r24, %ret1
+
+.globl ftrace_stub
+ftrace_stub:
+ bv %r0(%rp)
+ nop
+ENDPROC(return_to_handler)
+#endif /* CONFIG_FUNCTION_TRACER */
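
The tracer block follows the usual mcount contract: a -pg prologue calls _mcount, which forwards to ftrace_function_trampoline; on the way back, return_to_handler hands the traced function's return values to ftrace_return_to_handler, whose result is the displaced return address, with the values riding back through r23/r24. A model of the register shuffle only; the callback type stands in for the real kernel entry point, whose exact signature this sketch does not assume:

	struct graph_exit { unsigned long rp, ret0, ret1; };

	static struct graph_exit return_to_handler_model(
		unsigned long ret0, unsigned long ret1,
		unsigned long (*lookup)(unsigned long, unsigned long))
	{
		struct graph_exit e;

		e.rp   = lookup(ret0, ret1);	/* copy %ret0/%ret1 to args, b ... */
		e.ret0 = ret0;			/* return_trampoline: copy %r23,%ret0 */
		e.ret1 = ret1;			/* return_trampoline: copy %r24,%ret1 */
		return e;			/* copy %ret0,%rp: resume at caller */
	}
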
+get_register:
/*
* get_register is used by the non access tlb miss handlers to
* copy the value of the general register specified in r8 into
* a -1 in it, but that is OK, it just means that we will have
* to use the slow path instead).
*/
-
-get_register:
blr %r8,%r0
nop
bv %r0(%r25) /* r0 */
bv %r0(%r25) /* r31 */
copy %r31,%r1
+
+set_register:
/*
* set_register is used by the non access tlb miss handlers to
* copy the value of r1 into the general register specified in
* r8.
*/
-
-set_register:
blr %r8,%r0
nop
bv %r0(%r25) /* r0 (silly, but it is a place holder) */
copy %r1,%r30
bv %r0(%r25) /* r31 */
copy %r1,%r31
+