1 /* $Id: dtlb_base.S,v 1.9 2001/03/22 00:12:32 davem Exp $
2 * dtlb_base.S: Front end to DTLB miss replacement strategy.
3 * This is included directly into the trap table.
5 * Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
6 * Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
 */
/* Constants for the quick DTLB miss handlers below. */

/* Low 10 bits of the TLB_TAG_ACCESS register hold the context number;
 * zero context means the access came from the nucleus (kernel), which is
 * exactly what the "andcc ... TAG_CONTEXT_BITS" test below checks.
 */
9 #define TAG_CONTEXT_BITS 0x3ff
/* Each VPTE entry is 8 bytes, so the byte offset into the VPTE table is
 * (VA >> PAGE_SHIFT) * 8 == VA >> (PAGE_SHIFT - 3).  The shift is done
 * arithmetically (srax) so negative (high) VAs index below the VPTE base,
 * as described in the header comment.
 */
10 #define VPTE_SHIFT (PAGE_SHIFT - 3)
/* High PTE bits for kernel 4MB mappings, pre-XORed with a VA-dependent
 * constant so that a single "xor %g2, %g4, %g5" against the faulting VA
 * finishes building the PTE (see ICACHE line 2 below).
 * NOTE(review): 0xfffff80000000000 presumably encodes the direct-mapped
 * kernel VA base -- confirm against the trap-entry code that loads %g2.
 */
11 #define KERN_HIGHBITS ((_PAGE_VALID | _PAGE_SZ4MB) ^ 0xfffff80000000000)
/* Low PTE bits: cacheable (physical+virtual), privileged, writable. */
12 #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
14 /* %g1 TLB_SFSR (%g1 + %g1 == TLB_TAG_ACCESS)
15 * %g2 (KERN_HIGHBITS | KERN_LOWBITS)
16 * %g3 VPTE base (0xfffffffe00000000) Spitfire/Blackbird (44-bit VA space)
17 * (0xffe0000000000000) Cheetah (64-bit VA space)
18 * %g7 __pa(current->mm->pgd)
20 * The VPTE base value is completely magic, but note that
21 * nothing else in the kernel other than these TLB miss
22 * handlers knows anything about the VPTE mechanism or
23 * how it works. Consider the 44-bit VADDR Ultra-I/II
26 * VA[0 : (1<<43)] produce VPTE index [%g3 : 0]
27 * VA[0 : -(1<<43)] produce VPTE index [%g3-(1<<(43-PAGE_SHIFT+3)) : %g3]
29 * For Cheetah's 64-bit VADDR space this is:
31 * VA[0 : (1<<63)] produce VPTE index [%g3 : 0]
32 * VA[0 : -(1<<63)] produce VPTE index [%g3-(1<<(63-PAGE_SHIFT+3)) : %g3]
34 * If you're paying attention you'll notice that this means half of
35 * the VPTE table is above %g3 and half is below, low VA addresses
36 * map progressively upwards from %g3, and high VA addresses map
37 * progressively downwards from %g3. This trick was needed to make
38 * the same 8 instruction handler work both for Spitfire/Blackbird's
39 * peculiar VA space hole configuration and the full 64-bit VA space
40 * one of Cheetah at the same time.
 */
43 /* Ways we can get here:
45 * 1) Nucleus loads and stores to/from PA-->VA direct mappings.
46 * 2) Nucleus loads and stores to/from vmalloc() areas.
47 * 3) User loads and stores.
48 * 4) User space accesses by nucleus at tl0
 */
51 /* DTLB ** ICACHE line 1: Quick user TLB misses */
/* Register conventions on entry (set up by trap entry; see header above):
 *   %g1 = TLB_SFSR (so %g1 + %g1 == TLB_TAG_ACCESS)
 *   %g2 = KERN_HIGHBITS | KERN_LOWBITS
 *   %g3 = VPTE base
 *   %g7 = __pa(current->mm->pgd)
 *
 * NOTE(review): this excerpt elides some original lines (the embedded
 * numbering skips 58, 60, 66, 68, 70, 80...); the branch delay slots and
 * the "retry" after the TLB reload at label 9 are presumably on those
 * elided lines -- consult the full file before changing instruction
 * counts, since each handler must fit its 8-instruction I-cache line.
 */
52 ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS
53 andcc %g4, TAG_CONTEXT_BITS, %g0 ! From Nucleus?
54 be,pn %xcc, 3f ! Yep, special processing
55 srax %g4, VPTE_SHIFT, %g6 ! Create VPTE offset
/* User miss: fetch the PTE from the VPTE table (ASI_S = as-if-supervisor
 * secondary context).  A valid PTE has bit 63 (_PAGE_VALID) set, hence
 * the branch-if-less-than-zero test at 1: below.
 */
56 ldxa [%g3 + %g6] ASI_S, %g5 ! Load VPTE
57 1: brlz,pt %g5, 9f ! Valid, load into TLB
59 ba,a,pt %xcc, 4f ! Invalid, branch out

61 /* DTLB ** ICACHE line 2: Quick kernel TLB misses */
/* Nucleus miss: a negative tag (VA bit 63 set) is a kernel virtual
 * mapping whose PTE is synthesized by XORing the precomputed %g2
 * constant with the faulting VA; otherwise load the kernel VPTE
 * (ASI_N = nucleus context) and rejoin the common validity check at 1b.
 */
62 3: brlz,pt %g4, 9f ! Kernel virtual map?
63 xor %g2, %g4, %g5 ! Finish bit twiddles
64 ldxa [%g3 + %g6] ASI_N, %g5 ! Yep, load k-vpte
65 ba,pt %xcc, 1b ! Continue tlb reload
/* Common TLB reload: %g5 holds the final PTE; writing the DTLB data-in
 * register installs it for the tag latched in TLB_TAG_ACCESS.
 */
67 9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
69 4: rdpr %pstate, %g5 ! Move into alternate globals

71 /* DTLB ** ICACHE line 3: winfixups+real_faults */
/* Slow path: switch global register sets, then dispatch on trap level --
 * tl == 1 means an ordinary fault (sparc64_realfault_common), anything
 * deeper is an etrap/rtrap window fault handled by winfix_trampoline.
 * %g4 = FAULT_CODE_DTLB and %g5 = faulting VA are the arguments both
 * paths expect (FAULT_CODE_DTLB is set in the branch delay slot).
 */
72 wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate
73 rdpr %tl, %g4 ! See where we came from.
74 cmp %g4, 1 ! Is etrap/rtrap window fault?
75 mov TLB_TAG_ACCESS, %g4 ! Prepare for fault processing
76 ldxa [%g4] ASI_DMMU, %g5 ! Load faulting VA page
77 be,pt %xcc, sparc64_realfault_common ! Jump to normal fault handling
78 mov FAULT_CODE_DTLB, %g4 ! It was read from DTLB
79 ba,a,pt %xcc, winfix_trampoline ! Call window fixup code

81 /* DTLB ** ICACHE line 4: Unused... */
/* Undo the file-local helper macros so they cannot leak into the rest of
 * the trap table this file is included into.
 */
91 #undef TAG_CONTEXT_BITS