/*
 * TLB exception handling code for r4k.
 *
 * Copyright (C) 1994, 1995, 1996 by Ralf Baechle and Andreas Busse
 *
 * Multi-cpu abstraction and reworking:
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
 */
12 #include <linux/init.h>
13 #include <linux/config.h>
16 #include <asm/offset.h>
17 #include <asm/cachectl.h>
18 #include <asm/fpregdef.h>
19 #include <asm/mipsregs.h>
21 #include <asm/pgtable-bits.h>
22 #include <asm/processor.h>
23 #include <asm/regdef.h>
24 #include <asm/stackframe.h>
/* Enables the tlb-probe fast paths in the slow handlers -- presumably
 * trades safety for speed; TODO confirm against the full file. */
#define TLB_OPTIMIZE /* If you are paranoid, disable this. */

/* Masks/shift used to turn CP0_CONTEXT / CP0_BADVADDR bits into byte
 * indexes into a page table.  By their use below, PTEP_INDX_MSK
 * appears to select an (even/odd) pte-pair slot and PTE_INDX_MSK a
 * single pte -- TODO confirm.
 * NOTE(review): lines of this conditional block (including its
 * #else/#endif) were dropped in extraction; the first group is the
 * CONFIG_64BIT_PHYS_ADDR variant, the second the default. */
#ifdef CONFIG_64BIT_PHYS_ADDR
#define PTEP_INDX_MSK 0xff0
#define PTE_INDX_MSK 0xff8
#define PTE_INDX_SHIFT 9
#define PTEP_INDX_MSK 0xff8
#define PTE_INDX_MSK 0xffc
#define PTE_INDX_SHIFT 10
/*
 * ABUSE of CPP macros 101.
 *
 * After this macro runs, the pte faulted on is
 * in register PTE, a ptr into the table in which
 * the pte belongs is in PTR.
 */
/* GET_PGD(scratch, ptr): leave a pointer into the current pgd in
 * `ptr`, clobbering `scratch`.  Indexes pgd_current by a value read
 * from CP0_CONTEXT.
 * NOTE(review): this extract is missing lines -- part of the first
 * macro body, the #ifdef/#else separating the two definitions, and
 * the entire body of the second definition are not visible.  Do not
 * rely on this text as the complete macro. */
#define GET_PGD(scratch, ptr) \
mfc0 ptr, CP0_CONTEXT; \
la scratch, pgd_current;\
addu ptr, scratch, ptr; \
#define GET_PGD(scratch, ptr) \
/* LOAD_PTE(pte, ptr): walk the page table for the faulting address
 * (CP0_BADVADDR), apparently leaving the pte in `pte` and a pointer
 * to its slot in `ptr` (per the "ABUSE of CPP macros" note above in
 * the original file) -- TODO confirm.
 * NOTE(review): several body lines were dropped in extraction (the
 * pgd dereference between the two CP0_BADVADDR reads is missing);
 * the text below is not the complete macro. */
#define LOAD_PTE(pte, ptr) \
mfc0 pte, CP0_BADVADDR; \
srl pte, pte, _PGDIR_SHIFT; \
mfc0 pte, CP0_BADVADDR; \
srl pte, pte, PTE_INDX_SHIFT; \
and pte, pte, PTE_INDX_MSK; \
/* This places the even/odd pte pair in the page
 * table at PTR into ENTRYLO0 and ENTRYLO1 using
 * TMP as a scratch register.
 */
/* The ori/xori pair clears the PTE_SIZE bit of `ptr`, aligning it
 * down to the even member of the pte pair; the odd pte then sits at
 * PTE_SIZE(ptr).  PTE_SRL by 6 converts a pte to EntryLo format
 * (matching the "# convert to entrylo" comments in the refill
 * handlers below).
 * NOTE(review): one body line is missing from this extract (likely
 * the load of the even pte) -- verify against the original file. */
#define PTE_RELOAD(ptr, tmp) \
ori ptr, ptr, PTE_SIZE; \
xori ptr, ptr, PTE_SIZE; \
PTE_L tmp, PTE_SIZE(ptr); \
PTE_SRL tmp, tmp, 6; \
P_MTC0 tmp, CP0_ENTRYLO1; \
PTE_SRL ptr, ptr, 6; \
P_MTC0 ptr, CP0_ENTRYLO0;
/* DO_FAULT(write): leave the fast path and hand the fault to the C
 * page-fault code, returning via ret_from_exception.  `write` is
 * presumably a write-fault flag passed to the C handler -- TODO
 * confirm.
 * NOTE(review): most of this macro's body (register setup and the
 * actual call) was dropped in extraction; only two lines remain. */
#define DO_FAULT(write) \
mfc0 a2, CP0_BADVADDR; \
j ret_from_exception; \
/* Check if PTE is present, if not then jump to LABEL.
 * PTR points to the page table where this PTE is located,
 * when the macro is done executing PTE will be restored
 * with its original value.
 */
/* andi/xori leaves zero in `pte` iff both _PAGE_PRESENT and
 * _PAGE_READ were set.
 * NOTE(review): the branch-to-label and pte-restore lines of this
 * macro are missing from this extract. */
#define PTE_PRESENT(pte, ptr, label) \
andi pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
xori pte, pte, (_PAGE_PRESENT | _PAGE_READ); \
/* Make PTE valid, store result in PTR. */
/* Sets _PAGE_VALID | _PAGE_ACCESSED in `pte`.
 * NOTE(review): the store back through `ptr` is on a line not
 * visible in this extract. */
#define PTE_MAKEVALID(pte, ptr) \
ori pte, pte, (_PAGE_VALID | _PAGE_ACCESSED); \
/* Check if PTE can be written to, if not branch to LABEL.
 * Regardless restore PTE with value from PTR when done.
 */
/* andi/xori leaves zero in `pte` iff both _PAGE_PRESENT and
 * _PAGE_WRITE were set.
 * NOTE(review): the branch-to-label and pte-restore lines of this
 * macro are missing from this extract. */
#define PTE_WRITABLE(pte, ptr, label) \
andi pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
xori pte, pte, (_PAGE_PRESENT | _PAGE_WRITE); \
/* Make PTE writable, update software status bits as well,
 * (remainder of the original comment and the store back through PTR
 * are on lines not visible in this extract).
 */
#define PTE_MAKEWRITE(pte, ptr) \
ori pte, pte, (_PAGE_ACCESSED | _PAGE_MODIFIED | \
_PAGE_VALID | _PAGE_DIRTY); \
/* GET_PTE_OFF(reg): scale the CP0_CONTEXT-derived value in `reg`
 * into an offset appropriate for the configured pte size (no-op for
 * 64-bit phys addr, srl 3 for VR41xx, srl 1 by default).
 * NOTE(review): the #else/#endif lines of this conditional were
 * dropped in extraction. */
#ifdef CONFIG_64BIT_PHYS_ADDR
#define GET_PTE_OFF(reg)
#elif CONFIG_CPU_VR41XX
#define GET_PTE_OFF(reg) srl reg, reg, 3
#define GET_PTE_OFF(reg) srl reg, reg, 1
 * These handlers must be written in a relocatable manner
 * because based upon the cpu type an arbitrary one of the
 * following pieces of code will be copied to the KSEG0
/* TLB refill, EXL == 0, R4xx0, non-R4600 version */
/*
 * Fast-path refill: runs with only k0/k1 available (no stack, no
 * saved context).  Walks pgd -> even/odd pte pair for CP0_BADVADDR,
 * loads them into EntryLo0/EntryLo1 and writes a random TLB entry.
 * NOTE(review): several original lines were dropped in extraction
 * (pgd scaling/dereference and hazard padding) -- this listing is
 * not assemblable as-is; consult the original file.
 */
LEAF(except_vec0_r4000)
GET_PGD(k0, k1) # get pgd pointer
mfc0 k0, CP0_BADVADDR # Get faulting address
srl k0, k0, _PGDIR_SHIFT # get pgd only bits
addu k1, k1, k0 # add in pgd offset
mfc0 k0, CP0_CONTEXT # get context reg
GET_PTE_OFF(k0) # get pte offset
and k0, k0, PTEP_INDX_MSK
addu k1, k1, k0 # add in offset
PTE_L k0, 0(k1) # get even pte
PTE_L k1, PTE_SIZE(k1) # get odd pte
PTE_SRL k0, k0, 6 # convert to entrylo0
P_MTC0 k0, CP0_ENTRYLO0 # load it
PTE_SRL k1, k1, 6 # convert to entrylo1
P_MTC0 k1, CP0_ENTRYLO1 # load it
tlbwr # write random tlb entry
eret # return from trap
END(except_vec0_r4000)
/* TLB refill, EXL == 0, R4600 version */
/*
 * Same page-table walk as the r4000 handler above, laid out for the
 * R4600.
 * NOTE(review): many lines are missing from this extract (pgd
 * dereference, context read, even-pte load, EntryLo conversion
 * shifts, tlbwr and eret) -- consult the original file.
 */
LEAF(except_vec0_r4600)
GET_PGD(k0, k1) # get pgd pointer
mfc0 k0, CP0_BADVADDR
srl k0, k0, _PGDIR_SHIFT
sll k0, k0, 2 # log2(sizeof(pgd_t))
GET_PTE_OFF(k0) # get pte offset
and k0, k0, PTEP_INDX_MSK
PTE_L k1, PTE_SIZE(k1)
P_MTC0 k0, CP0_ENTRYLO0
P_MTC0 k1, CP0_ENTRYLO1
END(except_vec0_r4600)
/* TLB refill, EXL == 0, R52x0 "Nevada" version */
/*
 * This version has a bug workaround for the Nevada. It seems
 * as if under certain circumstances the move from cp0_context
 * might produce a bogus result when the mfc0 instruction and
 * its consumer are in a different cacheline or a load instruction,
 * probably any memory reference, is between them. This is
 * potentially slower than the R4000 version, so we use this
 * (remainder of the original comment was lost in extraction).
 */
LEAF(except_vec0_nevada)
mfc0 k0, CP0_BADVADDR # Get faulting address
srl k0, k0, _PGDIR_SHIFT # get pgd only bits
lw k1, pgd_current # get pgd pointer
sll k0, k0, 2 # log2(sizeof(pgd_t))
addu k1, k1, k0 # add in pgd offset
/* NOTE(review): one line between the pgd-offset add and the context
 * read (likely the pgd-entry load) was dropped in extraction. */
/* Keep the CP0_CONTEXT read adjacent to its consumers -- that is the
 * Nevada mfc0 hazard workaround described above. */
mfc0 k0, CP0_CONTEXT # get context reg
GET_PTE_OFF(k0) # get pte offset
and k0, k0, PTEP_INDX_MSK
addu k1, k1, k0 # add in offset
PTE_L k0, 0(k1) # get even pte
PTE_L k1, PTE_SIZE(k1) # get odd pte
PTE_SRL k0, k0, 6 # convert to entrylo0
P_MTC0 k0, CP0_ENTRYLO0 # load it
PTE_SRL k1, k1, 6 # convert to entrylo1
P_MTC0 k1, CP0_ENTRYLO1 # load it
nop # QED specified nops
tlbwr # write random tlb entry
nop # traditional nop
eret # return from trap
END(except_vec0_nevada)
/* TLB refill, EXL == 0, SB1 with M3 errata handling version */
/*
 * SiByte SB1 variant.  The leading BADVADDR read and shift by
 * PAGE_SHIFT+1 are part of the M3 errata check; the comparison and
 * branch that use the result (and target the "1:" label before eret)
 * were dropped in extraction.
 * NOTE(review): the END(except_vec0_sb1) marker is also missing from
 * this extract -- consult the original file.
 */
LEAF(except_vec0_sb1)
mfc0 k0, CP0_BADVADDR
srl k0, k0, PAGE_SHIFT+1
GET_PGD(k0, k1) # get pgd pointer
mfc0 k0, CP0_BADVADDR # Get faulting address
srl k0, k0, _PGDIR_SHIFT # get pgd only bits
addu k1, k1, k0 # add in pgd offset
mfc0 k0, CP0_CONTEXT # get context reg
GET_PTE_OFF(k0) # get pte offset
and k0, k0, PTEP_INDX_MSK
addu k1, k1, k0 # add in offset
PTE_L k0, 0(k1) # get even pte
PTE_L k1, PTE_SIZE(k1) # get odd pte
PTE_SRL k0, k0, 6 # convert to entrylo0
P_MTC0 k0, CP0_ENTRYLO0 # load it
PTE_SRL k1, k1, 6 # convert to entrylo1
P_MTC0 k1, CP0_ENTRYLO1 # load it
tlbwr # write random tlb entry
1: eret # return from trap
/* TLB refill, EXL == 0, R4[40]00/R5000 badvaddr hwbug version */
/*
 * Refill variant working around a BadVAddr hardware bug on R4400 /
 * R5000 parts (per the handler name -- the workaround instructions
 * themselves are on lines dropped in extraction).
 * NOTE(review): this listing is missing many lines, including the
 * #endif matching the #ifndef below, the context read, even-pte
 * load, EntryLo shifts, tlbwr and eret.
 */
LEAF(except_vec0_r45k_bvahwbug)
GET_PGD(k0, k1) # get pgd pointer
mfc0 k0, CP0_BADVADDR
srl k0, k0, _PGDIR_SHIFT
sll k0, k0, 2 # log2(sizeof(pgd_t))
#ifndef CONFIG_64BIT_PHYS_ADDR
and k0, k0, PTEP_INDX_MSK
PTE_L k1, PTE_SIZE(k1)
P_MTC0 k0, CP0_ENTRYLO0
P_MTC0 k1, CP0_ENTRYLO1
END(except_vec0_r45k_bvahwbug)
/* TLB refill, EXL == 0, R4000 MP badvaddr hwbug version */
/*
 * Multiprocessor R4000 refill with BadVAddr hardware-bug handling
 * (per the handler name; the workaround instructions are on lines
 * dropped in extraction).
 * NOTE(review): this listing is missing many lines, including the
 * #endif matching the #ifndef below, the context read, even-pte
 * load, EntryLo shifts, tlbwr and eret.
 */
LEAF(except_vec0_r4k_mphwbug)
GET_PGD(k0, k1) # get pgd pointer
mfc0 k0, CP0_BADVADDR
srl k0, k0, _PGDIR_SHIFT
sll k0, k0, 2 # log2(sizeof(pgd_t))
#ifndef CONFIG_64BIT_PHYS_ADDR
and k0, k0, PTEP_INDX_MSK
PTE_L k1, PTE_SIZE(k1)
P_MTC0 k0, CP0_ENTRYLO0
P_MTC0 k1, CP0_ENTRYLO1
END(except_vec0_r4k_mphwbug)
/* TLB refill, EXL == 0, R4000 UP 250MHZ entrylo[01] hwbug version */
/*
 * Uniprocessor R4000 refill for the 250MHz EntryLo hardware bug:
 * each EntryLo register is first written with zero before the real
 * value is loaded (the double P_MTC0 sequences below).
 * NOTE(review): this listing is missing many lines, including the
 * #endif matching the #ifndef below, the context read, even-pte
 * load, EntryLo shifts, tlbwr and eret.
 */
LEAF(except_vec0_r4k_250MHZhwbug)
GET_PGD(k0, k1) # get pgd pointer
mfc0 k0, CP0_BADVADDR
srl k0, k0, _PGDIR_SHIFT
sll k0, k0, 2 # log2(sizeof(pgd_t))
#ifndef CONFIG_64BIT_PHYS_ADDR
and k0, k0, PTEP_INDX_MSK
PTE_L k1, PTE_SIZE(k1)
/* zero EntryLo before the real write -- hwbug workaround */
P_MTC0 zero, CP0_ENTRYLO0
P_MTC0 k0, CP0_ENTRYLO0
P_MTC0 zero, CP0_ENTRYLO1
P_MTC0 k1, CP0_ENTRYLO1
END(except_vec0_r4k_250MHZhwbug)
/* TLB refill, EXL == 0, R4000 MP 250MHZ entrylo[01]+badvaddr bug version */
/*
 * Multiprocessor R4000 refill combining both workarounds: the
 * BadVAddr hwbug handling (on lines dropped in extraction) and the
 * zero-then-write EntryLo sequences visible below.
 * NOTE(review): this listing is missing many lines, including the
 * #endif matching the #ifndef below, the context read, even-pte
 * load, EntryLo shifts, tlbwr and eret.
 */
LEAF(except_vec0_r4k_MP250MHZhwbug)
GET_PGD(k0, k1) # get pgd pointer
mfc0 k0, CP0_BADVADDR
srl k0, k0, _PGDIR_SHIFT
sll k0, k0, 2 # log2(sizeof(pgd_t))
#ifndef CONFIG_64BIT_PHYS_ADDR
and k0, k0, PTEP_INDX_MSK
PTE_L k1, PTE_SIZE(k1)
/* zero EntryLo before the real write -- hwbug workaround */
P_MTC0 zero, CP0_ENTRYLO0
P_MTC0 k0, CP0_ENTRYLO0
P_MTC0 zero, CP0_ENTRYLO1
P_MTC0 k1, CP0_ENTRYLO1
END(except_vec0_r4k_MP250MHZhwbug)
/*
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      (instruction-sequence example lost in extraction)
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB. The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed. This errata is also on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
/* Placed before tlbp so the preceding instruction cannot stall. */
#define R5K_HAZARD nop

/*
 * Note for many R4k variants tlb probes cannot be executed out
 * of the instruction cache else you get bogus results.
 */
/*
 * Slow-path TLB load-miss handler; NESTED with a PT_SIZE stack frame.
 * Checks the faulting pte's present bit and, if present, marks it
 * valid/accessed (PTE_MAKEVALID) -- otherwise falls out to the
 * nopage_tlbl path.
 * NOTE(review): large parts of this handler were dropped in
 * extraction (prologue, pte load, the reload/eret tail, the
 * nopage_tlbl label and the END marker) -- consult the original file.
 */
NESTED(handle_tlbl, PT_SIZE, sp)
mfc0 k0, CP0_BADVADDR
srl k0, k0, PAGE_SHIFT+1
/* Test present bit in entry. */
PTE_PRESENT(k0, k1, nopage_tlbl)
PTE_MAKEVALID(k0, k1)
/*
 * Slow-path TLB store-miss handler; NESTED with a PT_SIZE stack
 * frame.  Probes for the faulting entry, checks writability and, if
 * writable, sets accessed/modified/valid/dirty (PTE_MAKEWRITE) --
 * otherwise falls out to the nopage_tlbs path.
 * NOTE(review): the prologue, pte load, reload/eret tail, the
 * nopage_tlbs label and the END marker were dropped in extraction --
 * consult the original file.
 */
NESTED(handle_tlbs, PT_SIZE, sp)
tlbp # find faulting entry
PTE_WRITABLE(k0, k1, nopage_tlbs)
PTE_MAKEWRITE(k0, k1)
503 NESTED(handle_mod, PT_SIZE, sp)
509 tlbp # find faulting entry
510 andi k0, k0, _PAGE_WRITE
514 /* Present and writable bits set, set accessed and dirty bits. */
515 PTE_MAKEWRITE(k0, k1)
517 /* Now reload the entry into the tlb. */