/*
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * MIPS64 CPU variant specific MMU routines.
 * These routines are not optimized in any way, they are done in a generic way
 * so they can be used on all MIPS64 compliant CPUs, and also done in an
 * attempt not to break anything for the R4xx0 style CPUs.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

#undef DEBUG_TLB
#undef DEBUG_TLBUPDATE

extern void except_vec1_r4k(void);

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(XKPHYS);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	BARRIER;

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(XKPHYS + entry*0x2000);
		write_c0_index(entry);
		BARRIER;
		tlb_write_indexed();
		BARRIER;
		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);
	local_irq_restore(flags);
}
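
/*
 * Flush all user translations belonging to 'mm' on this CPU.  Rather than
 * probing individual entries, the mm's ASID is simply retired; a fresh
 * context is allocated the next time the mm is scheduled, so the stale
 * entries can no longer match.
 */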
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
#ifdef DEBUG_TLB
		printk("[tlbmm<%d>]", cpu_context(cpu, mm));
#endif
		drop_mmu_context(mm, cpu);
	}
}
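
/*
 * Flush the user translations covering [start, end) in vma's mm on this CPU.
 * Entries are probed in even/odd page pairs (one TLB entry maps two pages);
 * if the range spans more than half the TLB it is cheaper to drop the whole
 * context instead.
 */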
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]",
		       cpu_context(cpu, mm) & ASID_MASK, start, end);
#endif
		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(XKPHYS + idx*0x2000);
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
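
/*
 * Same as above, but for kernel mappings: the current ASID is left alone and
 * every even/odd page pair in [start, end) is probed and invalidated.  Large
 * ranges fall back to local_flush_tlb_all().
 */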
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

#ifdef DEBUG_TLB
	printk("[tlbkernelrange<%08lx,%08lx>]", start, end);
#endif
	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize/2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);
		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			BARRIER;
			tlb_probe();
			BARRIER;
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(XKPHYS + idx*0x2000);
			BARRIER;
			tlb_write_indexed();
			BARRIER;
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
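
/*
 * Flush the single user page 'page' from this CPU's TLB, probing with the
 * ASID of vma->vm_mm.  If the probe misses there is nothing to invalidate.
 */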
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		unsigned long oldpid, newpid, idx;

#ifdef DEBUG_TLB
		printk("[tlbpage<%d,%08lx>]", cpu_asid(cpu, vma->vm_mm), page);
#endif
		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		local_irq_save(flags);
		oldpid = read_c0_entryhi() & ASID_MASK;
		write_c0_entryhi(page | newpid);
		BARRIER;
		tlb_probe();
		BARRIER;
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx >= 0) {
			/* Make sure all entries differ. */
			write_c0_entryhi(XKPHYS + idx*0x2000);
			BARRIER;
			tlb_write_indexed();
			BARRIER;
		}
		write_c0_entryhi(oldpid);
		local_irq_restore(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	page &= (PAGE_MASK << 1);
	oldpid = read_c0_entryhi() & 0xff;
	write_c0_entryhi(page);
	BARRIER;
	tlb_probe();
	BARRIER;
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(KSEG0 + (idx << (PAGE_SHIFT+1)));
		BARRIER;
		tlb_write_indexed();
		BARRIER;
	}
	write_c0_entryhi(oldpid);
	local_irq_restore(flags);
}
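
/*
 * Note on the EntryLo writes in __update_tlb() below: the PTE layout used by
 * this port keeps the hardware EntryLo bits (G, V, D, cache attributes and
 * the PFN) shifted left by 6, with the low six bits reserved for software
 * flags, so "pte_val(*ptep) >> 6" can be written to EntryLo0/1 directly.
 * One TLB entry maps an even/odd pair of pages, hence the two consecutive
 * PTEs being loaded.
 */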
/*
 * Updates the TLB with the new pte(s).
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/* Handle debugger faulting in for debuggee. */
	if (current->active_mm != vma->vm_mm)
		return;

	pid = read_c0_entryhi() & ASID_MASK;

#ifdef DEBUG_TLB
	if ((pid != cpu_asid(smp_processor_id(), vma->vm_mm)) ||
	    (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d "
		       "tlbpid=%d\n", (int) cpu_asid(smp_processor_id(),
		       vma->vm_mm), pid);
	}
#endif

	local_irq_save(flags);
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	BARRIER;
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	BARRIER;
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	write_c0_entryhi(address | pid);
	BARRIER;
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	BARRIER;
	write_c0_entryhi(pid);
	local_irq_restore(flags);
}
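
/*
 * Install a permanently wired TLB entry.  The entry is written at the current
 * Wired index and the Wired register is then bumped, so local_flush_tlb_all()
 * (which only starts flushing at read_c0_wired()) will leave it alone.
 */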
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system.
 */
static int temp_tlb_entry __initdata;

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
out:
	local_irq_restore(flags);
	return ret;
}
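
/*
 * Determine the TLB size.  CPUs without the Config1 register (bit 31 of
 * Config clear) are assumed to have the classic 48-entry R4k TLB; otherwise
 * the size is read from the MMU-size field of Config1 (bits 30:25, stored
 * as "entries - 1").
 */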
static void __init probe_tlb(unsigned long config)
{
	unsigned long config1;

	if (!(config & (1 << 31))) {
		/*
		 * Not a MIPS64 compliant CPU.
		 * Config1 register not supported, we assume R4k style.
		 */
		current_cpu_data.tlbsize = 48;
	} else {
		config1 = read_c0_config1();
		if (!((config >> 7) & 3))
			panic("No MMU present");
		else
			current_cpu_data.tlbsize = ((config1 >> 25) & 0x3f) + 1;
	}

	printk("Number of TLB entries %d.\n", current_cpu_data.tlbsize);
}
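
/*
 * Boot-time TLB setup: size the TLB, select 4K pages, reserve the top entry
 * for add_temporary_entry(), flush everything and install the TLB exception
 * handler.
 */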
void __init r4k_tlb_init(void)
{
	unsigned long config = read_c0_config();

	probe_tlb(config);
	write_c0_pagemask(PM_4K);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;
	local_flush_tlb_all();
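
	/*
	 * Copy the TLB refill handler into the exception vector at
	 * KSEG0 + 0x80 (the 64-bit XTLB refill vector) and make sure
	 * the I-cache sees the new code.
	 */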
	memcpy((void *)(KSEG0 + 0x80), except_vec1_r4k, 0x80);
	flush_icache_range(KSEG0, KSEG0 + 0x80);
}