6a7fe6a85712680a5e291266732857cc6668575b
[linux-flexiantxendom0-3.2.10.git] / arch / mips64 / mm / tlb-r4k.c
1 /*
2  * Carsten Langgaard, carstenl@mips.com
3  * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
4  *
5  * This program is free software; you can distribute it and/or modify it
6  * under the terms of the GNU General Public License (Version 2) as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
12  * for more details.
13  *
14  * You should have received a copy of the GNU General Public License along
15  * with this program; if not, write to the Free Software Foundation, Inc.,
16  * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
17  *
18  * MIPS64 CPU variant specific MMU routines.
19  * These routine are not optimized in any way, they are done in a generic way
20  * so they can be used on all MIPS64 compliant CPUs, and also done in an
21  * attempt not to break anything for the R4xx0 style CPUs.
22  */
23 #include <linux/init.h>
24 #include <linux/sched.h>
25 #include <linux/mm.h>
26
27 #include <asm/cpu.h>
28 #include <asm/bootinfo.h>
29 #include <asm/mmu_context.h>
30 #include <asm/pgtable.h>
31 #include <asm/system.h>
32
33 #undef DEBUG_TLB
34 #undef DEBUG_TLBUPDATE
35
/* TLB refill exception vector, implemented in assembly elsewhere. */
extern void except_vec1_r4k(void);

/*
 * CP0 hazard avoidance: a burst of nops gives a CP0 register write time
 * to take effect before a dependent TLB instruction (tlbp/tlbwi/tlbwr)
 * executes.  NOTE(review): six nops is a conservative count intended to
 * cover R4xx0-style pipelines as well -- confirm against the hazard
 * requirements of the target CPUs.
 */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
                                     "nop; nop; nop; nop; nop; nop;\n\t" \
                                     ".set reorder\n\t")
42
/*
 * Flush every non-wired entry from this CPU's TLB.
 *
 * Each entry from the first non-wired slot upward is overwritten with a
 * unique VPN2 in the XKPHYS range (never matched by translated accesses)
 * and zeroed EntryLo values.  Runs with local interrupts disabled; only
 * the ASID portion of EntryHi is preserved across the flush.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

#ifdef DEBUG_TLB
	printk("[tlball]");
#endif

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(XKPHYS);
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	BARRIER;

	/* Wired (pinned) entries below this index are deliberately kept. */
	entry = read_c0_wired();

	/* Blast 'em all away. */
	while(entry < current_cpu_data.tlbsize) {
		/*
		 * Make sure all entries differ.  0x2000 is the span of one
		 * EntryHi: a VPN2 covers an even/odd pair of 4K pages.
		 */
		write_c0_entryhi(XKPHYS+entry*0x2000);
		write_c0_index(entry);
		BARRIER;
		tlb_write_indexed();
		BARRIER;
		entry++;
	}
	BARRIER;
	write_c0_entryhi(old_ctx);	/* restore the saved ASID */
	local_irq_restore(flags);
}
77
/*
 * Flush all TLB entries belonging to @mm on this CPU.
 *
 * Rather than probing individual entries, the mm's MMU context is
 * dropped so a fresh ASID is allocated the next time it runs; old
 * entries tagged with the stale ASID can then never match.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) == 0)
		return;		/* mm never ran on this CPU - nothing to do */

#ifdef DEBUG_TLB
	printk("[tlbmm<%d>]", cpu_context(cpu, mm));
#endif
	drop_mmu_context(mm, cpu);
}
89
/*
 * Flush the TLB entries covering [start, end) of @vma's address space
 * on this CPU.
 *
 * For small ranges (at most half the TLB) each even/odd page pair is
 * probed individually and, if present, overwritten with an unmatchable
 * XKPHYS VPN2.  For larger ranges it is cheaper to drop the whole MMU
 * context instead.  No-op if the mm has no context on this CPU.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

#ifdef DEBUG_TLB
		printk("[tlbrange<%02x,%08lx,%08lx>]", cpu_context(cpu, mm) & ASID_MASK,
		       start, end);
#endif
		local_irq_save(flags);
		/* Number of pages, then number of even/odd VPN2 pairs. */
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if(size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi() & ASID_MASK;
			int newpid = cpu_asid(cpu, mm);

			/* Align the range to even/odd page-pair boundaries. */
			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				/* Advance before the probe so 'continue' below
				 * moves on to the next pair. */
				start += (PAGE_SIZE << 1);
				BARRIER;
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if(idx < 0)
					continue;	/* not in the TLB */
				/* Make sure all entries differ. */
				write_c0_entryhi(XKPHYS+idx*0x2000);
				BARRIER;
				tlb_write_indexed();
				BARRIER;
			}
			write_c0_entryhi(oldpid);	/* restore previous ASID */
		} else {
			drop_mmu_context(mm, cpu);
		}
		local_irq_restore(flags);
	}
}
140
/*
 * Flush the TLB entries covering the kernel range [start, end) on this
 * CPU.  Kernel entries carry the global bit, so the probe is done with
 * the bare VPN2 (no ASID OR-ed in).  Large ranges fall back to a full
 * TLB flush.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

#ifdef DEBUG_TLB
	printk("[tlbkernelrange<%08lx,%08lx>]", start, end);
#endif
	local_irq_save(flags);
	/* Number of pages, then number of even/odd VPN2 pairs. */
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize/2) {
		/* Save the complete EntryHi (VPN2 + ASID) for restore. */
		int pid = read_c0_entryhi();

		/* Align the range to even/odd page-pair boundaries. */
		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			/* Advance before the probe so 'continue' below
			 * moves on to the next pair. */
			start += (PAGE_SIZE << 1);
			BARRIER;
			tlb_probe();
			BARRIER;
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;	/* not in the TLB */
			/* Make sure all entries differ. */
			write_c0_entryhi(XKPHYS+idx*0x2000);
			BARRIER;
			tlb_write_indexed();
			BARRIER;
		}
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	local_irq_restore(flags);
}
184
185 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
186 {
187         int cpu = smp_processor_id();
188
189         if (cpu_context(cpu, vma->vm_mm) != 0) {
190                 unsigned long flags;
191                 unsigned long oldpid, newpid, idx;
192
193 #ifdef DEBUG_TLB
194                 printk("[tlbpage<%d,%08lx>]", cpu_asid(cpu, vma->vm_mm), page);
195 #endif
196                 newpid = cpu_asid(cpu, vma->vm_mm);
197                 page &= (PAGE_MASK << 1);
198                 local_irq_save(flags);
199                 oldpid = read_c0_entryhi() & ASID_MASK;
200                 write_c0_entryhi(page | newpid);
201                 BARRIER;
202                 tlb_probe();
203                 BARRIER;
204                 idx = read_c0_index();
205                 write_c0_entrylo0(0);
206                 write_c0_entrylo1(0);
207                 if(idx < 0)
208                         goto finish;
209                 /* Make sure all entries differ. */
210                 write_c0_entryhi(XKPHYS+idx*0x2000);
211                 BARRIER;
212                 tlb_write_indexed();
213         finish:
214                 BARRIER;
215                 write_c0_entryhi(oldpid);
216                 local_irq_restore(flags);
217         }
218 }
219
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	local_irq_save(flags);
	page &= (PAGE_MASK << 1);	/* align to even/odd page pair */
	/* NOTE(review): sibling routines mask EntryHi with ASID_MASK; the
	 * literal 0xff is used here -- confirm ASID_MASK == 0xff on all
	 * supported CPUs, or switch to the macro for consistency. */
	oldpid = read_c0_entryhi() & 0xff;
	write_c0_entryhi(page);		/* global entry: probe without ASID */
	BARRIER;
	tlb_probe();
	BARRIER;
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ.  NOTE(review): the other
		 * flush routines use XKPHYS for the dummy VPN2; KSEG0 is
		 * used here -- verify both are untranslated ranges on the
		 * target CPUs. */
		write_c0_entryhi(KSEG0+(idx<<(PAGE_SHIFT+1)));
		BARRIER;
		tlb_write_indexed();
	}
	BARRIER;
	write_c0_entryhi(oldpid);	/* restore previous ASID */

	local_irq_restore(flags);
}
250
251 /*
252  * Updates the TLB with the new pte(s).
253  */
254 void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
255 {
256         unsigned long flags;
257         pgd_t *pgdp;
258         pmd_t *pmdp;
259         pte_t *ptep;
260         int idx, pid;
261
262         /*
263          * Handle debugger faulting in for debugee.
264          */
265         if (current->active_mm != vma->vm_mm)
266                 return;
267
268         pid = read_c0_entryhi() & ASID_MASK;
269
270 #ifdef DEBUG_TLB
271         if ((pid != cpu_asid(smp_processor_id(), vma->vm_mm))) ||
272            (cpu_context(smp_processor_id(), vma->vm_mm) == 0)) {
273                 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d "
274                        "tlbpid=%d\n", (int) cpu_asid(smp_processor_id(),
275                        vma->vm_mm), pid);
276         }
277 #endif
278
279         local_irq_save(flags);
280         address &= (PAGE_MASK << 1);
281         write_c0_entryhi(address | (pid));
282         pgdp = pgd_offset(vma->vm_mm, address);
283         BARRIER;
284         tlb_probe();
285         BARRIER;
286         pmdp = pmd_offset(pgdp, address);
287         idx = read_c0_index();
288         ptep = pte_offset_map(pmdp, address);
289         BARRIER;
290         write_c0_entrylo0(pte_val(*ptep++) >> 6);
291         write_c0_entrylo1(pte_val(*ptep) >> 6);
292         write_c0_entryhi(address | (pid));
293         BARRIER;
294         if(idx < 0) {
295                 tlb_write_random();
296         } else {
297                 tlb_write_indexed();
298         }
299         BARRIER;
300         write_c0_entryhi(pid);
301         BARRIER;
302         local_irq_restore(flags);
303 }
304
/*
 * Install a permanently wired (pinned) TLB entry.
 *
 * Increments the Wired register and writes the caller-supplied
 * EntryHi/EntryLo pair, with @pagemask as the page size, into the newly
 * wired slot.  The previous ASID and page mask are restored afterwards.
 * The trailing local_flush_tlb_all() purges any stale non-wired entry
 * that might alias the new mapping; wired slots survive that flush
 * because it starts at the Wired index.
 */
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
				      unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi() & ASID_MASK;
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);	/* claim the next wired slot */
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	BARRIER;
	tlb_write_indexed();
	BARRIER;

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	local_irq_restore(flags);
}
335
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

/* Next free temporary slot, counting down from the top of the TLB;
 * initialized in r4k_tlb_init(). */
static int temp_tlb_entry __initdata;
343
344 __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
345                                unsigned long entryhi, unsigned long pagemask)
346 {
347         int ret = 0;
348         unsigned long flags;
349         unsigned long wired;
350         unsigned long old_pagemask;
351         unsigned long old_ctx;
352
353         local_irq_save(flags);
354         /* Save old context and create impossible VPN2 value */
355         old_ctx = read_c0_entryhi() & ASID_MASK;
356         old_pagemask = read_c0_pagemask();
357         wired = read_c0_wired();
358         if (--temp_tlb_entry < wired) {
359                 printk(KERN_WARNING "No TLB space left for add_temporary_entry\n");
360                 ret = -ENOSPC;
361                 goto out;
362         }
363
364         write_c0_index(temp_tlb_entry);
365         BARRIER;
366         write_c0_pagemask(pagemask);
367         write_c0_entryhi(entryhi);
368         write_c0_entrylo0(entrylo0);
369         write_c0_entrylo1(entrylo1);
370         BARRIER;
371         tlb_write_indexed();
372         BARRIER;
373
374         write_c0_entryhi(old_ctx);
375         BARRIER;
376         write_c0_pagemask(old_pagemask);
377 out:
378         local_irq_restore(flags);
379         return ret;
380 }
381
382 static void __init probe_tlb(unsigned long config)
383 {
384         unsigned long config1;
385
386         if (!(config & (1 << 31))) {
387                 /*
388                  * Not a MIPS64 complainant CPU.
389                  * Config 1 register not supported, we assume R4k style.
390                  */
391                 current_cpu_data.tlbsize = 48;
392         } else {
393                 config1 = read_c0_config1();
394                 if (!((config >> 7) & 3))
395                         panic("No MMU present");
396                 else
397                         current_cpu_data.tlbsize = ((config1 >> 25) & 0x3f) + 1;
398         }
399
400         printk("Number of TLB entries %d.\n", current_cpu_data.tlbsize);
401 }
402
/*
 * Early MMU initialization: size the TLB, set the default 4K page mask,
 * clear all wired entries, point the temporary-entry allocator at the
 * top of the TLB, flush everything, and install the TLB refill handler
 * at the exception vector (KSEG0 + 0x80).
 */
void __init r4k_tlb_init(void)
{
	unsigned long config = read_c0_config();

	probe_tlb(config);
	write_c0_pagemask(PM_4K);
	write_c0_wired(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;
	local_flush_tlb_all();

	/* Copy the refill handler into place and make it visible to the
	 * instruction stream. */
	memcpy((void *)(KSEG0 + 0x80), except_vec1_r4k, 0x80);
	flush_icache_range(KSEG0, KSEG0 + 0x80);
}