/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * r4xx0.c: R4000 processor variant specific MMU/Cache routines.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 *
 * To do:
 * - this code is an overbloated pig
 * - many of the bug workarounds are not efficient at all, but at
 *   least they are functional ...
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>

#include <asm/bcache.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
				     "nop; nop; nop; nop; nop; nop;\n\t" \
				     ".set reorder\n\t")
/* Primary cache parameters. */
static int icache_size, dcache_size;		/* Size in bytes */
static int ic_lsize, dc_lsize;			/* LineSize in bytes */

/* Secondary cache (if present) parameters. */
static unsigned int scache_size, sc_lsize;	/* Again, in bytes */

#include <asm/cacheops.h>
#include <asm/r4kcache.h>
/*
 * Dummy cache handling routines for machines without board caches.
 */
static void no_sc_noop(void) {}

static struct bcache_ops no_sc_ops = {
	(void *)no_sc_noop, (void *)no_sc_noop,
	(void *)no_sc_noop, (void *)no_sc_noop
};

struct bcache_ops *bcops = &no_sc_ops;
/*
 * On processors with QED R4600-style two-way set associative caches,
 * this is the bit which selects the way in the cache for the indexed
 * cache operations.
 */
#define icache_waybit (icache_size >> 1)
#define dcache_waybit (dcache_size >> 1)
/*
 * Zero an entire page.  Basically a simple unrolled loop should do the
 * job but we want more performance by saving memory bus bandwidth.  We
 * have five flavours of the routine available for:
 *
 * - 16-byte cachelines and no second level cache
 * - 32-byte cachelines and no second level cache
 * - a version which handles the buggy R4600 v1.x
 * - a version which handles the buggy R4600 v2.0
 * - Finally a last version without fancy cache games for the SC and MC
 *   versions of R4000 and R4400.
 */
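/*
 * For reference, a minimal cache-game-free sketch of what every variant
 * below implements (an illustration only, not one of the routines that
 * gets selected at boot): the optimized versions avoid the memory reads
 * that plain stores would cause on a write-allocate cache by marking
 * each line dirty-exclusive with Create_Dirty_Excl_D before filling it.
 */
static void r4k_clear_page_slow(void * page)
{
	memset(page, 0, PAGE_SIZE);	/* assumes memset()/PAGE_SIZE are in scope */
}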
static void r4k_clear_page_d16(void * page)
		"1:\tcache\t%3,(%0)\n\t"
		"cache\t%3,16(%0)\n\t"
		"cache\t%3,-32(%0)\n\t"
		"cache\t%3,-16(%0)\n\t"
		"i" (Create_Dirty_Excl_D)

static void r4k_clear_page_d32(void * page)
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%3,(%0)\n\t"
		"cache\t%3,-32(%0)\n\t"
		"i" (Create_Dirty_Excl_D)
/*
 * This flavour of r4k_clear_page is for the R4600 V1.x.  Cite from the
 * IDT R4600 V1.7 errata:
 *
 *  18. The CACHE instructions Hit_Writeback_Invalidate_D, Hit_Writeback_D,
 *      Hit_Invalidate_D and Create_Dirty_Excl_D should only be
 *      executed if there is no other dcache activity.  If the dcache is
 *      accessed for another instruction immediately preceding when these
 *      cache instructions are executing, it is possible that the dcache
 *      tag match outputs used by these cache instructions will be
 *      incorrect.  These cache instructions should be preceded by at least
 *      four instructions that are not any kind of load or store
 *      instruction.
 *
 *      This is not allowed:    lw
 *                              nop
 *                              nop
 *                              nop
 *                              cache       Hit_Writeback_Invalidate_D
 *
 *      This is allowed:        lw
 *                              nop
 *                              nop
 *                              nop
 *                              nop
 *                              cache       Hit_Writeback_Invalidate_D
 */
static void r4k_clear_page_r4600_v1(void * page)
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"cache\t%3,-32(%0)\n\t"
		"i" (Create_Dirty_Excl_D)
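/*
 * A minimal sketch of the padding the errata above demands, assuming a
 * dcache-line-aligned addr; the hand-unrolled loop in the routine above
 * arranges its instruction mix to get the same guarantee for free.
 */
static inline void r4600v1_safe_create_dirty(unsigned long addr)
{
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"nop; nop; nop; nop\n\t"	/* >= 4 non-load/store ops */
		"cache\t%1, (%0)\n\t"
		".set\treorder"
		: : "r" (addr), "i" (Create_Dirty_Excl_D));
}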
/*
 * And this one is for the R4600 V2.0
 */
static void r4k_clear_page_r4600_v2(void * page)
	*(volatile unsigned int *)KSEG1;
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%3,(%0)\n\t"
		"cache\t%3,-32(%0)\n\t"
		"i" (Create_Dirty_Excl_D)
	restore_flags(flags);
/*
 * The next 4 versions are optimized for all possible scache configurations
 * of the SC / MC versions of R4000 and R4400 ...
 *
 * Todo: For even better performance we should have a routine optimized for
 * every legal combination of dcache / scache linesize.  When I (Ralf) tried
 * this the kernel crashed shortly after mounting the root filesystem.  CPU
 * bug?  Weirdo cache instruction semantics?
 */
static void r4k_clear_page_s16(void * page)
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%3,(%0)\n\t"
		"cache\t%3,16(%0)\n\t"
		"cache\t%3,-32(%0)\n\t"
		"cache\t%3,-16(%0)\n\t"
		"i" (Create_Dirty_Excl_SD)

static void r4k_clear_page_s32(void * page)
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%3,(%0)\n\t"
		"cache\t%3,-32(%0)\n\t"
		"i" (Create_Dirty_Excl_SD)

static void r4k_clear_page_s64(void * page)
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%3,(%0)\n\t"
		"i" (Create_Dirty_Excl_SD)

static void r4k_clear_page_s128(void * page)
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%3,(%0)\n\t"
		"i" (Create_Dirty_Excl_SD)
/*
 * This is still inefficient.  We can only do better if we know the
 * virtual address where the copy will be accessed.
 */
static void r4k_copy_page_d16(void * to, void * from)
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%9,(%0)\n\t"
		"cache\t%9,16(%0)\n\t"
		"cache\t%9,32(%0)\n\t"
		"cache\t%9,-16(%0)\n\t"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "i" (Create_Dirty_Excl_D));
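/*
 * As with clear_page above, a minimal sketch of the semantics the
 * optimized copy routines below implement, without the dirty-exclusive
 * cache games (illustration only):
 */
static void r4k_copy_page_slow(void * to, void * from)
{
	memcpy(to, from, PAGE_SIZE);	/* assumes memcpy() is in scope */
}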
static void r4k_copy_page_d32(void * to, void * from)
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%9,(%0)\n\t"
		"cache\t%9,32(%0)\n\t"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "i" (Create_Dirty_Excl_D));
/*
 * Again a special version for the R4600 V1.x
 */
static void r4k_copy_page_r4600_v1(void * to, void * from)
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"\tcache\t%9,(%0)\n\t"
		"cache\t%9,32(%0)\n\t"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "i" (Create_Dirty_Excl_D));
static void r4k_copy_page_r4600_v2(void * to, void * from)
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__save_and_cli(flags);
	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"\tcache\t%9,(%0)\n\t"
		"cache\t%9,32(%0)\n\t"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "i" (Create_Dirty_Excl_D));
	restore_flags(flags);
/*
 * These are for R4000SC / R4400MC
 */
static void r4k_copy_page_s16(void * to, void * from)
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%9,(%0)\n\t"
		"cache\t%9,16(%0)\n\t"
		"cache\t%9,32(%0)\n\t"
		"cache\t%9,-16(%0)\n\t"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "i" (Create_Dirty_Excl_SD));

static void r4k_copy_page_s32(void * to, void * from)
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%9,(%0)\n\t"
		"cache\t%9,32(%0)\n\t"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "i" (Create_Dirty_Excl_SD));

static void r4k_copy_page_s64(void * to, void * from)
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%9,(%0)\n\t"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "i" (Create_Dirty_Excl_SD));

static void r4k_copy_page_s128(void * to, void * from)
	unsigned long dummy1, dummy2;
	unsigned long reg1, reg2, reg3, reg4;

	__asm__ __volatile__(
		".set\tnoreorder\n\t"
		"1:\tcache\t%9,(%0)\n\t"
		:"=r" (dummy1), "=r" (dummy2),
		 "=&r" (reg1), "=&r" (reg2), "=&r" (reg3), "=&r" (reg4)
		:"0" (to), "1" (from),
		 "i" (Create_Dirty_Excl_SD));
/*
 * If you think for one second that this stuff coming up is a lot
 * of bulky code eating too many kernel cache lines, think _again_.
 *
 * 1) Taken branches have a 3 cycle penalty on R4k
 * 2) The branch itself is a real dead cycle even on R4600/R5000.
 * 3) Only one of the following variants of each type is even used by
 *    the kernel based upon the cache parameters we detect at boot time.
 */
static inline void r4k_flush_cache_all_s16d16i16(void)
	blast_dcache16(); blast_icache16(); blast_scache16();
	restore_flags(flags);

static inline void r4k_flush_cache_all_s32d16i16(void)
	blast_dcache16(); blast_icache16(); blast_scache32();
	restore_flags(flags);

static inline void r4k_flush_cache_all_s64d16i16(void)
	blast_dcache16(); blast_icache16(); blast_scache64();
	restore_flags(flags);

static inline void r4k_flush_cache_all_s128d16i16(void)
	blast_dcache16(); blast_icache16(); blast_scache128();
	restore_flags(flags);

static inline void r4k_flush_cache_all_s32d32i32(void)
	blast_dcache32(); blast_icache32(); blast_scache32();
	restore_flags(flags);

static inline void r4k_flush_cache_all_s64d32i32(void)
	blast_dcache32(); blast_icache32(); blast_scache64();
	restore_flags(flags);

static inline void r4k_flush_cache_all_s128d32i32(void)
	blast_dcache32(); blast_icache32(); blast_scache128();
	restore_flags(flags);

static inline void r4k_flush_cache_all_d16i16(void)
	blast_dcache16(); blast_icache16();
	restore_flags(flags);

static inline void r4k_flush_cache_all_d32i32(void)
	blast_dcache32(); blast_icache32();
	restore_flags(flags);
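/*
 * A sketch of how point 3) above pays off: one variant is chosen once
 * at boot (see setup_scache_funcs()/setup_noscache_funcs() below) and
 * every later call is a plain indirect call with no size checks.  The
 * pointer name here is hypothetical; the real kernel hooks are
 * _flush_cache_all and friends.
 */
static void (*flush_all_hook)(void);

static void __init pick_flush_all_example(void)
{
	if (dc_lsize == 16)
		flush_all_hook = r4k_flush_cache_all_d16i16;
	else
		flush_all_hook = r4k_flush_cache_all_d32i32;
}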
static void r4k_flush_cache_range_s16d16i16(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
	struct vm_area_struct *vma;

	if (mm->context == 0)

	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
	vma = find_vma(mm, start);
	if (mm->context != current->active_mm->context) {
		r4k_flush_cache_all_s16d16i16();
	save_and_cli(flags);
	text = vma->vm_flags & VM_EXEC;
	while (start < end) {
		pgd = pgd_offset(mm, start);
		pmd = pmd_offset(pgd, start);
		pte = pte_offset(pmd, start);

		if (pte_val(*pte) & _PAGE_VALID)
			blast_scache16_page(start);
	restore_flags(flags);

static void r4k_flush_cache_range_s32d16i16(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)

	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
	vma = find_vma(mm, start);
	if (mm->context != current->active_mm->context) {
		r4k_flush_cache_all_s32d16i16();
	save_and_cli(flags);
	text = vma->vm_flags & VM_EXEC;
	while (start < end) {
		pgd = pgd_offset(mm, start);
		pmd = pmd_offset(pgd, start);
		pte = pte_offset(pmd, start);

		if (pte_val(*pte) & _PAGE_VALID)
			blast_scache32_page(start);
	restore_flags(flags);

static void r4k_flush_cache_range_s64d16i16(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)

	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
	vma = find_vma(mm, start);
	if (mm->context != current->active_mm->context) {
		r4k_flush_cache_all_s64d16i16();
	save_and_cli(flags);
	text = vma->vm_flags & VM_EXEC;
	while (start < end) {
		pgd = pgd_offset(mm, start);
		pmd = pmd_offset(pgd, start);
		pte = pte_offset(pmd, start);

		if (pte_val(*pte) & _PAGE_VALID)
			blast_scache64_page(start);
	restore_flags(flags);

static void r4k_flush_cache_range_s128d16i16(struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)

	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
	vma = find_vma(mm, start);
	if (mm->context != current->active_mm->context) {
		r4k_flush_cache_all_s128d16i16();
	save_and_cli(flags);
	text = vma->vm_flags & VM_EXEC;
	while (start < end) {
		pgd = pgd_offset(mm, start);
		pmd = pmd_offset(pgd, start);
		pte = pte_offset(pmd, start);

		if (pte_val(*pte) & _PAGE_VALID)
			blast_scache128_page(start);
	restore_flags(flags);

static void r4k_flush_cache_range_s32d32i32(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)

	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
	vma = find_vma(mm, start);
	if (mm->context != current->active_mm->context) {
		r4k_flush_cache_all_s32d32i32();
	save_and_cli(flags);
	text = vma->vm_flags & VM_EXEC;
	while (start < end) {
		pgd = pgd_offset(mm, start);
		pmd = pmd_offset(pgd, start);
		pte = pte_offset(pmd, start);

		if (pte_val(*pte) & _PAGE_VALID)
			blast_scache32_page(start);
	restore_flags(flags);

static void r4k_flush_cache_range_s64d32i32(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)

	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
	vma = find_vma(mm, start);
	if (mm->context != current->active_mm->context) {
		r4k_flush_cache_all_s64d32i32();
	save_and_cli(flags);
	text = vma->vm_flags & VM_EXEC;
	while (start < end) {
		pgd = pgd_offset(mm, start);
		pmd = pmd_offset(pgd, start);
		pte = pte_offset(pmd, start);

		if (pte_val(*pte) & _PAGE_VALID)
			blast_scache64_page(start);
	restore_flags(flags);

static void r4k_flush_cache_range_s128d32i32(struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
	struct vm_area_struct *vma;
	unsigned long flags;

	if (mm->context == 0)

	printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
	vma = find_vma(mm, start);
	if (mm->context != current->active_mm->context) {
		r4k_flush_cache_all_s128d32i32();
	save_and_cli(flags);
	text = vma->vm_flags & VM_EXEC;
	while (start < end) {
		pgd = pgd_offset(mm, start);
		pmd = pmd_offset(pgd, start);
		pte = pte_offset(pmd, start);

		if (pte_val(*pte) & _PAGE_VALID)
			blast_scache128_page(start);
	restore_flags(flags);

static void r4k_flush_cache_range_d16i16(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
	if (mm->context != 0) {
		unsigned long flags;

		printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
		save_and_cli(flags);
		blast_dcache16(); blast_icache16();
		restore_flags(flags);

static void r4k_flush_cache_range_d32i32(struct mm_struct *mm,
					 unsigned long start,
					 unsigned long end)
	if (mm->context != 0) {
		unsigned long flags;

		printk("crange[%d,%08lx,%08lx]", (int)mm->context, start, end);
		save_and_cli(flags);
		blast_dcache32(); blast_icache32();
		restore_flags(flags);
/*
 * On architectures like the Sparc, we could get rid of lines in
 * the cache created only by a certain context, but on the MIPS
 * (and actually certain Sparc's) we cannot.
 */
static void r4k_flush_cache_mm_s16d16i16(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_s16d16i16();

static void r4k_flush_cache_mm_s32d16i16(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_s32d16i16();

static void r4k_flush_cache_mm_s64d16i16(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_s64d16i16();

static void r4k_flush_cache_mm_s128d16i16(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_s128d16i16();

static void r4k_flush_cache_mm_s32d32i32(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_s32d32i32();

static void r4k_flush_cache_mm_s64d32i32(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_s64d32i32();

static void r4k_flush_cache_mm_s128d32i32(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_s128d32i32();

static void r4k_flush_cache_mm_d16i16(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_d16i16();

static void r4k_flush_cache_mm_d32i32(struct mm_struct *mm)
	if (mm->context != 0) {
		printk("cmm[%d]", (int)mm->context);
		r4k_flush_cache_all_d32i32();
static void r4k_flush_cache_page_s16d16i16(struct vm_area_struct *vma,
					   unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache16_page_indexed(page);
		blast_icache16_page_indexed(page);
		blast_scache16_page_indexed(page);
		blast_scache16_page(page);
	restore_flags(flags);
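/*
 * Why the KSEG0 arithmetic above is safe (a sketch, not kernel code):
 * an indexed cache op selects its line purely from the address bits
 * below log2(cache size), so any address sharing those low bits hits
 * the same set -- and KSEG0 is unmapped, so the op can never raise a
 * TLB refill for a foreign ASID.
 */
static inline unsigned long indexed_alias(unsigned long vaddr,
					  unsigned long cachesize)
{
	return KSEG0 + (vaddr & (cachesize - 1));
}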
static void r4k_flush_cache_page_s32d16i16(struct vm_area_struct *vma,
					   unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache16_page_indexed(page);
		blast_icache16_page_indexed(page);
		blast_scache32_page_indexed(page);
		blast_scache32_page(page);
	restore_flags(flags);

static void r4k_flush_cache_page_s64d16i16(struct vm_area_struct *vma,
					   unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache16_page_indexed(page);
		blast_icache16_page_indexed(page);
		blast_scache64_page_indexed(page);
		blast_scache64_page(page);
	restore_flags(flags);

static void r4k_flush_cache_page_s128d16i16(struct vm_area_struct *vma,
					    unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache16_page_indexed(page);
		blast_icache16_page_indexed(page);
		blast_scache128_page_indexed(page);
		blast_scache128_page(page);
	restore_flags(flags);
static void r4k_flush_cache_page_s32d32i32(struct vm_area_struct *vma,
					   unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_icache32_page_indexed(page);
		blast_scache32_page_indexed(page);
		blast_scache32_page(page);
	restore_flags(flags);

static void r4k_flush_cache_page_s64d32i32(struct vm_area_struct *vma,
					   unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_icache32_page_indexed(page);
		blast_scache64_page_indexed(page);
		blast_scache64_page(page);
	restore_flags(flags);

static void r4k_flush_cache_page_s128d32i32(struct vm_area_struct *vma,
					    unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm->context != current->active_mm->context) {
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (scache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_icache32_page_indexed(page);
		blast_scache128_page_indexed(page);
		blast_scache128_page(page);
	restore_flags(flags);
static void r4k_flush_cache_page_d16i16(struct vm_area_struct *vma,
					unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_VALID))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if (mm == current->active_mm) {
		blast_dcache16_page(page);
		blast_icache16_page(page);
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache16_page_indexed(page);
		blast_icache16_page_indexed(page);
	restore_flags(flags);

static void r4k_flush_cache_page_d32i32(struct vm_area_struct *vma,
					unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked present, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		blast_dcache32_page(page);
		blast_icache32_page(page);
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_icache32_page_indexed(page);
	restore_flags(flags);
static void r4k_flush_cache_page_d32i32_r4600(struct vm_area_struct *vma,
					      unsigned long page)
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	/*
	 * If it owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (mm->context == 0)

	printk("cpage[%d,%08lx]", (int)mm->context, page);
	save_and_cli(flags);
	pgdp = pgd_offset(mm, page);
	pmdp = pmd_offset(pgdp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked present, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))

	text = (vma->vm_flags & VM_EXEC);
	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't overly flush the cache too much.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		blast_dcache32_page(page);
		blast_icache32_page(page);
		/*
		 * Do indexed flush, too much work to get the (possible)
		 * TLB refills to work correctly.
		 */
		page = (KSEG0 + (page & (dcache_size - 1)));
		blast_dcache32_page_indexed(page);
		blast_dcache32_page_indexed(page ^ dcache_waybit);
		blast_icache32_page_indexed(page);
		blast_icache32_page_indexed(page ^ icache_waybit);
	restore_flags(flags);
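/*
 * Note on the XOR above: for indexed ops the two-way R4600 primary
 * caches behave like two banks of size/2, with bit (cache_size >> 1)
 * selecting the way -- exactly the icache_waybit/dcache_waybit values
 * defined near the top of this file.  Hitting both aliases is what
 * guarantees the line is flushed whichever way it resides in, as in
 * this sketch:
 */
static inline void blast_both_dcache_ways(unsigned long idx)
{
	blast_dcache32_page_indexed(idx);
	blast_dcache32_page_indexed(idx ^ dcache_waybit);
}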
/* If the addresses passed to these routines are valid, they are
 * either:
 *
 * 1) In KSEG0, so we can do a direct flush of the page.
 * 2) In KSEG2, and since every process can translate those
 *    addresses all the time in kernel mode we can do a direct
 *    flush.
 * 3) In KSEG1, no flush necessary.
 */
static void r4k_flush_page_to_ram_s16d16i16(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		printk("cram[%08lx]", addr);
		blast_scache16_page(addr);

static void r4k_flush_page_to_ram_s32d16i16(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		printk("cram[%08lx]", addr);
		blast_scache32_page(addr);

static void r4k_flush_page_to_ram_s64d16i16(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		printk("cram[%08lx]", addr);
		blast_scache64_page(addr);

static void r4k_flush_page_to_ram_s128d16i16(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		printk("cram[%08lx]", addr);
		blast_scache128_page(addr);

static void r4k_flush_page_to_ram_s32d32i32(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		printk("cram[%08lx]", addr);
		blast_scache32_page(addr);

static void r4k_flush_page_to_ram_s64d32i32(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		printk("cram[%08lx]", addr);
		blast_scache64_page(addr);

static void r4k_flush_page_to_ram_s128d32i32(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		printk("cram[%08lx]", addr);
		blast_scache128_page(addr);

static void r4k_flush_page_to_ram_d16i16(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		unsigned long flags;

		printk("cram[%08lx]", addr);
		__save_and_cli(flags);
		blast_dcache16_page(addr);
		__restore_flags(flags);

static void r4k_flush_page_to_ram_d32i32(struct page * page)
	unsigned long addr = (unsigned long) page_address(page) & PAGE_MASK;

	if ((addr >= KSEG0 && addr < KSEG1) || (addr >= KSEG2)) {
		unsigned long flags;

		printk("cram[%08lx]", addr);
		__save_and_cli(flags);
		blast_dcache32_page(addr);
		__restore_flags(flags);
/*
 * Writeback and invalidate the primary dcache before DMA.
 *
 * R4600 v2.0 bug: "The CACHE instructions Hit_Writeback_Inv_D,
 * Hit_Writeback_D, Hit_Invalidate_D and Create_Dirty_Exclusive_D will only
 * operate correctly if the internal data cache refill buffer is empty.  These
 * CACHE instructions should be separated from any potential data cache miss
 * by a load instruction to an uncached address to empty the response buffer."
 * (Revision 2.0 device errata from IDT available on http://www.idt.com/
 * in .pdf format.)
 */
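/*
 * A minimal sketch of that workaround, assuming interrupts are already
 * disabled by the caller: one uncached load drains the refill buffer
 * before the CACHE op is issued.
 */
static inline void r4600v2_drain_then_flush(unsigned long addr)
{
	*(volatile unsigned long *)KSEG1;	/* empty the refill buffer */
	flush_dcache_line(addr);		/* Hit_Writeback_Inv_D */
}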
static void
r4k_dma_cache_wback_inv_pc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;
	unsigned long flags;

	if (size >= dcache_size) {
		flush_cache_all();
	} else {
		/* Workaround for R4600 bug.  See comment above. */
		save_and_cli(flags);
		*(volatile unsigned long *)KSEG1;

		a = addr & ~(dc_lsize - 1);
		end = (addr + size) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end) break;
			a += dc_lsize;
		}
		restore_flags(flags);
	}

	bcops->bc_wback_inv(addr, size);
}

static void
r4k_dma_cache_wback_inv_sc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (size >= scache_size) {
		flush_cache_all();
		return;
	}

	a = addr & ~(sc_lsize - 1);
	end = (addr + size) & ~(sc_lsize - 1);
	while (1) {
		flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
		if (a == end) break;
		a += sc_lsize;
	}
}

static void
r4k_dma_cache_inv_pc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;
	unsigned long flags;

	if (size >= dcache_size) {
		flush_cache_all();
	} else {
		/* Workaround for R4600 bug.  See comment above. */
		save_and_cli(flags);
		*(volatile unsigned long *)KSEG1;

		a = addr & ~(dc_lsize - 1);
		end = (addr + size) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end) break;
			a += dc_lsize;
		}
		restore_flags(flags);
	}

	bcops->bc_inv(addr, size);
}

static void
r4k_dma_cache_inv_sc(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (size >= scache_size) {
		flush_cache_all();
		return;
	}

	a = addr & ~(sc_lsize - 1);
	end = (addr + size) & ~(sc_lsize - 1);
	while (1) {
		flush_scache_line(a);	/* Hit_Writeback_Inv_SD */
		if (a == end) break;
		a += sc_lsize;
	}
}

static void
r4k_dma_cache_wback(unsigned long addr, unsigned long size)
{
	panic("r4k_dma_cache called - should not happen.\n");
}
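/*
 * Usage sketch for the routines above: a driver starting a
 * device-to-memory transfer would invalidate the buffer first, e.g.
 *
 *	dma_cache_inv((unsigned long) buf, len);
 *	start_dma(dev, buf, len);	-- hypothetical device kick-off
 *
 * where dma_cache_inv() reaches r4k_dma_cache_inv_pc or _sc through the
 * _dma_cache_inv hook assigned in the setup functions below.
 */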
/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long daddr, iaddr;

	daddr = addr & ~(dc_lsize - 1);
	__asm__ __volatile__("nop;nop;nop;nop");	/* R4600 V1.7 */
	protected_writeback_dcache_line(daddr);
	protected_writeback_dcache_line(daddr + dc_lsize);
	iaddr = addr & ~(ic_lsize - 1);
	protected_flush_icache_line(iaddr);
	protected_flush_icache_line(iaddr + ic_lsize);
}
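/*
 * Usage sketch: the signal delivery code writes the short trampoline
 * through the dcache and must then make it visible to instruction
 * fetch before returning to user mode, roughly:
 *
 *	err |= __put_user(..., &frame->code[0]);    (store trampoline)
 *	flush_cache_sigtramp((unsigned long) frame->code);
 *
 * The frame->code field name is illustrative, not taken from this file.
 */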
static void r4600v20k_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long daddr, iaddr;
	unsigned long flags;

	daddr = addr & ~(dc_lsize - 1);
	__save_and_cli(flags);

	/* Clear internal cache refill buffer */
	*(volatile unsigned int *)KSEG1;

	protected_writeback_dcache_line(daddr);
	protected_writeback_dcache_line(daddr + dc_lsize);
	iaddr = addr & ~(ic_lsize - 1);
	protected_flush_icache_line(iaddr);
	protected_flush_icache_line(iaddr + ic_lsize);
	__restore_flags(flags);
}
#undef DEBUG_TLBUPDATE

#define NTLB_ENTRIES		48	/* Fixed on all R4XX0 variants... */

#define NTLB_ENTRIES_HALF	24	/* Fixed on all R4XX0 variants... */
void flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;

	save_and_cli(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = (get_entryhi() & 0xff);
	entry = get_wired();

	/* Blast 'em all away. */
	while (entry < NTLB_ENTRIES) {
		tlb_write_indexed();
	set_entryhi(old_ctx);
	restore_flags(flags);
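/*
 * Note on the "impossible VPN2 value" above: the elided loop body is
 * assumed to rewrite each entry with an EntryHi taken from unmapped
 * (KSEG0-range) space, leaving every TLB slot valid but unmatchable
 * by any user address -- the standard way to blast an R4k TLB.
 */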
void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != 0) {
		unsigned long flags;

		printk("[tlbmm<%d>]", mm->context);
		save_and_cli(flags);
		get_new_mmu_context(mm, asid_cache);
		if (mm == current->active_mm)
			set_entryhi(mm->context & 0xff);
		restore_flags(flags);
	}
}
void flush_tlb_range(struct mm_struct *mm, unsigned long start,
		     unsigned long end)
{
	if (mm->context != 0) {
		unsigned long flags;

		printk("[tlbrange<%02x,%08lx,%08lx>]", (mm->context & 0xff),
		       start, end);
		save_and_cli(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= NTLB_ENTRIES_HALF) {
			int oldpid = (get_entryhi() & 0xff);
			int newpid = (mm->context & 0xff);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				set_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				tlb_write_indexed();
			set_entryhi(oldpid);
		} else {
			get_new_mmu_context(mm, asid_cache);
			if (mm == current->active_mm)
				set_entryhi(mm->context & 0xff);
		}
		restore_flags(flags);
	}
}
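/*
 * Note on the arithmetic above: every R4xx0 TLB entry maps an even/odd
 * pair of pages, hence the (PAGE_MASK << 1) alignment, the
 * (PAGE_SIZE << 1) stride and the halving of `size' before comparing
 * against the 24 pair-entries a 48-entry TLB provides.  Above that
 * bound a fresh ASID is cheaper than per-page invalidation.
 */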
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm->context != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		printk("[tlbpage<%d,%08lx>]", vma->vm_mm->context, page);
		newpid = (vma->vm_mm->context & 0xff);
		page &= (PAGE_MASK << 1);
		save_and_cli(flags);
		oldpid = (get_entryhi() & 0xff);
		set_entryhi(page | newpid);
		tlb_write_indexed();
		set_entryhi(oldpid);
		restore_flags(flags);
	}
}
/* Load a new root pointer into the TLB. */
void load_pgd(unsigned long pg_dir)

void pgd_init(unsigned long page)
{
	unsigned long *p = (unsigned long *) page;
	int i;

	for (i = 0; i < USER_PTRS_PER_PGD; i += 8) {
		p[i + 0] = (unsigned long) invalid_pte_table;
		p[i + 1] = (unsigned long) invalid_pte_table;
		p[i + 2] = (unsigned long) invalid_pte_table;
		p[i + 3] = (unsigned long) invalid_pte_table;
		p[i + 4] = (unsigned long) invalid_pte_table;
		p[i + 5] = (unsigned long) invalid_pte_table;
		p[i + 6] = (unsigned long) invalid_pte_table;
		p[i + 7] = (unsigned long) invalid_pte_table;
	}
}
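/*
 * The unrolling above only trades code size for fewer branches on a
 * hot path; it is equivalent to the compact form
 *
 *	for (i = 0; i < USER_PTRS_PER_PGD; i++)
 *		p[i] = (unsigned long) invalid_pte_table;
 */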
#ifdef DEBUG_TLBUPDATE
static unsigned long ehi_debug[NTLB_ENTRIES];
static unsigned long el0_debug[NTLB_ENTRIES];
static unsigned long el1_debug[NTLB_ENTRIES];
#endif
/* We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what's needed.
 */
void update_mmu_cache(struct vm_area_struct * vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)

	pid = get_entryhi() & 0xff;

	if ((pid != (vma->vm_mm->context & 0xff)) || (vma->vm_mm->context == 0)) {
		printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%d tlbpid=%d\n",
		       (int) (vma->vm_mm->context & 0xff), pid);

	save_and_cli(flags);
	address &= (PAGE_MASK << 1);
	set_entryhi(address | (pid));
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset(pmdp, address);
	set_entrylo0(pte_val(*ptep++) >> 6);
	set_entrylo1(pte_val(*ptep) >> 6);
	set_entryhi(address | (pid));
	tlb_write_indexed();
	restore_flags(flags);
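/*
 * Note on the >> 6 above: the software PTE keeps its status bits in
 * the low bits, so shifting right by 6 lines the PFN and the hardware
 * C/D/V/G bits up with the EntryLo register layout.  Two consecutive
 * PTEs are loaded because each TLB entry maps an even/odd page pair
 * through EntryLo0/EntryLo1.
 */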
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;

	save_and_cli(flags);
	address &= (PAGE_MASK << 1);
	set_entryhi(address | (get_entryhi() & 0xff));
	pgdp = pgd_offset(vma->vm_mm, address);
	pmdp = pmd_offset(pgdp, address);
	ptep = pte_offset(pmdp, address);
	set_entrylo0(pte_val(*ptep++) >> 6);
	set_entrylo1(pte_val(*ptep) >> 6);
	tlb_write_indexed();
	restore_flags(flags);
void show_regs(struct pt_regs * regs)
{
	/* Saved main processor registers. */
	printk("$0 : %08lx %08lx %08lx %08lx\n",
	       0UL, regs->regs[1], regs->regs[2], regs->regs[3]);
	printk("$4 : %08lx %08lx %08lx %08lx\n",
	       regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7]);
	printk("$8 : %08lx %08lx %08lx %08lx\n",
	       regs->regs[8], regs->regs[9], regs->regs[10], regs->regs[11]);
	printk("$12: %08lx %08lx %08lx %08lx\n",
	       regs->regs[12], regs->regs[13], regs->regs[14], regs->regs[15]);
	printk("$16: %08lx %08lx %08lx %08lx\n",
	       regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]);
	printk("$20: %08lx %08lx %08lx %08lx\n",
	       regs->regs[20], regs->regs[21], regs->regs[22], regs->regs[23]);
	printk("$24: %08lx %08lx\n",
	       regs->regs[24], regs->regs[25]);
	printk("$28: %08lx %08lx %08lx %08lx\n",
	       regs->regs[28], regs->regs[29], regs->regs[30], regs->regs[31]);

	/* Saved cp0 registers. */
	printk("epc : %08lx\nStatus: %08lx\nCause : %08lx\n",
	       regs->cp0_epc, regs->cp0_status, regs->cp0_cause);
}
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
		     unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	save_and_cli(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = (get_entryhi() & 0xff);
	old_pagemask = get_pagemask();
	wired = get_wired();
	set_wired(wired + 1);
	set_pagemask(pagemask);
	set_entryhi(entryhi);
	set_entrylo0(entrylo0);
	set_entrylo1(entrylo1);
	tlb_write_indexed();
	set_entryhi(old_ctx);
	set_pagemask(old_pagemask);
	restore_flags(flags);
}
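/*
 * Usage sketch (hypothetical values): pinning a mapping that must
 * never be evicted, e.g. a fixed I/O window, could look like
 *
 *	add_wired_entry(lo0, lo1, KSEG2 + offset, PM_16M);
 *
 * where lo0/lo1 carry the PFN plus cache-mode bits for the even and
 * odd page of the pair.
 */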
/* Detect and size the various r4k caches. */
static void __init probe_icache(unsigned long config)
{
	icache_size = 1 << (12 + ((config >> 9) & 7));
	ic_lsize = 16 << ((config >> 5) & 1);

	printk("Primary instruction cache %dkb, linesize %d bytes.\n",
	       icache_size >> 10, ic_lsize);
}

static void __init probe_dcache(unsigned long config)
{
	dcache_size = 1 << (12 + ((config >> 6) & 7));
	dc_lsize = 16 << ((config >> 4) & 1);

	printk("Primary data cache %dkb, linesize %d bytes.\n",
	       dcache_size >> 10, dc_lsize);
}
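/*
 * Worked example of the decode above: a config word with IC field 2
 * and IB bit 1 yields icache_size = 1 << (12 + 2) = 16kb with
 * ic_lsize = 16 << 1 = 32-byte lines, the usual R4600 setup.
 */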
/* If you even _breathe_ on this function, look at the gcc output
 * and make sure it does not pop things on and off the stack for
 * the cache sizing loop that executes in KSEG1 space or else
 * you will crash and burn badly.  You have been warned.
 */
static int __init probe_scache(unsigned long config)
{
	extern unsigned long stext;
	unsigned long flags, addr, begin, end, pow2;

	tmp = ((config >> 17) & 1);
	tmp = ((config >> 22) & 3);

	begin = (unsigned long) &stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/* This is such a bitch, you'd think they would make it
	 * easy to do this.  Away you daemons of stupidity!
	 */
	save_and_cli(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p));	/* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	__asm__ __volatile__("nop; nop; nop; nop;");	/* avoid the hazard */
	__asm__ __volatile__("\n\t.set noreorder\n\t"
			     ".set reorder\n\t" : : "r" (begin));
	__asm__ __volatile__("\n\t.set noreorder\n\t"
			     ".set reorder\n\t" : : "r" (begin));
	__asm__ __volatile__("\n\t.set noreorder\n\t"
			     "cache 11, (%0)\n\t"
			     ".set reorder\n\t" : : "r" (begin));

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	for (addr = (begin + (128 * 1024)); addr < end; addr = (begin + pow2)) {
		__asm__ __volatile__("\n\t.set noreorder\n\t"
				     ".set reorder\n\t" : : "r" (addr));
		__asm__ __volatile__("nop; nop; nop; nop;");	/* hazard... */
		pow2 <<= 1;
	}
	restore_flags(flags);

	printk("Secondary cache sized at %dK linesize %d\n",
	       (int) (addr >> 10), sc_lsize);
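/*
 * The sizing idea in the two loops above, spelled out as far as the
 * elided tag checks allow: every power-of-two offset from `begin' is
 * first touched so it carries a valid tag, the line at `begin' is then
 * given an invalid tag, and the power-of-two offsets are probed again
 * -- the first address whose index wraps around onto the invalidated
 * line reveals the cache size as (addr - begin).
 */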
static void __init setup_noscache_funcs(void)
{
	unsigned int prid;

	switch(dc_lsize) {
	case 16:
		_clear_page = r4k_clear_page_d16;
		_copy_page = r4k_copy_page_d16;
		_flush_cache_all = r4k_flush_cache_all_d16i16;
		_flush_cache_mm = r4k_flush_cache_mm_d16i16;
		_flush_cache_range = r4k_flush_cache_range_d16i16;
		_flush_cache_page = r4k_flush_cache_page_d16i16;
		_flush_page_to_ram = r4k_flush_page_to_ram_d16i16;
		break;
	case 32:
		prid = read_32bit_cp0_register(CP0_PRID) & 0xfff0;
		if (prid == 0x2010) {			/* R4600 V1.7 */
			_clear_page = r4k_clear_page_r4600_v1;
			_copy_page = r4k_copy_page_r4600_v1;
		} else if (prid == 0x2020) {		/* R4600 V2.0 */
			_clear_page = r4k_clear_page_r4600_v2;
			_copy_page = r4k_copy_page_r4600_v2;
		} else {
			_clear_page = r4k_clear_page_d32;
			_copy_page = r4k_copy_page_d32;
		}
		_flush_cache_all = r4k_flush_cache_all_d32i32;
		_flush_cache_mm = r4k_flush_cache_mm_d32i32;
		_flush_cache_range = r4k_flush_cache_range_d32i32;
		_flush_cache_page = r4k_flush_cache_page_d32i32;
		_flush_page_to_ram = r4k_flush_page_to_ram_d32i32;
		break;
	}

	_dma_cache_wback_inv = r4k_dma_cache_wback_inv_pc;
	_dma_cache_wback = r4k_dma_cache_wback;
	_dma_cache_inv = r4k_dma_cache_inv_pc;
}
static void __init setup_scache_funcs(void)
{
	switch(sc_lsize) {
	case 16:
		switch(dc_lsize) {
		case 16:
			_flush_cache_all = r4k_flush_cache_all_s16d16i16;
			_flush_cache_mm = r4k_flush_cache_mm_s16d16i16;
			_flush_cache_range = r4k_flush_cache_range_s16d16i16;
			_flush_cache_page = r4k_flush_cache_page_s16d16i16;
			_flush_page_to_ram = r4k_flush_page_to_ram_s16d16i16;
			break;
		case 32:
			panic("Invalid cache configuration detected");
		};
		_clear_page = r4k_clear_page_s16;
		_copy_page = r4k_copy_page_s16;
		break;
	case 32:
		switch(dc_lsize) {
		case 16:
			_flush_cache_all = r4k_flush_cache_all_s32d16i16;
			_flush_cache_mm = r4k_flush_cache_mm_s32d16i16;
			_flush_cache_range = r4k_flush_cache_range_s32d16i16;
			_flush_cache_page = r4k_flush_cache_page_s32d16i16;
			_flush_page_to_ram = r4k_flush_page_to_ram_s32d16i16;
			break;
		case 32:
			_flush_cache_all = r4k_flush_cache_all_s32d32i32;
			_flush_cache_mm = r4k_flush_cache_mm_s32d32i32;
			_flush_cache_range = r4k_flush_cache_range_s32d32i32;
			_flush_cache_page = r4k_flush_cache_page_s32d32i32;
			_flush_page_to_ram = r4k_flush_page_to_ram_s32d32i32;
			break;
		};
		_clear_page = r4k_clear_page_s32;
		_copy_page = r4k_copy_page_s32;
		break;
	case 64:
		switch(dc_lsize) {
		case 16:
			_flush_cache_all = r4k_flush_cache_all_s64d16i16;
			_flush_cache_mm = r4k_flush_cache_mm_s64d16i16;
			_flush_cache_range = r4k_flush_cache_range_s64d16i16;
			_flush_cache_page = r4k_flush_cache_page_s64d16i16;
			_flush_page_to_ram = r4k_flush_page_to_ram_s64d16i16;
			break;
		case 32:
			_flush_cache_all = r4k_flush_cache_all_s64d32i32;
			_flush_cache_mm = r4k_flush_cache_mm_s64d32i32;
			_flush_cache_range = r4k_flush_cache_range_s64d32i32;
			_flush_cache_page = r4k_flush_cache_page_s64d32i32;
			_flush_page_to_ram = r4k_flush_page_to_ram_s64d32i32;
			break;
		};
		_clear_page = r4k_clear_page_s64;
		_copy_page = r4k_copy_page_s64;
		break;
	case 128:
		switch(dc_lsize) {
		case 16:
			_flush_cache_all = r4k_flush_cache_all_s128d16i16;
			_flush_cache_mm = r4k_flush_cache_mm_s128d16i16;
			_flush_cache_range = r4k_flush_cache_range_s128d16i16;
			_flush_cache_page = r4k_flush_cache_page_s128d16i16;
			_flush_page_to_ram = r4k_flush_page_to_ram_s128d16i16;
			break;
		case 32:
			_flush_cache_all = r4k_flush_cache_all_s128d32i32;
			_flush_cache_mm = r4k_flush_cache_mm_s128d32i32;
			_flush_cache_range = r4k_flush_cache_range_s128d32i32;
			_flush_cache_page = r4k_flush_cache_page_s128d32i32;
			_flush_page_to_ram = r4k_flush_page_to_ram_s128d32i32;
			break;
		};
		_clear_page = r4k_clear_page_s128;
		_copy_page = r4k_copy_page_s128;
		break;
	}

	_dma_cache_wback_inv = r4k_dma_cache_wback_inv_sc;
	_dma_cache_wback = r4k_dma_cache_wback;
	_dma_cache_inv = r4k_dma_cache_inv_sc;
}
typedef int (*probe_func_t)(unsigned long);

static inline void __init setup_scache(unsigned int config)
{
	probe_func_t probe_scache_kseg1;
	int sc_present = 0;

	/* Maybe the cpu knows about an L2 cache? */
	probe_scache_kseg1 = (probe_func_t) (KSEG1ADDR(&probe_scache));
	sc_present = probe_scache_kseg1(config);

	if (sc_present) {
		setup_scache_funcs();
		return;
	}

	setup_noscache_funcs();
}
void __init ld_mmu_r4xx0(void)
{
	unsigned long config = read_32bit_cp0_register(CP0_CONFIG);

	printk("CPU revision is: %08x\n", read_32bit_cp0_register(CP0_PRID));

	set_cp0_config(CONF_CM_CMASK, CONF_CM_CACHABLE_NONCOHERENT);

	probe_icache(config);
	probe_dcache(config);
	setup_scache(config);

	switch(mips_cputype) {
	case CPU_R4600:			/* QED style two way caches? */
		_flush_cache_page = r4k_flush_cache_page_d32i32_r4600;
	}

	_flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	if ((read_32bit_cp0_register(CP0_PRID) & 0xfff0) == 0x2020) {
		_flush_cache_sigtramp = r4600v20k_flush_cache_sigtramp;
	}

	write_32bit_cp0_register(CP0_WIRED, 0);

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	write_32bit_cp0_register(CP0_PAGEMASK, PM_4K);