/*
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2000, 2001 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/bootinfo.h>
#include <asm/cacheops.h>
#include <asm/uaccess.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void sb1_dma_init(void);
extern void sb1_clear_page_dma(void * page);
extern void sb1_copy_page_dma(void * to, void * from);
#else
extern void sb1_clear_page(void * page);
extern void sb1_copy_page(void * to, void * from);
#endif

/* These are probed at ld_mmu time */
static unsigned long icache_size;
static unsigned long dcache_size;

static unsigned long icache_line_size;
static unsigned long dcache_line_size;

static unsigned int icache_index_mask;
static unsigned int dcache_index_mask;

static unsigned long icache_assoc;
static unsigned long dcache_assoc;

static unsigned int icache_sets;
static unsigned int dcache_sets;

static unsigned int icache_range_cutoff;
static unsigned int dcache_range_cutoff;

/*
 * The dcache is fully coherent to the system, with one
 * big caveat:  the instruction stream.  In other words,
 * if we miss in the icache, and have dirty data in the
 * L1 dcache, then we'll go out to memory (or the L2) and
 * get the not-as-recent data.
 *
 * So the only time we have to flush the dcache is when
 * we're flushing the icache.  Since the L2 is fully
 * coherent to everything, including I/O, we never have
 * to flush it.
 */
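
/*
 * cache_set_op() applies one index cache op to all four ways of a
 * given set: the L1 caches are 4-way associative with 8KB ways, so
 * adding (way << 13) to the index address selects each way in turn.
 */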
#define cache_set_op(op, addr)						\
	__asm__ __volatile__(						\
	"	.set	noreorder		\n"			\
	"	.set	mips64\n\t		\n"			\
	"	cache	%0, (0<<13)(%1)		\n"			\
	"	cache	%0, (1<<13)(%1)		\n"			\
	"	cache	%0, (2<<13)(%1)		\n"			\
	"	cache	%0, (3<<13)(%1)		\n"			\
	"	.set	mips0			\n"			\
	"	.set	reorder"					\
	:								\
	: "i" (op), "r" (addr))

#define sync()								\
	__asm__ __volatile__(						\
	"	.set	mips64\n\t	\n"				\
	"	sync			\n"				\
	"	.set	mips0")

/*
 * Writeback and invalidate the entire dcache
 */
static inline void __sb1_writeback_inv_dcache_all(void)
{
	unsigned long addr = 0;

	while (addr < dcache_line_size * dcache_sets) {
		cache_set_op(Index_Writeback_Inv_D, addr);
		addr += dcache_line_size;
	}
}
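
/*
 * Note that the loop above only walks dcache_sets indices, i.e. one
 * way's worth of addresses; cache_set_op() hits all four ways at each
 * index, so the entire dcache is still covered.
 */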

/*
 * Writeback and invalidate a range of the dcache.  The addresses are
 * virtual, and since we're using index ops and bit 12 is part of both
 * the virtual frame and physical index, we have to clear both sets
 * (bit 12 set and cleared).
 */
static inline void __sb1_writeback_inv_dcache_range(unsigned long start,
	unsigned long end)
{
	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Writeback_Inv_D, start);
		cache_set_op(Index_Writeback_Inv_D, start ^ (1<<12));
		start += dcache_line_size;
	}
}
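
/*
 * Worked example of the bit 12 aliasing: with 4KB pages, VA bit 12
 * belongs to the virtual page number, but with 8KB ways it is also
 * part of the cache index.  A given physical line can therefore sit
 * at either of two index positions, which is why each line is flushed
 * with bit 12 both clear and set (the "start ^ (1<<12)" op above).
 */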

/*
 * Writeback and invalidate a range of the dcache.  With physical
 * addresses, we don't have to worry about possible bit 12 aliasing.
 * XXXKW is it worth turning on KX and using hit ops with xkphys?
 */
static inline void __sb1_writeback_inv_dcache_phys_range(unsigned long start,
	unsigned long end)
{
	start &= ~(dcache_line_size - 1);
	end = (end + dcache_line_size - 1) & ~(dcache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Writeback_Inv_D, start & dcache_index_mask);
		start += dcache_line_size;
	}
}

/*
 * Invalidate the entire icache
 */
static inline void __sb1_flush_icache_all(void)
{
	unsigned long addr = 0;

	while (addr < icache_line_size * icache_sets) {
		cache_set_op(Index_Invalidate_I, addr);
		addr += icache_line_size;
	}
}

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr)
{
	int cpu = smp_processor_id();

	if (!(vma->vm_flags & VM_EXEC))
		return;

	__sb1_writeback_inv_dcache_range(addr, addr + PAGE_SIZE);

	/*
	 * Bumping the ASID is probably cheaper than the flush ...
	 */
	if (cpu_context(cpu, vma->vm_mm) != 0)
		drop_mmu_context(vma->vm_mm, cpu);
}

#ifdef CONFIG_SMP
struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
};

static void sb1_flush_cache_page_ipi(void *info)
{
	struct flush_cache_page_args *args = info;

	local_sb1_flush_cache_page(args->vma, args->addr);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct flush_cache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.addr = addr;
	on_each_cpu(sb1_flush_cache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_cache_page(struct vm_area_struct *vma, unsigned long addr);
asm("sb1_flush_cache_page = local_sb1_flush_cache_page");
#endif
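
/*
 * On non-SMP kernels there is no other CPU to IPI, so the asm alias
 * above binds the public symbol straight to the local (this-CPU)
 * variant instead of going through an IPI wrapper.
 */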

/*
 * Invalidate a range of the icache.  The addresses are virtual, and
 * the cache is virtually indexed and tagged.  However, we don't
 * necessarily have the right ASID context, so use index ops instead
 * of hit ops.
 */
static inline void __sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	start &= ~(icache_line_size - 1);
	end = (end + icache_line_size - 1) & ~(icache_line_size - 1);

	while (start != end) {
		cache_set_op(Index_Invalidate_I, start & icache_index_mask);
		start += icache_line_size;
	}

	__asm__ __volatile__(
	"	bnezl	$0, 1f		\n" /* Force mispredict */
	"1:				\n");
}
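
/*
 * The bnezl above tests $0 (hardwired zero) with a branch-likely, so
 * the branch is never taken and its delay slot is nullified; the
 * forced mispredict makes the front end discard any instructions
 * speculatively fetched before the invalidation completed.
 */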

/*
 * Invalidate all caches on this CPU
 */
static void local_sb1___flush_cache_all(void)
{
	__sb1_writeback_inv_dcache_all();
	__sb1_flush_icache_all();
}

#ifdef CONFIG_SMP
extern void sb1___flush_cache_all_ipi(void *ignored);
asm("sb1___flush_cache_all_ipi = local_sb1___flush_cache_all");

static void sb1___flush_cache_all(void)
{
	on_each_cpu(sb1___flush_cache_all_ipi, 0, 1, 1);
}
#else
extern void sb1___flush_cache_all(void);
asm("sb1___flush_cache_all = local_sb1___flush_cache_all");
#endif

/*
 * When flushing a range in the icache, we have to first writeback
 * the dcache for the same range, so new ifetches will see any
 * data that was dirty in the dcache.
 *
 * The start/end arguments are Kseg addresses (possibly mapped Kseg).
 */
static void local_sb1_flush_icache_range(unsigned long start,
	unsigned long end)
{
	/* Just wb-inv the whole dcache if the range is big enough */
	if ((end - start) > dcache_range_cutoff)
		__sb1_writeback_inv_dcache_all();
	else
		__sb1_writeback_inv_dcache_range(start, end);

	/* Just flush the whole icache if the range is big enough */
	if ((end - start) > icache_range_cutoff)
		__sb1_flush_icache_all();
	else
		__sb1_flush_icache_range(start, end);
}
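
/*
 * The cutoffs above mark the point where walking the range costs at
 * least as many index ops as wiping the whole cache, so the full
 * flush is used instead; see probe_cache_sizes() for how they are
 * derived.
 */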

#ifdef CONFIG_SMP
struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static void sb1_flush_icache_range_ipi(void *info)
{
	struct flush_icache_range_args *args = info;

	local_sb1_flush_icache_range(args->start, args->end);
}

void sb1_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;
	on_each_cpu(sb1_flush_icache_range_ipi, &args, 1, 1);
}
#else
void sb1_flush_icache_range(unsigned long start, unsigned long end);
asm("sb1_flush_icache_range = local_sb1_flush_icache_range");
#endif

/*
 * Flush the icache for a given physical page.  Need to writeback the
 * dcache first, then invalidate the icache.  If the page isn't
 * executable, nothing is required.
 */
static void local_sb1_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	unsigned long start;
	int cpu = smp_processor_id();

	if (!(vma->vm_flags & VM_EXEC))
		return;

	/* Need to writeback any dirty data for that page, we have the PA */
	start = (unsigned long)(page-mem_map) << PAGE_SHIFT;
	__sb1_writeback_inv_dcache_phys_range(start, start + PAGE_SIZE);

	/*
	 * If there's a context, bump the ASID (cheaper than a flush,
	 * since we don't know VAs!)
	 */
	if (cpu_context(cpu, vma->vm_mm) != 0) {
		drop_mmu_context(vma->vm_mm, cpu);
	}
}
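
/*
 * The physical address computation above relies on a flat mem_map:
 * (page - mem_map) is the page frame number, so shifting it left by
 * PAGE_SHIFT yields the page's physical address.
 */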

#ifdef CONFIG_SMP
struct flush_icache_page_args {
	struct vm_area_struct *vma;
	struct page *page;
};

static void sb1_flush_icache_page_ipi(void *info)
{
	struct flush_icache_page_args *args = info;
	local_sb1_flush_icache_page(args->vma, args->page);
}

/* Dirty dcache could be on another CPU, so do the IPIs */
static void sb1_flush_icache_page(struct vm_area_struct *vma,
	struct page *page)
{
	struct flush_icache_page_args args;

	if (!(vma->vm_flags & VM_EXEC))
		return;

	args.vma = vma;
	args.page = page;
	on_each_cpu(sb1_flush_icache_page_ipi, (void *) &args, 1, 1);
}
#else
void sb1_flush_icache_page(struct vm_area_struct *vma, struct page *page);
asm("sb1_flush_icache_page = local_sb1_flush_icache_page");
#endif

/*
 * A signal trampoline must fit into a single cacheline.
 */
static void local_sb1_flush_cache_sigtramp(unsigned long addr)
{
	__asm__ __volatile__ (
	"	.set	push			\n"
	"	.set	noreorder		\n"
	"	.set	noat			\n"
	"	.set	mips64			\n"
	"	cache	%2, (0<<13)(%0)		\n" /* Index-inval this address */
	"	cache	%2, (1<<13)(%0)		\n" /* Index-inval this address */
	"	cache	%2, (2<<13)(%0)		\n" /* Index-inval this address */
	"	cache	%2, (3<<13)(%0)		\n" /* Index-inval this address */
	"	xori	$1, %0, 1<<12		\n" /* Flip index bit 12 */
	"	cache	%2, (0<<13)($1)		\n" /* Index-inval this address */
	"	cache	%2, (1<<13)($1)		\n" /* Index-inval this address */
	"	cache	%2, (2<<13)($1)		\n" /* Index-inval this address */
	"	cache	%2, (3<<13)($1)		\n" /* Index-inval this address */
	"	cache	%3, (0<<13)(%1)		\n" /* Index-inval this address */
	"	cache	%3, (1<<13)(%1)		\n" /* Index-inval this address */
	"	cache	%3, (2<<13)(%1)		\n" /* Index-inval this address */
	"	cache	%3, (3<<13)(%1)		\n" /* Index-inval this address */
	"	bnezl	$0, 1f			\n" /* Force mispredict */
	"1:					\n"
	"	.set	pop			\n"
	:
	: "r" (addr & dcache_index_mask), "r" (addr & icache_index_mask),
	  "i" (Index_Writeback_Inv_D), "i" (Index_Invalidate_I));
}
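
/*
 * Only the dcache ops above are repeated with bit 12 flipped: the
 * dcache index is physical, so both possible settings of VA bit 12
 * must be covered, while the icache is virtually indexed and the
 * trampoline's address picks a single index.
 */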

#ifdef CONFIG_SMP
static void sb1_flush_cache_sigtramp_ipi(void *info)
{
	unsigned long iaddr = (unsigned long) info;
	local_sb1_flush_cache_sigtramp(iaddr);
}

static void sb1_flush_cache_sigtramp(unsigned long addr)
{
	on_each_cpu(sb1_flush_cache_sigtramp_ipi, (void *) addr, 1, 1);
}
#else
void sb1_flush_cache_sigtramp(unsigned long addr);
asm("sb1_flush_cache_sigtramp = local_sb1_flush_cache_sigtramp");
#endif

/*
 * Anything that just flushes dcache state can be ignored, as we're always
 * coherent in dcache space.  This is just a dummy function that all the
 * nop'ed routines point to
 */
static void sb1_nop(void)
{
}

/*
 * Cache set values (from the mips64 spec)
 * 0 - 64
 * 1 - 128
 * 2 - 256
 * 3 - 512
 * 4 - 1024
 * 5 - 2048
 * 6 - 4096
 * 7 - Reserved
 */
static unsigned int decode_cache_sets(unsigned int config_field)
{
	if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 6));
}

/*
 * Cache line size values (from the mips64 spec)
 * 0 - No cache present.
 * 1 - 4 bytes
 * 2 - 8 bytes
 * 3 - 16 bytes
 * 4 - 32 bytes
 * 5 - 64 bytes
 * 6 - 128 bytes
 * 7 - Reserved
 */
static unsigned int decode_cache_line_size(unsigned int config_field)
{
	if (config_field == 0) {
		return 0;
	} else if (config_field == 7) {
		/* JDCXXX - Find a graceful way to abort. */
		return 0;
	}
	return (1<<(config_field + 1));
}

/*
 * Relevant bits of the config1 register format (from the MIPS32/MIPS64 specs)
 *
 * 24:22 Icache sets per way
 * 21:19 Icache line size
 * 18:16 Icache Associativity
 * 15:13 Dcache sets per way
 * 12:10 Dcache line size
 * 9:7   Dcache Associativity
 */
static __init void probe_cache_sizes(void)
{
	u32 config1;

	config1 = read_c0_config1();
	icache_line_size = decode_cache_line_size((config1 >> 19) & 0x7);
	dcache_line_size = decode_cache_line_size((config1 >> 10) & 0x7);
	icache_sets = decode_cache_sets((config1 >> 22) & 0x7);
	dcache_sets = decode_cache_sets((config1 >> 13) & 0x7);
	icache_assoc = ((config1 >> 16) & 0x7) + 1;
	dcache_assoc = ((config1 >> 7) & 0x7) + 1;
	icache_size = icache_line_size * icache_sets * icache_assoc;
	dcache_size = dcache_line_size * dcache_sets * dcache_assoc;
	/* Need to remove non-index bits for index ops */
	icache_index_mask = (icache_sets - 1) * icache_line_size;
	dcache_index_mask = (dcache_sets - 1) * dcache_line_size;

	/*
	 * These are for choosing range (index ops) versus all.
	 * icache flushes all ways for each set, so drop icache_assoc.
	 * dcache flushes all ways and each setting of bit 12 for each
	 * index, so drop dcache_assoc and halve the dcache_sets.
	 */
	icache_range_cutoff = icache_sets * icache_line_size;
	dcache_range_cutoff = (dcache_sets / 2) * dcache_line_size;
}
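
/*
 * Worked example for a 32KB, 4-way, 64-byte-line cache (the geometry
 * implied by the (n<<13) way stride used above): sets = 128, so
 * size = 64 * 128 * 4 = 32KB, index_mask = 127 * 64 = 0x1fc0,
 * icache_range_cutoff = 8KB and dcache_range_cutoff = 4KB.
 */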

/*
 * This is called from loadmmu.c.  We have to set up all the
 * memory management function pointers, as well as initialize
 * the caches and tlbs
 */
void ld_mmu_sb1(void)
{
	extern char except_vec2_sb1;

	/* Special cache error handler for SB1 */
	memcpy((void *)(KSEG0 + 0x100), &except_vec2_sb1, 0x80);
	memcpy((void *)(KSEG1 + 0x100), &except_vec2_sb1, 0x80);

	probe_cache_sizes();

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
	_clear_page = sb1_clear_page_dma;
	_copy_page = sb1_copy_page_dma;
	sb1_dma_init();
#else
	_clear_page = sb1_clear_page;
	_copy_page = sb1_copy_page;
#endif
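
	/*
	 * _clear_page and _copy_page are function pointers so that, with
	 * CONFIG_SIBYTE_DMA_PAGEOPS, page clears and copies can be handed
	 * off to the SiByte DMA engine (set up by sb1_dma_init()) instead
	 * of being done by the CPU.
	 */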

	/*
	 * None of these are needed for the SB1 - the Dcache is
	 * physically indexed and tagged, so no virtual aliasing can
	 * occur
	 */
	flush_cache_range = (void *) sb1_nop;
	flush_cache_page = sb1_flush_cache_page;
	flush_cache_mm = (void (*)(struct mm_struct *))sb1_nop;
	flush_cache_all = sb1_nop;

	/* These routines are for Icache coherence with the Dcache */
	flush_icache_range = sb1_flush_icache_range;
	flush_icache_page = sb1_flush_icache_page;
	flush_icache_all = __sb1_flush_icache_all; /* local only */

	flush_cache_sigtramp = sb1_flush_cache_sigtramp;
	flush_data_cache_page = (void *) sb1_nop;

	__flush_cache_all = sb1___flush_cache_all;

	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * This is the only way to force the update of K0 to complete
	 * before subsequent instruction fetch.
	 */
	write_c0_epc(&&here);
	__asm__ __volatile__(