/* $Id: spitfire.h,v 1.11 2001/03/03 10:34:45 davem Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
7 #ifndef _SPARC64_SPITFIRE_H
8 #define _SPARC64_SPITFIRE_H
/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
16 #define TSB_TAG_TARGET 0x0000000000000000
17 #define TLB_SFSR 0x0000000000000018
18 #define TSB_REG 0x0000000000000028
19 #define TLB_TAG_ACCESS 0x0000000000000030
/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
24 #define PRIMARY_CONTEXT 0x0000000000000008
25 #define SECONDARY_CONTEXT 0x0000000000000010
26 #define DMMU_SFAR 0x0000000000000020
27 #define VIRT_WATCHPOINT 0x0000000000000038
28 #define PHYS_WATCHPOINT 0x0000000000000040
32 enum ultra_tlb_layout {
37 extern enum ultra_tlb_layout tlb_type;
39 #define SPITFIRE_HIGHEST_LOCKED_TLBENT (64 - 1)
40 #define CHEETAH_HIGHEST_LOCKED_TLBENT (16 - 1)
42 #define sparc64_highest_locked_tlbent() \
43 (tlb_type == spitfire ? \
44 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
45 CHEETAH_HIGHEST_LOCKED_TLBENT)
47 extern __inline__ unsigned long spitfire_get_isfsr(void)
51 __asm__ __volatile__("ldxa [%1] %2, %0"
53 : "r" (TLB_SFSR), "i" (ASI_IMMU));
57 extern __inline__ unsigned long spitfire_get_dsfsr(void)
61 __asm__ __volatile__("ldxa [%1] %2, %0"
63 : "r" (TLB_SFSR), "i" (ASI_DMMU));
67 extern __inline__ unsigned long spitfire_get_sfar(void)
71 __asm__ __volatile__("ldxa [%1] %2, %0"
73 : "r" (DMMU_SFAR), "i" (ASI_DMMU));
77 extern __inline__ void spitfire_put_isfsr(unsigned long sfsr)
79 __asm__ __volatile__("stxa %0, [%1] %2" :
80 : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
83 extern __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
85 __asm__ __volatile__("stxa %0, [%1] %2" :
86 : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
89 extern __inline__ unsigned long spitfire_get_primary_context(void)
93 __asm__ __volatile__("ldxa [%1] %2, %0"
95 : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
99 extern __inline__ void spitfire_set_primary_context(unsigned long ctx)
101 __asm__ __volatile__("stxa %0, [%1] %2"
104 "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
108 extern __inline__ unsigned long spitfire_get_secondary_context(void)
112 __asm__ __volatile__("ldxa [%1] %2, %0"
114 : "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU));
118 extern __inline__ void spitfire_set_secondary_context(unsigned long ctx)
120 __asm__ __volatile__("stxa %0, [%1] %2"
123 "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU));
/* The data cache is write through, so this just invalidates the
 * specified line.
 */
130 extern __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
132 __asm__ __volatile__("stxa %0, [%1] %2"
134 : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline,
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
144 extern __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
146 __asm__ __volatile__("stxa %0, [%1] %2"
148 : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
151 extern __inline__ unsigned long spitfire_get_dtlb_data(int entry)
155 __asm__ __volatile__("ldxa [%1] %2, %0"
157 : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));
159 /* Clear TTE diag bits. */
160 data &= ~0x0003fe0000000000UL;
165 extern __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
169 __asm__ __volatile__("ldxa [%1] %2, %0"
171 : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
175 extern __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
177 __asm__ __volatile__("stxa %0, [%1] %2"
179 : "r" (data), "r" (entry << 3),
180 "i" (ASI_DTLB_DATA_ACCESS));
183 extern __inline__ unsigned long spitfire_get_itlb_data(int entry)
187 __asm__ __volatile__("ldxa [%1] %2, %0"
189 : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));
191 /* Clear TTE diag bits. */
192 data &= ~0x0003fe0000000000UL;
197 extern __inline__ unsigned long spitfire_get_itlb_tag(int entry)
201 __asm__ __volatile__("ldxa [%1] %2, %0"
203 : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
207 extern __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
209 __asm__ __volatile__("stxa %0, [%1] %2"
211 : "r" (data), "r" (entry << 3),
212 "i" (ASI_ITLB_DATA_ACCESS));
215 /* Spitfire hardware assisted TLB flushes. */
217 /* Context level flushes. */
218 extern __inline__ void spitfire_flush_dtlb_primary_context(void)
220 __asm__ __volatile__("stxa %%g0, [%0] %1"
222 : "r" (0x40), "i" (ASI_DMMU_DEMAP));
225 extern __inline__ void spitfire_flush_itlb_primary_context(void)
227 __asm__ __volatile__("stxa %%g0, [%0] %1"
229 : "r" (0x40), "i" (ASI_IMMU_DEMAP));
232 extern __inline__ void spitfire_flush_dtlb_secondary_context(void)
234 __asm__ __volatile__("stxa %%g0, [%0] %1"
236 : "r" (0x50), "i" (ASI_DMMU_DEMAP));
239 extern __inline__ void spitfire_flush_itlb_secondary_context(void)
241 __asm__ __volatile__("stxa %%g0, [%0] %1"
243 : "r" (0x50), "i" (ASI_IMMU_DEMAP));
246 extern __inline__ void spitfire_flush_dtlb_nucleus_context(void)
248 __asm__ __volatile__("stxa %%g0, [%0] %1"
250 : "r" (0x60), "i" (ASI_DMMU_DEMAP));
253 extern __inline__ void spitfire_flush_itlb_nucleus_context(void)
255 __asm__ __volatile__("stxa %%g0, [%0] %1"
257 : "r" (0x60), "i" (ASI_IMMU_DEMAP));
260 /* Page level flushes. */
261 extern __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
263 __asm__ __volatile__("stxa %%g0, [%0] %1"
265 : "r" (page), "i" (ASI_DMMU_DEMAP));
268 extern __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
270 __asm__ __volatile__("stxa %%g0, [%0] %1"
272 : "r" (page), "i" (ASI_IMMU_DEMAP));
275 extern __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
277 __asm__ __volatile__("stxa %%g0, [%0] %1"
279 : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
282 extern __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
284 __asm__ __volatile__("stxa %%g0, [%0] %1"
286 : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
289 extern __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
291 __asm__ __volatile__("stxa %%g0, [%0] %1"
293 : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
296 extern __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
298 __asm__ __volatile__("stxa %%g0, [%0] %1"
300 : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
303 /* Cheetah has "all non-locked" tlb flushes. */
304 extern __inline__ void cheetah_flush_dtlb_all(void)
306 __asm__ __volatile__("stxa %%g0, [%0] %1"
308 : "r" (0x80), "i" (ASI_DMMU_DEMAP));
311 extern __inline__ void cheetah_flush_itlb_all(void)
313 __asm__ __volatile__("stxa %%g0, [%0] %1"
315 : "r" (0x80), "i" (ASI_IMMU_DEMAP));
/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 */
328 extern __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
332 __asm__ __volatile__("ldxa [%1] %2, %0"
334 : "r" ((0 << 16) | (entry << 3)),
335 "i" (ASI_DTLB_DATA_ACCESS));
340 extern __inline__ unsigned long cheetah_get_litlb_data(int entry)
344 __asm__ __volatile__("ldxa [%1] %2, %0"
346 : "r" ((0 << 16) | (entry << 3)),
347 "i" (ASI_ITLB_DATA_ACCESS));
352 extern __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
356 __asm__ __volatile__("ldxa [%1] %2, %0"
358 : "r" ((0 << 16) | (entry << 3)),
359 "i" (ASI_DTLB_TAG_READ));
364 extern __inline__ unsigned long cheetah_get_litlb_tag(int entry)
368 __asm__ __volatile__("ldxa [%1] %2, %0"
370 : "r" ((0 << 16) | (entry << 3)),
371 "i" (ASI_ITLB_TAG_READ));
376 extern __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
378 __asm__ __volatile__("stxa %0, [%1] %2"
381 "r" ((0 << 16) | (entry << 3)),
382 "i" (ASI_DTLB_DATA_ACCESS));
385 extern __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
387 __asm__ __volatile__("stxa %0, [%1] %2"
390 "r" ((0 << 16) | (entry << 3)),
391 "i" (ASI_ITLB_DATA_ACCESS));
394 extern __inline__ unsigned long cheetah_get_dtlb_data(int entry)
398 __asm__ __volatile__("ldxa [%1] %2, %0"
400 : "r" ((2 << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));
405 extern __inline__ unsigned long cheetah_get_dtlb_tag(int entry)
409 __asm__ __volatile__("ldxa [%1] %2, %0"
411 : "r" ((2 << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
415 extern __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data)
417 __asm__ __volatile__("stxa %0, [%1] %2"
420 "r" ((2 << 16) | (entry << 3)),
421 "i" (ASI_DTLB_DATA_ACCESS));
424 extern __inline__ unsigned long cheetah_get_itlb_data(int entry)
428 __asm__ __volatile__("ldxa [%1] %2, %0"
430 : "r" ((2 << 16) | (entry << 3)),
431 "i" (ASI_ITLB_DATA_ACCESS));
436 extern __inline__ unsigned long cheetah_get_itlb_tag(int entry)
440 __asm__ __volatile__("ldxa [%1] %2, %0"
442 : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
446 extern __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
448 __asm__ __volatile__("stxa %0, [%1] %2"
450 : "r" (data), "r" ((2 << 16) | (entry << 3)),
451 "i" (ASI_ITLB_DATA_ACCESS));
454 #endif /* !(__ASSEMBLY__) */
456 #endif /* !(_SPARC64_SPITFIRE_H) */