/*
 * Switch a MMU context.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999 by Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
#ifndef _ASM_MMU_CONTEXT_H
#define _ASM_MMU_CONTEXT_H

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
 * For the fast tlb miss handlers, we currently keep a per cpu array
 * of pointers to the current pgd for each processor. Also, the proc.
 * id is stuffed into the context register. This should be changed to
 * use the processor id via current->processor, where current is stored
 * in watchhi/lo. The context register should be used to contiguously
 * map the page tables.
 */
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
	pgd_current[smp_processor_id()] = (unsigned long)(pgd)

#define TLBMISS_HANDLER_SETUP() \
	write_c0_context(((long)(&pgd_current[smp_processor_id()])) << 23); \
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)

extern unsigned long pgd_current[];
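/*
 * Illustrative sketch (an assumption, not part of the original code):
 * TLBMISS_HANDLER_SETUP() above stores &pgd_current[cpu] pre-shifted
 * left by 23 bits in the CP0 Context register, so a TLB refill path
 * can get back at this CPU's current pgd with nothing more than
 *
 *	unsigned long p = read_c0_context() >> 23;
 *	pgd_t *pgd = (pgd_t *)(*(unsigned long *)p);
 *
 * where p is &pgd_current[smp_processor_id()]. The real refill
 * handlers are hand-written assembly elsewhere in the MIPS tree; this
 * only shows what the << 23 encoding buys them.
 */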
#define ASID_INC	0x1
#define ASID_MASK	0xff
#define cpu_context(cpu, mm)	((mm)->context[cpu])
#define cpu_asid(cpu, mm)	(cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu)		(cpu_data[cpu].asid_cache)
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
 * All unused by hardware upper bits will be considered
 * as a software asid extension.
 */
#define ASID_VERSION_MASK  ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
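/*
 * Worked example (added for clarity): with ASID_MASK == 0xff,
 * ASID_VERSION_MASK is ~(0xff | 0xfe) == ~0xff, i.e. everything above
 * the low 8 bits, and ASID_FIRST_VERSION is 0xff + 1 == 0x100. A
 * cpu_context() value therefore looks like
 *
 *	[ software version | 8-bit hardware ASID ]
 *
 * so 0x204 means "version 2, hardware ASID 0x04". Comparing the
 * version bits is what tells switch_mm() below whether an mm's ASID
 * survived the last local_flush_tlb_all().
 */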
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
	unsigned long asid = asid_cache(cpu);

	if (! ((asid += ASID_INC) & ASID_MASK) ) {
#ifdef CONFIG_VTAG_ICACHE
		flush_icache_all();
#endif
		local_flush_tlb_all();	/* start new asid cycle */
		if (!asid)		/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}
	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
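/*
 * Worked example (added for clarity): asid_cache(cpu) simply counts
 * up. Going from 0x1ff to 0x200 makes the low 8 bits wrap to zero, so
 * the local TLB is flushed and 0x200 starts a new cycle under a new
 * version. Only if the counter itself wraps all the way around to 0
 * is it reset to ASID_FIRST_VERSION (0x100), so a live context value
 * can never collide with the "unassigned" value 0 used by
 * init_new_context() and drop_mmu_context() below.
 */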
/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int i;

	for (i = 0; i < num_online_cpus(); i++)
		cpu_context(i, mm) = 0;

	return 0;
}
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk, unsigned cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	/* Check if our ASID is of an older version and thus invalid */
	if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
		get_new_mmu_context(next, cpu);

	write_c0_entryhi(cpu_context(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/*
	 * Mark current->active_mm as not "active" anymore.
	 * We don't want to mislead possible IPI tlb flush routines.
	 */
	clear_bit(cpu, &prev->cpu_vm_mask);
	set_bit(cpu, &next->cpu_vm_mask);

	local_irq_restore(flags);
}
/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{
}

#define deactivate_mm(tsk,mm)	do { } while (0)
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;
	int cpu = smp_processor_id();

	local_irq_save(flags);

	/* Unconditionally get a new ASID. */
	get_new_mmu_context(next, cpu);

	write_c0_entryhi(cpu_context(cpu, next));
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);

	/* mark mmu ownership change */
	clear_bit(cpu, &prev->cpu_vm_mask);
	set_bit(cpu, &next->cpu_vm_mask);

	local_irq_restore(flags);
}
/*
 * If mm is currently active_mm, we can't really drop it. Instead,
 * we will get a new one for it.
 */
static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	if (test_bit(cpu, &mm->cpu_vm_mask)) {
		get_new_mmu_context(mm, cpu);
		write_c0_entryhi(cpu_asid(cpu, mm));
	} else {
		/* will get a new context next time */
		cpu_context(cpu, mm) = 0;
	}

	local_irq_restore(flags);
}
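/*
 * Illustrative sketch (an assumption, not part of this header): a
 * per-CPU "flush the whole mm" path in the MIPS TLB code is the kind
 * of caller drop_mmu_context() is written for, roughly:
 *
 *	void local_flush_tlb_mm(struct mm_struct *mm)
 *	{
 *		int cpu = smp_processor_id();
 *
 *		if (cpu_context(cpu, mm) != 0)
 *			drop_mmu_context(mm, cpu);
 *	}
 *
 * Handing out a fresh ASID (or zeroing the stale one) makes every TLB
 * entry tagged with the old ASID unreachable, which is what "flushing"
 * an mm amounts to on this hardware.
 */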
#endif /* _ASM_MMU_CONTEXT_H */