#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

void arch_exit_mmap(struct mm_struct *mm);
void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);

void mm_pin(struct mm_struct *mm);
void mm_unpin(struct mm_struct *mm);
void mm_pin_all(void);
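
/*
 * Unless the hypervisor offers writable page tables, Xen only installs
 * pgds that it has validated and write-protected ("pinned").
 * xen_activate_mm() pins the incoming mm's pgd on first activation.
 */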
static inline void xen_activate_mm(struct mm_struct *prev,
				   struct mm_struct *next)
{
	if (!PagePinned(virt_to_page(next->pgd)))
		mm_pin(next);
}

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
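
/*
 * There is no lazy TLB mode under Xen; the cpu_tlbstate bookkeeping
 * below is only compiled in for native (non-XEN) SMP builds.
 */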
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) /* XEN: no lazy tlb */
	if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
#endif
}

#define prepare_arch_switch(next)	__prepare_arch_switch()
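
/*
 * Called via prepare_arch_switch() before switch_mm() issues the cr3/LDT
 * hypercall, so user segment state is saved and cleared here rather than
 * in __switch_to().
 */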
static inline void __prepare_arch_switch(void)
{
#ifdef CONFIG_X86_32
	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.
	 */
	lazy_save_gs(current->thread.gs);
	lazy_load_gs(__KERNEL_STACK_CANARY);
#else
	/*
	 * Save away %es, %ds, %fs and %gs. Must happen before reload
	 * of cr3/ldt (i.e., not in __switch_to).
	 */
	__asm__ __volatile__ (
		"mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
		: "=m" (current->thread.es),
		  "=m" (current->thread.ds),
		  "=m" (current->thread.fsindex),
		  "=m" (current->thread.gsindex) );

	if (current->thread.ds)
		__asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );

	if (current->thread.es)
		__asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );

	if (current->thread.fsindex) {
		__asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
		current->thread.fs = 0;
	}

	if (current->thread.gsindex) {
		load_gs_index(0);
		current->thread.gs = 0;
	}
#endif
}
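
/*
 * switch_mm() replaces the native cr3 and LDT writes with a single
 * batched HYPERVISOR_mmuext_op() call: new base pointer, new user base
 * pointer (64-bit only) and, if needed, the new LDT.
 */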
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	struct mmuext_op _op[2 + (sizeof(long) > 4)], *op = _op;
#ifdef CONFIG_X86_64
	pgd_t *upgd;
#endif

	if (likely(prev != next)) {
		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
		       !PagePinned(virt_to_page(next->pgd)));

#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) /* XEN: no lazy tlb */
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		percpu_write(cpu_tlbstate.active_mm, next);
#endif
		cpumask_set_cpu(cpu, mm_cpumask(next));

		/* Re-load page tables: load_cr3(next->pgd) */
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = virt_to_mfn(next->pgd);
		op++;

		/* xen_new_user_pt(next->pgd) */
#ifdef CONFIG_X86_64
		op->cmd = MMUEXT_NEW_USER_BASEPTR;
		upgd = __user_pgd(next->pgd);
		op->arg1.mfn = likely(upgd) ? virt_to_mfn(upgd) : 0;
		op++;
#endif

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt)) {
			/* load_LDT_nolock(&next->context) */
			op->cmd = MMUEXT_SET_LDT;
			op->arg1.linear_addr = (unsigned long)next->context.ldt;
			op->arg2.nr_ents = next->context.size;
			op++;
		}

		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));

		/* stop TLB flushes for the previous mm */
		cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#if defined(CONFIG_SMP) && !defined(CONFIG_XEN) /* XEN: no lazy tlb */
	else {
		percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
		BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
		if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			xen_new_user_pt(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

#define activate_mm(prev, next)			\
do {						\
	xen_activate_mm(prev, next);		\
	switch_mm((prev), (next), NULL);	\
} while (0)
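
/*
 * deactivate_mm() clears the user segment registers so no stale selectors
 * survive from the previous mm (%gs on 32-bit; %fs and %gs on 64-bit).
 */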
#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

#endif /* _ASM_X86_MMU_CONTEXT_H */