#ifndef __X86_64_MMU_CONTEXT_H
#define __X86_64_MMU_CONTEXT_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/pda.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

void arch_exit_mmap(struct mm_struct *mm);
void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);

/*
 * possibly do the LDT unload here?
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
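
/*
 * Mark this CPU as lazy with respect to @mm: on native SMP kernels the
 * per-CPU mmu_state is downgraded from TLBSTATE_OK to TLBSTATE_LAZY so
 * that later TLB-flush IPIs can be avoided for this mm.  With CONFIG_XEN
 * (or on UP) this compiles to a no-op.
 */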
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
	if (read_pda(mmu_state) == TLBSTATE_OK)
		write_pda(mmu_state, TLBSTATE_LAZY);
#endif
}

#define prepare_arch_switch(next)	__prepare_arch_switch()
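
/*
 * __prepare_arch_switch() saves the user segment selectors and then
 * loads the null selector into any that were non-zero, so that no
 * segment register is left referencing state (such as the outgoing LDT)
 * that the upcoming cr3/LDT switch will replace.
 */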
static inline void __prepare_arch_switch(void)
{
	/*
	 * Save away %es, %ds, %fs and %gs. Must happen before reload
	 * of cr3/ldt (i.e., not in __switch_to).
	 */
	__asm__ __volatile__ (
		"mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
		: "=m" (current->thread.es),
		  "=m" (current->thread.ds),
		  "=m" (current->thread.fsindex),
		  "=m" (current->thread.gsindex) );

	if (current->thread.ds)
		__asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );

	if (current->thread.es)
		__asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );

	if (current->thread.fsindex) {
		__asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
		current->thread.fs = 0;
	}

	if (current->thread.gsindex) {
		load_gs_index(0);
		current->thread.gs = 0;
	}
}
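
/*
 * mm_pin()/mm_unpin() register/unregister an mm's page tables with the
 * Xen hypervisor; switch_mm() below requires the target mm to be pinned
 * unless writable page tables are available.
 */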
extern void mm_pin(struct mm_struct *mm);
extern void mm_unpin(struct mm_struct *mm);
void mm_pin_all(void);

static inline void load_cr3(pgd_t *pgd)
{
	asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
		     "memory");
}
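
/*
 * Xen version of switch_mm(): instead of writing cr3 and the LDT
 * directly, the new kernel base pointer, the new user base pointer and
 * (when it changed) the new LDT are queued as mmuext_op entries and
 * issued to the hypervisor in a single HYPERVISOR_mmuext_op() call.
 */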
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();
	struct mmuext_op _op[3], *op = _op;

	if (likely(prev != next)) {
		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
		       !next->context.pinned);

		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
		write_pda(mmu_state, TLBSTATE_OK);
		write_pda(active_mm, next);
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* load_cr3(next->pgd) */
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
		op++;

		/* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
		op->cmd = MMUEXT_NEW_USER_BASEPTR;
		op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
		op++;

		if (unlikely(next->context.ldt != prev->context.ldt)) {
			/* load_LDT_nolock(&next->context, cpu) */
			op->cmd = MMUEXT_SET_LDT;
			op->arg1.linear_addr = (unsigned long)next->context.ldt;
			op->arg2.nr_ents = next->context.size;
			op++;
		}

		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
	}
#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
	else {
		write_pda(mmu_state, TLBSTATE_OK);
		if (read_pda(active_mm) != next)
			out_of_line_bug();
		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload CR3
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
			xen_new_user_pt(__pa(__user_pgd(next->pgd)));
			load_LDT_nolock(&next->context, cpu);
		}
	}
#endif
}
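
/*
 * deactivate_mm(): load the null selector into %gs and %fs so neither
 * register is left pointing at segments of the mm being dropped.
 */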
#define deactivate_mm(tsk,mm)	do { \
	load_gs_index(0); \
	asm volatile("movl %0,%%fs"::"r"(0)); \
} while (0)
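
/*
 * activate_mm(): make sure the new mm's page tables are pinned with the
 * hypervisor before switch_mm() points the hardware at them.
 */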
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	if (!next->context.pinned)
		mm_pin(next);
	switch_mm(prev, next, NULL);
}

#endif