/* include/asm-x86/mach-xen/asm/mmu_context_32.h */
#ifndef __I386_SCHED_H
#define __I386_SCHED_H

#include <asm/desc.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

void arch_exit_mmap(struct mm_struct *mm);
void arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm);

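/*
 * Xen page-table pinning: before a page directory may be installed as a
 * base pointer, the hypervisor must validate ("pin") it, after which the
 * guest may only map it read-only.  mm_pin()/mm_unpin() perform that
 * handshake for a single mm; mm_pin_all() pins every mm in the system
 * (e.g. around save/restore).
 */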
void mm_pin(struct mm_struct *mm);
void mm_unpin(struct mm_struct *mm);
void mm_pin_all(void);

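/*
 * Pin the incoming mm's page directory on first use: PG_pinned on the
 * pgd's struct page records whether the hypervisor already knows it.
 */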
static inline void xen_activate_mm(struct mm_struct *prev,
				   struct mm_struct *next)
{
	if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
		mm_pin(next);
}

/*
 * Used for LDT copy/destruction.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);

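/*
 * Native i386 flips this CPU into lazy-TLB mode here; that state machine
 * is compiled out in this Xen tree (see the "XEN: no lazy tlb" blocks),
 * so the hook is an empty stub.
 */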
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
#if 0 /* XEN: no lazy tlb */
	unsigned cpu = smp_processor_id();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

#define prepare_arch_switch(next)	__prepare_arch_switch()

static inline void __prepare_arch_switch(void)
{
	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  %gs is then
	 * loaded with the null selector so that no stale, possibly
	 * LDT-based selector stays live across the hypervisor's context
	 * switch.
	 */
	asm volatile ( "mov %%gs,%0"
		: "=m" (current->thread.gs));
	asm volatile ( "movl %0,%%gs"
		: : "r" (0) );
}

void leave_mm(unsigned long cpu);

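/*
 * Switch address spaces.  The native sequence
 *
 *	load_cr3(next->pgd);
 *	load_LDT_nolock(&next->context);
 *
 * becomes a single batched HYPERVISOR_mmuext_op() call: MMUEXT_NEW_BASEPTR
 * reloads the base pointer, and MMUEXT_SET_LDT is appended only when the
 * LDT actually differs, so the common case costs one trap into the
 * hypervisor instead of two.
 */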
static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
	int cpu = smp_processor_id();
	struct mmuext_op _op[2], *op = _op;

	if (likely(prev != next)) {
		BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
		       !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));

		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#if 0 /* XEN: no lazy tlb */
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables: load_cr3(next->pgd) */
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
		op++;

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt)) {
			/* load_LDT_nolock(&next->context, cpu) */
			op->cmd = MMUEXT_SET_LDT;
			op->arg1.linear_addr = (unsigned long)next->context.ldt;
			op->arg2.nr_ents     = next->context.size;
			op++;
		}

		BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
	}
#if 0 /* XEN: no lazy tlb */
	else {
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/*
			 * We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

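/*
 * Called when an mm is dropped (e.g. on exec).  Clearing %gs here is
 * presumably to ensure no stale selector into the old mm's LDT remains
 * loaded once that LDT is torn down.
 */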
#define deactivate_mm(tsk, mm)			\
	asm("movl %0,%%gs": :"r" (0));

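/*
 * Exec-time counterpart of switch_mm(): pin the new page directory first
 * (switch_mm() BUGs on an unpinned pgd unless the hypervisor provides
 * writable page tables), then perform the actual switch.
 */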
#define activate_mm(prev, next)				\
	do {						\
		xen_activate_mm(prev, next);		\
		switch_mm((prev), (next), NULL);	\
	} while (0)

#endif /* __I386_SCHED_H */