Update to 3.4-final.
[linux-flexiantxendom0-3.2.10.git] / arch/x86/include/mach-xen/asm/tlbflush.h

#ifndef _ASM_X86_TLBFLUSH_H
#define _ASM_X86_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>

#include <asm/processor.h>
#include <asm/special_insns.h>

#define __flush_tlb() xen_tlb_flush()
#define __flush_tlb_global() xen_tlb_flush()
#define __flush_tlb_single(addr) xen_invlpg(addr)
#define __flush_tlb_all() xen_tlb_flush()
#define __flush_tlb_one(addr) xen_invlpg(addr)

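/*
 * For reference: under Xen the primitives behind the macros above are
 * not INVLPG instructions or CR3 writes but mmuext_op hypercalls. A
 * minimal sketch, assuming the usual Xen mmuext_op interface (the real
 * definitions live elsewhere in this patch, in the hypervisor headers):
 *
 *	static inline void xen_tlb_flush(void)
 *	{
 *		struct mmuext_op op;
 *		op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
 *		BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 *	}
 *
 *	static inline void xen_invlpg(unsigned long addr)
 *	{
 *		struct mmuext_op op;
 *		op.cmd = MMUEXT_INVLPG_LOCAL;
 *		op.arg1.linear_addr = addr & PAGE_MASK;
 *		BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 *	}
 */
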
#ifdef CONFIG_X86_32
# define TLB_FLUSH_ALL	0xffffffff
#else
# define TLB_FLUSH_ALL	-1ULL
#endif

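/*
 * TLB_FLUSH_ALL is the sentinel "address" the flush paths pass around
 * when a whole address space, rather than a single page, has to go. A
 * hypothetical consumer would dispatch on it like this:
 *
 *	if (va == TLB_FLUSH_ALL)
 *		__flush_tlb();
 *	else
 *		__flush_tlb_single(va);
 */
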
/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *
 * ... but the i386 has somewhat limited TLB flushing capabilities,
 * and page-granular flushes are available only on i486 and up.
 *
 * x86-64 can only flush individual pages or full VMs. For a range flush
 * we always do the full VM. It might be worth checking whether a few
 * INVLPGs in a row are a win for small ranges.
 */

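/*
 * The canonical usage pattern (a hypothetical caller, not part of this
 * header): update the page tables first, then flush the stale entry:
 *
 *	set_pte_at(mm, addr, ptep, pte);
 *	flush_tlb_page(vma, addr);
 */
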
#ifndef CONFIG_SMP

#define flush_tlb() __flush_tlb()
#define flush_tlb_all() __flush_tlb_all()
#define local_flush_tlb() __flush_tlb()

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (mm == current->active_mm)
		__flush_tlb();
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb_one(addr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (vma->vm_mm == current->active_mm)
		__flush_tlb();
}

/* No lazy TLB state is tracked on UP, so there is nothing to reset. */
static inline void reset_lazy_tlbstate(void)
{
}

#else  /* SMP */

#include <asm/smp.h>

#define local_flush_tlb() __flush_tlb()

#define flush_tlb_all xen_tlb_flush_all
#define flush_tlb_current_task() xen_tlb_flush_mask(mm_cpumask(current->mm))
#define flush_tlb_mm(mm) xen_tlb_flush_mask(mm_cpumask(mm))
#define flush_tlb_page(vma, va) xen_invlpg_mask(mm_cpumask((vma)->vm_mm), va)

#define flush_tlb()	flush_tlb_current_task()

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}

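/*
 * Note that, unlike native SMP, none of the cross-CPU flushes above
 * send IPIs: the xen_*_mask() helpers hand the whole CPU mask to the
 * hypervisor in a single hypercall. A minimal sketch, assuming the
 * standard mmuext_op interface (the real helpers are defined elsewhere
 * in this patch and translate the cpumask to a vCPU bitmap first):
 *
 *	static void xen_tlb_flush_mask(const struct cpumask *mask)
 *	{
 *		struct mmuext_op op;
 *		op.cmd = MMUEXT_TLB_FLUSH_MULTI;
 *		set_xen_guest_handle(op.arg2.vcpumask, cpumask_bits(mask));
 *		BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 *	}
 */
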
#ifndef CONFIG_XEN
#define TLBSTATE_OK	1
#define TLBSTATE_LAZY	2

struct tlb_state {
	struct mm_struct *active_mm;
	int state;
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);

/* Drop this CPU's lazy TLB state and point it back at init_mm. */
static inline void reset_lazy_tlbstate(void)
{
	percpu_write(cpu_tlbstate.state, 0);
	percpu_write(cpu_tlbstate.active_mm, &init_mm);
}
#endif	/* !CONFIG_XEN */

#endif	/* SMP */

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	/* There is no ranged kernel-flush primitive; flush everything. */
	flush_tlb_all();
}

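/*
 * Example (hypothetical caller): tearing down a kernel mapping and then
 * flushing the whole range in one shot:
 *
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */
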
#endif /* _ASM_X86_TLBFLUSH_H */