smp: introduce a generic on_each_cpu_mask() function
arch/arm/kernel/smp_tlb.c
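In the code below, flush_tlb_mm(), flush_tlb_page() and flush_tlb_range()
send their flush IPI only to the CPUs in the mm's cpumask via
on_each_cpu_mask(), while flush_tlb_all() and the kernel-address variants
broadcast to all online CPUs with on_each_cpu(). For reference, the
interface assumed by those calls is along these lines (a sketch of the
declaration only, not the generic implementation the patch adds to the
core kernel):

/*
 * Run @func with @info on every CPU set in @mask; if @wait is set,
 * return only after @func has completed on all of them.
 */
void on_each_cpu_mask(const struct cpumask *mask,
                      void (*func)(void *info), void *info, bool wait);
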
/*
 *  linux/arch/arm/kernel/smp_tlb.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
        struct vm_area_struct *ta_vma;
        unsigned long ta_start;
        unsigned long ta_end;
};

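/*
 * The ipi_* helpers below are the cross-call callbacks: each one runs on
 * a CPU that receives the IPI and performs the corresponding local TLB
 * flush there.  struct tlb_args above bundles the arguments, because the
 * callbacks only take a single void * cookie.
 */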
static inline void ipi_flush_tlb_all(void *ignored)
{
        local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
        struct mm_struct *mm = (struct mm_struct *)arg;

        local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
        struct tlb_args *ta = (struct tlb_args *)arg;

        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

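/*
 * tlb_ops_need_broadcast() is true on SMP cores whose TLB maintenance
 * operations are not broadcast to the other cores in hardware; in that
 * case the flush is relayed by IPI.  For the user-space flushes the IPI
 * is restricted to the CPUs in the mm's cpumask via on_each_cpu_mask().
 */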
void flush_tlb_all(void)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu(ipi_flush_tlb_all, NULL, 1);
        else
                local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
        if (tlb_ops_need_broadcast())
                on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
        else
                local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = uaddr;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
                                        &ta, 1);
        } else
                local_flush_tlb_page(vma, uaddr);
}

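/*
 * Kernel mappings are visible to every CPU, so the kernel-address
 * variants broadcast to all online CPUs with on_each_cpu().
 */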
void flush_tlb_kernel_page(unsigned long kaddr)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = kaddr;
                on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
        } else
                local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_vma = vma;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
                                        &ta, 1);
        } else
                local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        if (tlb_ops_need_broadcast()) {
                struct tlb_args ta;
                ta.ta_start = start;
                ta.ta_end = end;
                on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
        } else
                local_flush_tlb_kernel_range(start, end);
}