/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *              Modified RID allocation for SMP
 *          Goutham Rao <goutham.rao@intel.com>
 *              IPI based ptc implementation and A-step IPI implementation.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>

static struct {
        unsigned long mask;     /* mask of supported purge page-sizes */
        unsigned long max_bits; /* log2() of largest supported purge page-size */
} purge;

struct ia64_ctx ia64_ctx = {
        .lock =         SPIN_LOCK_UNLOCKED,
        .next =         1,
        .limit =        (1 << 15) - 1,          /* start out with the safe (architected) limit */
        .max_ctx =      ~0U
};

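/*
 * Per-CPU "flush needed" flag: wrap_mmu_context() sets it on every other
 * online CPU instead of issuing a cross-CPU flush directly; each flagged
 * CPU is expected to run local_flush_tlb_all() before reusing a context.
 */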
DEFINE_PER_CPU(u8, ia64_need_tlb_flush);

/*
 * Wrap the mm-context allocator: restart ia64_ctx.next (skipping the low
 * numbers reserved for daemons), rescan all live mm contexts to find the
 * next free range, and flag every other CPU for a delayed TLB flush.
 *
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
        unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
        struct task_struct *tsk;
        int i;

        if (ia64_ctx.next > max_ctx)
                ia64_ctx.next = 300;    /* skip daemons */
        ia64_ctx.limit = max_ctx + 1;

        /*
         * Scan all tasks' mm->context and set the proper safe range
         */

        read_lock(&tasklist_lock);
  repeat:
        for_each_process(tsk) {
                if (!tsk->mm)
                        continue;
                tsk_context = tsk->mm->context;
                if (tsk_context == ia64_ctx.next) {
                        if (++ia64_ctx.next >= ia64_ctx.limit) {
                                /* empty range: reset the range limit and start over */
                                if (ia64_ctx.next > max_ctx)
                                        ia64_ctx.next = 300;
                                ia64_ctx.limit = max_ctx + 1;
                                goto repeat;
                        }
                }
                if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
                        ia64_ctx.limit = tsk_context;
        }
        read_unlock(&tasklist_lock);
        /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
        {
                int cpu = get_cpu(); /* prevent preemption/migration */
                for (i = 0; i < NR_CPUS; ++i)
                        if (cpu_online(i) && (i != cpu))
                                per_cpu(ia64_need_tlb_flush, i) = 1;
                put_cpu();
        }
        local_flush_tlb_all();
}

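/*
 * Purge the virtual address range [start, end) from every CPU's TLB (and
 * ALAT), using ptc.ga in steps of 2^nbits bytes.  The hardware allows
 * only one ptc.ga in flight at a time, hence the global ptcg_lock.
 */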
void
ia64_global_tlb_purge (unsigned long start, unsigned long end, unsigned long nbits)
{
        static DEFINE_SPINLOCK(ptcg_lock);

        /* HW requires global serialization of ptc.ga.  */
        spin_lock(&ptcg_lock);
        {
                do {
                        /*
                         * Flush ALAT entries also.
                         */
                        ia64_ptcga(start, (nbits<<2));
                        ia64_srlz_i();
                        start += (1UL << nbits);
                } while (start < end);
        }
        spin_unlock(&ptcg_lock);
}

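/*
 * Flush the entire TLB of the local CPU with ptc.e, walking the
 * base/count/stride parameters that PAL reported at ia64_tlb_init() time.
 */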
void
local_flush_tlb_all (void)
{
        unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

        addr    = local_cpu_data->ptce_base;
        count0  = local_cpu_data->ptce_count[0];
        count1  = local_cpu_data->ptce_count[1];
        stride0 = local_cpu_data->ptce_stride[0];
        stride1 = local_cpu_data->ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}

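/*
 * Flush the TLB entries covering [start, end) of vma's address space.
 * The range is rounded to the nearest supported purge page-size and then
 * purged either globally (SMP) or with local ptc.l instructions (UP).
 */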
void
flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long size = end - start;
        unsigned long nbits;

        if (mm != current->active_mm) {
                /* this does happen, but perhaps it's not worth optimizing for? */
#ifdef CONFIG_SMP
                flush_tlb_all();
#else
                mm->context = 0;
#endif
                return;
        }

        nbits = ia64_fls(size + 0xfff);
        while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
                ++nbits;
        if (nbits > purge.max_bits)
                nbits = purge.max_bits;
        start &= ~((1UL << nbits) - 1);

# ifdef CONFIG_SMP
        platform_global_tlb_purge(start, end, nbits);
# else
        do {
                ia64_ptcl(start, (nbits<<2));
                start += (1UL << nbits);
        } while (start < end);
# endif

        ia64_srlz_i();                  /* srlz.i implies srlz.d */
}
EXPORT_SYMBOL(flush_tlb_range);

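/*
 * Per-CPU TLB initialization: query PAL for the supported purge
 * page-sizes and the ptc.e loop parameters, then flush whatever the
 * boot path left in the local TLB.
 */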
void __devinit
ia64_tlb_init (void)
{
        ia64_ptce_info_t ptce_info;
        unsigned long tr_pgbits;
        long status;

        if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
                printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
                       "defaulting to architected purge page-sizes.\n", status);
                purge.mask = 0x115557000UL;
        }
        purge.max_bits = ia64_fls(purge.mask);

        ia64_get_ptce(&ptce_info);
        local_cpu_data->ptce_base = ptce_info.base;
        local_cpu_data->ptce_count[0] = ptce_info.count[0];
        local_cpu_data->ptce_count[1] = ptce_info.count[1];
        local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
        local_cpu_data->ptce_stride[1] = ptce_info.stride[1];

        local_flush_tlb_all();          /* nuke leftovers from bootstrapping... */
}