/*
 * TLB support routines.
 *
 * Copyright (C) 1998-2000 Hewlett-Packard Co
 * Copyright (C) 1998-2000 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 08/02/00 A. Mallick <asit.k.mallick@intel.com>
 *		Modified RID allocation for SMP
 *	    Goutham Rao <goutham.rao@intel.com>
 *		IPI based ptc implementation and A-step IPI implementation.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/delay.h>

#define SUPPORTED_PGBITS (			\
		1 << _PAGE_SIZE_256M |		\
		1 << _PAGE_SIZE_64M  |		\
		1 << _PAGE_SIZE_16M  |		\
		1 << _PAGE_SIZE_4M   |		\
		1 << _PAGE_SIZE_1M   |		\
		1 << _PAGE_SIZE_256K |		\
		1 << _PAGE_SIZE_64K  |		\
		1 << _PAGE_SIZE_16K  |		\
		1 << _PAGE_SIZE_8K   |		\
		1 << _PAGE_SIZE_4K)
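
/*
 * flush_tlb_range() below tests a candidate purge size against this mask
 * with ((1UL << nbits) & SUPPORTED_PGBITS).  For example, nbits ==
 * _PAGE_SIZE_16K (14) hits the mask and a 16KB purge is used as-is,
 * while nbits == 15 (32KB, a size the architecture does not implement)
 * misses it and gets bumped up to the next implemented size (64KB).
 */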

struct ia64_ctx ia64_ctx = {
	lock:	 SPIN_LOCK_UNLOCKED,
	next:	 1,
	limit:	 (1 << 15) - 1,		/* start out with the safe (architected) limit */
	max_ctx: ~0U
};

/*
 * Serialize usage of ptc.g
 */
spinlock_t ptcg_lock = SPIN_LOCK_UNLOCKED; /* see <asm/pgtable.h> */
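
/*
 * The architecture permits at most one ptc.g to be in progress anywhere
 * in the system at a time, hence a single global lock.  The same lock
 * also protects the flush_* handshake variables used by the IPI-based
 * implementation below.
 */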

#if defined(CONFIG_SMP) && !defined(CONFIG_ITANIUM_PTCG)

#include <linux/irq.h>

unsigned long flush_end, flush_start, flush_nbits, flush_rid;
atomic_t flush_cpu_count;

/*
 * flush_tlb_no_ptcg() acquires ptcg_lock and returns with it still
 * held; the caller (flush_tlb_range) drops the lock.
 */
static inline void
flush_tlb_no_ptcg (unsigned long start, unsigned long end, unsigned long nbits)
{
	extern void smp_send_flush_tlb (void);
	unsigned long saved_tpr = 0;
	unsigned long flags;

	/*
	 * Sometimes this is called with interrupts disabled, which can
	 * deadlock; to avoid this we enable interrupts and raise the TPR
	 * so that ONLY the IPI stays enabled.
	 */
	__save_flags(flags);
	if (!(flags & IA64_PSR_I)) {
		saved_tpr = ia64_get_tpr();
		ia64_srlz_d();
		ia64_set_tpr(IPI_IRQ - 16);
		ia64_srlz_d();
		local_irq_enable();
	}

	spin_lock(&ptcg_lock);
	flush_rid = ia64_get_rr(start);
	ia64_srlz_d();
	flush_start = start;
	flush_end = end;
	flush_nbits = nbits;
	atomic_set(&flush_cpu_count, smp_num_cpus - 1);
	smp_send_flush_tlb();
	/*
	 * Purge local TLB entries. ALAT invalidation is done in ia64_leave_kernel.
	 */
	do {
		asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
		start += (1UL << nbits);
	} while (start < end);

	ia64_srlz_i();			/* srlz.i implies srlz.d */

	/*
	 * Wait for other CPUs to finish purging entries.
	 */
#if (defined(CONFIG_ITANIUM_ASTEP_SPECIFIC) || defined(CONFIG_ITANIUM_BSTEP_SPECIFIC))
	{
		unsigned long start = ia64_get_itc();
		while (atomic_read(&flush_cpu_count) > 0) {
			/* presumably an IPI got lost on A/B-step parts: resend after ~40000 cycles */
			if ((ia64_get_itc() - start) > 40000UL) {
				atomic_set(&flush_cpu_count, smp_num_cpus - 1);
				smp_send_flush_tlb();
				start = ia64_get_itc();
			}
		}
	}
#else
	while (atomic_read(&flush_cpu_count)) {
		/* nothing to do: spin until every other CPU has checked in */
	}
#endif
	if (!(flags & IA64_PSR_I)) {
		local_irq_disable();
		ia64_set_tpr(saved_tpr);
		ia64_srlz_d();
	}
}

#endif /* CONFIG_SMP && !CONFIG_ITANIUM_PTCG */
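
/*
 * For reference, a minimal sketch of what the receiving side of the IPI
 * protocol above has to do (the real handler lives with the other SMP
 * code, not in this file; any name below other than the flush_* globals
 * and flush_cpu_count is illustrative only):
 *
 *	static void
 *	handle_flush_tlb_ipi (void)	// hypothetical handler name
 *	{
 *		unsigned long addr = flush_start;
 *
 *		do {
 *			// purge the same 2^flush_nbits-byte regions locally
 *			asm volatile ("ptc.l %0,%1" ::
 *				      "r"(addr), "r"(flush_nbits<<2) : "memory");
 *			addr += (1UL << flush_nbits);
 *		} while (addr < flush_end);
 *		ia64_srlz_i();
 *		// tell the initiator this CPU is done
 *		atomic_dec(&flush_cpu_count);
 *	}
 */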

/*
 * Acquire the ia64_ctx.lock before calling this function!
 */
void
wrap_mmu_context (struct mm_struct *mm)
{
	unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
	struct task_struct *tsk;

	if (ia64_ctx.next > max_ctx)
		ia64_ctx.next = 300;	/* skip daemons */
	ia64_ctx.limit = max_ctx + 1;

	/*
	 * Scan every task's mm->context to find a safe range
	 * [ia64_ctx.next, ia64_ctx.limit) of unused context numbers.
	 */
	read_lock(&tasklist_lock);
  repeat:
	for_each_task(tsk) {
		if (!tsk->mm)
			continue;
		tsk_context = tsk->mm->context;
		if (tsk_context == ia64_ctx.next) {
			if (++ia64_ctx.next >= ia64_ctx.limit) {
				/* empty range: reset the range limit and start over */
				if (ia64_ctx.next > max_ctx)
					ia64_ctx.next = 300;
				ia64_ctx.limit = max_ctx + 1;
				goto repeat;
			}
		}
		if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
			ia64_ctx.limit = tsk_context;
	}
	read_unlock(&tasklist_lock);
	flush_tlb_all();
}
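
/*
 * Worked example (illustrative numbers): with max_ctx == 32767 and two
 * tasks holding contexts {300, 302}, the wrap resets ia64_ctx.next to
 * 300 and ia64_ctx.limit to 32768.  The scan then bumps next past the
 * in-use value 300 to 301 and clips limit down to 302, leaving exactly
 * the free number 301 to hand out before the next wrap.
 */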

void
__flush_tlb_all (void)
{
	unsigned long i, j, flags, count0, count1, stride0, stride1, addr;

	addr    = my_cpu_data.ptce_base;
	count0  = my_cpu_data.ptce_count[0];
	count1  = my_cpu_data.ptce_count[1];
	stride0 = my_cpu_data.ptce_stride[0];
	stride1 = my_cpu_data.ptce_stride[1];

	local_irq_save(flags);
	for (i = 0; i < count0; ++i) {
		for (j = 0; j < count1; ++j) {
			asm volatile ("ptc.e %0" :: "r"(addr));
			addr += stride1;
		}
		addr += stride0;
	}
	local_irq_restore(flags);
	ia64_insn_group_barrier();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
	ia64_insn_group_barrier();
}
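
/*
 * The loop above is the architected purge sequence for ptc.e: the
 * base/count/stride parameters come from PAL (PAL_PTCE_INFO, captured
 * in ia64_tlb_init() below).  Since stride0 is applied on top of the
 * inner loop's accumulated stride1 increments, iteration (i, j) visits
 * address base + i*stride0 + (i*count1 + j)*stride1.
 */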

void
flush_tlb_range (struct mm_struct *mm, unsigned long start, unsigned long end)
{
	unsigned long size = end - start;
	unsigned long nbits;

	if (mm != current->active_mm) {
		/* this does happen, but perhaps it's not worth optimizing for? */
#ifdef CONFIG_SMP
		flush_tlb_all();
#else
		mm->context = 0;
#endif
		return;
	}

	nbits = ia64_fls(size + 0xfff);
	if (((1UL << nbits) & SUPPORTED_PGBITS) == 0) {
		if (nbits > _PAGE_SIZE_256M)
			nbits = _PAGE_SIZE_256M;
		else
			/*
			 * Some page sizes are not implemented in the
			 * IA-64 architecture, so if we get asked to
			 * purge an unsupported page size, round up to
			 * the next implemented size.  Note that we
			 * depend on the fact that if page size N is not
			 * implemented, 2*N _is_ implemented.
			 */
			++nbits;
		if (((1UL << nbits) & SUPPORTED_PGBITS) == 0)
			panic("flush_tlb_range: BUG: nbits=%lu\n", nbits);
	}
	start &= ~((1UL << nbits) - 1);
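
	/*
	 * Example of the rounding above: a 40KB purge yields
	 * nbits == ia64_fls(40960 + 0xfff) == 15, i.e. an unimplemented
	 * 32KB page size; ++nbits rounds this up to 16, so the loop
	 * below purges in 64KB steps from the (64KB-aligned) start.
	 */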

#if defined(CONFIG_SMP) && !defined(CONFIG_ITANIUM_PTCG)
	flush_tlb_no_ptcg(start, end, nbits);
#else
	spin_lock(&ptcg_lock);
	do {
# ifdef CONFIG_SMP
		/*
		 * Flush ALAT entries also.
		 */
		asm volatile ("ptc.ga %0,%1;;srlz.i;;" :: "r"(start), "r"(nbits<<2) : "memory");
# else
		asm volatile ("ptc.l %0,%1" :: "r"(start), "r"(nbits<<2) : "memory");
# endif
		start += (1UL << nbits);
	} while (start < end);
#endif /* CONFIG_SMP && !defined(CONFIG_ITANIUM_PTCG) */
	spin_unlock(&ptcg_lock);	/* taken above, or inside flush_tlb_no_ptcg() */
	ia64_insn_group_barrier();
	ia64_srlz_i();			/* srlz.i implies srlz.d */
	ia64_insn_group_barrier();
}

void __init
ia64_tlb_init (void)
{
	ia64_ptce_info_t ptce_info;

	ia64_get_ptce(&ptce_info);
	my_cpu_data.ptce_base = ptce_info.base;
	my_cpu_data.ptce_count[0] = ptce_info.count[0];
	my_cpu_data.ptce_count[1] = ptce_info.count[1];
	my_cpu_data.ptce_stride[0] = ptce_info.stride[0];
	my_cpu_data.ptce_stride[1] = ptce_info.stride[1];

	__flush_tlb_all();		/* nuke left overs from bootstrapping... */
}
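
/*
 * ia64_tlb_init() is intended to run once on each CPU during early
 * bring-up (from the per-CPU setup path), so that every CPU has cached
 * its own PAL ptc.e parameters in my_cpu_data before its first flush.
 */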