/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *	(c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/interrupt.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mach_apic.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apicdef.h>

#include <xen/evtchn.h>
/*
 * Smarter SMP flushing macros.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway).
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 *
 * More scalable flush, from Andi Kleen
 *
 * To avoid global state use 8 different call vectors.
 * Each CPU uses a specific vector to trigger flushes on other
 * CPUs. Depending on the received vector the target CPUs look into
 * the right per cpu variable for the flush data.
 *
 * With more than 8 CPUs they are hashed to the 8 available
 * vectors. The limited global vector space forces us to this right now.
 * In the future, when interrupts are split into per CPU domains, this could
 * be fixed at the cost of triggering multiple IPIs in some cases.
 */
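/*
 * Concretely (see flush_tlb_others() and smp_invalidate_interrupt() below):
 * the sending CPU picks the slot
 *
 *	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
 *
 * fills per_cpu(flush_state, sender) and raises vector
 * INVALIDATE_TLB_VECTOR_START + sender; the receiver recovers 'sender'
 * from the incoming vector number and reads the same per cpu slot.
 */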
union smp_flush_state {
        struct {
                cpumask_t flush_cpumask;
                struct mm_struct *flush_mm;
                unsigned long flush_va;
#define FLUSH_ALL	-1ULL
                spinlock_t tlbstate_lock;
        };
        char pad[SMP_CACHE_BYTES];
} ____cacheline_aligned;
/* State is put into the per CPU data section, but padded
   to a full cache line because other CPUs can access it and we don't
   want false sharing in the per cpu data segment. */
static DEFINE_PER_CPU(union smp_flush_state, flush_state);
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 */
static inline void leave_mm(unsigned long cpu)
{
        if (read_pda(mmu_state) == TLBSTATE_OK)
                BUG();
        cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
        load_cr3(swapper_pg_dir);
}
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop IPI delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush IPIs
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu mmu_state to TLBSTATE_OK
 *	Now smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush IPIs.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu active_mm is correct, cpu0 already handles
 *	flush IPIs.
 * 1b1) set cpu mmu_state to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush IPIs],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu mmu_state is local to each cpu, so there are no
 * write/read ordering problems.
 */
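/*
 * For illustration only, a simplified sketch of the switch_mm() side of
 * this ordering (helper names abbreviated, details such as LDT handling
 * omitted; the real code lives in the mmu_context header):
 *
 *	if (prev != next) {
 *		cpu_clear(cpu, prev->cpu_vm_mask);		// 1a1
 *		write_pda(mmu_state, TLBSTATE_OK);		// 1a2
 *		write_pda(active_mm, next);			// 1a3
 *		cpu_set(cpu, next->cpu_vm_mask);		// 1a4
 *		load_cr3(next->pgd);				// 1a5
 *	} else {
 *		write_pda(mmu_state, TLBSTATE_OK);		// 1b1
 *		if (!cpu_test_and_set(cpu, next->cpu_vm_mask))	// 1b2
 *			load_cr3(next->pgd);			// 1b3
 *	}
 */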
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 *
 * Interrupts are disabled.
 */
asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
{
        int cpu, sender;
        union smp_flush_state *f;

        cpu = smp_processor_id();
        /*
         * orig_rax contains the negated interrupt vector.
         * Use that to determine where the sender put the data.
         */
        sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
        f = &per_cpu(flush_state, sender);

        if (!cpu_isset(cpu, f->flush_cpumask))
                goto out;
                /*
                 * This was a BUG() but until someone can quote me the
                 * line from the intel manual that guarantees an IPI to
                 * multiple CPUs is retried _only_ on the erroring CPUs
                 * it's staying as a return.
                 */

        if (f->flush_mm == read_pda(active_mm)) {
                if (read_pda(mmu_state) == TLBSTATE_OK) {
                        if (f->flush_va == FLUSH_ALL)
                                local_flush_tlb();
                        else
                                __flush_tlb_one(f->flush_va);
                } else
                        leave_mm(cpu);
        }
out:
        cpu_clear(cpu, f->flush_cpumask);
        add_pda(irq_tlb_count, 1);
}
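/*
 * Ask the CPUs in 'cpumask' to flush 'va' (or everything, for FLUSH_ALL)
 * for 'mm'.  The sender owns one of the NUM_INVALIDATE_TLB_VECTORS
 * flush_state slots for the duration and spins until every target has
 * cleared its bit from flush_cpumask.
 */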
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
                             unsigned long va)
{
        int sender;
        union smp_flush_state *f;

        /* Caller has disabled preemption */
        sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
        f = &per_cpu(flush_state, sender);

        /* Could avoid this lock when
           num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
           probably not worth checking this for a cache-hot lock. */
        spin_lock(&f->tlbstate_lock);

        f->flush_mm = mm;
        f->flush_va = va;
        cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);

        /* We have to send the IPI only to CPUs affected. */
        send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);

        while (!cpus_empty(f->flush_cpumask))
                cpu_relax();

        f->flush_mm = NULL;
        f->flush_va = 0;
        spin_unlock(&f->tlbstate_lock);
}
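/*
 * Initialise the per-vector flush_state locks for every possible CPU;
 * runs once at boot via core_initcall().
 */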
int __cpuinit init_smp_flush(void)
{
        int i;
        for_each_cpu_mask(i, cpu_possible_map) {
                spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
        }
        return 0;
}
core_initcall(init_smp_flush);
void flush_tlb_current_task(void)
{
        struct mm_struct *mm = current->mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        local_flush_tlb();
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_current_task);
void flush_tlb_mm(struct mm_struct *mm)
{
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        local_flush_tlb();
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        cpumask_t cpu_mask;

        preempt_disable();
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(smp_processor_id(), cpu_mask);

        if (current->active_mm == mm) {
                if (current->mm)
                        __flush_tlb_one(va);
                else
                        leave_mm(smp_processor_id());
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, va);
        preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
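/*
 * do_flush_tlb_all() runs on every CPU via on_each_cpu(): flush the local
 * TLB completely and drop out of lazy TLB mode if this CPU was in it.
 */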
static void do_flush_tlb_all(void *info)
{
        unsigned long cpu = smp_processor_id();

        __flush_tlb_all();
        if (read_pda(mmu_state) == TLBSTATE_LAZY)
                leave_mm(cpu);
}

void flush_tlb_all(void)
{
        on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
        send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
};

static struct call_data_struct *call_data;
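/*
 * call_lock serialises writers of call_data.  The helpers below take it
 * with interrupts disabled; they are intended for the CPU bring-up path,
 * so that a CPU being onlined cannot race with an in-flight
 * smp_call_function().
 */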
void lock_ipi_call_lock(void)
{
        spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
        spin_unlock_irq(&call_lock);
}
/*
 * This function sends a 'generic call function' IPI to all other CPUs
 * in the system defined by the mask.
 */
static int __smp_call_function_mask(cpumask_t mask,
                                    void (*func)(void *), void *info,
                                    int wait)
{
        struct call_data_struct data;
        cpumask_t allbutself;
        int cpus;

        allbutself = cpu_online_map;
        cpu_clear(smp_processor_id(), allbutself);
        cpus_and(mask, mask, allbutself);
        cpus = cpus_weight(mask);
        if (!cpus)
                return 0;

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        wmb();

        /* Send a message to other CPUs */
        if (cpus_equal(mask, allbutself))
                send_IPI_allbutself(CALL_FUNCTION_VECTOR);
        else
                send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                cpu_relax();
        if (!wait)
                return 0;
        while (atomic_read(&data.finished) != cpus)
                cpu_relax();
        return 0;
}
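/*
 * Handshake used above: the caller publishes its on-stack call_data_struct
 * through call_data, and the wmb() makes the structure visible before the
 * IPI is sent.  Each target increments 'started' once it has copied the
 * data, and 'finished' (when wait is set) once the function has returned;
 * the two spin loops above wait for those counters to reach 'cpus'.
 */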
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask,
                           void (*func)(void *), void *info,
                           int wait)
{
        int ret;

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        spin_lock(&call_lock);
        ret = __smp_call_function_mask(mask, func, info, wait);
        spin_unlock(&call_lock);
        return ret;
}
EXPORT_SYMBOL(smp_call_function_mask);
/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The target CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: Currently unused.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute @func,
 * is executing it, or has already executed it.
 */
int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
                             int nonatomic, int wait)
{
        /* Prevent preemption and rescheduling onto another processor */
        int ret, me = get_cpu();

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        if (cpu == me) {
                local_irq_disable();
                func(info);
                local_irq_enable();
                put_cpu();
                return 0;
        }

        ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
        put_cpu();
        return ret;
}
EXPORT_SYMBOL(smp_call_function_single);
/*
 * smp_call_function - run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other
 *        CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * the remote CPUs are nearly ready to execute @func, are executing it, or
 * have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 * Actually there are a few legal cases, like panic.
 */
int smp_call_function(void (*func)(void *info), void *info, int nonatomic,
                      int wait)
{
        return smp_call_function_mask(cpu_online_map, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function);
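/*
 * Illustrative usage sketch (the callback name is hypothetical, not part of
 * this file).  The callback runs in interrupt context on every other online
 * CPU and must be fast and non-blocking:
 *
 *	static void invalidate_caches(void *info)
 *	{
 *		wbinvd();
 *	}
 *
 *	smp_call_function(invalidate_caches, NULL, 0, 1);	(wait == 1)
 */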
static void stop_this_cpu(void *dummy)
{
        local_irq_disable();
        /* Remove this CPU from the online map */
        cpu_clear(smp_processor_id(), cpu_online_map);
        disable_all_local_evtchn();
        for (;;)
                halt();
}
void smp_send_stop(void)
{
        int nolock;
        unsigned long flags;

        /* Don't deadlock on the call lock in panic */
        nolock = !spin_trylock(&call_lock);
        local_irq_save(flags);
        __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
        if (!nolock)
                spin_unlock(&call_lock);
        disable_all_local_evtchn();
        local_irq_restore(flags);
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
#ifndef CONFIG_XEN
asmlinkage void smp_reschedule_interrupt(void)
#else
asmlinkage irqreturn_t smp_reschedule_interrupt(int irq, void *ctx)
#endif
{
        add_pda(irq_resched_count, 1);
#ifdef CONFIG_XEN
        return IRQ_HANDLED;
#endif
}
#ifndef CONFIG_XEN
asmlinkage void smp_call_function_interrupt(void)
#else
asmlinkage irqreturn_t smp_call_function_interrupt(int irq, void *ctx)
#endif
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);
        /* At this point the info structure may be out of scope unless wait==1 */
        irq_enter();
        (*func)(info);
        add_pda(irq_call_count, 1);
        irq_exit();
        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
#ifdef CONFIG_XEN
        return IRQ_HANDLED;
#endif
}