/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <mach_apic.h>
#include <xen/evtchn.h>
/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *	None of the E1AP-E3AP errata are visible to the user.
 *
 * Pentium II / [Xeon]
 *	None of the A1AP-A3AP errata are visible to the user.
 *
 * Pentium Pro
 *	None of 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deassert IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 * Pentium
 *	There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *	B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me these bugs are either ___RARE___
 * or are signal timing bugs worked around in hardware, and there is
 * nothing of note from the C stepping onwards.
 */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };

/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
static inline int __prepare_ICR(unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	icr |= APIC_DM_FIXED | vector;
	return icr;
}

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
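
/*
 * Illustrative sketch only: on native (non-Xen) hardware the values built
 * by __prepare_ICR()/__prepare_ICR2() would be written to the local APIC
 * ICR to fire the IPI, roughly as the native i386 smp.c does.  In this Xen
 * build, IPIs are delivered through event channels instead, so the sketch
 * below is never called from this file; the helper name is made up.
 */
#if 0	/* native-hardware sketch, not used under Xen */
static void __send_IPI_shortcut_native_sketch(unsigned int shortcut, int vector)
{
	unsigned int cfg;

	/* Wait for the local APIC to finish accepting any previous IPI. */
	apic_wait_icr_idle();

	/* Shortcut destinations do not need the target field (ICR2). */
	cfg = __prepare_ICR(shortcut, vector);

	/* The write to APIC_ICR sends the IPI. */
	apic_write_around(APIC_ICR, cfg);
}
#endif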
DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);

static inline void __send_IPI_one(unsigned int cpu, int vector)
{
	int irq = per_cpu(ipi_to_irq, cpu)[vector];
	notify_remote_via_irq(irq);
}

void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	int cpu;

	switch (shortcut) {
	case APIC_DEST_SELF:
		__send_IPI_one(smp_processor_id(), vector);
		break;
	case APIC_DEST_ALLBUT:
		for (cpu = 0; cpu < NR_CPUS; ++cpu) {
			if (cpu == smp_processor_id())
				continue;
			if (cpu_isset(cpu, cpu_online_map))
				__send_IPI_one(cpu, vector);
		}
		break;
	default:
		printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
		       vector);
		break;
	}
}
void fastcall send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}

/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t mask, int vector)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);
	WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		if (cpu_isset(cpu, mask))
			__send_IPI_one(cpu, vector);
	}
	local_irq_restore(flags);
}

void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
	send_IPI_mask_bitmask(mask, vector);
}

#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
/*
 * Smarter SMP flushing macros.
 *		c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us.
 */
void leave_mm(unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
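
/*
 * Illustrative sketch only: the counterpart that puts a CPU *into* lazy
 * TLB mode is enter_lazy_tlb() in asm/mmu_context.h, not in this file.
 * Roughly, it only marks the per-cpu state so that a later flush IPI can
 * detach the CPU via leave_mm() above; the name below is just a label for
 * the sketch.
 */
#if 0	/* sketch of enter_lazy_tlb(), shown for context */
static inline void enter_lazy_tlb_sketch(struct mm_struct *mm,
					 struct task_struct *tsk)
{
	unsigned cpu = smp_processor_id();

	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
}
#endif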
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %%esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems.
 */
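
/*
 * Illustrative sketch only (the real code is switch_mm() in
 * asm/mmu_context.h, not in this file): the 1b) path above, where a CPU
 * in lazy TLB mode re-attaches to the same mm, looks roughly like this.
 * The function name is just a label for the sketch.
 */
#if 0	/* sketch of the switch_mm() counterpart described above */
static inline void switch_mm_same_mm_sketch(struct mm_struct *next, int cpu)
{
	/* 1b1) no longer lazy: accept flush IPIs for this mm again. */
	per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;

	/* 1b2) atomically set and test our bit in cpu_vm_mask. */
	if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
		/*
		 * 1b3) the bit was clear, so leave_mm() ran while we were
		 * lazy and we may have missed a flush IPI: reload %cr3.
		 */
		load_cr3(next->pgd);
	}
}
#endif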
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id)
{
	unsigned long cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return.
		 */

	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
	__get_cpu_var(irq_stat).irq_tlb_count++;

	return IRQ_HANDLED;
}
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t cpumask = *cpumaskp;

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));

#ifdef CONFIG_HOTPLUG_CPU
	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (unlikely(cpus_empty(cpumask)))
		return;
#endif

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * AK: x86-64 has a faster method that could be ported.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		cpu_relax();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);
	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
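
/*
 * Usage sketch (hypothetical caller, for illustration only): generic VM
 * code invalidates a single user address after a PTE update, or the whole
 * address space after larger changes.  The helper name and the use of
 * set_pte_at() below are assumptions made for the example.
 */
#if 0	/* illustrative example, not called from this file */
static void example_update_one_pte(struct vm_area_struct *vma,
				   unsigned long address,
				   pte_t *ptep, pte_t entry)
{
	set_pte_at(vma->vm_mm, address, ptep, entry);	/* change the PTE */
	flush_tlb_page(vma, address);	/* flush that address on all CPUs */
}
#endif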
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void xen_smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
}
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
xen_smp_call_function_mask(cpumask_t mask,
			   void (*func)(void *), void *info,
			   int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
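
/*
 * Usage sketch (hypothetical caller, for illustration only): run a fast,
 * non-blocking function on every online CPU except ourselves and wait for
 * it to finish everywhere.  The helper names below are made up for the
 * example.
 */
#if 0	/* illustrative example, not called from this file */
static void example_poke_cpu(void *info)
{
	/* Runs from the call-function IPI handler: must not sleep. */
	(void)info;
}

static void example_call_all_others(void)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);	/* must not include ourselves */
	if (!cpus_empty(mask))
		xen_smp_call_function_mask(mask, example_poke_cpu,
					   NULL, 1 /* wait */);
}
#endif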
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU: */
	cpu_clear(smp_processor_id(), cpu_online_map);
	local_irq_disable();
	disable_all_local_evtchn();
	if (cpu_data(smp_processor_id()).hlt_works_ok)
		for (;;) halt();
	for (;;);
}
/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
void xen_smp_send_stop(void)
{
	/* Don't deadlock on the call lock in panic */
	int nolock = !spin_trylock(&call_lock);
	unsigned long flags;

	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_all_local_evtchn();
	local_irq_restore(flags);
}
/*
 * Reschedule callback. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
{
	__get_cpu_var(irq_stat).irq_resched_count++;
	return IRQ_HANDLED;
}
#include <linux/kallsyms.h>
irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function.
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1.
	 */
	(*func)(info);
	__get_cpu_var(irq_stat).irq_call_count++;
	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
	return IRQ_HANDLED;
}