/*
 *	x86 SMP booting functions
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	Much of the core SMP work is based on previous work by Thomas Radke, to
 *	whom a great many thanks are extended.
 *
 *	Thanks to Intel for making available several different Pentium,
 *	Pentium Pro and Pentium-II/Xeon MP machines.
 *	Original development of Linux SMP code supported by Caldera.
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 *
 *	Fixes
 *		Felix Koop	:	NR_CPUS used properly
 *		Jose Renau	:	Handle single CPU case.
 *		Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *		Greg Wright	:	Fix for kernel stacks panic.
 *		Erich Boleyn	:	MP v1.4 and additional changes.
 *		Matthias Sattler	:	Changes for 2.1 kernel map.
 *		Michel Lespinasse	:	Changes for 2.1 kernel map.
 *		Michael Chastain	:	Change trampoline.S to gnu as.
 *		Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *		Ingo Molnar	:	Added APIC timers, based on code
 *					from Jose Renau
 *		Ingo Molnar	:	various cleanups and rewrites
 *		Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *		Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *		Martin J. Bligh	:	Added support for multi-quad systems
 *		Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *		Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 */
#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
#include <linux/irq.h>
#include <linux/bootmem.h>

#ifdef CONFIG_KDB
#include <linux/kdb.h>
#endif /* CONFIG_KDB */

#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/tlbflush.h>

#include <asm/arch_hooks.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>
/* Set if we find a B stepping CPU */
static int __initdata smp_b_stepping;

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
int phys_proc_id[NR_CPUS]; /* Package ID of each logical CPU */

/* bitmap of online cpus */
cpumask_t cpu_online_map;

static cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
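/*
 * APIC ID of each logical CPU (physical on most subarchitectures,
 * per the note above do_boot_cpu()); 0xff marks a slot that has no
 * CPU assigned yet.
 */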
u8 x86_cpu_to_apicid[NR_CPUS] =
			{ [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);

/* Set when the idlers are all forked */
int smp_threads_ready;
/*
 * Trampoline 80x86 program as an array.
 */

extern unsigned char trampoline_data [];
extern unsigned char trampoline_end  [];
static unsigned char *trampoline_base;
static int trampoline_exec;

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

static unsigned long __init setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
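/*
 * The physical address returned above is used twice: it is written
 * into the warm-reset vector, and its page number goes into the
 * STARTUP IPI vector field, which is why the trampoline page must be
 * page-aligned and below 1MB.
 */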
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = (void *) alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
	/*
	 * Make the SMP trampoline executable:
	 */
	trampoline_exec = set_kernel_exec((unsigned long)trampoline_base, 1);
}
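/*
 * The 0x9F000 limit above keeps the trampoline inside the first 640K
 * of conventional memory, clear of the top page (commonly the EBDA):
 * the AP fetches this code while still in real mode.
 */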
/*
 * The bootstrap kernel entry code has set these up. Save them
 * for a given CPU.
 */
static void __init smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;

	*c = boot_cpu_data;
	if (id != 0)
		identify_cpu(c);
	/* Mask B, Pentium, but not Pentium MMX */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/* Remember we have B step Pentia with bugs */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
		/* Athlon 660/661 is valid. */
		if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model==7) && (c->x86_mask==0))
			goto valid_k7;

		/* Athlon 662, Duron 671, and Athlon >model 7 have the MP
		 * capability bit; the A5 stepping (662) of some Athlon XPs
		 * has the MP bit set too.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more. */
		if (((c->x86_model==6) && (c->x86_mask>=2)) ||
		    ((c->x86_model==7) && (c->x86_mask>=1)) ||
		    (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, it's not a certified SMP capable AMD system. */
		tainted |= TAINT_UNSAFE_SMP;
	}

valid_k7:
	;
}
/*
 * TSC synchronization.
 *
 * We first check whether all CPUs have their TSC's synchronized,
 * then we print a warning if not, and always resync.
 */

static atomic_t tsc_start_flag = ATOMIC_INIT(0);
static atomic_t tsc_count_start = ATOMIC_INIT(0);
static atomic_t tsc_count_stop = ATOMIC_INIT(0);
static unsigned long long tsc_values[NR_CPUS];
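/*
 * tsc_count_start/tsc_count_stop are rendezvous counters: every CPU
 * increments one and then spins until all booting CPUs have checked
 * in, so the TSC reads (and the final reset to zero) happen as close
 * to simultaneously as cacheline traffic allows.
 */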
#define NR_LOOPS 5

/*
 * accurate 64-bit/32-bit division, expanded to 32-bit divisions and 64-bit
 * multiplication. Not terribly optimized but we need it at boot time only
 * anyway.
 *
 * result == a / b
 *	== (a1 + a2*(2^32)) / b
 *	== a1/b + a2*(2^32/b)
 *	== a1/b + a2*((2^32-1)/b) + a2/b + (a2*((2^32-1) % b))/b
 *					    ^---- (this multiplication can overflow)
 */

static unsigned long long __init div64 (unsigned long long a, unsigned long b0)
{
	unsigned int a1, a2;
	unsigned long long res;

	a1 = ((unsigned int*)&a)[0];
	a2 = ((unsigned int*)&a)[1];

	res = a1/b0 +
		(unsigned long long)a2 * (unsigned long long)(0xffffffff/b0) +
		a2 / b0 +
		(a2 * (0xffffffff % b0)) / b0;

	return res;
}
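/*
 * Note the caret in the comment above: the final 32-bit product
 * a2*((2^32-1) % b0) can overflow for large a2. That is tolerable
 * here because div64() only ever sees boot-time TSC sums and deltas
 * whose upper 32 bits are small.
 */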
static void __init synchronize_tsc_bp (void)
{
	int i;
	unsigned long long t0;
	unsigned long long sum, avg;
	long long delta;
	unsigned long one_usec;
	int buggy = 0;

	printk("checking TSC synchronization across %u CPUs: ", num_booting_cpus());

	/* convert from kcyc/sec to cyc/usec */
	one_usec = cpu_khz / 1000;

	atomic_set(&tsc_start_flag, 1);
	wmb();

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronized and
	 * the BP and APs set their cycle counters to zero all at
	 * once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * the CPUs.
	 */
	for (i = 0; i < NR_LOOPS; i++) {
		/* all APs synchronize but they loop on '== num_cpus' */
		while (atomic_read(&tsc_count_start) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_stop, 0);
		wmb();
		/* this lets the APs save their current TSC: */
		atomic_inc(&tsc_count_start);

		rdtscll(tsc_values[smp_processor_id()]);
		/* We clear the TSC in the last loop: */
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		/* Wait for all APs to leave the synchronization point: */
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()-1)
			mb();
		atomic_set(&tsc_count_start, 0);
		wmb();
		atomic_inc(&tsc_count_stop);
	}

	sum = 0;
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_isset(i, cpu_callout_map)) {
			t0 = tsc_values[i];
			sum += t0;
		}
	}
	avg = div64(sum, num_booting_cpus());

	for (i = 0; i < NR_CPUS; i++) {
		if (!cpu_isset(i, cpu_callout_map))
			continue;
		delta = tsc_values[i] - avg;
		if (delta < 0)
			delta = -delta;
		/* We report clock differences bigger than 2 microseconds: */
		if (delta > 2*one_usec) {
			long realdelta;

			if (!buggy) {
				buggy = 1;
				printk("\n");
			}
			realdelta = div64(delta, one_usec);
			if (tsc_values[i] < avg)
				realdelta = -realdelta;

			printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
				i, realdelta);
		}
	}
	if (!buggy)
		printk("passed.\n");
}
static void __init synchronize_tsc_ap (void)
{
	int i;

	/*
	 * Not every cpu is online at the time this gets called, so we
	 * first wait for the BP to finish SMP initialization:
	 */
	while (!atomic_read(&tsc_start_flag)) mb();

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&tsc_count_start);
		while (atomic_read(&tsc_count_start) != num_booting_cpus())
			mb();

		rdtscll(tsc_values[smp_processor_id()]);
		if (i == NR_LOOPS-1)
			write_tsc(0, 0);

		atomic_inc(&tsc_count_stop);
		while (atomic_read(&tsc_count_stop) != num_booting_cpus()) mb();
	}
}
#undef NR_LOOPS
extern void calibrate_delay(void);

static atomic_t init_deasserted;

void __init smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
			phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Stay silent on the APIC
	 * bus for 1 second; this overestimates, by a factor of two,
	 * the time the boot CPU spends sending the up to 2 STARTUP
	 * IPIs. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/* Has the boot CPU finished its STARTUP sequence? */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}
	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n", cpuid);
		BUG();
	}
	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */
	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	/* Get our bogomips. */
	calibrate_delay();
	Dprintk("Stack at about %p\n", &cpuid);

	/* Save our processor parameters */
	smp_store_cpu_info(cpuid);

	disable_APIC_timer();

	/* Allow the master to continue. */
	cpu_set(cpuid, cpu_callin_map);
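	/*
	 * (do_boot_cpu() on the BP polls cpu_callin_map for up to 5
	 * seconds; the cpu_set() above is what releases it.)
	 */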
#ifdef CONFIG_KDB
	/* Activate any preset global breakpoints on this cpu */
	kdb(KDB_REASON_SILENT, 0, 0);
#endif /* CONFIG_KDB */

	/*
	 * Synchronize the TSC with the BP
	 */
	if (cpu_has_tsc && cpu_khz)
		synchronize_tsc_ap();
}
extern int cpu_idle(void);

/*
 * Activate a secondary processor.
 */
int __init start_secondary(void *unused)
{
	/*
	 * Don't put anything before smp_callin(); SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	smp_callin();
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();
	setup_secondary_APIC_clock();
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}
	enable_APIC_timer();
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();
	cpu_set(smp_processor_id(), cpu_online_map);
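	/*
	 * This is the bit __cpu_up() polls for: once it sees us in
	 * cpu_online_map the boot CPU considers this CPU fully up.
	 */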
	wmb();
	return cpu_idle();
}

/*
 * Everything has been set up for the secondary CPUs - they just need
 * to reload everything from the task structure.
 * This function must not return.
 */
void __init initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS, basically just
	 * the stack pointer and the eip.
	 */
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"r" (current->thread.esp), "r" (current->thread.eip));
}
static struct task_struct * __init fork_by_hand(void)
{
	struct pt_regs regs;
	/*
	 * don't care about the eip and regs settings since
	 * we'll never reschedule the forked task.
	 */
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}
#ifdef CONFIG_NUMA

/* which logical CPUs are on which nodes */
cpumask_t node_2_cpu_mask[MAX_NUMNODES] =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
/* which node each logical CPU is on */
int cpu_2_node[NR_CPUS] = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_2_node);

/* set up a mapping between cpu and node. */
static inline void map_cpu_to_node(int cpu, int node)
{
	printk("Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_2_cpu_mask[node]);
	cpu_2_node[cpu] = node;
}

/* undo a mapping between cpu and node. */
static inline void unmap_cpu_to_node(int cpu)
{
	int node;

	printk("Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_2_cpu_mask[node]);
}

#else /* !CONFIG_NUMA */

#define map_cpu_to_node(cpu, node) ({})
#define unmap_cpu_to_node(cpu) ({})

#endif /* CONFIG_NUMA */
u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, apicid_to_node(apicid));
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
static inline void __inquire_remote_apic(int apicid)
{
	int i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout, status;

	printk("Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < sizeof(regs) / sizeof(*regs); i++) {
		printk("... APIC #%d %s: ", apicid, names[i]);

		/* Wait for idle. */
		apic_wait_icr_idle();

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk("%08x\n", status);
			break;
		default:
			printk("failed\n");
		}
	}
}
#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc. later.
 */
static int __init
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int timeout, maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	/* Give the other CPU some time to accept the IPI. */
	udelay(200);
	/* Due to the Pentium erratum 3AP. */
	maxlvt = get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif /* WAKE_SECONDARY_VIA_NMI */
#ifdef WAKE_SECONDARY_VIA_INIT
static int __init
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, timeout, num_starts, j;

	/* Be paranoid about clearing APIC errors. */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/* Turn INIT on target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	timeout = 0;
	do {
		Dprintk("+");
		udelay(100);
		send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
	} while (send_status && (timeout++ < 1000));

	atomic_set(&init_deasserted, 1);
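	/*
	 * The AP side waits for this flag in smp_callin() (via
	 * wait_for_init_deassert()) before touching its local APIC.
	 */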
	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/* Run STARTUP IPI loop. */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/* STARTUP IPI */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));
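		/*
		 * The STARTUP vector field is the physical page number
		 * of the trampoline: the AP starts executing in real
		 * mode at vector<<12, which is why start_eip must be
		 * page-aligned and below 1MB.
		 */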
		/* Give the other CPU some time to accept the IPI. */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		timeout = 0;
		do {
			Dprintk("+");
			udelay(100);
			send_status = apic_read(APIC_ICR) & APIC_ICR_BUSY;
		} while (send_status && (timeout++ < 1000));

		/* Give the other CPU some time to accept the IPI. */
		udelay(200);
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk("APIC never delivered???\n");
	if (accept_status)
		printk("APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif /* WAKE_SECONDARY_VIA_INIT */
extern cpumask_t cpu_initialized;

static int __init do_boot_cpu(int apicid)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	struct task_struct *idle;
	unsigned long boot_error;
	int timeout, cpu;
	unsigned long start_eip;
	unsigned short nmi_high = 0, nmi_low = 0;

	cpu = ++cpucount;
	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);
	wake_up_forked_process(idle);

	/*
	 * We remove it from the pidhash and the runqueue
	 * once we got the process:
	 */
	init_idle(idle, cpu);

	idle->thread.eip = (unsigned long) start_secondary;

	unhash_process(idle);

	/* start_eip had better be page-aligned! */
	start_eip = setup_trampoline();
	/* So we see what's up */
	printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.esp = (void *) idle->thread.esp;

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */
	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_eip);
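	/*
	 * The warm-reset vector is the pre-SIPI wakeup path: CMOS
	 * shutdown code 0xA plus a start address at 40:67 makes a CPU
	 * coming out of INIT jump straight to the trampoline; CPUs
	 * with integrated APICs mostly rely on the STARTUP IPIs.
	 */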
	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_eip);

	if (!boot_error) {
		/* allow APs to start initializing. */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}
		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk("CPU%d: ", cpu);
			print_cpu_info(&cpu_data[cpu]);
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk("Stuck ??\n");
			else
				/* trampoline code not run */
				printk("Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}
	x86_cpu_to_apicid[cpu] = apicid;
	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
		cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpucount--;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}
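/*
 * (The 0xA5 test above works because trampoline.S stamps 0xA5A5A5A5
 * over its own first longword as soon as the AP starts executing;
 * clearing the word re-arms the check for the next CPU.)
 */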
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;

static void smp_tune_scheduling (void)
{
	unsigned long cachesize;       /* kB   */
	unsigned long bandwidth = 350; /* MB/s */
	/*
	 * Rough estimation for SMP scheduling, this is the number of
	 * cycles it takes for a fully memory-limited process to flush
	 * the SMP-local cache.
	 *
	 * (For a P5 this pretty much means we will choose another idle
	 * CPU almost always at wakeup time (this is due to the small
	 * L1 cache), on PIIs it's around 50-100 usecs, depending on
	 * the cache size)
	 */

	if (!cpu_khz) {
		/*
		 * this basically disables processor-affinity
		 * scheduling on SMP without a TSC.
		 */
		cacheflush_time = 0;
		return;
	} else {
		cachesize = boot_cpu_data.x86_cache_size;
		if (cachesize == -1) {
			cachesize = 16; /* Pentiums, 2x8kB cache */
			bandwidth = 100;
		}

		cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
	}
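	/*
	 * Units check: (cpu_khz>>10) ~ cycles/usec, (cachesize<<10) =
	 * bytes, bandwidth in MB/s ~ bytes/usec, so cacheflush_time
	 * comes out in CPU cycles.
	 */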
	cache_decay_ticks = (long)cacheflush_time/cpu_khz + 1;

	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
		(long)cacheflush_time/(cpu_khz/1000),
		((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
	printk("task migration cache decay timeout: %ld msecs.\n",
		(cache_decay_ticks + 1) * 1000 / HZ);
}
/*
 * Cycle through the processors sending APIC IPIs to boot each.
 */

static int boot_cpu_logical_apicid;
/* Where the IO area was mapped on multiquad, always 0 otherwise */
void *xquad_portio;

cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	cpus_clear(cpu_sibling_map[0]);
	cpu_set(0, cpu_sibling_map[0]);
	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		return;
	}
	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
			boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	verify_local_APIC();
	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	setup_portio_remap();
	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/* Don't even attempt to start the boot CPU! */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (do_boot_cpu(apicid))
			printk("CPU #%d not responding - cannot use it.\n",
				apicid);
		else
			++kicked;
	}
	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);
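	/*
	 * loops_per_jiffy*HZ is loops/sec and one BogoMIPS is 500000
	 * loops/sec, so bogosum/(500000/HZ) is the integer BogoMIPS
	 * total and bogosum/(5000/HZ) % 100 supplies two decimals.
	 */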
	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk(KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");
	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings)
			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n",
				siblings, cpu, smp_num_siblings);
	}
	if (nmi_watchdog == NMI_LOCAL_APIC)
		check_nmi_watchdog();

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}
#ifdef CONFIG_SCHED_SMT
#ifdef CONFIG_NUMA
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static struct sched_group sched_group_nodes[MAX_NUMNODES];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static DEFINE_PER_CPU(struct sched_domain, node_domains);
__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);
		struct sched_domain *node_domain = &per_cpu(node_domains, i);
		int node = cpu_to_node(i);
		cpumask_t nodemask = node_to_cpumask(node);

		*cpu_domain = SD_SIBLING_INIT;
		cpu_domain->span = cpu_sibling_map[i];
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = nodemask;
		phys_domain->parent = node_domain;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];

		*node_domain = SD_NODE_INIT;
		node_domain->span = cpu_possible_map;
		node_domain->groups = &sched_group_nodes[cpu_to_node(i)];
	}
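	/*
	 * The loop above builds a three-level hierarchy per CPU, bottom
	 * up: siblings (SMT) -> all CPUs in the node (physical) -> all
	 * possible CPUs (node). Each parent pointer links a domain to
	 * the next-wider balancing level.
	 */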
	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpu->cpumask = CPU_MASK_NONE;
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}
	for (i = 0; i < MAX_NUMNODES; i++) {
		int j;
		cpumask_t nodemask;
		struct sched_group *node = &sched_group_nodes[i];
		cpumask_t node_cpumask = node_to_cpumask(i);

		cpus_and(nodemask, node_cpumask, cpu_possible_map);
		if (cpus_empty(nodemask))
			continue;

		first = last = NULL;
		/* Set up physical groups */
		for_each_cpu_mask(j, nodemask) {
			struct sched_domain *cpu_domain = &per_cpu(cpu_domains, j);
			struct sched_group *cpu = &sched_group_phys[j];

			if (j != first_cpu(cpu_domain->span))
				continue;

			cpu->cpumask = cpu_domain->span;
			/*
			 * Make each extra sibling increase power by 10% of
			 * the basic CPU. This is very arbitrary.
			 */
			cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;
			node->cpu_power += cpu->cpu_power;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}
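	/*
	 * With the 10% rule above, a package with two siblings gets
	 * cpu_power = 1.1*SCHED_LOAD_SCALE, i.e. a second hyperthread
	 * counts as only a tenth of an extra CPU when balancing across
	 * packages.
	 */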
	/* Set up nodes */
	first = last = NULL;
	for (i = 0; i < MAX_NUMNODES; i++) {
		struct sched_group *cpu = &sched_group_nodes[i];
		cpumask_t nodemask;
		cpumask_t node_cpumask = node_to_cpumask(i);

		cpus_and(nodemask, node_cpumask, cpu_possible_map);
		if (cpus_empty(nodemask))
			continue;

		cpu->cpumask = nodemask;
		/* ->cpu_power already setup */

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#else /* !CONFIG_NUMA */
static struct sched_group sched_group_cpus[NR_CPUS];
static struct sched_group sched_group_phys[NR_CPUS];
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
__init void arch_init_sched_domains(void)
{
	int i;
	struct sched_group *first = NULL, *last = NULL;

	/* Set up domains */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_domain *phys_domain = &per_cpu(phys_domains, i);

		*cpu_domain = SD_SIBLING_INIT;
		cpu_domain->span = cpu_sibling_map[i];
		cpu_domain->parent = phys_domain;
		cpu_domain->groups = &sched_group_cpus[i];

		*phys_domain = SD_CPU_INIT;
		phys_domain->span = cpu_possible_map;
		phys_domain->groups = &sched_group_phys[first_cpu(cpu_domain->span)];
	}

	/* Set up CPU (sibling) groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		int j;
		first = last = NULL;

		if (i != first_cpu(cpu_domain->span))
			continue;

		for_each_cpu_mask(j, cpu_domain->span) {
			struct sched_group *cpu = &sched_group_cpus[j];

			cpus_clear(cpu->cpumask);
			cpu_set(j, cpu->cpumask);
			cpu->cpu_power = SCHED_LOAD_SCALE;

			if (!first)
				first = cpu;
			if (last)
				last->next = cpu;
			last = cpu;
		}
		last->next = first;
	}
	first = last = NULL;
	/* Set up physical groups */
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		struct sched_group *cpu = &sched_group_phys[i];

		if (i != first_cpu(cpu_domain->span))
			continue;

		cpu->cpumask = cpu_domain->span;
		/* See SMT+NUMA setup for comment */
		cpu->cpu_power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE*(cpus_weight(cpu->cpumask)-1) / 10;

		if (!first)
			first = cpu;
		if (last)
			last->next = cpu;
		last = cpu;
	}
	last->next = first;

	mb();
	for_each_cpu(i) {
		struct sched_domain *cpu_domain = &per_cpu(cpu_domains, i);
		cpu_attach_domain(cpu_domain, i);
	}
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_SCHED_SMT */
/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_boot_cpus(max_cpus);
}

void __devinit smp_prepare_boot_cpu(void)
{
	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), cpu_callout_map);
}
int __devinit __cpu_up(unsigned int cpu)
{
	/* This only works at boot for x86. See "rewrite" above. */
	if (cpu_isset(cpu, smp_commenced_mask)) {
		local_irq_enable();
		return -ENOSYS;
	}

	/* In case one didn't come up */
	if (!cpu_isset(cpu, cpu_callin_map)) {
		local_irq_enable();
		return -EIO;
	}

	local_irq_enable();
	/* Unleash the CPU! */
	cpu_set(cpu, smp_commenced_mask);
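	/*
	 * start_secondary() spins on smp_commenced_mask right after
	 * smp_callin(); the cpu_set() above is what lets the new CPU
	 * proceed to its idle loop, where it marks itself online.
	 */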
	while (!cpu_isset(cpu, cpu_online_map))
		mb();
	return 0;
}
void __init smp_cpus_done(unsigned int max_cpus)
{
#ifdef CONFIG_X86_IO_APIC
	setup_ioapic_dest();
#endif
	zap_low_mappings();
	/*
	 * Disable executability of the SMP trampoline:
	 */
	set_kernel_exec((unsigned long)trampoline_base, trampoline_exec);
}
void __init smp_intr_init(void)
{
	/*
	 * IRQ0 must be given a fixed assignment and initialized,
	 * because it's used before the IO-APIC is set up.
	 */
	set_intr_gate(FIRST_DEVICE_VECTOR, interrupt[0]);

	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	set_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for invalidation */
	set_intr_gate(INVALIDATE_TLB_VECTOR, invalidate_interrupt);

	/* IPI for generic function call */
	set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
}