/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 */
#define __KERNEL_SYSCALLS__

#include <linux/config.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/efi.h>

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/unistd.h>

#define Dprintk(x...)	printk(x)
/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static spinlock_t itc_sync_lock = SPIN_LOCK_UNLOCKED;
static volatile unsigned long go[SLAVE + 1];
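/*
 * go[MASTER] and go[SLAVE] are the handshake words between the two CPUs being
 * synchronized; SLAVE indexes one full cacheline past MASTER so that the two
 * words never share a cacheline.
 */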
#define DEBUG_ITC_SYNC	0
extern void __init calibrate_delay (void);
extern void start_ap (void);
extern unsigned long ia64_iobase;
task_t *task_for_booting_cpu;

/* Bitmask of currently online CPUs */
volatile unsigned long cpu_online_map;
unsigned long phys_cpu_present_map;

/* which logical CPU number maps to which CPU (physical SAPIC id) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];

static volatile unsigned long cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1;	/* external interrupt vector used to wake up APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect;		/* are INT and IPI redirectable by the chipset? */
static int __init
nointroute (char *str)
{
	no_int_routing = 1;
	return 1;
}

__setup("nointroute", nointroute);
static void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
		while (!go[MASTER]);	/* wait until the slave asks for a timestamp */
		go[MASTER] = 0;
		go[SLAVE] = ia64_get_itc();
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]));	/* wait for the master's timestamp */
		go[SLAVE] = 0;
		t1 = ia64_get_itc();

		/* keep the round with the tightest (t1 - t0) window: */
		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
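/*
 * Worked example with made-up numbers: if the best round reads t0 = 1000 and
 * t1 = 1010 around a master reply of tm = 1003, then tcenter = 1005 and
 * get_delta() returns +2 (the slave's itc is 2 cycles ahead), with *rt = 10
 * as the roundtrip bound on the error.
 */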
/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps: t0 (slave, before), tm (master's reply), and t1 (slave, after).
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc values are
 * usually almost perfectly synchronized, but we shouldn't assume that the accuracy is
 * much better than half a microsecond or so.
 */
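/*
 * Equivalently, ignoring the latency compensation the loop below adds, each
 * round applies the correction
 *
 *	adj = -delta = tm - (t0 + t1)/2
 *
 * so that after the adjustment the master's reply lands on the midpoint of
 * the slave's two reads.
 */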
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif
	go[MASTER] = 1;

	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER]);	/* wait for master to be ready */
	spin_lock_irqsave(&itc_sync_lock, flags);

	for (i = 0; i < NUM_ROUNDS; ++i) {
		delta = get_delta(&rt, &master_time_stamp);
		if (delta == 0) {
			done = 1;	/* let's lock on to this... */
			bound = rt;
		}

		if (!done) {
			if (i > 0) {
				adjust_latency += -delta;
				adj = -delta + adjust_latency/4;
			} else
				adj = -delta;

			ia64_set_itc(ia64_get_itc() + adj);
		}
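		/*
		 * adjust_latency accumulates the residual error; feeding a quarter of
		 * it back in appears to act as a damped integrator, compensating for
		 * the latency of applying the adjustment itself so that the loop
		 * settles instead of oscillating.
		 */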
#if DEBUG_ITC_SYNC
		t[i].rt = rt;
		t[i].master = master_time_stamp;
		t[i].diff = delta;
		t[i].lat = adjust_latency/4;
#endif
	}

	spin_unlock_irqrestore(&itc_sync_lock, flags);
#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void __init
smp_setup_percpu_timer (void)
{
	local_cpu_data->prof_counter = 1;
	local_cpu_data->prof_multiplier = 1;
}
static void __init
smp_callin (void)
{
	int cpuid, phys_id;
	extern void ia64_init_itm(void);

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();

	if (test_and_set_bit(cpuid, &cpu_online_map)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	smp_setup_percpu_timer();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP
		 */
		Dprintk("Going to syncup ITC with BP.\n");
		ia64_sync_itc(0);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Set I/O port base per CPU
	 */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

#ifdef CONFIG_IA64_MCA
	ia64_mca_cmc_vector_setup();	/* Setup vector on AP & enable */
	ia64_mca_check_errors();	/* For post-failure MCA error logging */
#endif

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();
	calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

	/*
	 * Allow the master to continue.
	 */
	set_bit(cpuid, &cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __init
start_secondary (void *unused)
{
	extern int cpu_idle (void);

	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
	efi_map_pal_code();
	cpu_init();
	smp_callin();

	return cpu_idle();
}
static struct task_struct * __init
fork_by_hand (void)
{
	/*
	 * Don't care about the instruction pointer and regs settings since we'll
	 * never reschedule the forked task.
	 */
	return do_fork(CLONE_VM|CLONE_IDLETASK, 0, 0, 0, NULL, NULL);
}
static int __init
do_boot_cpu (int sapicid, int cpu)
{
	struct task_struct *idle;
	int timeout;

	/*
	 * We can't use kernel_thread() since we must avoid rescheduling the child.
	 */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("failed fork for CPU %d", cpu);

	/*
	 * We remove it from the pidhash and the runqueue
	 * once we've got the process:
	 */
	init_idle(idle, cpu);

	unhash_process(idle);

	task_for_booting_cpu = idle;

	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start (100000 iterations of udelay(100)).
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (test_bit(cpu, &cpu_callin_map))
			break;	/* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (test_bit(cpu, &cpu_callin_map)) {
		/* number CPUs logically, starting from 1 (BSP is 0) */
		printk(KERN_INFO "CPU%d: CPU has booted.\n", cpu);
	} else {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		clear_bit(cpu, &cpu_online_map);	/* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}
unsigned long cache_decay_ticks;	/* # of ticks an idle task is considered cache-hot */

static void
smp_tune_scheduling (void)
{
	cache_decay_ticks = 10;	/* XXX base this on PAL info and cache-bandwidth estimate */

	printk(KERN_INFO "task migration cache decay timeout: %ld msecs.\n",
	       (cache_decay_ticks + 1) * 1000 / HZ);
}
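/*
 * For example, with cache_decay_ticks == 10 and HZ == 1024 (a typical ia64
 * setting of that era), the message above reports (10 + 1) * 1000 / 1024 =
 * 10 msecs.
 */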
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		ia64_cpu_to_sapicid[cpu] = -1;

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	phys_cpu_present_map = 1;

	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == -1 || sapicid == boot_cpu_id)
			continue;
		phys_cpu_present_map |= (1UL << cpu);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
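/*
 * Example with made-up ids: a BSP with SAPIC id 0x10 plus APs 0x12 and 0x14
 * yields ia64_cpu_to_sapicid[] = { 0x10, 0x12, 0x14, -1, ... } and
 * phys_cpu_present_map == 0x7.
 */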
#ifdef CONFIG_NUMA

/* on which node is each logical CPU (one cacheline even for 64 CPUs) */
volatile char cpu_to_node_map[NR_CPUS] __cacheline_aligned;
/* which logical CPUs are on which nodes */
volatile unsigned long node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;

/*
 * Build cpu to node mapping and initialize the per node cpu masks.
 */
void __init
build_cpu_to_node_map (void)
{
	int cpu, i, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		node_to_cpu_mask[node] = 0;
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		/*
		 * All Itanium NUMA platforms I know use ACPI, so maybe we
		 * can drop this ifdef completely. [EF]
		 */
#ifdef CONFIG_ACPI_NUMA
		node = -1;
		for (i = 0; i < NR_CPUS; ++i)
			if (cpu_physical_id(cpu) == node_cpuid[i].phys_id) {
				node = node_cpuid[i].nid;
				break;
			}
#else
#		error Fixme: Dunno how to build CPU-to-node map.
#endif
		cpu_to_node_map[cpu] = node;
		if (node >= 0)
			node_to_cpu_mask[node] |= (1UL << cpu);
	}
}
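/*
 * Example with hypothetical ids: logical CPUs 0-1 on node 0 and CPUs 2-3 on
 * node 1 give node_to_cpu_mask[0] == 0x3 and node_to_cpu_mask[1] == 0xc.
 */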
#endif /* CONFIG_NUMA */
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */
	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	set_bit(0, &cpu_online_map);
	set_bit(0, &cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		cpu_online_map = phys_cpu_present_map = 1;
		return;
	}
}
void __devinit smp_prepare_boot_cpu(void)
{
	set_bit(smp_processor_id(), &cpu_online_map);
	set_bit(smp_processor_id(), &cpu_callin_map);
}
void __init
smp_cpus_done (unsigned int dummy)
{
	int cpu;
	unsigned long bogosum = 0;

	/*
	 * Allow the user to impress friends.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_online(cpu))
			bogosum += cpu_data(cpu)->loops_per_jiffy;

	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
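/*
 * A CPU's BogoMIPS figure is loops_per_jiffy * HZ / 500000, so
 * bogosum/(500000/HZ) above is the integer part and (bogosum/(5000/HZ)) % 100
 * the two decimal places of the system total.
 */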
int __devinit
__cpu_up (unsigned int cpu)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	printk(KERN_INFO "Processor %d/%d is spinning up...\n", sapicid, cpu);

	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;

	printk(KERN_INFO "Processor %d has spun up...\n", cpu);
	return 0;
}
/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs.  */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       __pa(ap_startup->fp), __pa(ap_startup->gp), 0, 0, 0, 0);
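	/*
	 * On ia64 a C function pointer is a descriptor holding the code entry
	 * point (fp) and the global pointer (gp); both are handed to SAL as
	 * physical addresses since the APs start with address translation off.
	 */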
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}