** Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
** Copyright (C) 1999 David Mosberger-Tang <davidm@hpl.hp.com>
** Copyright (C) 2001 Grant Grundler <grundler@parisc-linux.org>
**
** Lots of stuff stolen from arch/alpha/kernel/smp.c
** ...and then parisc stole from arch/ia64/kernel/smp.c. Thanks David! :^)
**
** Thanks to John Curry and Ullas Ponnadi. I learned a lot from their work.
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
*/
#define __KERNEL_SYSCALLS__
#undef ENTRY_SYS_CPUS	/* syscall support for iCOD-like functionality */

#include <linux/autoconf.h>

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>

#include <asm/system.h>
#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/pgalloc.h>	/* for flush_tlb_all() proto/macro */

#include <asm/io.h>		/* for __raw_writel() */
#include <asm/irq.h>		/* for CPU_IRQ_REGION and friends */
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>

spinlock_t pa_dbit_lock = SPIN_LOCK_UNLOCKED;

spinlock_t smp_lock = SPIN_LOCK_UNLOCKED;

volatile struct task_struct *smp_init_current_idle_task;

static volatile int smp_commenced = 0;		/* Set when the idlers are all forked */
static volatile int cpu_now_booting = 0;	/* track which CPU is booting */
volatile unsigned long cpu_online_map = 0;	/* Bitmap of online CPUs */
#define IS_LOGGED_IN(cpunum) (test_bit(cpunum, (atomic_t *)&cpu_online_map))

int smp_threads_ready = 0;
unsigned long cache_decay_ticks;
static int max_cpus = -1;	/* Command line */
unsigned long cpu_present_mask;

struct smp_call_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t unstarted_count;
	atomic_t unfinished_count;
};
static volatile struct smp_call_struct *smp_call_function_data;
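
/*
** Handshake used by smp_call_function()/IPI_CALL_FUNC (see below):
**   o the sender fills in func/info/wait, sets both counters to
**     smp_num_cpus - 1, publishes the struct through
**     smp_call_function_data, and raises IPI_CALL_FUNC on everyone else;
**   o each receiver decrements unstarted_count *before* calling func
**     (after that point the sender may reclaim the struct), and
**     decrements unfinished_count afterwards only if the sender waits.
*/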

enum ipi_message_type {
	IPI_NOP = 0,
	IPI_RESCHEDULE = 1,
	IPI_CALL_FUNC,
	IPI_CPU_START,
	IPI_CPU_STOP,
	IPI_CPU_TEST
};

/********** SMP inter-processor interrupt and communication routines */

#undef PER_CPU_IRQ_REGION
#ifdef PER_CPU_IRQ_REGION
/* XXX REVISIT Ignore for now.
**	*May* need this "hook" to register IPI handler
**	once we have perCPU ExtIntr switch tables.
*/
static void
ipi_init(int cpuid)
{
	/* If CPU is present ... */
#ifdef ENTRY_SYS_CPUS
	/* *and* running (not stopped) ... */
#error iCOD support wants state checked here.
#endif

#error verify IRQ_OFFSET(IPI_IRQ) is ipi_interrupt() in new IRQ region

	if (IS_LOGGED_IN(cpuid)) {
		switch_to_idle_task(current);
	}
	return;
}
#endif

/*
** Yoink this CPU from the runnable list...
*/
static void
halt_processor(void)
{
#ifdef ENTRY_SYS_CPUS
#error halt_processor() needs rework
/*
** o migrate I/O interrupts off this CPU.
** o leave IPI enabled - __cli() will disable IPI.
** o leave CPU in online map - just change the state
*/
	cpu_data[this_cpu].state = STATE_STOPPED;
#else
	/* REVISIT : redirect I/O Interrupts to another CPU? */
	/* REVISIT : does PM *know* this CPU isn't available? */
	clear_bit(smp_processor_id(), (void *)&cpu_online_map);
	local_irq_disable();
	for (;;)
		;
#endif
}

irqreturn_t
ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = smp_processor_id();
	struct cpuinfo_parisc *p = &cpu_data[this_cpu];
	unsigned long ops, flags;

	/* Count this now; we may make a call that never returns. */
	p->ipi_count++;
	mb();	/* Order interrupt and bit testing. */

	for (;;) {
		spin_lock_irqsave(&(p->lock), flags);
		ops = p->pending_ipi;
		p->pending_ipi = 0;
		spin_unlock_irqrestore(&(p->lock), flags);
		mb();	/* Order bit clearing and data access. */
		if (!ops)
			break;

		while (ops) {
			unsigned long which = ffz(~ops);

			switch (which) {
			case IPI_RESCHEDULE:
				printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n", this_cpu);
				ops &= ~(1 << IPI_RESCHEDULE);
				/* Reschedule callback. Everything to be
				 * done is done by the interrupt return path. */
				break;
			case IPI_CALL_FUNC:
				printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu);
				ops &= ~(1 << IPI_CALL_FUNC);
				{
					volatile struct smp_call_struct *data;
					void (*func)(void *info);
					void *info;
					long wait;

					data = smp_call_function_data;
					func = data->func;
					info = data->info;
					wait = data->wait;
					mb();
					atomic_dec((atomic_t *)&data->unstarted_count);
					/* At this point, *data can't
					 * be relied upon. */
					(*func)(info);
					/* Notify the sending CPU that the
					 * task is done. */
					mb();
					if (wait)
						atomic_dec((atomic_t *)&data->unfinished_count);
				}
				break;
			case IPI_CPU_START:
				printk(KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
				ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
				p->state = STATE_RUNNING;
#endif
				break;
			case IPI_CPU_STOP:
				printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n", this_cpu);
				ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
				halt_processor();
#endif
				break;
			case IPI_CPU_TEST:
				printk(KERN_DEBUG "CPU%d is alive!\n", this_cpu);
				ops &= ~(1 << IPI_CPU_TEST);
				break;
			default:
				printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
					this_cpu, which);
				ops &= ~(1 << which);
			}
		}
	}
	return IRQ_HANDLED;
}

static inline void
ipi_send(int cpu, enum ipi_message_type op)
{
	struct cpuinfo_parisc *p = &cpu_data[cpu];
	unsigned long flags;

	spin_lock_irqsave(&(p->lock), flags);
	p->pending_ipi |= 1 << op;
	__raw_writel(IRQ_OFFSET(IPI_IRQ), cpu_data[cpu].hpa);
	spin_unlock_irqrestore(&(p->lock), flags);
}

static inline void
send_IPI_single(int dest_cpu, enum ipi_message_type op)
{
	if (dest_cpu == NO_PROC_ID) {
		BUG();
		return;
	}
	ipi_send(dest_cpu, op);
}

static inline void
send_IPI_allbutself(enum ipi_message_type op)
{
	int i;

	for (i = 0; i < smp_num_cpus; i++) {
		if (i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

inline void
smp_send_stop(void)	{ send_IPI_allbutself(IPI_CPU_STOP); }

static inline void
smp_send_start(void)	{ send_IPI_allbutself(IPI_CPU_START); }

void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }
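
/*
** Illustrative only (not compiled in): ping another CPU with the test
** IPI and it answers with the "CPU%d is alive!" printk from
** ipi_interrupt() above. The target CPU number here is made up.
*/
#if 0
	if (IS_LOGGED_IN(1))
		send_IPI_single(1, IPI_CPU_TEST);
#endif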

/*
 * Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <retry>	If true, keep retrying until ready.
 *  <wait>	If true, wait until function has completed on other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until remote CPUs are nearly ready to execute <func>
 * or have executed it.
 */
int
smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
{
	struct smp_call_struct data;
	long timeout;
	static spinlock_t lock = SPIN_LOCK_UNLOCKED;

	data.func = func;
	data.info = info;
	data.wait = wait;
	atomic_set(&data.unstarted_count, smp_num_cpus - 1);
	atomic_set(&data.unfinished_count, smp_num_cpus - 1);

	if (retry) {
		spin_lock (&lock);
		while (smp_call_function_data != 0)
			barrier();
	} else {
		spin_lock (&lock);
		if (smp_call_function_data) {
			spin_unlock (&lock);
			return -EBUSY;
		}
	}

	smp_call_function_data = &data;
	spin_unlock (&lock);

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	timeout = jiffies + HZ;
	while ( (atomic_read (&data.unstarted_count) > 0) &&
		time_before (jiffies, timeout) )
		barrier ();

	/* We either got one or timed out. Release the lock */
	mb();
	smp_call_function_data = NULL;
	if (atomic_read (&data.unstarted_count) > 0) {
		printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d)\n",
			smp_processor_id());
		return -ETIMEDOUT;
	}

	while (wait && atomic_read (&data.unfinished_count) > 0)
		barrier ();

	return 0;
}
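
/*
 * Minimal usage sketch (not compiled in; the callback name is made up).
 * The callback runs in interrupt context on the other CPUs, so it must
 * be fast, must not block, and must not re-enter smp_call_function():
 */
#if 0
static void say_hello(void *info)
{
	printk(KERN_DEBUG "CPU%d: hello\n", smp_processor_id());
}

	/* ...then, from process context on the monarch: */
	if (smp_call_function(say_hello, NULL, 0, 1) != 0)
		printk(KERN_WARNING "some CPUs never picked up the IPI\n");
#endif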

/*
 * Setup routine for controlling SMP activation
 *
 * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
 * activation entirely (the MPS table probe still happens, though).
 *
 * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
 * greater than 0, limits the maximum number of CPUs activated in
 * SMP mode to <NUM>.
 */

static int __init nosmp(char *str)
{
	max_cpus = 0;
	return 1;
}

__setup("nosmp", nosmp);

static int __init maxcpus(char *str)
{
	get_option(&str, &max_cpus);
	return 1;
}

__setup("maxcpus=", maxcpus);
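
/*
 * For example (illustrative boot lines): "maxcpus=2" activates at most
 * two CPUs, while "nosmp" or "maxcpus=0" keeps the kernel effectively
 * uniprocessor.
 */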

/*
 * Flush all other CPUs' TLBs and then mine. Do this with on_each_cpu()
 * as we want to ensure all TLBs are flushed before proceeding.
 */

extern void flush_tlb_all_local(void);

void
smp_flush_tlb_all(void)
{
	on_each_cpu((void (*)(void *))flush_tlb_all_local, NULL, 1, 1);
}

void
smp_do_timer(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	struct cpuinfo_parisc *data = &cpu_data[cpu];

	if (!--data->prof_counter) {
		data->prof_counter = data->prof_multiplier;
		update_process_times(user_mode(regs));
	}
}

/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init
smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);	/* arch/parisc/kernel/setup.c */
	extern void init_IRQ(void);	/* arch/parisc/kernel/irq.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (test_and_set_bit(cpunum, (unsigned long *) (&cpu_online_map)))
	{
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	/* Initialise the idle task for this CPU: the idle task has no user
	 * address space, so it borrows init_mm and runs lazy-TLB. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current, cpunum);

	init_IRQ();	/* make sure no IRQ's are enabled or pending */
}

/*
 * Slaves start using C here. Indirectly called from smp_slave_stext.
 * Do what start_kernel() and main() do for the boot strap processor (aka monarch).
 */
void __init smp_callin(void)
{
	extern void cpu_idle(void);	/* arch/parisc/kernel/process.c */
	int slave_id = cpu_now_booting;

	smp_cpu_init(slave_id);

#if 0	/* NOT WORKING YET - see entry.S */
	istack = (void *)__get_free_pages(GFP_KERNEL, ISTACK_ORDER);
	if (istack == NULL) {
		printk(KERN_CRIT "Failed to allocate interrupt stack for cpu %d\n", slave_id);
		BUG();
	}
#endif

	flush_cache_all_local();	/* start with known state */
	flush_tlb_all_local();

	local_irq_enable();	/* Interrupts have been off until now */

	/* Slaves wait here until Big Poppa daddy say "jump" */
	mb();
	while (!smp_commenced)
		;
	mb();

	cpu_idle();	/* Wait for timer to schedule some work */

	/* NOTREACHED */
	panic("smp_callin() AAAAaaaaahhhh....\n");
}

/*
 * Create the idle task for a new Slave CPU. DO NOT use kernel_thread()
 * because that could end up calling schedule(). If it did, the new idle
 * task could get scheduled before we had a chance to remove it from the
 * run-queue...
 */
static struct task_struct *fork_by_hand(void)
{
	struct pt_regs regs;

	/*
	 * don't care about the regs settings since
	 * we'll never reschedule the forked task.
	 */
	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
}

/*
 * Bring one cpu online.
 */
static int __init smp_boot_one_cpu(int cpuid, int cpunum)
{
	struct task_struct *idle;
	long timeout;

	/*
	 * Create an idle task for this CPU. Note the address we give
	 * to kernel_thread is irrelevant -- it's going to start
	 * where OS_BOOT_RENDEVZ vector in SAL says to start. But
	 * this gets all the other task-y sort of data structures set
	 * up like we wish. We need to pull the just created idle task
	 * off the run queue and stuff it into the init_tasks[] array.
	 */
	idle = fork_by_hand();
	if (IS_ERR(idle))
		panic("SMP: fork failed for CPU:%d", cpuid);

	wake_up_forked_process(idle);
	init_idle(idle, cpunum);
	unhash_process(idle);
	idle->thread_info->cpu = cpunum;

	/* Let _start know what logical CPU we're booting
	** (offset into init_tasks[],cpu_data[])
	*/
	cpu_now_booting = cpunum;

	/*
	** boot strap code needs to know the task address since
	** it also contains the process stack.
	*/
	smp_init_current_idle_task = idle;
	mb();

	/*
	** This gets PDC to release the CPU from a very tight loop.
	** See MEM_RENDEZ comments in head.S.
	*/
	__raw_writel(IRQ_OFFSET(TIMER_IRQ), cpu_data[cpunum].hpa);
	mb();

	/*
	 * OK, wait a bit for that CPU to finish staggering about.
	 * Slave will set a bit when it reaches smp_cpu_init() and then
	 * wait for smp_commenced to be 1.
	 * Once we see the bit change, we can move on.
	 */
	for (timeout = 0; timeout < 10000; timeout++) {
		if (IS_LOGGED_IN(cpunum)) {
			/* Which implies Slave has started up */
			cpu_now_booting = 0;
			smp_init_current_idle_task = NULL;
			goto alive;
		}
		udelay(100);
		barrier();
	}

	put_task_struct(idle);
	idle = NULL;

	printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
	return -1;

alive:
	/* Remember the Slave data */
	printk(KERN_DEBUG "SMP: CPU:%d (num %d) came alive after %ld _us\n",
		cpuid, cpunum, timeout * 100);
#ifdef ENTRY_SYS_CPUS
	cpu_data[cpunum].state = STATE_RUNNING;
#endif
	return 0;
}

/*
** inventory.c:do_inventory() has already 'discovered' the additional CPUs.
** We are ready to wrest them from PDC's control now.
** Called by smp_init to bring all the secondaries online and hold them.
**
** o Setup of the IPI irq handler is done in irq.c.
** o MEM_RENDEZ is initialized in head.S:stext()
*/
void __init smp_boot_cpus(void)
{
	int i, cpu_count = 1;
	unsigned long bogosum = cpu_data[0].loops_per_jiffy; /* Count Monarch */

	/* REVISIT - assumes first CPU reported by PAT PDC is BSP */
	int bootstrap_processor = cpu_data[0].cpuid;	/* CPU ID of BSP */

	/* Setup BSP mappings */
	printk(KERN_DEBUG "SMP: bootstrap CPU ID is %d\n", bootstrap_processor);
	init_task.thread_info->cpu = bootstrap_processor;
	current->thread_info->cpu = bootstrap_processor;
	cpu_online_map = 1 << bootstrap_processor; /* Mark Bootstrap processor as present */
	current->active_mm = &init_mm;

#ifdef ENTRY_SYS_CPUS
	cpu_data[0].state = STATE_RUNNING;
#endif
	cpu_present_mask = 1UL << bootstrap_processor;

	/* Nothing to do when told not to. */
	if (max_cpus == 0) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		return;
	}

	if (max_cpus != -1)
		printk(KERN_INFO "Limiting CPUs to %d\n", max_cpus);

	/* We found more than one CPU.... */
	if (boot_cpu_data.cpu_count > 1) {

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_data[i].cpuid == NO_PROC_ID ||
			    cpu_data[i].cpuid == bootstrap_processor)
				continue;

			if (smp_boot_one_cpu(cpu_data[i].cpuid, cpu_count) < 0)
				continue;

			bogosum += cpu_data[i].loops_per_jiffy;
			cpu_count++; /* Count good CPUs only... */

			cpu_present_mask |= 1UL << i;

			/* Bail when we've started as many CPUS as told to */
			if (cpu_count == max_cpus)
				break;
		}
	}

	if (cpu_count == 1) {
		printk(KERN_INFO "SMP: Bootstrap processor only.\n");
	}

	/* FIXME very rough. */
	cache_decay_ticks = HZ/100;
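
	/* BogoMIPS = loops_per_jiffy * HZ / 500000. With HZ == 100 the
	 * expressions below reduce to bogosum / 5000 for the integer part;
	 * the "+ 25" merely rounds the hundredths printed via %02lu. */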
	printk(KERN_INFO "SMP: Total %d of %d processors activated "
		"(%lu.%02lu BogoMIPS noticed) (Present Mask: %lu).\n",
		cpu_count, boot_cpu_data.cpu_count, (bogosum + 25) / 5000,
		((bogosum + 25) / 50) % 100, cpu_present_mask);

	smp_num_cpus = cpu_count;
#ifdef PER_CPU_IRQ_REGION
	ipi_init();
#endif
	return;
}

/*
 * Called from main.c by Monarch Processor.
 * After this, any CPU can schedule any task.
 */
void smp_commence(void)
{
	smp_commenced = 1;
	mb();
	return;
}

/*
 * XXX FIXME : do nothing
 */
void smp_cpus_done(unsigned int cpu_max)
{
	smp_threads_ready = 1;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_boot_cpus();
}

void __devinit smp_prepare_boot_cpu(void)
{
	set_bit(smp_processor_id(), &cpu_online_map);
	set_bit(smp_processor_id(), &cpu_present_mask);
}
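
/*
** All present CPUs were already spun up in smp_boot_cpus() above, so by
** the time generic code calls __cpu_up() the target CPU is either online
** (report success) or it never rendezvoused (report -ENOSYS).
*/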
int __devinit __cpu_up(unsigned int cpu)
{
	return cpu_online(cpu) ? 0 : -ENOSYS;
}

#ifdef ENTRY_SYS_CPUS
/* Code goes along with:
**    entry.s:        ENTRY_NAME(sys_cpus)   / * 215, for cpu stat * /
**
** Usage: "sys_cpus" lists the online CPU numbers, "sys_cpus -l" dumps
** one line of state per CPU, and "sys_cpus -s" prints just the states.
*/
int sys_cpus(int argc, char **argv)
{
	int i, j = 0;
	extern int current_pid(int cpu);

	if (argc > 2) {
		printk("sys_cpus:Only one argument supported\n");
		return (-1);
	}
	if (argc == 1) {
#ifdef DUMP_MORE_STATE
		for(i=0; i<NR_CPUS; i++) {
			int cpus_per_line = 4;
			if(IS_LOGGED_IN(i)) {
				if (j++ % cpus_per_line)
					printk(" %3d", i);
				else
					printk("\n %3d", i);
			}
		}
		printk("\n");
#else
		printk("\n 0\n");
#endif
		return (0);
	} else if((argc==2) && !(strcmp(argv[1],"-l"))) {
		printk("\nCPUSTATE   TASK CPUNUM CPUID HARDCPU(HPA)\n");
#ifdef DUMP_MORE_STATE
		for(i=0;i<NR_CPUS;i++) {
			if (!IS_LOGGED_IN(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch(cpu_data[i].state) {
				case STATE_RENDEZVOUS:
					printk("RENDEZVS ");
					break;
				case STATE_RUNNING:
					printk((current_pid(i)!=0) ? "RUNNING  " : "IDLING   ");
					break;
				case STATE_STOPPED:
					printk("STOPPED  ");
					break;
				case STATE_HALTED:
					printk("HALTED   ");
					break;
				default:
					printk("%08x?", cpu_data[i].state);
					break;
				}
				if(IS_LOGGED_IN(i)) {
					printk(" %4d", current_pid(i));
				}
				printk(" %6d", cpu_number_map(i));
				printk(" %5d", i);
				printk(" 0x%lx\n", cpu_data[i].hpa);
			}
		}
#else
		printk("\n%s  %4d      0     0 --------",
			(current->pid)?"RUNNING ": "IDLING  ", current->pid);
#endif
	} else if ((argc==2) && !(strcmp(argv[1],"-s"))) {
#ifdef DUMP_MORE_STATE
		printk("\nCPUSTATE   CPUID\n");
		for (i=0;i<NR_CPUS;i++) {
			if (!IS_LOGGED_IN(i))
				continue;
			if (cpu_data[i].cpuid != NO_PROC_ID) {
				switch(cpu_data[i].state) {
				case STATE_RENDEZVOUS:
					printk("RENDEZVS");break;
				case STATE_RUNNING:
					printk((current_pid(i)!=0) ? "RUNNING " : "IDLING");
					break;
				case STATE_STOPPED:
					printk("STOPPED ");break;
				case STATE_HALTED:
					printk("HALTED ");break;
				default:
					break;
				}
				printk("  %5d\n", i);
			}
		}
#else
		printk("\n%s    CPU0",(current->pid==0)?"RUNNING ":"IDLING  ");
#endif
	} else {
		printk("sys_cpus:Unknown request\n");
		return (-1);
	}
	return 0;
}
#endif /* ENTRY_SYS_CPUS */

#ifdef CONFIG_PROC_FS
int __init
setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
#endif