/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
#include <asm/smp_plat.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
 * so we need some other way of telling a new secondary core
 * where to place its SVC stack
 */
struct secondary_data secondary_data;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
	spinlock_t lock;
	unsigned long ipi_count;
	unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
	.lock	= SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};
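
/*
 * Build a 1:1 (virtual == physical) section mapping for [start, end)
 * in the given page directory, so a secondary CPU can turn on its MMU
 * while still executing from physical addresses.  Each pgd entry
 * covers two 1MB sections, hence the pmd[0]/pmd[1] pair below.
 */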
static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
	unsigned long end)
{
	unsigned long addr, prot;
	pmd_t *pmd;

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		prot |= PMD_BIT4;

	for (addr = start & PGDIR_MASK; addr < end;) {
		pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		pmd[1] = __pmd(addr | prot);
		addr += SECTION_SIZE;
		flush_pmd_entry(pmd);
		outer_clean_range(__pa(pmd), __pa(pmd + 1));
	}
}

static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
	unsigned long end)
{
	unsigned long addr;
	pmd_t *pmd;

	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
		pmd = pmd_offset(pgd + pgd_index(addr), addr);
		pmd[0] = __pmd(0);
		pmd[1] = __pmd(0);
		clean_pmd_entry(pmd);
		outer_clean_range(__pa(pmd), __pa(pmd + 1));
	}
}
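
/*
 * Bring up secondary CPU 'cpu': called on the boot CPU from the
 * generic CPU bring-up code.
 */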
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
	struct task_struct *idle = ci->idle;
	pgd_t *pgd;
	int ret;

	/*
	 * Spawn a new process manually, if not already done.
	 * Grab a pointer to its task struct so we can mess with it
	 */
	if (!idle) {
		idle = fork_idle(cpu);
		if (IS_ERR(idle)) {
			printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
			return PTR_ERR(idle);
		}
		ci->idle = idle;
	} else {
		/*
		 * Since this idle thread is being re-used, call
		 * init_idle() to reinitialize the thread structure.
		 */
		init_idle(idle, cpu);
	}

	/*
	 * Allocate initial page tables to allow the new CPU to
	 * enable the MMU safely.  This essentially means a set
	 * of our "standard" page tables, with the addition of
	 * a 1:1 mapping for the physical address of the kernel.
	 */
	pgd = pgd_alloc(&init_mm);
	if (!pgd)
		return -ENOMEM;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
	}

	/*
	 * We need to tell the secondary core where to find
	 * its stack and the page tables.
	 */
	secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
	secondary_data.pgdir = virt_to_phys(pgd);
	__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
	outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));

	/*
	 * Now bring the CPU into our world.
	 */
	ret = boot_secondary(cpu, idle);
	if (ret == 0) {
		unsigned long timeout;

		/*
		 * CPU was successfully started, wait for it
		 * to come online or time out.
		 */
		timeout = jiffies + HZ;
		while (time_before(jiffies, timeout)) {
			if (cpu_online(cpu))
				break;

			udelay(10);
			barrier();
		}

		if (!cpu_online(cpu))
			ret = -EIO;
	}

	secondary_data.stack = NULL;
	secondary_data.pgdir = 0;

	if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
		identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
		identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
		identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
	}

	pgd_free(&init_mm, pgd);

	if (ret) {
		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);

		/*
		 * FIXME: We need to clean up the new idle thread. --rmk
		 */
	}

	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * __cpu_disable runs on the processor to be shutdown.
 */
int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();
	struct task_struct *p;
	int ret;

	ret = platform_cpu_disable(cpu);
	if (ret)
		return ret;

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/*
	 * OK - migrate IRQs away from this CPU
	 */
	migrate_irqs();

	/*
	 * Stop the local timer for this CPU.
	 */
	local_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
	 * from the vm mask set of all processes.
	 */
	flush_cache_all();
	local_flush_tlb_all();

	read_lock(&tasklist_lock);
	for_each_process(p) {
		if (p->mm)
			cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
	}
	read_unlock(&tasklist_lock);

	return 0;
}

/*
 * called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 */
void __cpu_die(unsigned int cpu)
{
	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

	local_irq_disable();
	idle_task_exit();

	/*
	 * actual CPU shutdown procedure is at least platform (if not
	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

	/*
	 * Do not return to the idle loop - jump back to the secondary
	 * cpu initialisation.  There's some initialisation which needs
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	cpu_switch_mm(mm->pgd, mm);
	enter_lazy_tlb(mm, current);
	local_flush_tlb_all();

	cpu_init();
	preempt_disable();

	/*
	 * Give the platform a chance to do its own initialisation.
	 */
	platform_secondary_init(cpu);

	/*
	 * Enable local interrupts.
	 */
	notify_cpu_starting(cpu);
	local_irq_enable();
	local_fiq_enable();

	/*
	 * Setup the percpu timer for this CPU.
	 */
	percpu_timer_setup();

	calibrate_delay();

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_idle();
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

	cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
	unsigned long bogosum = 0;

	for_each_online_cpu(cpu)
		bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
	unsigned int cpu = smp_processor_id();

	per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
{
	unsigned long flags;
	unsigned int cpu;

	local_irq_save(flags);

	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

		spin_lock(&ipi->lock);
		ipi->bits |= 1 << msg;
		spin_unlock(&ipi->lock);
	}

	/*
	 * Call the platform specific cross-CPU call function.
	 */
	smp_cross_call(mask);

	local_irq_restore(flags);
}
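
/*
 * Entry points used by the generic smp_call_function infrastructure
 * to raise a function-call IPI on remote CPUs.
 */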
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
{
	unsigned int cpu;

	seq_puts(p, "IPI:");

	for_each_present_cpu(cpu)
		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

	seq_putc(p, '\n');
}

void show_local_irqs(struct seq_file *p)
{
	unsigned int cpu;

	seq_printf(p, "LOC: ");

	for_each_present_cpu(cpu)
		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);

	seq_putc(p, '\n');
}

/*
 * Timer (local or broadcast) support
 */
static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);

static void ipi_timer(void)
{
	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
	evt->event_handler(evt);
	irq_exit();
}
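
/*
 * Entered (via the low-level IRQ glue) when the per-CPU local timer
 * fires; local_timer_ack() is the platform's way of acknowledging,
 * and filtering spurious instances of, the interrupt.
 */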
#ifdef CONFIG_LOCAL_TIMERS
asmlinkage void __exception do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
		irq_stat[cpu].local_timer_irqs++;
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
	send_ipi_message(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
#endif
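
/*
 * Without per-CPU local timers, each CPU registers a dummy clockevent
 * device instead; tick events then arrive via the IPI_TIMER broadcast
 * from the CPU that owns the global timer.
 */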
#ifndef CONFIG_LOCAL_TIMERS
static void broadcast_timer_set_mode(enum clock_event_mode mode,
	struct clock_event_device *evt)
{
}

static void local_timer_setup(struct clock_event_device *evt)
{
	evt->name	= "dummy_timer";
	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
			  CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_DUMMY;
	evt->rating	= 400;
	evt->mult	= 1;
	evt->set_mode	= broadcast_timer_set_mode;

	clockevents_register_device(evt);
}
#endif

void __cpuinit percpu_timer_setup(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->cpumask = cpumask_of(cpu);
	evt->broadcast = smp_timer_broadcast;

	local_timer_setup(evt);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
	if (system_state == SYSTEM_BOOTING ||
	    system_state == SYSTEM_RUNNING) {
		spin_lock(&stop_lock);
		printk(KERN_CRIT "CPU%u: stopping\n", cpu);
		dump_stack();
		spin_unlock(&stop_lock);
	}

	set_cpu_online(cpu, false);

	local_fiq_disable();
	local_irq_disable();

	while (1)
		cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
asmlinkage void __exception do_IPI(struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

	ipi->ipi_count++;

	for (;;) {
		unsigned long msgs;

		spin_lock(&ipi->lock);
		msgs = ipi->bits;
		ipi->bits = 0;
		spin_unlock(&ipi->lock);

		if (!msgs)
			break;

		do {
			unsigned nextmsg;

			/* isolate the lowest set message bit ... */
			nextmsg = msgs & -msgs;
			msgs &= ~nextmsg;
			/* ... and convert it to its bit index */
			nextmsg = ffz(~nextmsg);

			switch (nextmsg) {
			case IPI_TIMER:
				ipi_timer();
				break;

			case IPI_RESCHEDULE:
				/*
				 * nothing more to do - everything is
				 * done on the interrupt return path
				 */
				break;

			case IPI_CALL_FUNC:
				generic_smp_call_function_interrupt();
				break;

			case IPI_CALL_FUNC_SINGLE:
				generic_smp_call_function_single_interrupt();
				break;

			case IPI_CPU_STOP:
				ipi_cpu_stop(cpu);
				break;

			default:
				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
				       cpu, nextmsg);
				break;
			}
		} while (msgs);
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
	if (!cpus_empty(mask))
		send_ipi_message(&mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
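
/*
 * Run func(info) on every CPU in mask.  smp_call_function_many()
 * deliberately skips the calling CPU, so when the current CPU is
 * part of the mask the function is invoked directly as well.
 */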
static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
		const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};
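
/*
 * The ipi_flush_tlb_* helpers below are the remote halves of the
 * flush_tlb_* operations: each one runs on a target CPU (via a
 * function-call IPI) and performs the corresponding local TLB flush.
 */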
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}
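
/*
 * tlb_ops_need_broadcast() is true on cores whose TLB maintenance
 * operations are not broadcast in hardware; there each flush must be
 * pushed to every CPU by IPI, otherwise the local operation suffices.
 */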
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}