Merge tag 'for-linus' of git://github.com/rustyrussell/linux

diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 854ce33..addbbe8 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -31,6 +31,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/exception.h>
+#include <asm/idmap.h>
 #include <asm/topology.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
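
Note on the new include: <asm/idmap.h> exports idmap_pgd, an identity-mapped set of page tables built once at boot, which is what lets __cpu_up() below stop allocating and tearing down a private pgd on every hotplug. A rough sketch of the provider side, assuming the arch/arm/mm/idmap.c arrangement this series introduces (hedged, not part of this hunk):

        /* arch/arm/mm/idmap.c (sketch): build the static identity map once */
        pgd_t *idmap_pgd;

        static int __init init_static_idmap(void)
        {
                idmap_pgd = pgd_alloc(&init_mm);
                if (!idmap_pgd)
                        return -ENOMEM;

                /* 1:1 map the text a secondary executes with the MMU off */
                identity_mapping_add(idmap_pgd, __pa(_stext), __pa(_etext));
                return 0;
        }
        early_initcall(init_static_idmap);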
@@ -57,11 +58,12 @@ enum ipi_msg_type {
        IPI_CPU_STOP,
 };
 
+static DECLARE_COMPLETION(cpu_running);
+
 int __cpuinit __cpu_up(unsigned int cpu)
 {
        struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
        struct task_struct *idle = ci->idle;
-       pgd_t *pgd;
        int ret;
 
        /*
@@ -84,29 +86,11 @@ int __cpuinit __cpu_up(unsigned int cpu)
        }
 
        /*
-        * Allocate initial page tables to allow the new CPU to
-        * enable the MMU safely.  This essentially means a set
-        * of our "standard" page tables, with the addition of
-        * a 1:1 mapping for the physical address of the kernel.
-        */
-       pgd = pgd_alloc(&init_mm);
-       if (!pgd)
-               return -ENOMEM;
-
-       if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-               identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-               identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
-               identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
-       }
-
-       /*
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
        secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
-       secondary_data.pgdir = virt_to_phys(pgd);
+       secondary_data.pgdir = virt_to_phys(idmap_pgd);
        secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
        __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
        outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
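
The handoff above only works because the incoming CPU reads secondary_data before its MMU and caches are enabled, hence the explicit D-cache flush and outer-cache clean. Judging from the assignments here, the structure in <asm/smp.h> presumably now looks like:

        struct secondary_data {
                unsigned long pgdir;            /* phys addr of idmap_pgd */
                unsigned long swapper_pg_dir;   /* phys addr of the kernel tables */
                void *stack;                    /* idle thread stack for the new CPU */
        };
        extern struct secondary_data secondary_data;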
@@ -116,20 +100,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
         */
        ret = boot_secondary(cpu, idle);
        if (ret == 0) {
-               unsigned long timeout;
-
                /*
                 * CPU was successfully started, wait for it
                 * to come online or time out.
                 */
-               timeout = jiffies + HZ;
-               while (time_before(jiffies, timeout)) {
-                       if (cpu_online(cpu))
-                               break;
-
-                       udelay(10);
-                       barrier();
-               }
+               wait_for_completion_timeout(&cpu_running,
+                                           msecs_to_jiffies(1000));
 
                if (!cpu_online(cpu)) {
                        pr_crit("CPU%u: failed to come online\n", cpu);
@@ -142,16 +118,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
        secondary_data.stack = NULL;
        secondary_data.pgdir = 0;
 
-       if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-               identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-               identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
-               identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
-       }
-
-       pgd_free(&init_mm, pgd);
-
        return ret;
 }
 
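The jiffies polling loop is replaced by the cpu_running completion declared above and signalled from secondary_start_kernel() further down. A minimal standalone sketch of the pattern (hypothetical names):

        static DECLARE_COMPLETION(ready);

        /* waiter: returns 0 on timeout, otherwise the jiffies remaining */
        if (!wait_for_completion_timeout(&ready, msecs_to_jiffies(1000)))
                pr_warn("peer never signalled\n");

        /* signaller, running on another CPU */
        complete(&ready);

Re-checking cpu_online() after the wait stays in place deliberately: it is the authoritative test whether the wait ended by completion or by timeout.
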
@@ -261,20 +227,6 @@ void __ref cpu_die(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-int __cpu_logical_map[NR_CPUS];
-
-void __init smp_setup_processor_id(void)
-{
-       int i;
-       u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
-
-       cpu_logical_map(0) = cpu;
-       for (i = 1; i < NR_CPUS; ++i)
-               cpu_logical_map(i) = i == cpu ? 0 : i;
-
-       printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
-}
-
 /*
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
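
__cpu_logical_map[] and smp_setup_processor_id() are only leaving this file, not the kernel: in the merged tree they live in arch/arm/kernel/setup.c, presumably so the logical map exists on !SMP builds as well. The invariant they establish is unchanged, roughly:

        /* on SMP, cpu_logical_map(0) is the booting CPU's MPIDR-derived id,
         * and the map stays a permutation of 0..NR_CPUS-1; illustration only: */
        BUG_ON(cpu_logical_map(0) != (read_cpuid_mpidr() & 0xff));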
@@ -288,6 +240,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
        store_cpu_topology(cpuid);
 }
 
+static void percpu_timer_setup(void);
+
 /*
  * This is the secondary CPU boot entry.  We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
@@ -328,22 +282,16 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        /*
         * OK, now it's safe to let the boot CPU continue.  Wait for
         * the CPU migration code to notice that the CPU is online
-        * before we continue.
+        * before we continue - which happens after __cpu_up returns.
         */
        set_cpu_online(cpu, true);
+       complete(&cpu_running);
 
        /*
         * Setup the percpu timer for this CPU.
         */
        percpu_timer_setup();
 
-       while (!cpu_active(cpu))
-               cpu_relax();
-
-       /*
-        * cpu_active bit is set, so it's safe to enalbe interrupts
-        * now.
-        */
        local_irq_enable();
        local_fiq_enable();
 
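Ordering note on the pairing above: the boot CPU must observe set_cpu_online() once the completion fires, and complete() gives that guarantee, since it takes the completion's wait-queue lock before waking the waiter. Side by side:

        /* secondary CPU                     boot CPU, in __cpu_up()
         *
         * set_cpu_online(cpu, true);        wait_for_completion_timeout(
         * complete(&cpu_running);               &cpu_running, ...);
         *                                   if (!cpu_online(cpu)) -> failed
         */

The old busy-wait on cpu_active() is dropped along with its (typo-ridden) comment: as the updated comment above notes, migration only notices the new CPU after __cpu_up() returns, so there is nothing useful to spin on before enabling interrupts here.
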
@@ -401,7 +349,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                 * re-initialize the map in platform_smp_prepare_cpus() if
                 * present != possible (e.g. physical hotplug).
                 */
-               init_cpu_present(&cpu_possible_map);
+               init_cpu_present(cpu_possible_mask);
 
                /*
                 * Initialise the SCU if there are more than one CPU
@@ -460,10 +408,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
        for (i = 0; i < NR_IPI; i++)
                sum += __get_irq_stat(cpu, ipi_irqs[i]);
 
-#ifdef CONFIG_LOCAL_TIMERS
-       sum += __get_irq_stat(cpu, local_timer_irqs);
-#endif
-
        return sum;
 }
 
@@ -475,42 +419,8 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 static void ipi_timer(void)
 {
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-       irq_enter();
        evt->event_handler(evt);
-       irq_exit();
-}
-
-#ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
-{
-       handle_local_timer(regs);
-}
-
-void handle_local_timer(struct pt_regs *regs)
-{
-       struct pt_regs *old_regs = set_irq_regs(regs);
-       int cpu = smp_processor_id();
-
-       if (local_timer_ack()) {
-               __inc_irq_stat(cpu, local_timer_irqs);
-               ipi_timer();
-       }
-
-       set_irq_regs(old_regs);
-}
-
-void show_local_irqs(struct seq_file *p, int prec)
-{
-       unsigned int cpu;
-
-       seq_printf(p, "%*s: ", prec, "LOC");
-
-       for_each_present_cpu(cpu)
-               seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
-
-       seq_printf(p, " Local timer interrupts\n");
 }
-#endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
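
With do_local_timer(), handle_local_timer() and the LOC stat dump removed, a CPU-local timer is no longer funnelled through SMP glue at all: its driver is expected to own a per-CPU interrupt and call the clockevent handler itself, while ipi_timer() keeps servicing only the broadcast tick (its irq_enter()/irq_exit() moves into handle_IPI(), below). A hypothetical driver-side sketch, assuming the request_percpu_irq() interface available in this kernel:

        static irqreturn_t my_lt_interrupt(int irq, void *dev_id)
        {
                struct clock_event_device *evt = dev_id; /* this CPU's instance */

                evt->event_handler(evt);
                return IRQ_HANDLED;
        }

        /* at init, with my_lt_evt a struct clock_event_device __percpu *: */
        err = request_percpu_irq(ppi, my_lt_interrupt, "my_lt", my_lt_evt);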
@@ -539,7 +449,20 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
        clockevents_register_device(evt);
 }
 
-void __cpuinit percpu_timer_setup(void)
+static struct local_timer_ops *lt_ops;
+
+#ifdef CONFIG_LOCAL_TIMERS
+int local_timer_register(struct local_timer_ops *ops)
+{
+       if (lt_ops)
+               return -EBUSY;
+
+       lt_ops = ops;
+       return 0;
+}
+#endif
+
+static void __cpuinit percpu_timer_setup(void)
 {
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
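
local_timer_register() replaces the old hard-wired local_timer_setup() hook: exactly one driver may hand the core its ops, and any later caller gets -EBUSY. A hedged driver-side sketch (names hypothetical; the ops are assumed to carry the .setup/.stop methods used below):

        static struct local_timer_ops my_lt_ops __cpuinitdata = {
                .setup  = my_lt_setup, /* int (*)(struct clock_event_device *) */
                .stop   = my_lt_stop,  /* void (*)(struct clock_event_device *) */
        };

        static int __init my_lt_init(void)
        {
                return local_timer_register(&my_lt_ops); /* -EBUSY if taken */
        }

Note the CONFIG_LOCAL_TIMERS=n case: lt_ops simply stays NULL, so percpu_timer_setup() below falls back to the broadcast timer unconditionally.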
@@ -547,7 +470,7 @@ void __cpuinit percpu_timer_setup(void)
        evt->cpumask = cpumask_of(cpu);
        evt->broadcast = smp_timer_broadcast;
 
-       if (local_timer_setup(evt))
+       if (!lt_ops || lt_ops->setup(evt))
                broadcast_timer_setup(evt);
 }
 
@@ -562,11 +485,12 @@ static void percpu_timer_stop(void)
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
-       evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+       if (lt_ops)
+               lt_ops->stop(evt);
 }
 #endif
 
-static DEFINE_SPINLOCK(stop_lock);
+static DEFINE_RAW_SPINLOCK(stop_lock);
 
 /*
  * ipi_cpu_stop - handle IPI from smp_send_stop()
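
stop_lock becomes a raw spinlock because ipi_cpu_stop() below takes it from IPI context with interrupts hard-disabled; under PREEMPT_RT a plain spinlock_t turns into a sleeping lock, which would be illegal there, while raw_spinlock_t always spins. Illustrative contrast (not from this file):

        static DEFINE_SPINLOCK(sleeps_on_rt);      /* an rt_mutex under PREEMPT_RT */
        static DEFINE_RAW_SPINLOCK(always_spins);  /* a true spinlock everywhere */

        raw_spin_lock(&always_spins);
        /* critical section: legal even with IRQs hard-disabled on RT */
        raw_spin_unlock(&always_spins);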
@@ -575,10 +499,10 @@ static void ipi_cpu_stop(unsigned int cpu)
 {
        if (system_state == SYSTEM_BOOTING ||
            system_state == SYSTEM_RUNNING) {
-               spin_lock(&stop_lock);
+               raw_spin_lock(&stop_lock);
                printk(KERN_CRIT "CPU%u: stopping\n", cpu);
                dump_stack();
-               spin_unlock(&stop_lock);
+               raw_spin_unlock(&stop_lock);
        }
 
        set_cpu_online(cpu, false);
@@ -586,6 +510,10 @@ static void ipi_cpu_stop(unsigned int cpu)
        local_fiq_disable();
        local_irq_disable();
 
+#ifdef CONFIG_HOTPLUG_CPU
+       platform_cpu_kill(cpu);
+#endif
+
        while (1)
                cpu_relax();
 }
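
The new platform_cpu_kill() call lets the platform actually power down a stopped core (useful for reboot/kexec paths) rather than leaving it spinning in cpu_relax() forever. A trivial stub of the hook, purely illustrative; real implementations poke a power controller:

        int platform_cpu_kill(unsigned int cpu)
        {
                return 1;       /* report the CPU as successfully killed */
        }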
@@ -608,7 +536,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
        switch (ipinr) {
        case IPI_TIMER:
+               irq_enter();
                ipi_timer();
+               irq_exit();
                break;
 
        case IPI_RESCHEDULE:
@@ -616,15 +546,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                break;
 
        case IPI_CALL_FUNC:
+               irq_enter();
                generic_smp_call_function_interrupt();
+               irq_exit();
                break;
 
        case IPI_CALL_FUNC_SINGLE:
+               irq_enter();
                generic_smp_call_function_single_interrupt();
+               irq_exit();
                break;
 
        case IPI_CPU_STOP:
+               irq_enter();
                ipi_cpu_stop(cpu);
+               irq_exit();
                break;
 
        default:
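
Hoisting irq_enter()/irq_exit() out of the helpers and into each handle_IPI() case means every IPI that does real work accounts itself as hard-IRQ context, so softirq processing, RCU and time accounting all see the correct nesting. IPI_RESCHEDULE is deliberately left unwrapped: in this kernel it ends in scheduler_ipi(), which is written to run bare and do its own accounting only when it actually has work. Each wrapped case reduces to the same shape:

        case IPI_SOMETHING:             /* any IPI doing real work */
                irq_enter();
                do_the_work();          /* hypothetical handler */
                irq_exit();
                break;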
@@ -645,8 +581,9 @@ void smp_send_stop(void)
        unsigned long timeout;
 
        if (num_online_cpus() > 1) {
-               cpumask_t mask = cpu_online_map;
-               cpu_clear(smp_processor_id(), mask);
+               struct cpumask mask;
+               cpumask_copy(&mask, cpu_online_mask);
+               cpumask_clear_cpu(smp_processor_id(), &mask);
 
                smp_cross_call(&mask, IPI_CPU_STOP);
        }
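
The last hunk is cpumask API modernization: cpu_online_map and cpu_clear() are the deprecated fixed-size helpers, replaced by copying cpu_online_mask and clearing the calling CPU. Equivalent sketch, with the heap-allocated variant that would be preferred for very large NR_CPUS (though not in an atomic stop path like this one, since it can sleep):

        struct cpumask mask;

        cpumask_copy(&mask, cpu_online_mask);          /* snapshot online CPUs */
        cpumask_clear_cpu(smp_processor_id(), &mask);  /* everyone but us */

        /* large-NR_CPUS alternative, for contrast only: */
        cpumask_var_t vmask;
        if (alloc_cpumask_var(&vmask, GFP_KERNEL)) {
                cpumask_copy(vmask, cpu_online_mask);
                cpumask_clear_cpu(smp_processor_id(), vmask);
                free_cpumask_var(vmask);
        }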