Update to 3.4-final.
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 7e3f918..2f194e9 100644
 #include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/stop_machine.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 /*
  * Structure to determine completion condition and record errors.  May
@@ -35,12 +35,13 @@ struct cpu_stop_done {
 /* the actual stopper, one per every possible cpu, enabled on online cpus */
 struct cpu_stopper {
        spinlock_t              lock;
+       bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */
        struct task_struct      *thread;        /* stopper thread */
-       bool                    enabled;        /* is this stopper enabled? */
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
+static bool stop_machine_initialized = false;
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
 {
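
Two details in this hunk: the new stop_machine_initialized flag gates an early-boot fallback added to __stop_machine() further down, and hoisting enabled up next to lock is presumably a structure-packing tweak, letting the bool occupy the padding after the spinlock instead of opening a hole before works. A sketch of the presumed layout on a typical 64-bit build (not verified):

    /*
     *   spinlock_t       lock;     offset 0, 4 bytes
     *   bool             enabled;  offset 4, fills the pad
     *   struct list_head works;    offset 8
     */
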
@@ -136,10 +137,11 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 static DEFINE_MUTEX(stop_cpus_mutex);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
-int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static void queue_stop_cpus_work(const struct cpumask *cpumask,
+                                cpu_stop_fn_t fn, void *arg,
+                                struct cpu_stop_done *done)
 {
        struct cpu_stop_work *work;
-       struct cpu_stop_done done;
        unsigned int cpu;
 
        /* initialize works and done */
@@ -147,9 +149,8 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
                work = &per_cpu(stop_cpus_work, cpu);
                work->fn = fn;
                work->arg = arg;
-               work->done = &done;
+               work->done = done;
        }
-       cpu_stop_init_done(&done, cpumask_weight(cpumask));
 
        /*
         * Disable preemption while queueing to avoid getting
@@ -161,7 +162,15 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
                cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
                                    &per_cpu(stop_cpus_work, cpu));
        preempt_enable();
+}
+
+static int __stop_cpus(const struct cpumask *cpumask,
+                      cpu_stop_fn_t fn, void *arg)
+{
+       struct cpu_stop_done done;
 
+       cpu_stop_init_done(&done, cpumask_weight(cpumask));
+       queue_stop_cpus_work(cpumask, fn, arg, &done);
        wait_for_completion(&done.completion);
        return done.executed ? done.ret : -ENOENT;
 }
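
The hunk above splits the old __stop_cpus() in two: queue_stop_cpus_work(), which only queues work and never sleeps, and a thin __stop_cpus() wrapper that sets up the completion token and waits. That factoring is what lets stop_machine_from_inactive_cpu() further down reuse the queueing path while busy-waiting instead of sleeping. A minimal caller sketch of the public API (the callback name and caller are hypothetical, not from this patch):

    /* Hypothetical cpu_stop callback: runs in the target CPU's stopper
     * thread with preemption disabled, and must not sleep. */
    static int flush_local_state(void *arg)
    {
            pr_info("cpu_stop callback on cpu %d\n", smp_processor_id());
            return 0;
    }

    static void example_caller(void)
    {
            /* Queues the work on CPU 1 and waits for it to finish there. */
            int err = stop_one_cpu(1, flush_local_state, NULL);

            if (err)
                    pr_warn("stop_one_cpu failed: %d\n", err);
    }
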
@@ -262,7 +271,7 @@ repeat:
                cpu_stop_fn_t fn = work->fn;
                void *arg = work->arg;
                struct cpu_stop_done *done = work->done;
-               char ksym_buf[KSYM_NAME_LEN];
+               char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
                __set_current_state(TASK_RUNNING);
 
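
The __maybe_unused annotation quiets a compiler warning: ksym_buf is only consumed by a kallsyms_lookup() call inside a WARN_ONCE() in the lines elided here, and on configurations where that macro expands to nothing (for instance with CONFIG_BUG disabled) the buffer would otherwise be flagged as unused. Only the declaration changes; the pattern in isolation:

    char ksym_buf[KSYM_NAME_LEN] __maybe_unused;  /* dead on some configs */
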
@@ -287,31 +296,33 @@ repeat:
        goto repeat;
 }
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
+
 /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
 static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
                                           unsigned long action, void *hcpu)
 {
-       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-       struct cpu_stop_work *work;
        struct task_struct *p;
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                BUG_ON(stopper->thread || stopper->enabled ||
                       !list_empty(&stopper->works));
-               p = kthread_create(cpu_stopper_thread, stopper, "stopper/%d",
-                                  cpu);
+               p = kthread_create_on_node(cpu_stopper_thread,
+                                          stopper,
+                                          cpu_to_node(cpu),
+                                          "migration/%d", cpu);
                if (IS_ERR(p))
-                       return NOTIFY_BAD;
-               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+                       return notifier_from_errno(PTR_ERR(p));
                get_task_struct(p);
+               kthread_bind(p, cpu);
+               sched_set_stop_task(cpu, p);
                stopper->thread = p;
                break;
 
        case CPU_ONLINE:
-               kthread_bind(stopper->thread, cpu);
                /* strictly unnecessary, as first user will wake it */
                wake_up_process(stopper->thread);
                /* mark enabled */
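
Three related changes land in this hunk. The thread is now created NUMA-aware on the CPU's home node; it is bound to its CPU already at CPU_UP_PREPARE (binding used to wait for CPU_ONLINE, as the removed line above shows); and sched_set_stop_task() hands it to the scheduler's dedicated stop class, which runs above even SCHED_FIFO, replacing the old MAX_RT_PRIO - 1 setscheduler call. The "migration/%d" name reflects that this thread takes over the old migration thread's duties. The generic create-on-node-then-bind pattern, as a sketch with hypothetical names:

    /* Sketch (worker_fn/worker_data are placeholders): create a per-cpu
     * kthread on the memory node backing @cpu, then pin it.  The bind
     * must happen before the thread is first woken. */
    struct task_struct *p;

    p = kthread_create_on_node(worker_fn, worker_data,
                               cpu_to_node(cpu), "worker/%d", cpu);
    if (IS_ERR(p))
            return notifier_from_errno(PTR_ERR(p));
    kthread_bind(p, cpu);
    wake_up_process(p);
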
@@ -322,7 +333,11 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
 
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
-       case CPU_DEAD:
+       case CPU_POST_DEAD:
+       {
+               struct cpu_stop_work *work;
+
+               sched_set_stop_task(cpu, NULL);
                /* kill the stopper */
                kthread_stop(stopper->thread);
                /* drain remaining works */
@@ -335,6 +350,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
                put_task_struct(stopper->thread);
                stopper->thread = NULL;
                break;
+       }
 #endif
        }
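
Teardown moves from CPU_DEAD to CPU_POST_DEAD: since this thread is now also the migration thread, it has to survive the takedown itself (it pushes tasks off the dying CPU), so it can only be stopped once the hotplug operation has fully completed. The "drain remaining works" body elided between the two hunks above completes any still-queued work with executed == false, which is what makes waiters in __stop_cpus() return -ENOENT; roughly, as a sketch of the surrounding upstream code:

    spin_lock_irqsave(&stopper->lock, flags);
    list_for_each_entry(work, &stopper->works, list)
            cpu_stop_signal_done(work->done, false); /* waiter sees -ENOENT */
    stopper->enabled = false;
    spin_unlock_irqrestore(&stopper->lock, flags);
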
 
@@ -367,14 +383,18 @@ static int __init cpu_stop_init(void)
        /* start one for the boot cpu */
        err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
                                    bcpu);
-       BUG_ON(err == NOTIFY_BAD);
+       BUG_ON(err != NOTIFY_OK);
        cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
        register_cpu_notifier(&cpu_stop_cpu_notifier);
 
+       stop_machine_initialized = true;
+
        return 0;
 }
 early_initcall(cpu_stop_init);
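
Error propagation from the boot-time callback changes shape here as well: notifier_from_errno() encodes the errno into the notifier return value rather than collapsing it to a bare NOTIFY_BAD, which is why the check must become err != NOTIFY_OK; an encoded errno is not literally NOTIFY_BAD, so the old err == NOTIFY_BAD test would miss it. The helpers pair up like this:

    /* In the notifier callback: */
    return notifier_from_errno(-ENOMEM);  /* NOTIFY_STOP_MASK | encoded errno */

    /* In a caller that wants the errno back: */
    int err = notifier_to_errno(ret);     /* 0, or the original negative errno */
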
 
+#ifdef CONFIG_STOP_MACHINE
+
 /* This controls the threads on each CPU. */
 enum stopmachine_state {
        /* Dummy starting state for thread. */
@@ -388,174 +408,173 @@ enum stopmachine_state {
        /* Exit */
        STOPMACHINE_EXIT,
 };
-static enum stopmachine_state state;
 
 struct stop_machine_data {
-       int (*fn)(void *);
-       void *data;
-       int fnret;
+       int                     (*fn)(void *);
+       void                    *data;
+       /* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+       unsigned int            num_threads;
+       const struct cpumask    *active_cpus;
+
+       enum stopmachine_state  state;
+       atomic_t                thread_ack;
 };
 
-/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
-static unsigned int num_threads;
-static atomic_t thread_ack;
-static DEFINE_MUTEX(lock);
-/* setup_lock protects refcount, stop_machine_wq and stop_machine_work. */
-static DEFINE_MUTEX(setup_lock);
-/* Users of stop_machine. */
-static int refcount;
-static struct workqueue_struct *stop_machine_wq;
-static struct stop_machine_data active, idle;
-static const struct cpumask *active_cpus;
-static void __percpu *stop_machine_work;
-
-static void set_state(enum stopmachine_state newstate)
+static void set_state(struct stop_machine_data *smdata,
+                     enum stopmachine_state newstate)
 {
        /* Reset ack counter. */
-       atomic_set(&thread_ack, num_threads);
+       atomic_set(&smdata->thread_ack, smdata->num_threads);
        smp_wmb();
-       state = newstate;
+       smdata->state = newstate;
 }
 
 /* Last one to ack a state moves to the next state. */
-static void ack_state(void)
+static void ack_state(struct stop_machine_data *smdata)
 {
-       if (atomic_dec_and_test(&thread_ack))
-               set_state(state + 1);
+       if (atomic_dec_and_test(&smdata->thread_ack))
+               set_state(smdata, smdata->state + 1);
 }
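
The state machine itself is unchanged; it just moves from file-scope globals (and the old DEFINE_MUTEX(lock) removed below) into a per-invocation stop_machine_data that is passed to every CPU, so nothing global needs resetting between runs. A worked example of the ack protocol with four online CPUs: set_state() resets thread_ack to 4 and only then, past the smp_wmb(), publishes the new state; each CPU that observes the transition handles it once and calls ack_state(); the fourth decrement reaches zero and that CPU advances the state. The run is a lockstep walk through:

    /*
     *   PREPARE -> DISABLE_IRQ -> RUN -> EXIT
     *
     * No CPU can start handling state N+1 until all num_threads CPUs
     * have acked state N.  The smp_wmb() in set_state() orders the
     * thread_ack reset before the state publish, so a CPU that sees
     * the new state is guaranteed to see the refreshed counter too.
     */
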
 
-/* This is the actual function which stops the CPU. It runs
- * in the context of a dedicated stopmachine workqueue. */
-static void stop_cpu(struct work_struct *unused)
+/* This is the cpu_stop function which stops the CPU. */
+static int stop_machine_cpu_stop(void *data)
 {
+       struct stop_machine_data *smdata = data;
        enum stopmachine_state curstate = STOPMACHINE_NONE;
-       struct stop_machine_data *smdata = &idle;
-       int cpu = smp_processor_id();
-       int err;
+       int cpu = smp_processor_id(), err = 0;
+       unsigned long flags;
+       bool is_active;
+
+       /*
+        * When called from stop_machine_from_inactive_cpu(), irq might
+        * already be disabled.  Save the state and restore it on exit.
+        */
+       local_save_flags(flags);
+
+       if (!smdata->active_cpus)
+               is_active = cpu == cpumask_first(cpu_online_mask);
+       else
+               is_active = cpumask_test_cpu(cpu, smdata->active_cpus);
 
-       if (!active_cpus) {
-               if (cpu == cpumask_first(cpu_online_mask))
-                       smdata = &active;
-       } else {
-               if (cpumask_test_cpu(cpu, active_cpus))
-                       smdata = &active;
-       }
        /* Simple state machine */
        do {
                /* Chill out and ensure we re-read stopmachine_state. */
                cpu_relax();
-               if (state != curstate) {
-                       curstate = state;
+               if (smdata->state != curstate) {
+                       curstate = smdata->state;
                        switch (curstate) {
                        case STOPMACHINE_DISABLE_IRQ:
                                local_irq_disable();
                                hard_irq_disable();
                                break;
                        case STOPMACHINE_RUN:
-                               /* On multiple CPUs only a single error code
-                                * is needed to tell that something failed. */
-                               err = smdata->fn(smdata->data);
-                               if (err)
-                                       smdata->fnret = err;
+                               if (is_active)
+                                       err = smdata->fn(smdata->data);
                                break;
                        default:
                                break;
                        }
-                       ack_state();
+                       ack_state(smdata);
                }
        } while (curstate != STOPMACHINE_EXIT);
 
-       local_irq_enable();
-}
-
-/* Callback for CPUs which aren't supposed to do anything. */
-static int chill(void *unused)
-{
-       return 0;
-}
-
-int stop_machine_create(void)
-{
-       mutex_lock(&setup_lock);
-       if (refcount)
-               goto done;
-       stop_machine_wq = create_rt_workqueue("kstop");
-       if (!stop_machine_wq)
-               goto err_out;
-       stop_machine_work = alloc_percpu(struct work_struct);
-       if (!stop_machine_work)
-               goto err_out;
-done:
-       refcount++;
-       mutex_unlock(&setup_lock);
-       return 0;
-
-err_out:
-       if (stop_machine_wq)
-               destroy_workqueue(stop_machine_wq);
-       mutex_unlock(&setup_lock);
-       return -ENOMEM;
+       local_irq_restore(flags);
+       return err;
 }
-EXPORT_SYMBOL_GPL(stop_machine_create);
-
-void stop_machine_destroy(void)
-{
-       mutex_lock(&setup_lock);
-       refcount--;
-       if (refcount)
-               goto done;
-       destroy_workqueue(stop_machine_wq);
-       free_percpu(stop_machine_work);
-done:
-       mutex_unlock(&setup_lock);
-}
-EXPORT_SYMBOL_GPL(stop_machine_destroy);
 
 int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
-       struct work_struct *sm_work;
-       int i, ret;
-
-       /* Set up initial state. */
-       mutex_lock(&lock);
-       num_threads = num_online_cpus();
-       active_cpus = cpus;
-       active.fn = fn;
-       active.data = data;
-       active.fnret = 0;
-       idle.fn = chill;
-       idle.data = NULL;
-
-       set_state(STOPMACHINE_PREPARE);
-
-       /* Schedule the stop_cpu work on all cpus: hold this CPU so one
-        * doesn't hit this CPU until we're ready. */
-       get_cpu();
-       for_each_online_cpu(i) {
-               sm_work = per_cpu_ptr(stop_machine_work, i);
-               INIT_WORK(sm_work, stop_cpu);
-               queue_work_on(i, stop_machine_wq, sm_work);
+       struct stop_machine_data smdata = { .fn = fn, .data = data,
+                                           .num_threads = num_online_cpus(),
+                                           .active_cpus = cpus };
+
+       if (!stop_machine_initialized) {
+               /*
+                * Handle the case where stop_machine() is called
+                * early in boot before stop_machine() has been
+                * initialized.
+                */
+               unsigned long flags;
+               int ret;
+
+               WARN_ON_ONCE(smdata.num_threads != 1);
+
+               local_irq_save(flags);
+               hard_irq_disable();
+               ret = (*fn)(data);
+               local_irq_restore(flags);
+
+               return ret;
        }
-       /* This will release the thread on our CPU. */
-       put_cpu();
-       flush_workqueue(stop_machine_wq);
-       ret = active.fnret;
-       mutex_unlock(&lock);
-       return ret;
+
+       /* Set the initial state and stop all online cpus. */
+       set_state(&smdata, STOPMACHINE_PREPARE);
+       return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
 }
 
 int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 {
        int ret;
 
-       ret = stop_machine_create();
-       if (ret)
-               return ret;
        /* No CPUs can come up or down during this. */
        get_online_cpus();
        ret = __stop_machine(fn, data, cpus);
        put_online_cpus();
-       stop_machine_destroy();
        return ret;
 }
 EXPORT_SYMBOL_GPL(stop_machine);
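
With the cpu_stop backend brought up from an early_initcall and the early-boot fallback above covering the window before that, stop_machine() no longer needs on-demand setup: the refcounted stop_machine_create()/stop_machine_destroy() pair, its private RT workqueue, and the possibility of failing with -ENOMEM before fn ever runs are all gone. Callers shrink accordingly; a before/after sketch with hypothetical wrappers:

    /* Before (removed refcounted API): */
    int old_way(int (*fn)(void *), void *data)
    {
            int err = stop_machine_create();

            if (err)
                    return err;
            err = stop_machine(fn, data, NULL);
            stop_machine_destroy();
            return err;
    }

    /* After: the backend is always ready, so a bare call suffices. */
    int new_way(int (*fn)(void *), void *data)
    {
            return stop_machine(fn, data, NULL);
    }
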
+
+/**
+ * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * This is identical to stop_machine() but can be called from a CPU which
+ * is not active.  The local CPU is in the process of hotplug (so no other
+ * CPU hotplug can start) and not marked active and doesn't have enough
+ * context to sleep.
+ *
+ * This function provides stop_machine() functionality for such state by
+ * using busy-wait for synchronization and executing @fn directly for local
+ * CPU.
+ *
+ * CONTEXT:
+ * Local CPU is inactive.  Temporarily stops all active CPUs.
+ *
+ * RETURNS:
+ * 0 if all executions of @fn returned 0, any non zero return value if any
+ * returned non zero.
+ */
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+                                 const struct cpumask *cpus)
+{
+       struct stop_machine_data smdata = { .fn = fn, .data = data,
+                                           .active_cpus = cpus };
+       struct cpu_stop_done done;
+       int ret;
+
+       /* Local CPU must be inactive and CPU hotplug in progress. */
+       BUG_ON(cpu_active(raw_smp_processor_id()));
+       smdata.num_threads = num_active_cpus() + 1;     /* +1 for local */
+
+       /* No proper task established and can't sleep - busy wait for lock. */
+       while (!mutex_trylock(&stop_cpus_mutex))
+               cpu_relax();
+
+       /* Schedule work on other CPUs and execute directly for local CPU */
+       set_state(&smdata, STOPMACHINE_PREPARE);
+       cpu_stop_init_done(&done, num_active_cpus());
+       queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+                            &done);
+       ret = stop_machine_cpu_stop(&smdata);
+
+       /* Busy wait for completion. */
+       while (!completion_done(&done.completion))
+               cpu_relax();
+
+       mutex_unlock(&stop_cpus_mutex);
+       return ret ?: done.ret;
+}
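
The busy-waiting here is deliberate: the caller sits mid-hotplug on a CPU that is online but not yet active and has no proper task context, so it spins on mutex_trylock() rather than sleeping on stop_cpus_mutex, and polls completion_done() rather than calling wait_for_completion(). num_threads is num_active_cpus() + 1 because the local CPU is absent from cpu_active_mask yet still participates by invoking stop_machine_cpu_stop() directly, and its return value takes precedence via ret ?: done.ret. Hypothetical call shape (upstream, the first user was reportedly the x86 MTRR rendezvous during CPU online):

    /* Hypothetical: from a CPU that is online but not yet active, run
     * apply_update() on every participating CPU in lockstep. */
    static int rendezvous(void *ctx)
    {
            return stop_machine_from_inactive_cpu(apply_update, ctx,
                                                  cpu_online_mask);
    }
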
+
+#endif /* CONFIG_STOP_MACHINE */