diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 70f8d90..2f194e9 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
 #include <linux/cpu.h>
 #include <linux/init.h>
 #include <linux/kthread.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/stop_machine.h>
 #include <linux/interrupt.h>
 #include <linux/kallsyms.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 
 /*
  * Structure to determine completion condition and record errors.  May
@@ -35,12 +35,13 @@ struct cpu_stop_done {
 /* the actual stopper, one per every possible cpu, enabled on online cpus */
 struct cpu_stopper {
        spinlock_t              lock;
+       bool                    enabled;        /* is this stopper enabled? */
        struct list_head        works;          /* list of pending works */
        struct task_struct      *thread;        /* stopper thread */
-       bool                    enabled;        /* is this stopper enabled? */
 };
 
 static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper);
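+/*
+ * Set once cpu_stop_init() has run; until then __stop_machine() falls
+ * back to running @fn directly on the local cpu with irqs disabled.
+ */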
+static bool stop_machine_initialized = false;
 
 static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo)
 {
@@ -136,10 +137,11 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 static DEFINE_MUTEX(stop_cpus_mutex);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
-int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
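+/* queue @fn(@arg) as stop work on each cpu in @cpumask, all reporting
+ * completion through @done; callers wait on @done themselves */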
+static void queue_stop_cpus_work(const struct cpumask *cpumask,
+                                cpu_stop_fn_t fn, void *arg,
+                                struct cpu_stop_done *done)
 {
        struct cpu_stop_work *work;
-       struct cpu_stop_done done;
        unsigned int cpu;
 
        /* initialize works and done */
@@ -147,9 +149,8 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
                work = &per_cpu(stop_cpus_work, cpu);
                work->fn = fn;
                work->arg = arg;
-               work->done = &done;
+               work->done = done;
        }
-       cpu_stop_init_done(&done, cpumask_weight(cpumask));
 
        /*
         * Disable preemption while queueing to avoid getting
@@ -161,7 +162,15 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
                cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
                                    &per_cpu(stop_cpus_work, cpu));
        preempt_enable();
+}
 
+static int __stop_cpus(const struct cpumask *cpumask,
+                      cpu_stop_fn_t fn, void *arg)
+{
+       struct cpu_stop_done done;
+
+       cpu_stop_init_done(&done, cpumask_weight(cpumask));
+       queue_stop_cpus_work(cpumask, fn, arg, &done);
        wait_for_completion(&done.completion);
        return done.executed ? done.ret : -ENOENT;
 }
@@ -262,7 +271,7 @@ repeat:
                cpu_stop_fn_t fn = work->fn;
                void *arg = work->arg;
                struct cpu_stop_done *done = work->done;
-               char ksym_buf[KSYM_NAME_LEN];
+               char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
 
                __set_current_state(TASK_RUNNING);
 
@@ -287,11 +296,12 @@ repeat:
        goto repeat;
 }
 
+extern void sched_set_stop_task(int cpu, struct task_struct *stop);
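+/* implemented by the scheduler core; the stop task preempts every other class */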
+
 /* manage stopper for a cpu, mostly lifted from sched migration thread mgmt */
 static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
                                           unsigned long action, void *hcpu)
 {
-       struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
        unsigned int cpu = (unsigned long)hcpu;
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct task_struct *p;
@@ -300,17 +310,19 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
        case CPU_UP_PREPARE:
                BUG_ON(stopper->thread || stopper->enabled ||
                       !list_empty(&stopper->works));
-               p = kthread_create(cpu_stopper_thread, stopper, "migration/%d",
-                                  cpu);
+               p = kthread_create_on_node(cpu_stopper_thread,
+                                          stopper,
+                                          cpu_to_node(cpu),
+                                          "migration/%d", cpu);
                if (IS_ERR(p))
-                       return NOTIFY_BAD;
-               sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+                       return notifier_from_errno(PTR_ERR(p));
                get_task_struct(p);
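+               /* pin the thread and register it as @cpu's stop task;
+                * this replaces the old SCHED_FIFO priority bump */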
+               kthread_bind(p, cpu);
+               sched_set_stop_task(cpu, p);
                stopper->thread = p;
                break;
 
        case CPU_ONLINE:
-               kthread_bind(stopper->thread, cpu);
                /* strictly unnecessary, as first user will wake it */
                wake_up_process(stopper->thread);
                /* mark enabled */
@@ -325,6 +337,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
        {
                struct cpu_stop_work *work;
 
+               sched_set_stop_task(cpu, NULL);
                /* kill the stopper */
                kthread_stop(stopper->thread);
                /* drain remaining works */
@@ -370,10 +383,12 @@ static int __init cpu_stop_init(void)
        /* start one for the boot cpu */
        err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE,
                                    bcpu);
-       BUG_ON(err == NOTIFY_BAD);
+       BUG_ON(err != NOTIFY_OK);
        cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu);
        register_cpu_notifier(&cpu_stop_cpu_notifier);
 
+       stop_machine_initialized = true;
+
        return 0;
 }
 early_initcall(cpu_stop_init);
@@ -427,8 +442,15 @@ static int stop_machine_cpu_stop(void *data)
        struct stop_machine_data *smdata = data;
        enum stopmachine_state curstate = STOPMACHINE_NONE;
        int cpu = smp_processor_id(), err = 0;
+       unsigned long flags;
        bool is_active;
 
+       /*
+        * When called from stop_machine_from_inactive_cpu(), irq might
+        * already be disabled.  Save the state and restore it on exit.
+        */
+       local_save_flags(flags);
+
        if (!smdata->active_cpus)
                is_active = cpu == cpumask_first(cpu_online_mask);
        else
@@ -456,7 +478,7 @@ static int stop_machine_cpu_stop(void *data)
                }
        } while (curstate != STOPMACHINE_EXIT);
 
-       local_irq_enable();
+       local_irq_restore(flags);
        return err;
 }
 
@@ -466,6 +488,25 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
                                            .num_threads = num_online_cpus(),
                                            .active_cpus = cpus };
 
+       if (!stop_machine_initialized) {
+               /*
+                * Handle the case where stop_machine() is called
+                * early in boot, before the stopper threads have
+                * been set up: run @fn directly on the local cpu
+                * with interrupts hard-disabled.
+                */
+               unsigned long flags;
+               int ret;
+
+               WARN_ON_ONCE(smdata.num_threads != 1);
+
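+               /*
+                * On some archs (e.g. powerpc) local_irq_save() only
+                * soft-disables interrupts; hard_irq_disable() makes
+                * sure they are really off while @fn runs.
+                */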
+               local_irq_save(flags);
+               hard_irq_disable();
+               ret = (*fn)(data);
+               local_irq_restore(flags);
+
+               return ret;
+       }
+
        /* Set the initial state and stop all online cpus. */
        set_state(&smdata, STOPMACHINE_PREPARE);
        return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata);
@@ -483,4 +524,57 @@ int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
 }
 EXPORT_SYMBOL_GPL(stop_machine);
 
+/**
+ * stop_machine_from_inactive_cpu - stop_machine() from inactive CPU
+ * @fn: the function to run
+ * @data: the data ptr for the @fn()
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
+ *
+ * This is identical to stop_machine() but can be called from a CPU which
+ * is not active.  The local CPU is in the process of hotplug (so no other
+ * CPU hotplug operation can start), is not marked active, and lacks the
+ * context to sleep.
+ *
+ * This function provides stop_machine() functionality for such state by
+ * using busy-wait for synchronization and executing @fn directly for local
+ * CPU.
+ *
+ * CONTEXT:
+ * Local CPU is inactive.  Temporarily stops all active CPUs.
+ *
+ * RETURNS:
+ * 0 if all executions of @fn returned 0, any non-zero return value if any
+ * returned non-zero.
+ */
+int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data,
+                                 const struct cpumask *cpus)
+{
+       struct stop_machine_data smdata = { .fn = fn, .data = data,
+                                           .active_cpus = cpus };
+       struct cpu_stop_done done;
+       int ret;
+
+       /* Local CPU must be inactive and CPU hotplug in progress. */
+       BUG_ON(cpu_active(raw_smp_processor_id()));
+       smdata.num_threads = num_active_cpus() + 1;     /* +1 for local */
+
+       /* No proper task established and can't sleep - busy wait for lock. */
+       while (!mutex_trylock(&stop_cpus_mutex))
+               cpu_relax();
+
+       /* Schedule work on other CPUs and execute directly for local CPU */
+       set_state(&smdata, STOPMACHINE_PREPARE);
+       cpu_stop_init_done(&done, num_active_cpus());
+       queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata,
+                            &done);
+       ret = stop_machine_cpu_stop(&smdata);
+
+       /* Busy wait for completion. */
+       while (!completion_done(&done.completion))
+               cpu_relax();
+
+       mutex_unlock(&stop_cpus_mutex);
+       return ret ?: done.ret;
+}
+
 #endif /* CONFIG_STOP_MACHINE */