Update to 3.4-final.
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b59c946..5abf42f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,22 +1,29 @@
 /*
- * linux/kernel/workqueue.c
+ * kernel/workqueue.c - generic async execution with shared worker pool
  *
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
+ * Copyright (C) 2002          Ingo Molnar
  *
- * Started by Ingo Molnar, Copyright (C) 2002
+ *   Derived from the taskqueue/keventd code by:
+ *     David Woodhouse <dwmw2@infradead.org>
+ *     Andrew Morton
+ *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ *     Theodore Ts'o <tytso@mit.edu>
  *
- * Derived from the taskqueue/keventd code by:
+ * Made to use alloc_percpu by Christoph Lameter.
  *
- *   David Woodhouse <dwmw2@infradead.org>
- *   Andrew Morton
- *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
- *   Theodore Ts'o <tytso@mit.edu>
+ * Copyright (C) 2010          SUSE Linux Products GmbH
+ * Copyright (C) 2010          Tejun Heo <tj@kernel.org>
  *
- * Made to use alloc_percpu by Christoph Lameter.
+ * This is the generic async execution mechanism.  Work items are
+ * executed in process context.  The worker pool is shared and
+ * automatically managed.  There is one worker pool for each CPU and
+ * one extra for works which are better served by workers which are
+ * not bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/init.h>
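
The rewritten header comment above describes the new model: work items execute in process context on a shared, automatically managed worker pool, one pool per CPU plus an extra unbound one. A minimal, hypothetical module-style sketch of how a caller sees that model (all ex_* names are invented for illustration and are not part of the patch):

#include <linux/module.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* The work function runs in process context on a shared kworker thread. */
static void ex_fn(struct work_struct *work)
{
	pr_info("ex_fn ran on cpu %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(ex_bound_work, ex_fn);
static DECLARE_WORK(ex_unbound_work, ex_fn);

static int __init ex_init(void)
{
	queue_work(system_wq, &ex_bound_work);		 /* per-CPU worker pool */
	queue_work(system_unbound_wq, &ex_unbound_work); /* the extra unbound pool */
	return 0;
}

static void __exit ex_exit(void)
{
	/* nothing may be pending or running once the module text goes away */
	cancel_work_sync(&ex_bound_work);
	cancel_work_sync(&ex_unbound_work);
}

module_init(ex_init);
module_exit(ex_exit);
MODULE_LICENSE("GPL");

queue_work() on system_wq lands in the per-CPU pool of the submitting CPU; system_unbound_wq feeds the extra pool introduced by this series.
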
@@ -53,9 +60,10 @@ enum {
        WORKER_ROGUE            = 1 << 4,       /* not bound to any cpu */
        WORKER_REBIND           = 1 << 5,       /* mom is home, come back */
        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
+       WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
 
        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
-                                 WORKER_CPU_INTENSIVE,
+                                 WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
 
        /* gcwq->trustee_state */
        TRUSTEE_START           = 0,            /* start */
@@ -71,7 +79,9 @@ enum {
        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
 
-       MAYDAY_INITIAL_TIMEOUT  = HZ / 100,     /* call for help after 10ms */
+       MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+                                               /* call for help after 10ms
+                                                  (min two ticks) */
        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
        CREATE_COOLDOWN         = HZ,           /* time to breath after fail */
        TRUSTEE_COOLDOWN        = HZ / 10,      /* for trustee draining */
@@ -86,7 +96,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -96,7 +107,7 @@ enum {
  * X: During normal operation, modification requires gcwq->lock and
  *    should be done only from local cpu.  Either disabling preemption
  *    on local cpu or grabbing gcwq->lock is enough for read access.
- *    While trustee is in charge, it's identical to L.
+ *    If GCWQ_DISASSOCIATED is set, it's identical to L.
  *
  * F: wq->flush_mutex protected.
  *
@@ -185,12 +196,37 @@ struct wq_flusher {
 };
 
 /*
+ * All cpumasks are assumed to be always set on UP and thus can't be
+ * used to determine whether there's something to be done.
+ */
+#ifdef CONFIG_SMP
+typedef cpumask_var_t mayday_mask_t;
+#define mayday_test_and_set_cpu(cpu, mask)     \
+       cpumask_test_and_set_cpu((cpu), (mask))
+#define mayday_clear_cpu(cpu, mask)            cpumask_clear_cpu((cpu), (mask))
+#define for_each_mayday_cpu(cpu, mask)         for_each_cpu((cpu), (mask))
+#define alloc_mayday_mask(maskp, gfp)          zalloc_cpumask_var((maskp), (gfp))
+#define free_mayday_mask(mask)                 free_cpumask_var((mask))
+#else
+typedef unsigned long mayday_mask_t;
+#define mayday_test_and_set_cpu(cpu, mask)     test_and_set_bit(0, &(mask))
+#define mayday_clear_cpu(cpu, mask)            clear_bit(0, &(mask))
+#define for_each_mayday_cpu(cpu, mask)         if ((cpu) = 0, (mask))
+#define alloc_mayday_mask(maskp, gfp)          true
+#define free_mayday_mask(mask)                 do { } while (0)
+#endif
+
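
The UP variant above collapses the whole mayday mask into a single bit, and its for_each_mayday_cpu() is an if-statement in disguise: the comma expression assigns (cpu) = 0 and then tests the mask, so the body runs at most once. A standalone user-space illustration of that expansion (not kernel code):

/* Stand-alone illustration of the UP for_each_mayday_cpu() above. */
#include <stdio.h>

#define for_each_mayday_cpu(cpu, mask)	if ((cpu) = 0, (mask))

int main(void)
{
	unsigned long mask = 1;		/* bit 0 set: "some cpu wants rescue" */
	int cpu;

	for_each_mayday_cpu(cpu, mask)
		printf("rescue requested, cpu %d\n", cpu);	/* runs once */

	mask = 0;			/* no mayday pending */
	for_each_mayday_cpu(cpu, mask)
		printf("never printed\n");

	return 0;
}
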
+/*
  * The externally visible workqueue abstraction is an array of
  * per-CPU workqueues:
  */
 struct workqueue_struct {
-       unsigned int            flags;          /* I: WQ_* flags */
-       struct cpu_workqueue_struct *cpu_wq;    /* I: cwq's */
+       unsigned int            flags;          /* W: WQ_* flags */
+       union {
+               struct cpu_workqueue_struct __percpu    *pcpu;
+               struct cpu_workqueue_struct             *single;
+               unsigned long                           v;
+       } cpu_wq;                               /* I: cwq's */
        struct list_head        list;           /* W: list of all workqueues */
 
        struct mutex            flush_mutex;    /* protects wq flushing */
@@ -201,33 +237,95 @@ struct workqueue_struct {
        struct list_head        flusher_queue;  /* F: flush waiters */
        struct list_head        flusher_overflow; /* F: flush overflow list */
 
-       unsigned long           single_cpu;     /* cpu for single cpu wq */
-
-       cpumask_var_t           mayday_mask;    /* cpus requesting rescue */
+       mayday_mask_t           mayday_mask;    /* cpus requesting rescue */
        struct worker           *rescuer;       /* I: rescue worker */
 
+       int                     nr_drainers;    /* W: drain in progress */
        int                     saved_max_active; /* W: saved cwq max_active */
-       const char              *name;          /* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
 #endif
+       char                    name[];         /* I: workqueue name */
 };
 
 struct workqueue_struct *system_wq __read_mostly;
 struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
+struct workqueue_struct *system_unbound_wq __read_mostly;
+struct workqueue_struct *system_freezable_wq __read_mostly;
+struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
+EXPORT_SYMBOL_GPL(system_unbound_wq);
+EXPORT_SYMBOL_GPL(system_freezable_wq);
+EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
 
 #define for_each_busy_worker(worker, i, pos, gcwq)                     \
        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
                hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
 
+static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
+                                 unsigned int sw)
+{
+       if (cpu < nr_cpu_ids) {
+               if (sw & 1) {
+                       cpu = cpumask_next(cpu, mask);
+                       if (cpu < nr_cpu_ids)
+                               return cpu;
+               }
+               if (sw & 2)
+                       return WORK_CPU_UNBOUND;
+       }
+       return WORK_CPU_NONE;
+}
+
+static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
+                               struct workqueue_struct *wq)
+{
+       return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
+}
+
+/*
+ * CPU iterators
+ *
+ * An extra gcwq is defined for an invalid cpu number
+ * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
+ * specific CPU.  The following iterators are similar to
+ * for_each_*_cpu() iterators but also consider the unbound gcwq.
+ *
+ * for_each_gcwq_cpu()         : possible CPUs + WORK_CPU_UNBOUND
+ * for_each_online_gcwq_cpu()  : online CPUs + WORK_CPU_UNBOUND
+ * for_each_cwq_cpu()          : possible CPUs for bound workqueues,
+ *                               WORK_CPU_UNBOUND for unbound workqueues
+ */
+#define for_each_gcwq_cpu(cpu)                                         \
+       for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);         \
+            (cpu) < WORK_CPU_NONE;                                     \
+            (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
+
+#define for_each_online_gcwq_cpu(cpu)                                  \
+       for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);           \
+            (cpu) < WORK_CPU_NONE;                                     \
+            (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
+
+#define for_each_cwq_cpu(cpu, wq)                                      \
+       for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));        \
+            (cpu) < WORK_CPU_NONE;                                     \
+            (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
+
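
These iterators extend the usual for_each_*_cpu() loops with the WORK_CPU_UNBOUND pseudo-CPU. A hypothetical helper, assumed to sit next to the macros in this file, shows what for_each_cwq_cpu() visits: every possible CPU for a bound workqueue, a single WORK_CPU_UNBOUND iteration for an unbound one:

/* Hypothetical helper, not part of the patch. */
static unsigned int example_count_cwqs(struct workqueue_struct *wq)
{
	unsigned int cpu, n = 0;

	for_each_cwq_cpu(cpu, wq)
		n++;		/* bound wq: one per possible CPU; unbound wq: 1 */
	return n;
}
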
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
 static struct debug_obj_descr work_debug_descr;
 
+static void *work_debug_hint(void *addr)
+{
+       return ((struct work_struct *) addr)->func;
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
@@ -299,6 +397,7 @@ static int work_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
+       .debug_hint     = work_debug_hint,
        .fixup_init     = work_fixup_init,
        .fixup_activate = work_fixup_activate,
        .fixup_free     = work_fixup_free,
@@ -347,22 +446,41 @@ static bool workqueue_freezing;           /* W: have wqs started freezing? */
 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
 static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
 
+/*
+ * Global cpu workqueue and nr_running counter for unbound gcwq.  The
+ * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
+ * workers have WORKER_UNBOUND set.
+ */
+static struct global_cwq unbound_global_cwq;
+static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);      /* always 0 */
+
 static int worker_thread(void *__worker);
 
 static struct global_cwq *get_gcwq(unsigned int cpu)
 {
-       return &per_cpu(global_cwq, cpu);
+       if (cpu != WORK_CPU_UNBOUND)
+               return &per_cpu(global_cwq, cpu);
+       else
+               return &unbound_global_cwq;
 }
 
 static atomic_t *get_gcwq_nr_running(unsigned int cpu)
 {
-       return &per_cpu(gcwq_nr_running, cpu);
+       if (cpu != WORK_CPU_UNBOUND)
+               return &per_cpu(gcwq_nr_running, cpu);
+       else
+               return &unbound_gcwq_nr_running;
 }
 
 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
                                            struct workqueue_struct *wq)
 {
-       return per_cpu_ptr(wq->cpu_wq, cpu);
+       if (!(wq->flags & WQ_UNBOUND)) {
+               if (likely(cpu < nr_cpu_ids))
+                       return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
+       } else if (likely(cpu == WORK_CPU_UNBOUND))
+               return wq->cpu_wq.single;
+       return NULL;
 }
 
 static unsigned int work_color_to_flags(int color)
@@ -382,10 +500,9 @@ static int work_next_color(int color)
 }
 
 /*
- * Work data points to the cwq while a work is on queue.  Once
- * execution starts, it points to the cpu the work was last on.  This
- * can be distinguished by comparing the data value against
- * PAGE_OFFSET.
+ * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
+ * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
+ * cleared and the work data contains the cpu number it was last on.
  *
  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
  * cwq, cpu or clear work->data.  These functions should only be
@@ -408,7 +525,7 @@ static void set_work_cwq(struct work_struct *work,
                         unsigned long extra_flags)
 {
        set_work_data(work, (unsigned long)cwq,
-                     WORK_STRUCT_PENDING | extra_flags);
+                     WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 }
 
 static void set_work_cpu(struct work_struct *work, unsigned int cpu)
@@ -421,31 +538,30 @@ static void clear_work_data(struct work_struct *work)
        set_work_data(work, WORK_STRUCT_NO_CPU, 0);
 }
 
-static inline unsigned long get_work_data(struct work_struct *work)
-{
-       return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
-}
-
 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
 {
-       unsigned long data = get_work_data(work);
+       unsigned long data = atomic_long_read(&work->data);
 
-       return data >= PAGE_OFFSET ? (void *)data : NULL;
+       if (data & WORK_STRUCT_CWQ)
+               return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
+       else
+               return NULL;
 }
 
 static struct global_cwq *get_work_gcwq(struct work_struct *work)
 {
-       unsigned long data = get_work_data(work);
+       unsigned long data = atomic_long_read(&work->data);
        unsigned int cpu;
 
-       if (data >= PAGE_OFFSET)
-               return ((struct cpu_workqueue_struct *)data)->gcwq;
+       if (data & WORK_STRUCT_CWQ)
+               return ((struct cpu_workqueue_struct *)
+                       (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
 
        cpu = data >> WORK_STRUCT_FLAG_BITS;
-       if (cpu == NR_CPUS)
+       if (cpu == WORK_CPU_NONE)
                return NULL;
 
-       BUG_ON(cpu >= num_possible_cpus());
+       BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
        return get_gcwq(cpu);
 }
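
The decoding above hinges on a single flag bit telling the two encodings apart. A standalone user-space sketch of the idea with made-up bit positions (the real values come from <linux/workqueue.h>, and real cwqs are aligned to 1 << WORK_STRUCT_FLAG_BITS so their low bits are free for flags):

#include <stdio.h>
#include <stdint.h>

/* Made-up flag layout for illustration only. */
#define EX_STRUCT_PENDING	(1UL << 0)
#define EX_STRUCT_CWQ		(1UL << 2)
#define EX_FLAG_BITS		8
#define EX_WQ_DATA_MASK		(~((1UL << EX_FLAG_BITS) - 1))

int main(void)
{
	static struct { int dummy; } cwq __attribute__((aligned(1 << EX_FLAG_BITS)));
	uintptr_t data;

	/* while queued: cwq pointer plus flags, CWQ bit set */
	data = (uintptr_t)&cwq | EX_STRUCT_PENDING | EX_STRUCT_CWQ;
	if (data & EX_STRUCT_CWQ)
		printf("queued:    cwq=%p\n", (void *)(data & EX_WQ_DATA_MASK));

	/* once execution starts: CWQ bit clear, upper bits hold the last CPU */
	data = (uintptr_t)3 << EX_FLAG_BITS;
	if (!(data & EX_STRUCT_CWQ))
		printf("executing: last cpu=%lu\n",
		       (unsigned long)(data >> EX_FLAG_BITS));
	return 0;
}
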
 
@@ -481,7 +597,9 @@ static bool keep_working(struct global_cwq *gcwq)
 {
        atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 
-       return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
+       return !list_empty(&gcwq->worklist) &&
+               (atomic_read(nr_running) <= 1 ||
+                gcwq->flags & GCWQ_HIGHPRI_PENDING);
 }
 
 /* Do we need a new worker?  Called from manager. */
@@ -551,7 +669,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 {
        struct worker *worker = kthread_data(task);
 
-       if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
+       if (!(worker->flags & WORKER_NOT_RUNNING))
                atomic_inc(get_gcwq_nr_running(cpu));
 }
 
@@ -577,7 +695,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
        struct global_cwq *gcwq = get_gcwq(cpu);
        atomic_t *nr_running = get_gcwq_nr_running(cpu);
 
-       if (unlikely(worker->flags & WORKER_NOT_RUNNING))
+       if (worker->flags & WORKER_NOT_RUNNING)
                return NULL;
 
        /* this can only happen on the local cpu */
@@ -658,7 +776,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
        worker->flags &= ~flags;
 
-       /* if transitioning out of NOT_RUNNING, increment nr_running */
+       /*
+        * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+        * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
+        * of multiple flags, not a single flag.
+        */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
                        atomic_inc(get_gcwq_nr_running(gcwq->cpu));
@@ -822,32 +944,36 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
                wake_up_worker(gcwq);
 }
 
-/**
- * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
- * @cwq: cwq to unbind
- *
- * Try to unbind @cwq from single cpu workqueue processing.  If
- * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
- *
- * CONTEXT:
- * spin_lock_irq(gcwq->lock).
+/*
+ * Test whether @work is being queued from another work executing on the
+ * same workqueue.  This is rather expensive and should only be used from
+ * cold paths.
  */
-static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
+static bool is_chained_work(struct workqueue_struct *wq)
 {
-       struct workqueue_struct *wq = cwq->wq;
-       struct global_cwq *gcwq = cwq->gcwq;
+       unsigned long flags;
+       unsigned int cpu;
 
-       BUG_ON(wq->single_cpu != gcwq->cpu);
-       /*
-        * Unbind from workqueue if @cwq is not frozen.  If frozen,
-        * thaw_workqueues() will either restart processing on this
-        * cpu or unbind if empty.  This keeps works queued while
-        * frozen fully ordered and flushable.
-        */
-       if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
-               smp_wmb();      /* paired with cmpxchg() in __queue_work() */
-               wq->single_cpu = NR_CPUS;
+       for_each_gcwq_cpu(cpu) {
+               struct global_cwq *gcwq = get_gcwq(cpu);
+               struct worker *worker;
+               struct hlist_node *pos;
+               int i;
+
+               spin_lock_irqsave(&gcwq->lock, flags);
+               for_each_busy_worker(worker, i, pos, gcwq) {
+                       if (worker->task != current)
+                               continue;
+                       spin_unlock_irqrestore(&gcwq->lock, flags);
+                       /*
+                        * I'm @worker, no locking necessary.  See if @work
+                        * is headed to the same workqueue.
+                        */
+                       return worker->current_cwq->wq == wq;
+               }
+               spin_unlock_irqrestore(&gcwq->lock, flags);
        }
+       return false;
 }
 
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
@@ -856,18 +982,23 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct global_cwq *gcwq;
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
+       unsigned int work_flags;
        unsigned long flags;
-       bool arbitrate;
 
        debug_work_activate(work);
 
-       /*
-        * Determine gcwq to use.  SINGLE_CPU is inherently
-        * NON_REENTRANT, so test it first.
-        */
-       if (!(wq->flags & WQ_SINGLE_CPU)) {
+       /* if dying, only works from the same workqueue are allowed */
+       if (unlikely(wq->flags & WQ_DRAINING) &&
+           WARN_ON_ONCE(!is_chained_work(wq)))
+               return;
+
+       /* determine gcwq to use */
+       if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
 
+               if (unlikely(cpu == WORK_CPU_UNBOUND))
+                       cpu = raw_smp_processor_id();
+
                /*
                 * It's multi cpu.  If @wq is non-reentrant and @work
                 * was previously on a different cpu, it might still
@@ -893,53 +1024,29 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                } else
                        spin_lock_irqsave(&gcwq->lock, flags);
        } else {
-               unsigned int req_cpu = cpu;
-
-               /*
-                * It's a bit more complex for single cpu workqueues.
-                * We first need to determine which cpu is going to be
-                * used.  If no cpu is currently serving this
-                * workqueue, arbitrate using atomic accesses to
-                * wq->single_cpu; otherwise, use the current one.
-                */
-       retry:
-               cpu = wq->single_cpu;
-               arbitrate = cpu == NR_CPUS;
-               if (arbitrate)
-                       cpu = req_cpu;
-
-               gcwq = get_gcwq(cpu);
+               gcwq = get_gcwq(WORK_CPU_UNBOUND);
                spin_lock_irqsave(&gcwq->lock, flags);
-
-               /*
-                * The following cmpxchg() is a full barrier paired
-                * with smp_wmb() in cwq_unbind_single_cpu() and
-                * guarantees that all changes to wq->st_* fields are
-                * visible on the new cpu after this point.
-                */
-               if (arbitrate)
-                       cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
-
-               if (unlikely(wq->single_cpu != cpu)) {
-                       spin_unlock_irqrestore(&gcwq->lock, flags);
-                       goto retry;
-               }
        }
 
        /* gcwq determined, get cwq and queue */
        cwq = get_cwq(gcwq->cpu, wq);
+       trace_workqueue_queue_work(cpu, cwq, work);
 
        BUG_ON(!list_empty(&work->entry));
 
        cwq->nr_in_flight[cwq->work_color]++;
+       work_flags = work_color_to_flags(cwq->work_color);
 
        if (likely(cwq->nr_active < cwq->max_active)) {
+               trace_workqueue_activate_work(work);
                cwq->nr_active++;
                worklist = gcwq_determine_ins_pos(gcwq, cwq);
-       } else
+       } else {
+               work_flags |= WORK_STRUCT_DELAYED;
                worklist = &cwq->delayed_works;
+       }
 
-       insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+       insert_work(cwq, work, worklist, work_flags);
 
        spin_unlock_irqrestore(&gcwq->lock, flags);
 }
@@ -1032,19 +1139,30 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        struct work_struct *work = &dwork->work;
 
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
-               struct global_cwq *gcwq = get_work_gcwq(work);
-               unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
+               unsigned int lcpu;
 
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
                timer_stats_timer_set_start_info(&dwork->timer);
+
                /*
                 * This stores cwq for the moment, for the timer_fn.
                 * Note that the work's gcwq is preserved to allow
                 * reentrance detection for delayed works.
                 */
+               if (!(wq->flags & WQ_UNBOUND)) {
+                       struct global_cwq *gcwq = get_work_gcwq(work);
+
+                       if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
+                               lcpu = gcwq->cpu;
+                       else
+                               lcpu = raw_smp_processor_id();
+               } else
+                       lcpu = WORK_CPU_UNBOUND;
+
                set_work_cwq(work, get_cwq(lcpu, wq), 0);
+
                timer->expires = jiffies + delay;
                timer->data = (unsigned long)dwork;
                timer->function = delayed_work_timer_fn;
@@ -1147,6 +1265,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
        struct global_cwq *gcwq = worker->gcwq;
        struct task_struct *task = worker->task;
@@ -1158,7 +1277,8 @@ static bool worker_maybe_bind_and_lock(struct worker *worker)
                 * it races with cpu hotunplug operation.  Verify
                 * against GCWQ_DISASSOCIATED.
                 */
-               set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
+               if (!(gcwq->flags & GCWQ_DISASSOCIATED))
+                       set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
 
                spin_lock_irq(&gcwq->lock);
                if (gcwq->flags & GCWQ_DISASSOCIATED)
@@ -1169,8 +1289,14 @@ static bool worker_maybe_bind_and_lock(struct worker *worker)
                        return true;
                spin_unlock_irq(&gcwq->lock);
 
-               /* CPU has come up inbetween, retry migration */
+               /*
+                * We've raced with CPU hot[un]plug.  Give it a breather
+                * and retry migration.  cond_resched() is required here;
+                * otherwise, we might deadlock against cpu_stop trying to
+                * bring down the CPU on non-preemptive kernel.
+                */
                cpu_relax();
+               cond_resched();
        }
 }
 
@@ -1223,8 +1349,9 @@ static struct worker *alloc_worker(void)
  */
 static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 {
-       int id = -1;
+       bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
        struct worker *worker = NULL;
+       int id = -1;
 
        spin_lock_irq(&gcwq->lock);
        while (ida_get_new(&gcwq->worker_ida, &id)) {
@@ -1242,8 +1369,14 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
        worker->gcwq = gcwq;
        worker->id = id;
 
-       worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
-                                     gcwq->cpu, id);
+       if (!on_unbound_cpu)
+               worker->task = kthread_create_on_node(worker_thread,
+                                                     worker,
+                                                     cpu_to_node(gcwq->cpu),
+                                                     "kworker/%u:%d", gcwq->cpu, id);
+       else
+               worker->task = kthread_create(worker_thread, worker,
+                                             "kworker/u:%d", id);
        if (IS_ERR(worker->task))
                goto fail;
 
@@ -1252,10 +1385,13 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
         * online later on.  Make sure every worker has
         * PF_THREAD_BOUND set.
         */
-       if (bind)
+       if (bind && !on_unbound_cpu)
                kthread_bind(worker->task, gcwq->cpu);
-       else
+       else {
                worker->task->flags |= PF_THREAD_BOUND;
+               if (on_unbound_cpu)
+                       worker->flags |= WORKER_UNBOUND;
+       }
 
        return worker;
 fail:
@@ -1350,12 +1486,17 @@ static bool send_mayday(struct work_struct *work)
 {
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct workqueue_struct *wq = cwq->wq;
+       unsigned int cpu;
 
        if (!(wq->flags & WQ_RESCUER))
                return false;
 
        /* mayday mayday mayday */
-       if (!cpumask_test_and_set_cpu(cwq->gcwq->cpu, wq->mayday_mask))
+       cpu = cwq->gcwq->cpu;
+       /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
+       if (cpu == WORK_CPU_UNBOUND)
+               cpu = 0;
+       if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
                wake_up_process(wq->rescuer->task);
        return true;
 }
@@ -1406,18 +1547,20 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!need_to_create_worker(gcwq))
                return false;
 restart:
+       spin_unlock_irq(&gcwq->lock);
+
        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
        mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 
        while (true) {
                struct worker *worker;
 
-               spin_unlock_irq(&gcwq->lock);
-
                worker = create_worker(gcwq, true);
                if (worker) {
                        del_timer_sync(&gcwq->mayday_timer);
@@ -1430,15 +1573,13 @@ restart:
                if (!need_to_create_worker(gcwq))
                        break;
 
-               spin_unlock_irq(&gcwq->lock);
                __set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(CREATE_COOLDOWN);
-               spin_lock_irq(&gcwq->lock);
+
                if (!need_to_create_worker(gcwq))
                        break;
        }
 
-       spin_unlock_irq(&gcwq->lock);
        del_timer_sync(&gcwq->mayday_timer);
        spin_lock_irq(&gcwq->lock);
        if (need_to_create_worker(gcwq))
@@ -1581,7 +1722,9 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
                                                    struct work_struct, entry);
        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
+       trace_workqueue_activate_work(work);
        move_linked_works(work, pos, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
 }
 
@@ -1589,6 +1732,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1596,22 +1740,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                bool delayed)
 {
        /* ignore uncolored works */
        if (color == WORK_NO_COLOR)
                return;
 
        cwq->nr_in_flight[color]--;
-       cwq->nr_active--;
-
-       if (!list_empty(&cwq->delayed_works)) {
-               /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
-       } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
-               /* this was the last work, unbind from single cpu */
-               cwq_unbind_single_cpu(cwq);
+
+       if (!delayed) {
+               cwq->nr_active--;
+               if (!list_empty(&cwq->delayed_works)) {
+                       /* one down, submit a delayed one */
+                       if (cwq->nr_active < cwq->max_active)
+                               cwq_activate_first_delayed(cwq);
+               }
        }
 
        /* is flush in progress and are we at the flushing tip? */
@@ -1648,6 +1792,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct global_cwq *gcwq = cwq->gcwq;
@@ -1714,9 +1860,15 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        spin_unlock_irq(&gcwq->lock);
 
        work_clear_pending(work);
-       lock_map_acquire(&cwq->wq->lockdep_map);
+       lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
+       trace_workqueue_execute_start(work);
        f(work);
+       /*
+        * While we must be careful to not use "work" after this, the trace
+        * point will only record its address.
+        */
+       trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);
 
@@ -1740,7 +1892,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
        worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -1832,10 +1984,10 @@ recheck:
        } while (keep_working(gcwq));
 
        worker_set_flags(worker, WORKER_PREP, false);
-
+sleep:
        if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
                goto recheck;
-sleep:
+
        /*
         * gcwq->lock is held and there's no work to process and no
         * need to manage, sleep.  Workers are woken up only while
@@ -1874,6 +2026,7 @@ static int rescuer_thread(void *__wq)
        struct workqueue_struct *wq = __wq;
        struct worker *rescuer = wq->rescuer;
        struct list_head *scheduled = &rescuer->scheduled;
+       bool is_unbound = wq->flags & WQ_UNBOUND;
        unsigned int cpu;
 
        set_user_nice(current, RESCUER_NICE_LEVEL);
@@ -1883,13 +2036,18 @@ repeat:
        if (kthread_should_stop())
                return 0;
 
-       for_each_cpu(cpu, wq->mayday_mask) {
-               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+       /*
+        * See whether any cpu is asking for help.  Unbound
+        * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
+        */
+       for_each_mayday_cpu(cpu, wq->mayday_mask) {
+               unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
+               struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
                struct global_cwq *gcwq = cwq->gcwq;
                struct work_struct *work, *n;
 
                __set_current_state(TASK_RUNNING);
-               cpumask_clear_cpu(cpu, wq->mayday_mask);
+               mayday_clear_cpu(cpu, wq->mayday_mask);
 
                /* migrate to the target cpu if possible */
                rescuer->gcwq = gcwq;
@@ -1905,6 +2063,15 @@ repeat:
                                move_linked_works(work, scheduled, &n);
 
                process_scheduled_works(rescuer);
+
+               /*
+                * Leave this gcwq.  If keep_working() is %true, notify a
+                * regular worker; otherwise, we end up with 0 concurrency
+                * and stalling the execution.
+                */
+               if (keep_working(gcwq))
+                       wake_up_worker(gcwq);
+
                spin_unlock_irq(&gcwq->lock);
        }
 
@@ -1960,7 +2127,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
         * checks and call back into the fixup functions where we
         * might deadlock.
         */
-       INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
+       INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
        init_completion(&barr->done);
 
@@ -2026,7 +2193,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
                atomic_set(&wq->nr_cwqs_to_flush, 1);
        }
 
-       for_each_possible_cpu(cpu) {
+       for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
                struct global_cwq *gcwq = cwq->gcwq;
 
@@ -2213,26 +2380,69 @@ out_unlock:
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 /**
- * flush_work - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
+ * drain_workqueue - drain a workqueue
+ * @wq: workqueue to drain
  *
- * Returns false if @work has already terminated.
- *
- * It is expected that, prior to calling flush_work(), the caller has
- * arranged for the work to not be requeued, otherwise it doesn't make
- * sense to use this function.
+ * Wait until the workqueue becomes empty.  While draining is in progress,
+ * only chain queueing is allowed.  IOW, only currently pending or running
+ * work items on @wq can queue further work items on it.  @wq is flushed
+ * repeatedly until it becomes empty.  The number of flushes is determined
+ * by the depth of chaining and should be relatively short.  Whine if it
+ * takes too long.
  */
-int flush_work(struct work_struct *work)
+void drain_workqueue(struct workqueue_struct *wq)
+{
+       unsigned int flush_cnt = 0;
+       unsigned int cpu;
+
+       /*
+        * __queue_work() needs to test whether there are drainers.  It is
+        * much hotter than drain_workqueue() and already looks at @wq->flags.
+        * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
+        */
+       spin_lock(&workqueue_lock);
+       if (!wq->nr_drainers++)
+               wq->flags |= WQ_DRAINING;
+       spin_unlock(&workqueue_lock);
+reflush:
+       flush_workqueue(wq);
+
+       for_each_cwq_cpu(cpu, wq) {
+               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+               bool drained;
+
+               spin_lock_irq(&cwq->gcwq->lock);
+               drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+               spin_unlock_irq(&cwq->gcwq->lock);
+
+               if (drained)
+                       continue;
+
+               if (++flush_cnt == 10 ||
+                   (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+                       pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
+                                  wq->name, flush_cnt);
+               goto reflush;
+       }
+
+       spin_lock(&workqueue_lock);
+       if (!--wq->nr_drainers)
+               wq->flags &= ~WQ_DRAINING;
+       spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(drain_workqueue);
+
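
drain_workqueue() tolerates only chain queueing: work queued by a work item already running on the same workqueue. A hypothetical sketch of such a self-requeueing work (ex_* names invented); destroy_workqueue(), which now drains instead of merely flushing, keeps reflushing until the chain stops re-arming:

#include <linux/workqueue.h>

static struct workqueue_struct *ex_wq;	/* assumed to be created elsewhere */
static int ex_rounds = 3;

static void ex_chained_fn(struct work_struct *work)
{
	/*
	 * Queueing from a work item running on ex_wq itself is "chained"
	 * and therefore still allowed while WQ_DRAINING is set.
	 */
	if (--ex_rounds > 0)
		queue_work(ex_wq, work);
}
static DECLARE_WORK(ex_chain, ex_chained_fn);
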
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+                            bool wait_executing)
 {
        struct worker *worker = NULL;
        struct global_cwq *gcwq;
        struct cpu_workqueue_struct *cwq;
-       struct wq_barrier barr;
 
        might_sleep();
        gcwq = get_work_gcwq(work);
        if (!gcwq)
-               return 0;
+               return false;
 
        spin_lock_irq(&gcwq->lock);
        if (!list_empty(&work->entry)) {
@@ -2245,28 +2455,137 @@ int flush_work(struct work_struct *work)
                cwq = get_work_cwq(work);
                if (unlikely(!cwq || gcwq != cwq->gcwq))
                        goto already_gone;
-       } else {
+       } else if (wait_executing) {
                worker = find_worker_executing_work(gcwq, work);
                if (!worker)
                        goto already_gone;
                cwq = worker->current_cwq;
-       }
+       } else
+               goto already_gone;
 
-       insert_wq_barrier(cwq, &barr, work, worker);
+       insert_wq_barrier(cwq, barr, work, worker);
        spin_unlock_irq(&gcwq->lock);
 
-       lock_map_acquire(&cwq->wq->lockdep_map);
+       /*
+        * If @max_active is 1 or rescuer is in use, flushing another work
+        * item on the same workqueue may lead to deadlock.  Make sure the
+        * flusher is not running on the same workqueue by verifying write
+        * access.
+        */
+       if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+               lock_map_acquire(&cwq->wq->lockdep_map);
+       else
+               lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);
 
-       wait_for_completion(&barr.done);
-       destroy_work_on_stack(&barr.work);
-       return 1;
+       return true;
 already_gone:
        spin_unlock_irq(&gcwq->lock);
-       return 0;
+       return false;
+}
+
+/**
+ * flush_work - wait for a work to finish executing the last queueing instance
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution.  This function considers
+ * only the last queueing instance of @work.  If @work has been
+ * enqueued across different CPUs on a non-reentrant workqueue or on
+ * multiple workqueues, @work might still be executing on return on
+ * some of the CPUs from earlier queueing.
+ *
+ * If @work was queued only on a non-reentrant, ordered or unbound
+ * workqueue, @work is guaranteed to be idle on return if it hasn't
+ * been requeued since flush started.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work(struct work_struct *work)
+{
+       struct wq_barrier barr;
+
+       if (start_flush_work(work, &barr, true)) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+               return true;
+       } else
+               return false;
 }
 EXPORT_SYMBOL_GPL(flush_work);
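
As the kernel-doc above explains, flush_work() waits only for the last queueing instance, so a typical teardown first stops new submissions and then flushes, or uses cancel_work_sync() when the work can re-arm itself. A hypothetical sketch (struct ex_dev and its fields are invented):

#include <linux/types.h>
#include <linux/workqueue.h>

struct ex_dev {
	bool stopping;				/* checked before re-queueing */
	struct work_struct refresh_work;
};

static void ex_stop(struct ex_dev *dev)
{
	dev->stopping = true;			/* stop new submissions first */
	flush_work(&dev->refresh_work);		/* wait for the last instance */
	/* or: cancel_work_sync(&dev->refresh_work) if it may re-arm itself */
}
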
 
+static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
+{
+       struct wq_barrier barr;
+       struct worker *worker;
+
+       spin_lock_irq(&gcwq->lock);
+
+       worker = find_worker_executing_work(gcwq, work);
+       if (unlikely(worker))
+               insert_wq_barrier(worker->current_cwq, &barr, work, worker);
+
+       spin_unlock_irq(&gcwq->lock);
+
+       if (unlikely(worker)) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+               return true;
+       } else
+               return false;
+}
+
+static bool wait_on_work(struct work_struct *work)
+{
+       bool ret = false;
+       int cpu;
+
+       might_sleep();
+
+       lock_map_acquire(&work->lockdep_map);
+       lock_map_release(&work->lockdep_map);
+
+       for_each_gcwq_cpu(cpu)
+               ret |= wait_on_cpu_work(get_gcwq(cpu), work);
+       return ret;
+}
+
+/**
+ * flush_work_sync - wait until a work has finished execution
+ * @work: the work to flush
+ *
+ * Wait until @work has finished execution.  On return, it's
+ * guaranteed that all queueing instances of @work which happened
+ * before this function is called are finished.  In other words, if
+ * @work hasn't been requeued since this function was called, @work is
+ * guaranteed to be idle on return.
+ *
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_work_sync(struct work_struct *work)
+{
+       struct wq_barrier barr;
+       bool pending, waited;
+
+       /* we'll wait for executions separately, queue barr only if pending */
+       pending = start_flush_work(work, &barr, false);
+
+       /* wait for executions to finish */
+       waited = wait_on_work(work);
+
+       /* wait for the pending one */
+       if (pending) {
+               wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+       }
+
+       return pending || waited;
+}
+EXPORT_SYMBOL_GPL(flush_work_sync);
+
 /*
  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
  * so this work can't be re-armed in any way.
@@ -2299,7 +2618,8 @@ static int try_to_grab_pending(struct work_struct *work)
                        debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        cwq_dec_nr_in_flight(get_work_cwq(work),
-                                            get_work_color(work));
+                               get_work_color(work),
+                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
                        ret = 1;
                }
        }
@@ -2308,39 +2628,7 @@ static int try_to_grab_pending(struct work_struct *work)
        return ret;
 }
 
-static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
-{
-       struct wq_barrier barr;
-       struct worker *worker;
-
-       spin_lock_irq(&gcwq->lock);
-
-       worker = find_worker_executing_work(gcwq, work);
-       if (unlikely(worker))
-               insert_wq_barrier(worker->current_cwq, &barr, work, worker);
-
-       spin_unlock_irq(&gcwq->lock);
-
-       if (unlikely(worker)) {
-               wait_for_completion(&barr.done);
-               destroy_work_on_stack(&barr.work);
-       }
-}
-
-static void wait_on_work(struct work_struct *work)
-{
-       int cpu;
-
-       might_sleep();
-
-       lock_map_acquire(&work->lockdep_map);
-       lock_map_release(&work->lockdep_map);
-
-       for_each_possible_cpu(cpu)
-               wait_on_cpu_work(get_gcwq(cpu), work);
-}
-
-static int __cancel_work_timer(struct work_struct *work,
+static bool __cancel_work_timer(struct work_struct *work,
                                struct timer_list* timer)
 {
        int ret;
@@ -2357,42 +2645,81 @@ static int __cancel_work_timer(struct work_struct *work,
 }
 
 /**
- * cancel_work_sync - block until a work_struct's callback has terminated
- * @work: the work which is to be flushed
- *
- * Returns true if @work was pending.
+ * cancel_work_sync - cancel a work and wait for it to finish
+ * @work: the work to cancel
  *
- * cancel_work_sync() will cancel the work if it is queued. If the work's
- * callback appears to be running, cancel_work_sync() will block until it
- * has completed.
+ * Cancel @work and wait for its execution to finish.  This function
+ * can be used even if the work re-queues itself or migrates to
+ * another workqueue.  On return from this function, @work is
+ * guaranteed to be not pending or executing on any CPU.
  *
- * It is possible to use this function if the work re-queues itself. It can
- * cancel the work even if it migrates to another workqueue, however in that
- * case it only guarantees that work->func() has completed on the last queued
- * workqueue.
+ * cancel_work_sync(&delayed_work->work) must not be used for
+ * delayed_work's.  Use cancel_delayed_work_sync() instead.
  *
- * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
- * pending, otherwise it goes into a busy-wait loop until the timer expires.
- *
- * The caller must ensure that workqueue_struct on which this work was last
+ * The caller must ensure that the workqueue on which @work was last
  * queued can't be destroyed before this function returns.
+ *
+ * RETURNS:
+ * %true if @work was pending, %false otherwise.
  */
-int cancel_work_sync(struct work_struct *work)
+bool cancel_work_sync(struct work_struct *work)
 {
        return __cancel_work_timer(work, NULL);
 }
 EXPORT_SYMBOL_GPL(cancel_work_sync);
 
 /**
- * cancel_delayed_work_sync - reliably kill off a delayed work.
- * @dwork: the delayed work struct
+ * flush_delayed_work - wait for a dwork to finish executing the last queueing
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * immediate execution.  Like flush_work(), this function only
+ * considers the last queueing instance of @dwork.
+ *
+ * RETURNS:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer))
+               __queue_work(raw_smp_processor_id(),
+                            get_work_cwq(&dwork->work)->wq, &dwork->work);
+       return flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
+ * flush_delayed_work_sync - wait for a dwork to finish
+ * @dwork: the delayed work to flush
  *
- * Returns true if @dwork was pending.
+ * Delayed timer is cancelled and the pending work is queued for
+ * execution immediately.  Other than timer handling, its behavior
+ * is identical to flush_work_sync().
  *
- * It is possible to use this function if @dwork rearms itself via queue_work()
- * or queue_delayed_work(). See also the comment for cancel_work_sync().
+ * RETURNS:
+ * %true if flush_work_sync() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work_sync(struct delayed_work *dwork)
+{
+       if (del_timer_sync(&dwork->timer))
+               __queue_work(raw_smp_processor_id(),
+                            get_work_cwq(&dwork->work)->wq, &dwork->work);
+       return flush_work_sync(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work_sync);
+
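
A hypothetical sketch tying the delayed-work helpers together: a self re-arming poller uses flush_delayed_work() to push a pending cycle through immediately and cancel_delayed_work_sync() to stop it for good (ex_* names invented):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void ex_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(ex_poll, ex_poll_fn);

static void ex_poll_fn(struct work_struct *work)
{
	/* ... poll the hardware ..., then re-arm */
	queue_delayed_work(system_wq, &ex_poll, msecs_to_jiffies(500));
}

static void ex_poll_stop(void)
{
	flush_delayed_work(&ex_poll);		/* run any pending cycle now */
	cancel_delayed_work_sync(&ex_poll);	/* then stop the re-arming loop */
}
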
+/**
+ * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
+ * @dwork: the delayed work to cancel
+ *
+ * This is cancel_work_sync() for delayed works.
+ *
+ * RETURNS:
+ * %true if @dwork was pending, %false otherwise.
  */
-int cancel_delayed_work_sync(struct delayed_work *dwork)
+bool cancel_delayed_work_sync(struct delayed_work *dwork)
 {
        return __cancel_work_timer(&dwork->work, &dwork->timer);
 }
@@ -2444,23 +2771,6 @@ int schedule_delayed_work(struct delayed_work *dwork,
 EXPORT_SYMBOL(schedule_delayed_work);
 
 /**
- * flush_delayed_work - block until a dwork_struct's callback has terminated
- * @dwork: the delayed work which is to be flushed
- *
- * Any timeout is cancelled, and any pending work is run immediately.
- */
-void flush_delayed_work(struct delayed_work *dwork)
-{
-       if (del_timer_sync(&dwork->timer)) {
-               __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
-                            &dwork->work);
-               put_cpu();
-       }
-       flush_work(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work);
-
-/**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
  * @cpu: cpu to use
  * @dwork: job to be done
@@ -2477,18 +2787,20 @@ int schedule_delayed_work_on(int cpu,
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
- * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call
  *
- * Returns zero on success.
- * Returns -ve errno on failure.
- *
+ * schedule_on_each_cpu() executes @func on each online CPU using the
+ * system workqueue and blocks until all CPUs have completed.
  * schedule_on_each_cpu() is very slow.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
 {
        int cpu;
-       struct work_struct *works;
+       struct work_struct __percpu *works;
 
        works = alloc_percpu(struct work_struct);
        if (!works)
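
A hypothetical caller of schedule_on_each_cpu(): the per-cpu work_structs are allocated internally (the hunk above only tightens their type to __percpu), so the caller supplies nothing but a plain work function and blocks until every online CPU has run it:

#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void ex_percpu_fn(struct work_struct *work)
{
	pr_info("ran on cpu %d\n", raw_smp_processor_id());
}

static int ex_run_everywhere(void)
{
	return schedule_on_each_cpu(ex_percpu_fn);	/* 0 or -errno */
}
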
@@ -2572,7 +2884,7 @@ int keventd_up(void)
        return system_wq != NULL;
 }
 
-static struct cpu_workqueue_struct *alloc_cwqs(void)
+static int alloc_cwqs(struct workqueue_struct *wq)
 {
        /*
         * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
@@ -2582,86 +2894,108 @@ static struct cpu_workqueue_struct *alloc_cwqs(void)
        const size_t size = sizeof(struct cpu_workqueue_struct);
        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
                                   __alignof__(unsigned long long));
-       struct cpu_workqueue_struct *cwqs;
-#ifndef CONFIG_SMP
-       void *ptr;
 
-       /*
-        * On UP, percpu allocator doesn't honor alignment parameter
-        * and simply uses arch-dependent default.  Allocate enough
-        * room to align cwq and put an extra pointer at the end
-        * pointing back to the originally allocated pointer which
-        * will be used for free.
-        *
-        * FIXME: This really belongs to UP percpu code.  Update UP
-        * percpu code to honor alignment and remove this ugliness.
-        */
-       ptr = __alloc_percpu(size + align + sizeof(void *), 1);
-       cwqs = PTR_ALIGN(ptr, align);
-       *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
-#else
-       /* On SMP, percpu allocator can do it itself */
-       cwqs = __alloc_percpu(size, align);
-#endif
+       if (!(wq->flags & WQ_UNBOUND))
+               wq->cpu_wq.pcpu = __alloc_percpu(size, align);
+       else {
+               void *ptr;
+
+               /*
+                * Allocate enough room to align cwq and put an extra
+                * pointer at the end pointing back to the originally
+                * allocated pointer which will be used for free.
+                */
+               ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
+               if (ptr) {
+                       wq->cpu_wq.single = PTR_ALIGN(ptr, align);
+                       *(void **)(wq->cpu_wq.single + 1) = ptr;
+               }
+       }
+
        /* just in case, make sure it's actually aligned */
-       BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
-       return cwqs;
+       BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
+       return wq->cpu_wq.v ? 0 : -ENOMEM;
 }
 
-static void free_cwqs(struct cpu_workqueue_struct *cwqs)
+static void free_cwqs(struct workqueue_struct *wq)
 {
-#ifndef CONFIG_SMP
-       /* on UP, the pointer to free is stored right after the cwq */
-       if (cwqs)
-               free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
-#else
-       free_percpu(cwqs);
-#endif
+       if (!(wq->flags & WQ_UNBOUND))
+               free_percpu(wq->cpu_wq.pcpu);
+       else if (wq->cpu_wq.single) {
+               /* the pointer to free is stored right after the cwq */
+               kfree(*(void **)(wq->cpu_wq.single + 1));
+       }
 }
 
-static int wq_clamp_max_active(int max_active, const char *name)
+static int wq_clamp_max_active(int max_active, unsigned int flags,
+                              const char *name)
 {
-       if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
+       int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
+
+       if (max_active < 1 || max_active > lim)
                printk(KERN_WARNING "workqueue: max_active %d requested for %s "
                       "is out of range, clamping between %d and %d\n",
-                      max_active, name, 1, WQ_MAX_ACTIVE);
+                      max_active, name, 1, lim);
 
-       return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
+       return clamp_val(max_active, 1, lim);
 }
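
With the name now embedded in the structure and __alloc_workqueue_key() taking a format string just below, alloc_workqueue() becomes printf-like. A hypothetical caller, also using WQ_MEM_RECLAIM, which this patch turns into an implicit request for a rescuer thread (ex_* names invented):

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *ex_io_wq;

static int ex_create_wq(int instance)
{
	/* the formatted name, e.g. "ex_io/0", is stored in wq->name[] itself */
	ex_io_wq = alloc_workqueue("ex_io/%d", WQ_MEM_RECLAIM | WQ_UNBOUND,
				   1, instance);
	return ex_io_wq ? 0 : -ENOMEM;
}

static void ex_destroy_wq(void)
{
	destroy_workqueue(ex_io_wq);	/* drains the queue before tearing down */
}
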
 
-struct workqueue_struct *__alloc_workqueue_key(const char *name,
+struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
                                               unsigned int flags,
                                               int max_active,
                                               struct lock_class_key *key,
-                                              const char *lock_name)
+                                              const char *lock_name, ...)
 {
+       va_list args, args1;
        struct workqueue_struct *wq;
        unsigned int cpu;
+       size_t namelen;
 
-       max_active = max_active ?: WQ_DFL_ACTIVE;
-       max_active = wq_clamp_max_active(max_active, name);
+       /* determine namelen, allocate wq and format name */
+       va_start(args, lock_name);
+       va_copy(args1, args);
+       namelen = vsnprintf(NULL, 0, fmt, args) + 1;
 
-       wq = kzalloc(sizeof(*wq), GFP_KERNEL);
+       wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
        if (!wq)
                goto err;
 
-       wq->cpu_wq = alloc_cwqs();
-       if (!wq->cpu_wq)
-               goto err;
+       vsnprintf(wq->name, namelen, fmt, args1);
+       va_end(args);
+       va_end(args1);
+
+       /*
+        * Workqueues which may be used during memory reclaim should
+        * have a rescuer to guarantee forward progress.
+        */
+       if (flags & WQ_MEM_RECLAIM)
+               flags |= WQ_RESCUER;
+
+       /*
+        * Unbound workqueues aren't concurrency managed and should be
+        * dispatched to workers immediately.
+        */
+       if (flags & WQ_UNBOUND)
+               flags |= WQ_HIGHPRI;
+
+       max_active = max_active ?: WQ_DFL_ACTIVE;
+       max_active = wq_clamp_max_active(max_active, flags, wq->name);
 
+       /* init wq */
        wq->flags = flags;
        wq->saved_max_active = max_active;
        mutex_init(&wq->flush_mutex);
        atomic_set(&wq->nr_cwqs_to_flush, 0);
        INIT_LIST_HEAD(&wq->flusher_queue);
        INIT_LIST_HEAD(&wq->flusher_overflow);
-       wq->single_cpu = NR_CPUS;
 
-       wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);
 
-       for_each_possible_cpu(cpu) {
+       if (alloc_cwqs(wq) < 0)
+               goto err;
+
+       for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
                struct global_cwq *gcwq = get_gcwq(cpu);
 
@@ -2676,18 +3010,18 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        if (flags & WQ_RESCUER) {
                struct worker *rescuer;
 
-               if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL))
+               if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
                        goto err;
 
                wq->rescuer = rescuer = alloc_worker();
                if (!rescuer)
                        goto err;
 
-               rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
+               rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+                                              wq->name);
                if (IS_ERR(rescuer->task))
                        goto err;
 
-               wq->rescuer = rescuer;
                rescuer->task->flags |= PF_THREAD_BOUND;
                wake_up_process(rescuer->task);
        }
@@ -2699,8 +3033,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
         */
        spin_lock(&workqueue_lock);
 
-       if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
-               for_each_possible_cpu(cpu)
+       if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
+               for_each_cwq_cpu(cpu, wq)
                        get_cwq(cpu, wq)->max_active = 0;
 
        list_add(&wq->list, &workqueues);
@@ -2710,8 +3044,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        return wq;
 err:
        if (wq) {
-               free_cwqs(wq->cpu_wq);
-               free_cpumask_var(wq->mayday_mask);
+               free_cwqs(wq);
+               free_mayday_mask(wq->mayday_mask);
                kfree(wq->rescuer);
                kfree(wq);
        }
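
A minimal caller-side sketch of the reworked allocation path above, showing
the printf-style naming and the WQ_MEM_RECLAIM-to-rescuer upgrade.  The
"mydrv" names and the instance id are illustrative, and it is assumed that
the alloc_workqueue() wrapper forwards trailing format arguments after
max_active, as in mainline kernels of this period:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_wq;       /* hypothetical driver workqueue */

static int mydrv_create_wq(int id)
{
        /* reclaim-safe (gets a rescuer), one in-flight item at a time,
         * instance id embedded in the formatted workqueue name */
        mydrv_wq = alloc_workqueue("mydrv/%d", WQ_MEM_RECLAIM, 1, id);
        if (!mydrv_wq)
                return -ENOMEM;
        return 0;
}
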
@@ -2729,7 +3063,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
        unsigned int cpu;
 
-       flush_workqueue(wq);
+       /* drain it before proceeding with destruction */
+       drain_workqueue(wq);
 
        /*
         * wq list is used to freeze wq, remove from list after
@@ -2740,7 +3075,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        spin_unlock(&workqueue_lock);
 
        /* sanity check */
-       for_each_possible_cpu(cpu) {
+       for_each_cwq_cpu(cpu, wq) {
                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
                int i;
 
@@ -2752,10 +3087,11 @@ void destroy_workqueue(struct workqueue_struct *wq)
 
        if (wq->flags & WQ_RESCUER) {
                kthread_stop(wq->rescuer->task);
-               free_cpumask_var(wq->mayday_mask);
+               free_mayday_mask(wq->mayday_mask);
+               kfree(wq->rescuer);
        }
 
-       free_cwqs(wq->cpu_wq);
+       free_cwqs(wq);
        kfree(wq);
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);
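
Because destroy_workqueue() now drains rather than merely flushes, a
teardown path no longer needs its own flush/cancel loop for work items that
requeue themselves; a sketch, reusing the illustrative mydrv_wq from the
allocation sketch above:

static void mydrv_destroy_wq(void)
{
        /* drain_workqueue() inside keeps flushing until nothing is pending
         * or in flight, so self-requeueing work is allowed to wind down */
        destroy_workqueue(mydrv_wq);
        mydrv_wq = NULL;
}
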
@@ -2774,18 +3110,18 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 {
        unsigned int cpu;
 
-       max_active = wq_clamp_max_active(max_active, wq->name);
+       max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
        spin_lock(&workqueue_lock);
 
        wq->saved_max_active = max_active;
 
-       for_each_possible_cpu(cpu) {
+       for_each_cwq_cpu(cpu, wq) {
                struct global_cwq *gcwq = get_gcwq(cpu);
 
                spin_lock_irq(&gcwq->lock);
 
-               if (!(wq->flags & WQ_FREEZEABLE) ||
+               if (!(wq->flags & WQ_FREEZABLE) ||
                    !(gcwq->flags & GCWQ_FREEZING))
                        get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
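
workqueue_set_max_active() retunes a workqueue's concurrency limit at
runtime; the value is still clamped by wq_clamp_max_active() as above and,
for freezable workqueues, application is deferred while the gcwq is
freezing.  A sketch with an illustrative limit of 16:

static void mydrv_widen_wq(struct workqueue_struct *wq)
{
        /* allow up to 16 concurrently active work items (per cpu for
         * cpu-bound workqueues) */
        workqueue_set_max_active(wq, 16);
}
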
@@ -2821,13 +3157,13 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  * @work: the work of interest
  *
  * RETURNS:
- * CPU number if @work was ever queued.  NR_CPUS otherwise.
+ * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
  */
 unsigned int work_cpu(struct work_struct *work)
 {
        struct global_cwq *gcwq = get_work_gcwq(work);
 
-       return gcwq ? gcwq->cpu : NR_CPUS;
+       return gcwq ? gcwq->cpu : WORK_CPU_NONE;
 }
 EXPORT_SYMBOL_GPL(work_cpu);
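
With WORK_CPU_NONE replacing NR_CPUS as the "never queued" return value, a
debugging helper can make the distinction explicit; a sketch (the pr_debug
strings are illustrative):

static void mydrv_report_work_cpu(struct work_struct *work)
{
        unsigned int cpu = work_cpu(work);

        if (cpu == WORK_CPU_NONE)
                pr_debug("work %p was never queued\n", work);
        else
                pr_debug("work %p last associated with cpu %u\n", work, cpu);
}
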
 
@@ -3035,7 +3371,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
         * want to get it over with ASAP - spam rescuers, wake up as
         * many idlers as necessary and create new ones till the
         * worklist is empty.  Note that if the gcwq is frozen, there
-        * may be frozen works in freezeable cwqs.  Don't declare
+        * may be frozen works in freezable cwqs.  Don't declare
         * completion while frozen.
         */
        while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3135,6 +3471,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times.  To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!(gcwq->trustee_state == state ||
              gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3291,16 +3629,15 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
  */
 void freeze_workqueues_begin(void)
 {
-       struct workqueue_struct *wq;
        unsigned int cpu;
 
        spin_lock(&workqueue_lock);
@@ -3308,8 +3645,9 @@ void freeze_workqueues_begin(void)
        BUG_ON(workqueue_freezing);
        workqueue_freezing = true;
 
-       for_each_possible_cpu(cpu) {
+       for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
+               struct workqueue_struct *wq;
 
                spin_lock_irq(&gcwq->lock);
 
@@ -3319,7 +3657,7 @@ void freeze_workqueues_begin(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (wq->flags & WQ_FREEZEABLE)
+                       if (cwq && wq->flags & WQ_FREEZABLE)
                                cwq->max_active = 0;
                }
 
@@ -3330,7 +3668,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3339,12 +3677,11 @@ void freeze_workqueues_begin(void)
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
-       struct workqueue_struct *wq;
        unsigned int cpu;
        bool busy = false;
 
@@ -3352,7 +3689,8 @@ bool freeze_workqueues_busy(void)
 
        BUG_ON(!workqueue_freezing);
 
-       for_each_possible_cpu(cpu) {
+       for_each_gcwq_cpu(cpu) {
+               struct workqueue_struct *wq;
                /*
                 * nr_active is monotonically decreasing.  It's safe
                 * to peek without lock.
@@ -3360,7 +3698,7 @@ bool freeze_workqueues_busy(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
                        BUG_ON(cwq->nr_active < 0);
@@ -3386,7 +3724,6 @@ out_unlock:
  */
 void thaw_workqueues(void)
 {
-       struct workqueue_struct *wq;
        unsigned int cpu;
 
        spin_lock(&workqueue_lock);
@@ -3394,8 +3731,9 @@ void thaw_workqueues(void)
        if (!workqueue_freezing)
                goto out_unlock;
 
-       for_each_possible_cpu(cpu) {
+       for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
+               struct workqueue_struct *wq;
 
                spin_lock_irq(&gcwq->lock);
 
@@ -3405,7 +3743,7 @@ void thaw_workqueues(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
                        /* restore max_active and repopulate worklist */
@@ -3414,11 +3752,6 @@ void thaw_workqueues(void)
                        while (!list_empty(&cwq->delayed_works) &&
                               cwq->nr_active < cwq->max_active)
                                cwq_activate_first_delayed(cwq);
-
-                       /* perform delayed unbind from single cpu if empty */
-                       if (wq->single_cpu == gcwq->cpu &&
-                           !cwq->nr_active && list_empty(&cwq->delayed_works))
-                               cwq_unbind_single_cpu(cwq);
                }
 
                wake_up_worker(gcwq);
@@ -3432,28 +3765,21 @@ out_unlock:
 }
 #endif /* CONFIG_FREEZER */
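
The three CONFIG_FREEZER entry points above are driven by the PM freezer
core rather than by ordinary workqueue users; the contract their comments
describe, sketched with an illustrative polling loop (msleep() is from
<linux/delay.h>):

#ifdef CONFIG_FREEZER
static void example_freeze_cycle(void)
{
        freeze_workqueues_begin();

        /* wait for freezable workqueues to finish in-flight work */
        while (freeze_workqueues_busy())
                msleep(10);             /* illustrative interval */

        /* ... hibernation image is written / system suspends here ... */

        thaw_workqueues();
}
#endif
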
 
-void __init init_workqueues(void)
+static int __init init_workqueues(void)
 {
        unsigned int cpu;
        int i;
 
-       /*
-        * The pointer part of work->data is either pointing to the
-        * cwq or contains the cpu number the work ran last on.  Make
-        * sure cpu number won't overflow into kernel pointer area so
-        * that they can be distinguished.
-        */
-       BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
-
-       hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
+       cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
 
        /* initialize gcwqs */
-       for_each_possible_cpu(cpu) {
+       for_each_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
 
                spin_lock_init(&gcwq->lock);
                INIT_LIST_HEAD(&gcwq->worklist);
                gcwq->cpu = cpu;
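+               /* start fully disassociated; online CPUs are re-associated
+                * when their initial worker is created further below */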
+               gcwq->flags |= GCWQ_DISASSOCIATED;
 
                INIT_LIST_HEAD(&gcwq->idle_list);
                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3473,10 +3799,12 @@ void __init init_workqueues(void)
        }
 
        /* create the initial worker */
-       for_each_online_cpu(cpu) {
+       for_each_online_gcwq_cpu(cpu) {
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker *worker;
 
+               if (cpu != WORK_CPU_UNBOUND)
+                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
                worker = create_worker(gcwq, true);
                BUG_ON(!worker);
                spin_lock_irq(&gcwq->lock);
@@ -3487,5 +3815,15 @@ void __init init_workqueues(void)
        system_wq = alloc_workqueue("events", 0, 0);
        system_long_wq = alloc_workqueue("events_long", 0, 0);
        system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
-       BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
+       system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
+                                           WQ_UNBOUND_MAX_ACTIVE);
+       system_freezable_wq = alloc_workqueue("events_freezable",
+                                             WQ_FREEZABLE, 0);
+       system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
+                       WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
+       BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
+              !system_unbound_wq || !system_freezable_wq ||
+              !system_nrt_freezable_wq);
+       return 0;
 }
+early_initcall(init_workqueues);
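
init_workqueues() now also provides system_unbound_wq, system_freezable_wq
and system_nrt_freezable_wq, and runs as an early_initcall so the system
workqueues exist before normal initcalls.  A sketch of queueing to the new
unbound pool (the mydrv_* names are illustrative):

static void mydrv_unbound_fn(struct work_struct *work)
{
        /* runs in process context on a worker not bound to any CPU */
}

static DECLARE_WORK(mydrv_unbound_work, mydrv_unbound_fn);

static void mydrv_kick(void)
{
        queue_work(system_unbound_wq, &mydrv_unbound_work);
}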