diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 026f778..5abf42f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -23,7 +23,7 @@
  * Please read Documentation/workqueue.txt for details.
  */
 
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -79,7 +79,9 @@ enum {
        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
 
-       MAYDAY_INITIAL_TIMEOUT  = HZ / 100,     /* call for help after 10ms */
+       MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+                                               /* call for help after 10ms
+                                                  (min two ticks) */
        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
        TRUSTEE_COOLDOWN        = HZ / 10,      /* for trustee draining */
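
The clamp above matters on low-HZ configurations: with HZ=100 the old value, HZ/100, is a single tick, which can expire almost immediately after the timer is armed. A minimal sketch of the same arithmetic, purely for illustration (this helper is not part of the patch):

/* Illustrative only: mayday timeout in ticks for a given HZ, mirroring
 * the MAYDAY_INITIAL_TIMEOUT clamp above. */
static inline unsigned long mayday_initial_timeout(unsigned long hz)
{
        return hz / 100 >= 2 ? hz / 100 : 2;    /* never less than two ticks */
}

/* mayday_initial_timeout(1000) == 10, (250) == 2, (100) == 2 rather than 1 */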
@@ -219,7 +221,7 @@ typedef unsigned long mayday_mask_t;
  * per-CPU workqueues:
  */
 struct workqueue_struct {
-       unsigned int            flags;          /* I: WQ_* flags */
+       unsigned int            flags;          /* W: WQ_* flags */
        union {
                struct cpu_workqueue_struct __percpu    *pcpu;
                struct cpu_workqueue_struct             *single;
@@ -238,21 +240,26 @@ struct workqueue_struct {
        mayday_mask_t           mayday_mask;    /* cpus requesting rescue */
        struct worker           *rescuer;       /* I: rescue worker */
 
+       int                     nr_drainers;    /* W: drain in progress */
        int                     saved_max_active; /* W: saved cwq max_active */
-       const char              *name;          /* I: workqueue name */
 #ifdef CONFIG_LOCKDEP
        struct lockdep_map      lockdep_map;
 #endif
+       char                    name[];         /* I: workqueue name */
 };
 
 struct workqueue_struct *system_wq __read_mostly;
 struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
 struct workqueue_struct *system_unbound_wq __read_mostly;
+struct workqueue_struct *system_freezable_wq __read_mostly;
+struct workqueue_struct *system_nrt_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
+EXPORT_SYMBOL_GPL(system_freezable_wq);
+EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
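
The two new system workqueues exported above give drivers a shared freezable queue (plus a non-reentrant variant) without allocating their own. A hypothetical usage sketch; my_work, my_work_fn and my_trigger are illustrative names, not part of this patch:

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* executes from a freezable worker; held off while the system is frozen */
}
static DECLARE_WORK(my_work, my_work_fn);

static void my_trigger(void)
{
        queue_work(system_freezable_wq, &my_work);
}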
@@ -310,25 +317,15 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
             (cpu) < WORK_CPU_NONE;                                     \
             (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
 
-#ifdef CONFIG_LOCKDEP
-/**
- * in_workqueue_context() - in context of specified workqueue?
- * @wq: the workqueue of interest
- *
- * Checks lockdep state to see if the current task is executing from
- * within a workqueue item.  This function exists only if lockdep is
- * enabled.
- */
-int in_workqueue_context(struct workqueue_struct *wq)
-{
-       return lock_is_held(&wq->lockdep_map);
-}
-#endif
-
 #ifdef CONFIG_DEBUG_OBJECTS_WORK
 
 static struct debug_obj_descr work_debug_descr;
 
+static void *work_debug_hint(void *addr)
+{
+       return ((struct work_struct *) addr)->func;
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
@@ -400,6 +397,7 @@ static int work_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
+       .debug_hint     = work_debug_hint,
        .fixup_init     = work_fixup_init,
        .fixup_activate = work_fixup_activate,
        .fixup_free     = work_fixup_free,
@@ -478,13 +476,8 @@ static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
                                            struct workqueue_struct *wq)
 {
        if (!(wq->flags & WQ_UNBOUND)) {
-               if (likely(cpu < nr_cpu_ids)) {
-#ifdef CONFIG_SMP
+               if (likely(cpu < nr_cpu_ids))
                        return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
-#else
-                       return wq->cpu_wq.single;
-#endif
-               }
        } else if (likely(cpu == WORK_CPU_UNBOUND))
                return wq->cpu_wq.single;
        return NULL;
@@ -604,7 +597,9 @@ static bool keep_working(struct global_cwq *gcwq)
 {
        atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 
-       return !list_empty(&gcwq->worklist) && atomic_read(nr_running) <= 1;
+       return !list_empty(&gcwq->worklist) &&
+               (atomic_read(nr_running) <= 1 ||
+                gcwq->flags & GCWQ_HIGHPRI_PENDING);
 }
 
 /* Do we need a new worker?  Called from manager. */
@@ -674,7 +669,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 {
        struct worker *worker = kthread_data(task);
 
-       if (likely(!(worker->flags & WORKER_NOT_RUNNING)))
+       if (!(worker->flags & WORKER_NOT_RUNNING))
                atomic_inc(get_gcwq_nr_running(cpu));
 }
 
@@ -700,7 +695,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
        struct global_cwq *gcwq = get_gcwq(cpu);
        atomic_t *nr_running = get_gcwq_nr_running(cpu);
 
-       if (unlikely(worker->flags & WORKER_NOT_RUNNING))
+       if (worker->flags & WORKER_NOT_RUNNING)
                return NULL;
 
        /* this can only happen on the local cpu */
@@ -781,7 +776,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
        worker->flags &= ~flags;
 
-       /* if transitioning out of NOT_RUNNING, increment nr_running */
+       /*
+        * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+        * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
+        * of multiple flags, not a single flag.
+        */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
                        atomic_inc(get_gcwq_nr_running(gcwq->cpu));
@@ -945,6 +944,38 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
                wake_up_worker(gcwq);
 }
 
+/*
+ * Test whether @work is being queued from another work executing on the
+ * same workqueue.  This is rather expensive and should only be used from
+ * cold paths.
+ */
+static bool is_chained_work(struct workqueue_struct *wq)
+{
+       unsigned long flags;
+       unsigned int cpu;
+
+       for_each_gcwq_cpu(cpu) {
+               struct global_cwq *gcwq = get_gcwq(cpu);
+               struct worker *worker;
+               struct hlist_node *pos;
+               int i;
+
+               spin_lock_irqsave(&gcwq->lock, flags);
+               for_each_busy_worker(worker, i, pos, gcwq) {
+                       if (worker->task != current)
+                               continue;
+                       spin_unlock_irqrestore(&gcwq->lock, flags);
+                       /*
+                        * I'm @worker, no locking necessary.  See if @work
+                        * is headed to the same workqueue.
+                        */
+                       return worker->current_cwq->wq == wq;
+               }
+               spin_unlock_irqrestore(&gcwq->lock, flags);
+       }
+       return false;
+}
+
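is_chained_work() is what lets a draining workqueue keep accepting "chained" submissions, i.e. work items queued by a work item already executing on that same workqueue. A hypothetical example of the pattern it recognizes (chain_wq, requeue_fn and more_to_do are illustrative names):

static struct workqueue_struct *chain_wq;       /* hypothetical */

static void requeue_fn(struct work_struct *work)
{
        if (more_to_do())                       /* hypothetical predicate */
                queue_work(chain_wq, work);     /* chained: allowed even while draining */
}
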
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
                         struct work_struct *work)
 {
@@ -956,7 +987,9 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        debug_work_activate(work);
 
-       if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+       /* if dying, only work items from the same workqueue are allowed */
+       if (unlikely(wq->flags & WQ_DRAINING) &&
+           WARN_ON_ONCE(!is_chained_work(wq)))
                return;
 
        /* determine gcwq to use */
@@ -997,6 +1030,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
        /* gcwq determined, get cwq and queue */
        cwq = get_cwq(gcwq->cpu, wq);
+       trace_workqueue_queue_work(cpu, cwq, work);
 
        BUG_ON(!list_empty(&work->entry));
 
@@ -1004,6 +1038,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        work_flags = work_color_to_flags(cwq->work_color);
 
        if (likely(cwq->nr_active < cwq->max_active)) {
+               trace_workqueue_activate_work(work);
                cwq->nr_active++;
                worklist = gcwq_determine_ins_pos(gcwq, cwq);
        } else {
@@ -1254,8 +1289,14 @@ __acquires(&gcwq->lock)
                        return true;
                spin_unlock_irq(&gcwq->lock);
 
-               /* CPU has come up inbetween, retry migration */
+               /*
+                * We've raced with CPU hot[un]plug.  Give it a breather
+                * and retry migration.  cond_resched() is required here;
+                * otherwise, we might deadlock against cpu_stop trying to
+                * bring down the CPU on a non-preemptive kernel.
+                */
                cpu_relax();
+               cond_resched();
        }
 }
 
@@ -1329,8 +1370,10 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
        worker->id = id;
 
        if (!on_unbound_cpu)
-               worker->task = kthread_create(worker_thread, worker,
-                                             "kworker/%u:%d", gcwq->cpu, id);
+               worker->task = kthread_create_on_node(worker_thread,
+                                                     worker,
+                                                     cpu_to_node(gcwq->cpu),
+                                                     "kworker/%u:%d", gcwq->cpu, id);
        else
                worker->task = kthread_create(worker_thread, worker,
                                              "kworker/u:%d", id);
@@ -1679,6 +1722,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
                                                    struct work_struct, entry);
        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
+       trace_workqueue_activate_work(work);
        move_linked_works(work, pos, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
@@ -1816,7 +1860,7 @@ __acquires(&gcwq->lock)
        spin_unlock_irq(&gcwq->lock);
 
        work_clear_pending(work);
-       lock_map_acquire(&cwq->wq->lockdep_map);
+       lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
        f(work);
@@ -2019,6 +2063,15 @@ repeat:
                                move_linked_works(work, scheduled, &n);
 
                process_scheduled_works(rescuer);
+
+               /*
+                * Leave this gcwq.  If keep_working() is %true, notify a
+                * regular worker; otherwise, we end up with 0 concurrency
+                * and stall execution.
+                */
+               if (keep_working(gcwq))
+                       wake_up_worker(gcwq);
+
                spin_unlock_irq(&gcwq->lock);
        }
 
@@ -2074,7 +2127,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
         * checks and call back into the fixup functions where we
         * might deadlock.
         */
-       INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
+       INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
        init_completion(&barr->done);
 
@@ -2326,6 +2379,59 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
+/**
+ * drain_workqueue - drain a workqueue
+ * @wq: workqueue to drain
+ *
+ * Wait until the workqueue becomes empty.  While draining is in progress,
+ * only chain queueing is allowed.  IOW, only currently pending or running
+ * work items on @wq can queue further work items on it.  @wq is flushed
+ * repeatedly until it becomes empty.  The number of flushes is determined
+ * by the depth of chaining and should be relatively short.  Whine if it
+ * takes too long.
+ */
+void drain_workqueue(struct workqueue_struct *wq)
+{
+       unsigned int flush_cnt = 0;
+       unsigned int cpu;
+
+       /*
+        * __queue_work() needs to test whether there are drainers; it is
+        * much hotter than drain_workqueue() and already looks at @wq->flags.
+        * Use WQ_DRAINING so that queueing doesn't have to check nr_drainers.
+        */
+       spin_lock(&workqueue_lock);
+       if (!wq->nr_drainers++)
+               wq->flags |= WQ_DRAINING;
+       spin_unlock(&workqueue_lock);
+reflush:
+       flush_workqueue(wq);
+
+       for_each_cwq_cpu(cpu, wq) {
+               struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+               bool drained;
+
+               spin_lock_irq(&cwq->gcwq->lock);
+               drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+               spin_unlock_irq(&cwq->gcwq->lock);
+
+               if (drained)
+                       continue;
+
+               if (++flush_cnt == 10 ||
+                   (flush_cnt % 100 == 0 && flush_cnt <= 1000))
+                       pr_warning("workqueue %s: flush on destruction isn't complete after %u tries\n",
+                                  wq->name, flush_cnt);
+               goto reflush;
+       }
+
+       spin_lock(&workqueue_lock);
+       if (!--wq->nr_drainers)
+               wq->flags &= ~WQ_DRAINING;
+       spin_unlock(&workqueue_lock);
+}
+EXPORT_SYMBOL_GPL(drain_workqueue);
+
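destroy_workqueue() below switches to drain_workqueue(), but it can also be called directly when a caller must quiesce a queue before tearing down state its work items use. A hedged teardown sketch with hypothetical names (my_ctx, stopping and buf are illustrative):

static void my_teardown(struct my_ctx *ctx)     /* hypothetical */
{
        /* Stop feeding new work, then let chained requeues die out. */
        ctx->stopping = true;
        drain_workqueue(ctx->wq);
        /* ctx->wq is empty now; state the work items touched can go away. */
        kfree(ctx->buf);
        destroy_workqueue(ctx->wq);             /* drains again, now a no-op */
}
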
 static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
                             bool wait_executing)
 {
@@ -2360,8 +2466,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
        insert_wq_barrier(cwq, barr, work, worker);
        spin_unlock_irq(&gcwq->lock);
 
-       lock_map_acquire(&cwq->wq->lockdep_map);
+       /*
+        * If @max_active is 1 or rescuer is in use, flushing another work
+        * item on the same workqueue may lead to deadlock.  Make sure the
+        * flusher is not running on the same workqueue by verifying write
+        * access.
+        */
+       if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+               lock_map_acquire(&cwq->wq->lockdep_map);
+       else
+               lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);
+
        return true;
 already_gone:
        spin_unlock_irq(&gcwq->lock);
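
The read/write split above is about false positives as much as real deadlocks: flushing a sibling work item from inside another work item only deadlocks when max_active is 1 or a rescuer may end up processing both, so only those cases take the write acquire. A hedged sketch of the deadlock this is meant to flag (work_a_fn and work_b are illustrative):

/* Hypothetical: both items sit on a workqueue created with max_active == 1,
 * e.g. via alloc_ordered_workqueue(), so they can never run concurrently. */
static struct work_struct work_b;               /* illustrative */

static void work_a_fn(struct work_struct *work)
{
        /* work_b is queued behind work_a and can never start: A waits forever */
        flush_work(&work_b);
}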
@@ -2671,13 +2787,15 @@ int schedule_delayed_work_on(int cpu,
 EXPORT_SYMBOL(schedule_delayed_work_on);
 
 /**
- * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * schedule_on_each_cpu - execute a function synchronously on each online CPU
  * @func: the function to call
  *
- * Returns zero on success.
- * Returns -ve errno on failure.
- *
+ * schedule_on_each_cpu() executes @func on each online CPU using the
+ * system workqueue and blocks until all CPUs have completed.
  * schedule_on_each_cpu() is very slow.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
  */
 int schedule_on_each_cpu(work_func_t func)
 {
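
A minimal usage sketch for the reworded kernel-doc above; the per-CPU counter and function names are illustrative, not part of this patch:

static DEFINE_PER_CPU(unsigned long, my_counter);      /* illustrative */

static void reset_counter_fn(struct work_struct *work)
{
        this_cpu_write(my_counter, 0);          /* runs once on each online CPU */
}

static int reset_all_counters(void)
{
        /* returns only after every online CPU has run reset_counter_fn() */
        return schedule_on_each_cpu(reset_counter_fn);
}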
@@ -2776,13 +2894,8 @@ static int alloc_cwqs(struct workqueue_struct *wq)
        const size_t size = sizeof(struct cpu_workqueue_struct);
        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
                                   __alignof__(unsigned long long));
-#ifdef CONFIG_SMP
-       bool percpu = !(wq->flags & WQ_UNBOUND);
-#else
-       bool percpu = false;
-#endif
 
-       if (percpu)
+       if (!(wq->flags & WQ_UNBOUND))
                wq->cpu_wq.pcpu = __alloc_percpu(size, align);
        else {
                void *ptr;
@@ -2806,13 +2919,7 @@ static int alloc_cwqs(struct workqueue_struct *wq)
 
 static void free_cwqs(struct workqueue_struct *wq)
 {
-#ifdef CONFIG_SMP
-       bool percpu = !(wq->flags & WQ_UNBOUND);
-#else
-       bool percpu = false;
-#endif
-
-       if (percpu)
+       if (!(wq->flags & WQ_UNBOUND))
                free_percpu(wq->cpu_wq.pcpu);
        else if (wq->cpu_wq.single) {
                /* the pointer to free is stored right after the cwq */
@@ -2833,14 +2940,36 @@ static int wq_clamp_max_active(int max_active, unsigned int flags,
        return clamp_val(max_active, 1, lim);
 }
 
-struct workqueue_struct *__alloc_workqueue_key(const char *name,
+struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
                                               unsigned int flags,
                                               int max_active,
                                               struct lock_class_key *key,
-                                              const char *lock_name)
+                                              const char *lock_name, ...)
 {
+       va_list args, args1;
        struct workqueue_struct *wq;
        unsigned int cpu;
+       size_t namelen;
+
+       /* determine namelen, allocate wq and format name */
+       va_start(args, lock_name);
+       va_copy(args1, args);
+       namelen = vsnprintf(NULL, 0, fmt, args) + 1;
+
+       wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
+       if (!wq)
+               goto err;
+
+       vsnprintf(wq->name, namelen, fmt, args1);
+       va_end(args);
+       va_end(args1);
+
+       /*
+        * Workqueues which may be used during memory reclaim should
+        * have a rescuer to guarantee forward progress.
+        */
+       if (flags & WQ_MEM_RECLAIM)
+               flags |= WQ_RESCUER;
 
        /*
         * Unbound workqueues aren't concurrency managed and should be
@@ -2850,12 +2979,9 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                flags |= WQ_HIGHPRI;
 
        max_active = max_active ?: WQ_DFL_ACTIVE;
-       max_active = wq_clamp_max_active(max_active, flags, name);
-
-       wq = kzalloc(sizeof(*wq), GFP_KERNEL);
-       if (!wq)
-               goto err;
+       max_active = wq_clamp_max_active(max_active, flags, wq->name);
 
+       /* init wq */
        wq->flags = flags;
        wq->saved_max_active = max_active;
        mutex_init(&wq->flush_mutex);
@@ -2863,7 +2989,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
        INIT_LIST_HEAD(&wq->flusher_queue);
        INIT_LIST_HEAD(&wq->flusher_overflow);
 
-       wq->name = name;
        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
        INIT_LIST_HEAD(&wq->list);
 
@@ -2892,7 +3017,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                if (!rescuer)
                        goto err;
 
-               rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
+               rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+                                              wq->name);
                if (IS_ERR(rescuer->task))
                        goto err;
 
@@ -2907,7 +3033,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
         */
        spin_lock(&workqueue_lock);
 
-       if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+       if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
                for_each_cwq_cpu(cpu, wq)
                        get_cwq(cpu, wq)->max_active = 0;
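
With the printf-style name introduced above, per-instance workqueues can embed an identifier directly in their name; a hypothetical caller sketch (mydev and dev_id are illustrative):

static struct workqueue_struct *mydev_create_wq(int dev_id)    /* hypothetical */
{
        /* WQ_MEM_RECLAIM gets a rescuer thread, per the check above */
        return alloc_workqueue("mydev/%d", WQ_MEM_RECLAIM, 1, dev_id);
}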
 
@@ -2937,8 +3063,8 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
        unsigned int cpu;
 
-       wq->flags |= WQ_DYING;
-       flush_workqueue(wq);
+       /* drain it before proceeding with destruction */
+       drain_workqueue(wq);
 
        /*
         * wq list is used to freeze wq, remove from list after
@@ -2995,7 +3121,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
                spin_lock_irq(&gcwq->lock);
 
-               if (!(wq->flags & WQ_FREEZEABLE) ||
+               if (!(wq->flags & WQ_FREEZABLE) ||
                    !(gcwq->flags & GCWQ_FREEZING))
                        get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3245,7 +3371,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
         * want to get it over with ASAP - spam rescuers, wake up as
         * many idlers as necessary and create new ones till the
         * worklist is empty.  Note that if the gcwq is frozen, there
-        * may be frozen works in freezeable cwqs.  Don't declare
+        * may be frozen works in freezable cwqs.  Don't declare
         * completion while frozen.
         */
        while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3503,9 +3629,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3531,7 +3657,7 @@ void freeze_workqueues_begin(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (cwq && wq->flags & WQ_FREEZEABLE)
+                       if (cwq && wq->flags & WQ_FREEZABLE)
                                cwq->max_active = 0;
                }
 
@@ -3542,7 +3668,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3551,8 +3677,8 @@ void freeze_workqueues_begin(void)
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3572,7 +3698,7 @@ bool freeze_workqueues_busy(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
                        BUG_ON(cwq->nr_active < 0);
@@ -3617,7 +3743,7 @@ void thaw_workqueues(void)
                list_for_each_entry(wq, &workqueues, list) {
                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-                       if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+                       if (!cwq || !(wq->flags & WQ_FREEZABLE))
                                continue;
 
                        /* restore max_active and repopulate worklist */
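
The freeze/thaw entry points above are driven by the PM freezer rather than by drivers; a simplified sketch of the calling sequence, with the retry loop invented for illustration (it is not the actual kernel/power code):

static int freeze_then_thaw(void)               /* illustrative only */
{
        int tries = 100;

        freeze_workqueues_begin();
        while (freeze_workqueues_busy()) {
                if (!--tries) {
                        thaw_workqueues();      /* give up and unfreeze */
                        return -EBUSY;
                }
                msleep(10);
        }
        /* ... system can now be suspended ... */
        thaw_workqueues();
        return 0;
}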
@@ -3691,7 +3817,13 @@ static int __init init_workqueues(void)
        system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
                                            WQ_UNBOUND_MAX_ACTIVE);
-       BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq);
+       system_freezable_wq = alloc_workqueue("events_freezable",
+                                             WQ_FREEZABLE, 0);
+       system_nrt_freezable_wq = alloc_workqueue("events_nrt_freezable",
+                       WQ_NON_REENTRANT | WQ_FREEZABLE, 0);
+       BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
+              !system_unbound_wq || !system_freezable_wq ||
+               !system_nrt_freezable_wq);
        return 0;
 }
 early_initcall(init_workqueues);