/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *   David Woodhouse <dwmw2@infradead.org>
 *   Andrew Morton
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
enum {
	/* global_cwq flags */
	GCWQ_FREEZING		= 1 << 3,	/* freeze in progress */

	/* worker flags */
	WORKER_STARTED		= 1 << 0,	/* started */
	WORKER_DIE		= 1 << 1,	/* die die die */
	WORKER_IDLE		= 1 << 2,	/* is idle */
	WORKER_ROGUE		= 1 << 4,	/* not bound to any cpu */

	/* gcwq->trustee_state */
	TRUSTEE_START		= 0,		/* start */
	TRUSTEE_IN_CHARGE	= 1,		/* trustee in charge of gcwq */
	TRUSTEE_BUTCHER		= 2,		/* butcher workers */
	TRUSTEE_RELEASE		= 3,		/* release workers */
	TRUSTEE_DONE		= 4,		/* trustee is done */

	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,

	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
};
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: gcwq->lock protected.  Access with gcwq->lock held.
 *
 * F: wq->flush_mutex protected.
 *
 * W: workqueue_lock protected.
 */
struct cpu_workqueue_struct;

struct worker {
	/* on idle list while idle, on busy hash table while busy */
	union {
		struct list_head	entry;	/* L: while idle */
		struct hlist_node	hentry;	/* L: while busy */
	};

	struct work_struct	*current_work;	/* L: work being processed */
	struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
	struct list_head	scheduled;	/* L: scheduled works */
	struct task_struct	*task;		/* I: worker task */
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct cpu_workqueue_struct *cwq;	/* I: the associated cwq */
	unsigned int		flags;		/* L: flags */
	int			id;		/* I: worker id */
};
/*
 * Global per-cpu workqueue.
 */
struct global_cwq {
	spinlock_t		lock;		/* the gcwq lock */
	unsigned int		cpu;		/* I: the associated cpu */
	unsigned int		flags;		/* L: GCWQ_* flags */

	int			nr_workers;	/* L: total number of workers */
	int			nr_idle;	/* L: currently idle ones */

	/* workers are chained either in the idle_list or busy_hash */
	struct list_head	idle_list;	/* L: list of idle workers */
	struct hlist_head	busy_hash[BUSY_WORKER_HASH_SIZE];
						/* L: hash of busy workers */

	struct ida		worker_ida;	/* L: for worker IDs */

	struct task_struct	*trustee;	/* L: for gcwq shutdown */
	unsigned int		trustee_state;	/* L: trustee state */
	wait_queue_head_t	trustee_wait;	/* trustee wait */
} ____cacheline_aligned_in_smp;
/*
 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 * work_struct->data are used for flags and thus cwqs need to be
 * aligned at two's power of the number of flag bits.
 */
struct cpu_workqueue_struct {
	struct global_cwq	*gcwq;		/* I: the associated gcwq */
	struct list_head	worklist;
	struct worker		*worker;
	struct workqueue_struct *wq;		/* I: the owning workqueue */
	int			work_color;	/* L: current color */
	int			flush_color;	/* L: flushing color */
	int			nr_in_flight[WORK_NR_COLORS];
						/* L: nr of in_flight works */
	int			nr_active;	/* L: nr of active works */
	int			max_active;	/* L: max active works */
	struct list_head	delayed_works;	/* L: delayed works */
};
/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
	struct list_head	list;		/* F: list of flushers */
	int			flush_color;	/* F: flush color waiting for */
	struct completion	done;		/* flush completion */
};
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int		flags;		/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head	list;		/* W: list of all workqueues */

	struct mutex		flush_mutex;	/* protects wq flushing */
	int			work_color;	/* F: current work color */
	int			flush_color;	/* F: current flush color */
	atomic_t		nr_cwqs_to_flush; /* flush in progress */
	struct wq_flusher	*first_flusher;	/* F: first flusher */
	struct list_head	flusher_queue;	/* F: flush waiters */
	struct list_head	flusher_overflow; /* F: flush overflow list */

	unsigned long		single_cpu;	/* cpu for single cpu wq */

	int			saved_max_active; /* I: saved cwq max_active */
	const char		*name;		/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	lockdep_map;
#endif
};
#define for_each_busy_worker(worker, i, pos, gcwq)			\
	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
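/*
 * Usage sketch (illustrative, not part of the original file): walking
 * every busy worker on a gcwq with the iterator above.  The w, pos and
 * i names are local to the caller and gcwq->lock must be held.
 *
 *	struct worker *w;
 *	struct hlist_node *pos;
 *	int i;
 *
 *	spin_lock_irq(&gcwq->lock);
 *	for_each_busy_worker(w, i, pos, gcwq)
 *		pr_info("worker %d is busy\n", w->id);
 *	spin_unlock_irq(&gcwq->lock);
 */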
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);
static bool workqueue_freezing;		/* W: have wqs started freezing? */

static DEFINE_PER_CPU(struct global_cwq, global_cwq);

static int worker_thread(void *__worker);

static struct global_cwq *get_gcwq(unsigned int cpu)
{
	return &per_cpu(global_cwq, cpu);
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	return per_cpu_ptr(wq->cpu_wq, cpu);
}

static unsigned int work_color_to_flags(int color)
{
	return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
	return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
		((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
	return (color + 1) % WORK_NR_COLORS;
}
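/*
 * Worked example (not in the original): work colors simply cycle.
 * Assuming WORK_STRUCT_COLOR_BITS == 4 as in this tree, one color
 * value is reserved for WORK_NO_COLOR and work_next_color() wraps the
 * remaining space:
 *
 *	work_next_color(0)  == 1
 *	work_next_color(13) == 14
 *	work_next_color(14) == 0
 *
 * flush_workqueue() relies on this to stamp works queued after a flush
 * started with a fresh color and wait only for the old one.
 */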
/*
 * Work data points to the cwq while a work is on queue.  Once
 * execution starts, it points to the cpu the work was last on.  This
 * can be distinguished by comparing the data value against
 * PAGE_OFFSET.
 *
 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 * cwq, cpu or clear work->data.  These functions should only be
 * called while the work is owned - ie. while the PENDING bit is set.
 *
 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 * corresponding to a work.  gcwq is available once the work has been
 * queued anywhere after initialization.  cwq is available only from
 * queueing until execution starts.
 */
static inline void set_work_data(struct work_struct *work, unsigned long data,
				 unsigned long flags)
{
	BUG_ON(!work_pending(work));
	atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_cwq(struct work_struct *work,
			 struct cpu_workqueue_struct *cwq,
			 unsigned long extra_flags)
{
	set_work_data(work, (unsigned long)cwq,
		      WORK_STRUCT_PENDING | extra_flags);
}

static void set_work_cpu(struct work_struct *work, unsigned int cpu)
{
	set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
}

static void clear_work_data(struct work_struct *work)
{
	set_work_data(work, WORK_STRUCT_NO_CPU, 0);
}

static inline unsigned long get_work_data(struct work_struct *work)
{
	return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
}

static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
{
	unsigned long data = get_work_data(work);

	return data >= PAGE_OFFSET ? (void *)data : NULL;
}

static struct global_cwq *get_work_gcwq(struct work_struct *work)
{
	unsigned long data = get_work_data(work);
	unsigned int cpu;

	if (data >= PAGE_OFFSET)
		return ((struct cpu_workqueue_struct *)data)->gcwq;

	cpu = data >> WORK_STRUCT_FLAG_BITS;
	if (cpu == NR_CPUS)
		return NULL;

	BUG_ON(cpu >= num_possible_cpus());
	return get_gcwq(cpu);
}
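/*
 * Illustrative sketch (not in the original): the two work->data
 * encodings distinguished above.  Kernel pointers are >= PAGE_OFFSET,
 * so a cwq pointer and a shifted cpu number can share the field:
 *
 *	set_work_cwq(work, cwq, 0);
 *		data >= PAGE_OFFSET: work is queued, both
 *		get_work_cwq() and get_work_gcwq() are valid
 *
 *	set_work_cpu(work, cpu);
 *		data < PAGE_OFFSET: execution has started, only
 *		get_work_gcwq() can be recovered
 */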
/**
 * busy_worker_head - return the busy hash head for a work
 * @gcwq: gcwq of interest
 * @work: work to be hashed
 *
 * Return hash head of @gcwq for @work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to the hash head.
 */
static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
					   struct work_struct *work)
{
	const int base_shift = ilog2(sizeof(struct work_struct));
	unsigned long v = (unsigned long)work;

	/* simple shift and fold hash, do we need something better? */
	v >>= base_shift;
	v += v >> BUSY_WORKER_HASH_ORDER;
	v &= BUSY_WORKER_HASH_MASK;

	return &gcwq->busy_hash[v];
}
/**
 * __find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @bwh: hash head as returned by busy_worker_head()
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  @bwh should be
 * the hash head obtained by calling busy_worker_head() with the same
 * work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
						   struct hlist_head *bwh,
						   struct work_struct *work)
{
	struct worker *worker;
	struct hlist_node *tmp;

	hlist_for_each_entry(worker, tmp, bwh, hentry)
		if (worker->current_work == work)
			return worker;
	return NULL;
}

/**
 * find_worker_executing_work - find worker which is executing a work
 * @gcwq: gcwq of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @gcwq.  This function is
 * identical to __find_worker_executing_work() except that this
 * function calculates @bwh itself.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 *
 * RETURNS:
 * Pointer to worker which is executing @work if found, NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
						 struct work_struct *work)
{
	return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
					    work);
}
/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	/* we own @work, set data and link */
	set_work_cwq(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up_process(cwq->worker->task);
}
/**
 * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
 * @cwq: cwq to unbind
 *
 * Try to unbind @cwq from single cpu workqueue processing.  If
 * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
{
	struct workqueue_struct *wq = cwq->wq;
	struct global_cwq *gcwq = cwq->gcwq;

	BUG_ON(wq->single_cpu != gcwq->cpu);
	/*
	 * Unbind from workqueue if @cwq is not frozen.  If frozen,
	 * thaw_workqueues() will either restart processing on this
	 * cpu or unbind if empty.  This keeps works queued while
	 * frozen fully ordered and flushable.
	 */
	if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
		smp_wmb();	/* paired with cmpxchg() in __queue_work() */
		wq->single_cpu = NR_CPUS;
	}
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct list_head *worklist;
	unsigned long flags;

	debug_work_activate(work);

	/*
	 * Determine gcwq to use.  SINGLE_CPU is inherently
	 * NON_REENTRANT, so test it first.
	 */
	if (!(wq->flags & WQ_SINGLE_CPU)) {
		struct global_cwq *last_gcwq;

		/*
		 * It's multi cpu.  If @wq is non-reentrant and @work
		 * was previously on a different cpu, it might still
		 * be running there, in which case the work needs to
		 * be queued on that cpu to guarantee non-reentrance.
		 */
		gcwq = get_gcwq(cpu);
		if (wq->flags & WQ_NON_REENTRANT &&
		    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
			struct worker *worker;

			spin_lock_irqsave(&last_gcwq->lock, flags);

			worker = find_worker_executing_work(last_gcwq, work);

			if (worker && worker->current_cwq->wq == wq)
				gcwq = last_gcwq;
			else {
				/* meh... not running there, queue here */
				spin_unlock_irqrestore(&last_gcwq->lock, flags);
				spin_lock_irqsave(&gcwq->lock, flags);
			}
		} else
			spin_lock_irqsave(&gcwq->lock, flags);
	} else {
		unsigned int req_cpu = cpu;
		bool arbitrate;

		/*
		 * It's a bit more complex for single cpu workqueues.
		 * We first need to determine which cpu is going to be
		 * used.  If no cpu is currently serving this
		 * workqueue, arbitrate using atomic accesses to
		 * wq->single_cpu; otherwise, use the current one.
		 */
	retry:
		cpu = wq->single_cpu;
		arbitrate = cpu == NR_CPUS;
		if (arbitrate)
			cpu = req_cpu;

		gcwq = get_gcwq(cpu);
		spin_lock_irqsave(&gcwq->lock, flags);

		/*
		 * The following cmpxchg() is a full barrier paired
		 * with smp_wmb() in cwq_unbind_single_cpu() and
		 * guarantees that all changes to wq->st_* fields are
		 * visible on the new cpu after this point.
		 */
		if (arbitrate)
			cmpxchg(&wq->single_cpu, NR_CPUS, cpu);

		if (unlikely(wq->single_cpu != cpu)) {
			spin_unlock_irqrestore(&gcwq->lock, flags);
			goto retry;
		}
	}

	/* gcwq determined, get cwq and queue */
	cwq = get_cwq(gcwq->cpu, wq);

	BUG_ON(!list_empty(&work->entry));

	cwq->nr_in_flight[cwq->work_color]++;

	if (likely(cwq->nr_active < cwq->max_active)) {
		cwq->nr_active++;
		worklist = &cwq->worklist;
	} else
		worklist = &cwq->delayed_works;

	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));

	spin_unlock_irqrestore(&gcwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);

/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
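/*
 * Usage sketch (illustrative, not part of the original file): queueing
 * a work item from a driver.  my_wq, my_work and my_work_fn are
 * hypothetical names.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	if (!queue_work(my_wq, &my_work))
 *		pr_debug("already pending, PENDING bit was set\n");
 */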
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}

/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);

/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		struct global_cwq *gcwq = get_work_gcwq(work);
		unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();

		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);
		/*
		 * This stores cwq for the moment, for the timer_fn.
		 * Note that the work's gcwq is preserved to allow
		 * reentrance detection for delayed works.
		 */
		set_work_cwq(work, get_cwq(lcpu, wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
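/*
 * Usage sketch (illustrative, not part of the original file): a poll
 * that rearms itself every second on the kernel-global workqueue.
 * my_poll, my_poll_fn and do_poll are hypothetical names.
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		do_poll();
 *		schedule_delayed_work(dwork, HZ);
 *	}
 *	static DECLARE_DELAYED_WORK(my_poll, my_poll_fn);
 *
 *	schedule_delayed_work(&my_poll, HZ);
 */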
/**
 * worker_enter_idle - enter idle state
 * @worker: worker which is entering idle state
 *
 * @worker is entering idle state.  Update stats and idle timer if
 * necessary.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_enter_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(worker->flags & WORKER_IDLE);
	BUG_ON(!list_empty(&worker->entry) &&
	       (worker->hentry.next || worker->hentry.pprev));

	worker->flags |= WORKER_IDLE;
	gcwq->nr_idle++;

	/* idle_list is LIFO */
	list_add(&worker->entry, &gcwq->idle_list);

	if (unlikely(worker->flags & WORKER_ROGUE))
		wake_up_all(&gcwq->trustee_wait);
}

/**
 * worker_leave_idle - leave idle state
 * @worker: worker which is leaving idle state
 *
 * @worker is leaving idle state.  Update stats.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void worker_leave_idle(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;

	BUG_ON(!(worker->flags & WORKER_IDLE));
	worker->flags &= ~WORKER_IDLE;
	gcwq->nr_idle--;
	list_del_init(&worker->entry);
}

static struct worker *alloc_worker(void)
{
	struct worker *worker;

	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
	if (worker) {
		INIT_LIST_HEAD(&worker->entry);
		INIT_LIST_HEAD(&worker->scheduled);
	}
	return worker;
}
/**
 * create_worker - create a new workqueue worker
 * @cwq: cwq the new worker will belong to
 * @bind: whether to set affinity to @cpu or not
 *
 * Create a new worker which is bound to @cwq.  The returned worker
 * can be started by calling start_worker() or destroyed using
 * destroy_worker().
 *
 * CONTEXT:
 * Might sleep.  Does GFP_KERNEL allocations.
 *
 * RETURNS:
 * Pointer to the newly created worker.
 */
static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
{
	struct global_cwq *gcwq = cwq->gcwq;
	int id = -1;
	struct worker *worker = NULL;

	spin_lock_irq(&gcwq->lock);
	while (ida_get_new(&gcwq->worker_ida, &id)) {
		spin_unlock_irq(&gcwq->lock);
		if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
			goto fail;
		spin_lock_irq(&gcwq->lock);
	}
	spin_unlock_irq(&gcwq->lock);

	worker = alloc_worker();
	if (!worker)
		goto fail;

	worker->gcwq = gcwq;
	worker->cwq = cwq;
	worker->id = id;

	worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
				      gcwq->cpu, id);
	if (IS_ERR(worker->task))
		goto fail;

	/*
	 * A rogue worker will become a regular one if CPU comes
	 * online later on.  Make sure every worker has
	 * PF_THREAD_BOUND set.
	 */
	if (bind)
		kthread_bind(worker->task, gcwq->cpu);

	worker->task->flags |= PF_THREAD_BOUND;

	return worker;
fail:
	if (id >= 0) {
		spin_lock_irq(&gcwq->lock);
		ida_remove(&gcwq->worker_ida, id);
		spin_unlock_irq(&gcwq->lock);
	}
	kfree(worker);
	return NULL;
}
/**
 * start_worker - start a newly created worker
 * @worker: worker to start
 *
 * Make the gcwq aware of @worker and start it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void start_worker(struct worker *worker)
{
	worker->flags |= WORKER_STARTED;
	worker->gcwq->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
}

/**
 * destroy_worker - destroy a workqueue worker
 * @worker: worker to be destroyed
 *
 * Destroy @worker and adjust @gcwq stats accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void destroy_worker(struct worker *worker)
{
	struct global_cwq *gcwq = worker->gcwq;
	int id = worker->id;

	/* sanity check frenzy */
	BUG_ON(worker->current_work);
	BUG_ON(!list_empty(&worker->scheduled));

	if (worker->flags & WORKER_STARTED)
		gcwq->nr_workers--;
	if (worker->flags & WORKER_IDLE)
		gcwq->nr_idle--;

	list_del_init(&worker->entry);
	worker->flags |= WORKER_DIE;

	spin_unlock_irq(&gcwq->lock);

	kthread_stop(worker->task);
	kfree(worker);

	spin_lock_irq(&gcwq->lock);
	ida_remove(&gcwq->worker_ida, id);
}
/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
			      struct work_struct **nextp)
{
	struct work_struct *n;

	/*
	 * Linked worklist will always end before the end of the list,
	 * use NULL for list head.
	 */
	list_for_each_entry_safe_from(work, n, NULL, entry) {
		list_move_tail(&work->entry, head);
		if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
			break;
	}

	/*
	 * If we're already inside safe list traversal and have moved
	 * multiple works to the scheduled queue, the next position
	 * needs to be updated.
	 */
	if (nextp)
		*nextp = n;
}

static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
{
	struct work_struct *work = list_first_entry(&cwq->delayed_works,
						    struct work_struct, entry);

	move_linked_works(work, &cwq->worklist, NULL);
	cwq->nr_active++;
}
/**
 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
 * @cwq: cwq of interest
 * @color: color of work which left the queue
 *
 * A work either has completed or is removed from pending queue,
 * decrement nr_in_flight of its cwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
	cwq->nr_active--;

	if (!list_empty(&cwq->delayed_works)) {
		/* one down, submit a delayed one */
		if (cwq->nr_active < cwq->max_active)
			cwq_activate_first_delayed(cwq);
	} else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
		/* this was the last work, unbind from single cpu */
		cwq_unbind_single_cpu(cwq);
	}

	/* is flush in progress and are we at the flushing tip? */
	if (likely(cwq->flush_color != color))
		return;

	/* are there still in-flight works? */
	if (cwq->nr_in_flight[color])
		return;

	/* this cwq is done, clear flush_color */
	cwq->flush_color = -1;

	/*
	 * If this was the last cwq, wake up the first flusher.  It
	 * will handle the rest.
	 */
	if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
		complete(&cwq->wq->first_flusher->done);
}
/**
 * process_one_work - process single work
 * @worker: self
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = worker->cwq;
	struct global_cwq *gcwq = cwq->gcwq;
	struct hlist_head *bwh = busy_worker_head(gcwq, work);
	work_func_t f = work->func;
	int work_color;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	debug_work_deactivate(work);
	hlist_add_head(&worker->hentry, bwh);
	worker->current_work = work;
	worker->current_cwq = cwq;
	work_color = get_work_color(work);

	BUG_ON(get_work_cwq(work) != cwq);
	/* record the current cpu number in the work data and dequeue */
	set_work_cpu(work, gcwq->cpu);
	list_del_init(&work->entry);

	spin_unlock_irq(&gcwq->lock);

	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&gcwq->lock);

	/* we're done with it, release */
	hlist_del_init(&worker->hentry);
	worker->current_work = NULL;
	worker->current_cwq = NULL;
	cwq_dec_nr_in_flight(cwq, work_color);
}
/**
 * process_scheduled_works - process scheduled works
 * @worker: self
 *
 * Process all scheduled works.  Please note that the scheduled list
 * may change while processing a work, so this function repeatedly
 * fetches a work from the top and executes it.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.
 */
static void process_scheduled_works(struct worker *worker)
{
	while (!list_empty(&worker->scheduled)) {
		struct work_struct *work = list_first_entry(&worker->scheduled,
						struct work_struct, entry);
		process_one_work(worker, work);
	}
}
/**
 * worker_thread - the worker thread function
 * @__worker: self
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__worker)
{
	struct worker *worker = __worker;
	struct global_cwq *gcwq = worker->gcwq;
	struct cpu_workqueue_struct *cwq = worker->cwq;

woke_up:
	spin_lock_irq(&gcwq->lock);

	/* DIE can be set only while we're idle, checking here is enough */
	if (worker->flags & WORKER_DIE) {
		spin_unlock_irq(&gcwq->lock);
		return 0;
	}

	worker_leave_idle(worker);

	/*
	 * ->scheduled list can only be filled while a worker is
	 * preparing to process a work or actually processing it.
	 * Make sure nobody diddled with it while I was sleeping.
	 */
	BUG_ON(!list_empty(&worker->scheduled));

	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work =
			list_first_entry(&cwq->worklist,
					 struct work_struct, entry);

		/*
		 * The following is a rather inefficient way to close
		 * race window against cpu hotplug operations.  Will
		 * be replaced with a better implementation.
		 */
		if (unlikely(!(worker->flags & WORKER_ROGUE) &&
			     !cpumask_equal(&worker->task->cpus_allowed,
					    get_cpu_mask(gcwq->cpu)))) {
			spin_unlock_irq(&gcwq->lock);
			set_cpus_allowed_ptr(worker->task,
					     get_cpu_mask(gcwq->cpu));
			cpu_relax();
			spin_lock_irq(&gcwq->lock);
			continue;
		}

		if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
			/* optimization path, not strictly necessary */
			process_one_work(worker, work);
			if (unlikely(!list_empty(&worker->scheduled)))
				process_scheduled_works(worker);
		} else {
			move_linked_works(work, &worker->scheduled, NULL);
			process_scheduled_works(worker);
		}
	}

	/*
	 * gcwq->lock is held and there's no work to process, sleep.
	 * Workers are woken up only while holding gcwq->lock, so
	 * setting the current state before releasing gcwq->lock is
	 * enough to prevent losing any event.
	 */
	worker_enter_idle(worker);
	__set_current_state(TASK_INTERRUPTIBLE);
	spin_unlock_irq(&gcwq->lock);
	schedule();
	goto woke_up;
}
struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
	complete(&barr->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @target: target work to attach @barr to
 * @worker: worker currently executing @target, NULL if @target is not executing
 *
 * @barr is linked to @target such that @barr is completed only after
 * @target finishes execution.  Please note that the ordering
 * guarantee is observed only with respect to @target and on the local
 * cpu.
 *
 * Currently, a queued barrier can't be canceled.  This is because
 * try_to_grab_pending() can't determine whether the work to be
 * grabbed is at the head of the queue and thus can't clear LINKED
 * flag of the previous work while there must be a valid next work
 * after a work with LINKED flag set.
 *
 * Note that when @worker is non-NULL, @target may be modified
 * underneath us, so we can't reliably determine cwq from @target.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr,
			      struct work_struct *target, struct worker *worker)
{
	struct list_head *head;
	unsigned int linked = 0;

	/*
	 * debugobject calls are safe here even with gcwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * sleep.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	/*
	 * If @target is currently being executed, schedule the
	 * barrier to the worker; otherwise, put it after @target.
	 */
	if (worker)
		head = worker->scheduled.next;
	else {
		unsigned long *bits = work_data_bits(target);

		head = target->entry.next;
		/* there can already be other linked works, inherit and set */
		linked = *bits & WORK_STRUCT_LINKED;
		__set_bit(WORK_STRUCT_LINKED_BIT, bits);
	}

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head,
		    work_color_to_flags(WORK_NO_COLOR) | linked);
}
/**
 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
 * @wq: workqueue being flushed
 * @flush_color: new flush color, < 0 for no-op
 * @work_color: new work color, < 0 for no-op
 *
 * Prepare cwqs for workqueue flushing.
 *
 * If @flush_color is non-negative, flush_color on all cwqs should be
 * -1.  If no cwq has in-flight commands at the specified color, all
 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
 * has in flight commands, its cwq->flush_color is set to
 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
 * wakeup logic is armed and %true is returned.
 *
 * The caller should have initialized @wq->first_flusher prior to
 * calling this function with non-negative @flush_color.  If
 * @flush_color is negative, no flush color update is done and %false
 * is returned.
 *
 * If @work_color is non-negative, all cwqs should have the same
 * work_color which is previous to @work_color and all will be
 * advanced to @work_color.
 *
 * CONTEXT:
 * mutex_lock(wq->flush_mutex).
 *
 * RETURNS:
 * %true if @flush_color >= 0 and there's something to flush.  %false
 * otherwise.
 */
static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
				      int flush_color, int work_color)
{
	bool wait = false;
	unsigned int cpu;

	if (flush_color >= 0) {
		BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
		atomic_set(&wq->nr_cwqs_to_flush, 1);
	}

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = cwq->gcwq;

		spin_lock_irq(&gcwq->lock);

		if (flush_color >= 0) {
			BUG_ON(cwq->flush_color != -1);

			if (cwq->nr_in_flight[flush_color]) {
				cwq->flush_color = flush_color;
				atomic_inc(&wq->nr_cwqs_to_flush);
				wait = true;
			}
		}

		if (work_color >= 0) {
			BUG_ON(work_color != work_next_color(cwq->work_color));
			cwq->work_color = work_color;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
		complete(&wq->first_flusher->done);

	return wait;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct wq_flusher this_flusher = {
		.list = LIST_HEAD_INIT(this_flusher.list),
		.flush_color = -1,
		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
	};
	int next_color;

	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	mutex_lock(&wq->flush_mutex);

	/*
	 * Start-to-wait phase
	 */
	next_color = work_next_color(wq->work_color);

	if (next_color != wq->flush_color) {
		/*
		 * Color space is not full.  The current work_color
		 * becomes our flush_color and work_color is advanced
		 * by one.
		 */
		BUG_ON(!list_empty(&wq->flusher_overflow));
		this_flusher.flush_color = wq->work_color;
		wq->work_color = next_color;

		if (!wq->first_flusher) {
			/* no flush in progress, become the first flusher */
			BUG_ON(wq->flush_color != this_flusher.flush_color);

			wq->first_flusher = &this_flusher;

			if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
						       wq->work_color)) {
				/* nothing to flush, done */
				wq->flush_color = next_color;
				wq->first_flusher = NULL;
				goto out_unlock;
			}
		} else {
			/* wait in queue */
			BUG_ON(wq->flush_color == this_flusher.flush_color);
			list_add_tail(&this_flusher.list, &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}
	} else {
		/*
		 * Oops, color space is full, wait on overflow queue.
		 * The next flush completion will assign us
		 * flush_color and transfer to flusher_queue.
		 */
		list_add_tail(&this_flusher.list, &wq->flusher_overflow);
	}

	mutex_unlock(&wq->flush_mutex);

	wait_for_completion(&this_flusher.done);

	/*
	 * Wake-up-and-cascade phase
	 *
	 * First flushers are responsible for cascading flushes and
	 * handling overflow.  Non-first flushers can simply return.
	 */
	if (wq->first_flusher != &this_flusher)
		return;

	mutex_lock(&wq->flush_mutex);

	wq->first_flusher = NULL;

	BUG_ON(!list_empty(&this_flusher.list));
	BUG_ON(wq->flush_color != this_flusher.flush_color);

	while (true) {
		struct wq_flusher *next, *tmp;

		/* complete all the flushers sharing the current flush color */
		list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
			if (next->flush_color != wq->flush_color)
				break;
			list_del_init(&next->list);
			complete(&next->done);
		}

		BUG_ON(!list_empty(&wq->flusher_overflow) &&
		       wq->flush_color != work_next_color(wq->work_color));

		/* this flush_color is finished, advance by one */
		wq->flush_color = work_next_color(wq->flush_color);

		/* one color has been freed, handle overflow queue */
		if (!list_empty(&wq->flusher_overflow)) {
			/*
			 * Assign the same color to all overflowed
			 * flushers, advance work_color and append to
			 * flusher_queue.  This is the start-to-wait
			 * phase for these overflowed flushers.
			 */
			list_for_each_entry(tmp, &wq->flusher_overflow, list)
				tmp->flush_color = wq->work_color;

			wq->work_color = work_next_color(wq->work_color);

			list_splice_tail_init(&wq->flusher_overflow,
					      &wq->flusher_queue);
			flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
		}

		if (list_empty(&wq->flusher_queue)) {
			BUG_ON(wq->flush_color != wq->work_color);
			break;
		}

		/*
		 * Need to flush more colors.  Make the next flusher
		 * the new first flusher and arm cwqs.
		 */
		BUG_ON(wq->flush_color == wq->work_color);
		BUG_ON(wq->flush_color != next->flush_color);

		list_del_init(&next->list);
		wq->first_flusher = next;

		if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
			break;

		/*
		 * Meh... this color is already done, clear first
		 * flusher and repeat cascading.
		 */
		wq->first_flusher = NULL;
	}

out_unlock:
	mutex_unlock(&wq->flush_mutex);
}
EXPORT_SYMBOL_GPL(flush_workqueue);
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct worker *worker = NULL;
	struct global_cwq *gcwq;
	struct cpu_workqueue_struct *cwq;
	struct wq_barrier barr;

	might_sleep();
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return 0;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued to a different gcwq under us, we
		 * are not going to wait.
		 */
		smp_rmb();
		cwq = get_work_cwq(work);
		if (unlikely(!cwq || gcwq != cwq->gcwq))
			goto already_gone;
	} else {
		worker = find_worker_executing_work(gcwq, work);
		if (!worker)
			goto already_gone;
		cwq = worker->current_cwq;
	}

	insert_wq_barrier(cwq, &barr, work, worker);
	spin_unlock_irq(&gcwq->lock);

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
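/*
 * Usage sketch (illustrative, not part of the original file): wait for
 * a previously queued item before freeing its container.  my_data and
 * stop_requeueing are hypothetical; the caller must prevent requeueing
 * first since flush_work() only waits for the instance it can observe.
 *
 *	stop_requeueing(my_data);
 *	flush_work(&my_data->work);
 *	kfree(my_data);
 */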
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct global_cwq *gcwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	gcwq = get_work_gcwq(work);
	if (!gcwq)
		return ret;

	spin_lock_irq(&gcwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong gcwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (gcwq == get_work_gcwq(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			cwq_dec_nr_in_flight(get_work_cwq(work),
					     get_work_color(work));
			ret = 1;
		}
	}
	spin_unlock_irq(&gcwq->lock);

	return ret;
}

static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
{
	struct wq_barrier barr;
	struct worker *worker;

	spin_lock_irq(&gcwq->lock);

	worker = find_worker_executing_work(gcwq, work);
	if (unlikely(worker))
		insert_wq_barrier(worker->current_cwq, &barr, work, worker);

	spin_unlock_irq(&gcwq->lock);

	if (unlikely(worker)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}

static void wait_on_work(struct work_struct *work)
{
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	for_each_possible_cpu(cpu)
		wait_on_cpu_work(get_gcwq(cpu), work);
}

static int __cancel_work_timer(struct work_struct *work,
				struct timer_list* timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_work_data(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued.  If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * returns.
 *
 * It is possible to use this function if the work re-queues itself.  It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue on which this work was last
 * queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);

/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work().  See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
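/*
 * Usage sketch (illustrative, not part of the original file): typical
 * shutdown path for a self-rearming delayed work such as my_poll from
 * the earlier sketch:
 *
 *	cancel_delayed_work_sync(&my_poll);
 *
 * After this returns the timer is off, the work is off every queue and
 * its callback is not running, even if it was rearming itself.
 */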
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);

/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);

/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);

/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);

/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			     struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
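/*
 * Usage sketch (illustrative, not part of the original file): run a
 * per-cpu drain on every online CPU and wait for all of them.
 * my_drain_fn and drain_local_cache are hypothetical names.
 *
 *	static void my_drain_fn(struct work_struct *unused)
 *	{
 *		drain_local_cache();
 *	}
 *
 *	int err = schedule_on_each_cpu(my_drain_fn);
 */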
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
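/*
 * Usage sketch (illustrative, not part of the original file): a
 * release path that may be entered from interrupt context.  my_obj,
 * release_ew and my_release are hypothetical; the execute_work storage
 * must stay valid until the handler has run.
 *
 *	static void my_release(struct work_struct *work)
 *	{
 *		struct my_obj *obj =
 *			container_of(work, struct my_obj, release_ew.work);
 *
 *		kfree(obj);
 *	}
 *
 *	execute_in_process_context(my_release, &obj->release_ew);
 */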
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = get_cwq(cpu, keventd_wq);
	if (current == cwq->worker->task)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *alloc_cwqs(void)
{
	/*
	 * cwqs are forced aligned according to WORK_STRUCT_FLAG_BITS.
	 * Make sure that the alignment isn't lower than that of
	 * unsigned long long.
	 */
	const size_t size = sizeof(struct cpu_workqueue_struct);
	const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
				   __alignof__(unsigned long long));
	struct cpu_workqueue_struct *cwqs;
#ifndef CONFIG_SMP
	void *ptr;

	/*
	 * On UP, percpu allocator doesn't honor alignment parameter
	 * and simply uses arch-dependent default.  Allocate enough
	 * room to align cwq and put an extra pointer at the end
	 * pointing back to the originally allocated pointer which
	 * will be used for free.
	 *
	 * FIXME: This really belongs to UP percpu code.  Update UP
	 * percpu code to honor alignment and remove this ugliness.
	 */
	ptr = __alloc_percpu(size + align + sizeof(void *), 1);
	cwqs = PTR_ALIGN(ptr, align);
	*(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
#else
	/* On SMP, percpu allocator can do it itself */
	cwqs = __alloc_percpu(size, align);
#endif
	/* just in case, make sure it's actually aligned */
	BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
	return cwqs;
}

static void free_cwqs(struct cpu_workqueue_struct *cwqs)
{
#ifndef CONFIG_SMP
	/* on UP, the pointer to free is stored right after the cwq */
	if (cwqs)
		free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
#else
	free_percpu(cwqs);
#endif
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						int max_active,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	bool failed = false;
	unsigned int cpu;

	max_active = clamp_val(max_active, 1, INT_MAX);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_cwqs();
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	wq->saved_max_active = max_active;
	mutex_init(&wq->flush_mutex);
	atomic_set(&wq->nr_cwqs_to_flush, 0);
	INIT_LIST_HEAD(&wq->flusher_queue);
	INIT_LIST_HEAD(&wq->flusher_overflow);
	wq->single_cpu = NR_CPUS;

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	cpu_maps_update_begin();
	/*
	 * We must initialize cwqs for each possible cpu even if we
	 * are going to call destroy_workqueue() finally.  Otherwise
	 * cpu_up() can hit the uninitialized cwq once we drop the
	 * lock.
	 */
	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		struct global_cwq *gcwq = get_gcwq(cpu);

		BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
		cwq->gcwq = gcwq;
		cwq->wq = wq;
		cwq->flush_color = -1;
		cwq->max_active = max_active;
		INIT_LIST_HEAD(&cwq->worklist);
		INIT_LIST_HEAD(&cwq->delayed_works);

		if (failed)
			continue;
		cwq->worker = create_worker(cwq, cpu_online(cpu));
		if (cwq->worker)
			start_worker(cwq->worker);
		else
			failed = true;
	}

	/*
	 * workqueue_lock protects global freeze state and workqueues
	 * list.  Grab it, set max_active accordingly and add the new
	 * workqueue to workqueues list.
	 */
	spin_lock(&workqueue_lock);

	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
		for_each_possible_cpu(cpu)
			get_cwq(cpu, wq)->max_active = 0;

	list_add(&wq->list, &workqueues);

	spin_unlock(&workqueue_lock);

	cpu_maps_update_done();

	if (failed) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
err:
	if (wq) {
		free_cwqs(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
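/*
 * Usage sketch (illustrative, not part of the original file): this
 * function is normally reached through the create_workqueue() family
 * of wrappers in workqueue.h:
 *
 *	struct workqueue_struct *wq = create_workqueue("mydrv");
 *
 *	if (!wq)
 *		return -ENOMEM;
 *	queue_work(wq, &my_work);
 *	flush_workqueue(wq);
 *	destroy_workqueue(wq);
 */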
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue.  All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	unsigned int cpu;

	flush_workqueue(wq);

	/*
	 * wq list is used to freeze wq, remove from list after
	 * flushing is complete in case freeze races us.
	 */
	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);
	cpu_maps_update_done();

	for_each_possible_cpu(cpu) {
		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
		int i;

		if (cwq->worker) {
			spin_lock_irq(&cwq->gcwq->lock);
			destroy_worker(cwq->worker);
			cwq->worker = NULL;
			spin_unlock_irq(&cwq->gcwq->lock);
		}

		for (i = 0; i < WORK_NR_COLORS; i++)
			BUG_ON(cwq->nr_in_flight[i]);
		BUG_ON(cwq->nr_active);
		BUG_ON(!list_empty(&cwq->delayed_works));
	}

	free_cwqs(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
/*
 * CPU hotplug.
 *
 * CPU hotplug is implemented by allowing cwqs to be detached from
 * CPU, running with unbound workers and allowing them to be
 * reattached later if the cpu comes back online.  A separate thread
 * is created to govern cwqs in such state and is called the trustee.
 *
 * Trustee states and their descriptions.
 *
 * START	Command state used on startup.  On CPU_DOWN_PREPARE, a
 *		new trustee is started with this state.
 *
 * IN_CHARGE	Once started, trustee will enter this state after
 *		making all existing workers rogue.  DOWN_PREPARE waits
 *		for trustee to enter this state.  After reaching
 *		IN_CHARGE, trustee tries to execute the pending
 *		worklist until it's empty and the state is set to
 *		BUTCHER, or the state is set to RELEASE.
 *
 * BUTCHER	Command state which is set by the cpu callback after
 *		the cpu has gone down.  Once this state is set, trustee
 *		knows that there will be no new works on the worklist
 *		and once the worklist is empty it can proceed to
 *		killing idle workers.
 *
 * RELEASE	Command state which is set by the cpu callback if the
 *		cpu down has been canceled or it has come online
 *		again.  After recognizing this state, trustee stops
 *		trying to drain or butcher and transits to DONE.
 *
 * DONE		Trustee will enter this state after BUTCHER or RELEASE
 *		is requested.
 *
 *          trustee                 CPU                draining
 *         took over                down               complete
 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
 *                        |                     |                  ^
 *                        | CPU is back online  v   return workers |
 *                         ----------------> RELEASE --------------
 */
/**
 * trustee_wait_event_timeout - timed event wait for trustee
 * @cond: condition to wait for
 * @timeout: timeout in jiffies
 *
 * wait_event_timeout() for trustee to use.  Handles locking and
 * checks for RELEASE request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * Positive indicating left time if @cond is satisfied, 0 if timed
 * out, -1 if canceled.
 */
#define trustee_wait_event_timeout(cond, timeout) ({			\
	long __ret = (timeout);						\
	while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) &&	\
	       __ret) {							\
		spin_unlock_irq(&gcwq->lock);				\
		__wait_event_timeout(gcwq->trustee_wait, (cond) ||	\
			(gcwq->trustee_state == TRUSTEE_RELEASE),	\
			__ret);						\
		spin_lock_irq(&gcwq->lock);				\
	}								\
	gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);		\
})

/**
 * trustee_wait_event - event wait for trustee
 * @cond: condition to wait for
 *
 * wait_event() for trustee to use.  Automatically handles locking and
 * checks for CANCEL request.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by trustee.
 *
 * RETURNS:
 * 0 if @cond is satisfied, -1 if canceled.
 */
#define trustee_wait_event(cond) ({					\
	long __ret1;							\
	__ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
	__ret1 < 0 ? -1 : 0;						\
})
static int __cpuinit trustee_thread(void *__gcwq)
{
	struct global_cwq *gcwq = __gcwq;
	struct worker *worker;
	struct hlist_node *pos;
	int i;

	BUG_ON(gcwq->cpu != smp_processor_id());

	spin_lock_irq(&gcwq->lock);
	/*
	 * Make all workers rogue.  Trustee must be bound to the
	 * target cpu and can't be cancelled.
	 */
	BUG_ON(gcwq->cpu != smp_processor_id());

	list_for_each_entry(worker, &gcwq->idle_list, entry)
		worker->flags |= WORKER_ROGUE;

	for_each_busy_worker(worker, i, pos, gcwq)
		worker->flags |= WORKER_ROGUE;

	/*
	 * We're now in charge.  Notify and proceed to drain.  We need
	 * to keep the gcwq running during the whole CPU down
	 * procedure as other cpu hotunplug callbacks may need to
	 * flush currently running tasks.
	 */
	gcwq->trustee_state = TRUSTEE_IN_CHARGE;
	wake_up_all(&gcwq->trustee_wait);

	/*
	 * The original cpu is in the process of dying and may go away
	 * anytime now.  When that happens, we and all workers would
	 * be migrated to other cpus.  Try draining any left work.
	 * Note that if the gcwq is frozen, there may be frozen works
	 * in freezeable cwqs.  Don't declare completion while frozen.
	 */
	while (gcwq->nr_workers != gcwq->nr_idle ||
	       gcwq->flags & GCWQ_FREEZING ||
	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
		/* give a breather */
		if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
			break;
	}

	/* notify completion */
	gcwq->trustee = NULL;
	gcwq->trustee_state = TRUSTEE_DONE;
	wake_up_all(&gcwq->trustee_wait);
	spin_unlock_irq(&gcwq->lock);
	return 0;
}
/**
 * wait_trustee_state - wait for trustee to enter the specified state
 * @gcwq: gcwq the trustee of interest belongs to
 * @state: target state to wait for
 *
 * Wait for the trustee to reach @state.  DONE is already matched.
 *
 * CONTEXT:
 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
 * multiple times.  To be used by cpu_callback.
 */
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
{
	if (!(gcwq->trustee_state == state ||
	      gcwq->trustee_state == TRUSTEE_DONE)) {
		spin_unlock_irq(&gcwq->lock);
		__wait_event(gcwq->trustee_wait,
			     gcwq->trustee_state == state ||
			     gcwq->trustee_state == TRUSTEE_DONE);
		spin_lock_irq(&gcwq->lock);
	}
}
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct global_cwq *gcwq = get_gcwq(cpu);
	struct task_struct *new_trustee = NULL;
	struct worker *worker;
	struct hlist_node *pos;
	unsigned long flags;
	int i;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_DOWN_PREPARE:
		new_trustee = kthread_create(trustee_thread, gcwq,
					     "workqueue_trustee/%d\n", cpu);
		if (IS_ERR(new_trustee))
			return notifier_from_errno(PTR_ERR(new_trustee));
		kthread_bind(new_trustee, cpu);
	}

	/* some are called w/ irq disabled, don't disturb irq status */
	spin_lock_irqsave(&gcwq->lock, flags);

	switch (action) {
	case CPU_DOWN_PREPARE:
		/* initialize trustee and tell it to acquire the gcwq */
		BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
		gcwq->trustee = new_trustee;
		gcwq->trustee_state = TRUSTEE_START;
		wake_up_process(gcwq->trustee);
		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
		break;

	case CPU_POST_DEAD:
		gcwq->trustee_state = TRUSTEE_BUTCHER;
		break;

	case CPU_DOWN_FAILED:
	case CPU_ONLINE:
		if (gcwq->trustee_state != TRUSTEE_DONE) {
			gcwq->trustee_state = TRUSTEE_RELEASE;
			wake_up_process(gcwq->trustee);
			wait_trustee_state(gcwq, TRUSTEE_DONE);
		}

		/* clear ROGUE from all workers */
		list_for_each_entry(worker, &gcwq->idle_list, entry)
			worker->flags &= ~WORKER_ROGUE;

		for_each_busy_worker(worker, i, pos, gcwq)
			worker->flags &= ~WORKER_ROGUE;
		break;
	}

	spin_unlock_irqrestore(&gcwq->lock, flags);

	return notifier_from_errno(0);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;
	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}

/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
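
/*
 * Usage sketch (editor's illustration; collect_cpu_id() and example()
 * are hypothetical): run a short function on a specific CPU from
 * process context without migrating the caller.
 */
#if 0
static long collect_cpu_id(void *arg)
{
	/* runs bound to the cpu handed to work_on_cpu() */
	return raw_smp_processor_id();
}

static long example(unsigned int cpu)
{
	/* caller must keep @cpu online, e.g. via get_online_cpus() */
	return work_on_cpu(cpu, collect_cpu_id, NULL);
}
#endif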
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER

/**
 * freeze_workqueues_begin - begin freezing workqueues
 *
 * Start freezing workqueues.  After this function returns, all
 * freezeable workqueues will queue new works to their delayed_works
 * list instead of the cwq ones.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void freeze_workqueues_begin(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	BUG_ON(workqueue_freezing);
	workqueue_freezing = true;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		BUG_ON(gcwq->flags & GCWQ_FREEZING);
		gcwq->flags |= GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (wq->flags & WQ_FREEZEABLE)
				cwq->max_active = 0;
		}

		spin_unlock_irq(&gcwq->lock);
	}

	spin_unlock(&workqueue_lock);
}
/**
 * freeze_workqueues_busy - are freezeable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezeable workqueues are still busy.  %false if
 * freezing is complete.
 */
bool freeze_workqueues_busy(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;
	bool busy = false;

	spin_lock(&workqueue_lock);

	BUG_ON(!workqueue_freezing);

	for_each_possible_cpu(cpu) {
		/*
		 * nr_active is monotonically decreasing.  It's safe
		 * to peek without lock.
		 */
		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			BUG_ON(cwq->nr_active < 0);
			if (cwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock(&workqueue_lock);
	return busy;
}
/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective cwq worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
 */
void thaw_workqueues(void)
{
	struct workqueue_struct *wq;
	unsigned int cpu;

	spin_lock(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_irq(&gcwq->lock);

		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
		gcwq->flags &= ~GCWQ_FREEZING;

		list_for_each_entry(wq, &workqueues, list) {
			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

			if (!(wq->flags & WQ_FREEZEABLE))
				continue;

			/* restore max_active and repopulate worklist */
			cwq->max_active = wq->saved_max_active;

			while (!list_empty(&cwq->delayed_works) &&
			       cwq->nr_active < cwq->max_active)
				cwq_activate_first_delayed(cwq);

			/* perform delayed unbind from single cpu if empty */
			if (wq->single_cpu == gcwq->cpu &&
			    !cwq->nr_active && list_empty(&cwq->delayed_works))
				cwq_unbind_single_cpu(cwq);

			wake_up_process(cwq->worker->task);
		}

		spin_unlock_irq(&gcwq->lock);
	}

	workqueue_freezing = false;
out_unlock:
	spin_unlock(&workqueue_lock);
}
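
/*
 * Calling-sequence sketch (editor's illustration): the freezer is
 * expected to drive the three functions above as a begin/poll/thaw
 * cycle, roughly like this.
 */
#if 0
	freeze_workqueues_begin();

	/* poll until all freezeable works have drained */
	while (freeze_workqueues_busy())
		msleep(10);

	/* ... system image written, machine resumed ... */

	thaw_workqueues();
#endif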
#endif /* CONFIG_FREEZER */
void __init init_workqueues(void)
{
	unsigned int cpu;
	int i;

	/*
	 * The pointer part of work->data is either pointing to the
	 * cwq or contains the cpu number the work ran last on.  Make
	 * sure cpu number won't overflow into kernel pointer area so
	 * that they can be distinguished.
	 */
	BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
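
	/*
	 * Editor's note with illustrative numbers: on 32-bit x86 with
	 * PAGE_OFFSET at 0xC0000000, an NR_CPUS of, say, 512 and 8
	 * flag bits, the largest encoded value is 512 << 8 == 0x20000,
	 * far below PAGE_OFFSET, so an encoded cpu number can never
	 * look like a kernel pointer.
	 */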

	hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);

	/* initialize gcwqs */
	for_each_possible_cpu(cpu) {
		struct global_cwq *gcwq = get_gcwq(cpu);

		spin_lock_init(&gcwq->lock);
		gcwq->cpu = cpu;

		INIT_LIST_HEAD(&gcwq->idle_list);
		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);

		ida_init(&gcwq->worker_ida);

		gcwq->trustee_state = TRUSTEE_DONE;
		init_waitqueue_head(&gcwq->trustee_wait);
	}

	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}