workqueue: implement WQ_NON_REENTRANT
1 /*
2  * linux/kernel/workqueue.c
3  *
4  * Generic mechanism for defining kernel helper threads for running
5  * arbitrary tasks in process context.
6  *
7  * Started by Ingo Molnar, Copyright (C) 2002
8  *
9  * Derived from the taskqueue/keventd code by:
10  *
11  *   David Woodhouse <dwmw2@infradead.org>
12  *   Andrew Morton
13  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
14  *   Theodore Ts'o <tytso@mit.edu>
15  *
16  * Made to use alloc_percpu by Christoph Lameter.
17  */
18
19 #include <linux/module.h>
20 #include <linux/kernel.h>
21 #include <linux/sched.h>
22 #include <linux/init.h>
23 #include <linux/signal.h>
24 #include <linux/completion.h>
25 #include <linux/workqueue.h>
26 #include <linux/slab.h>
27 #include <linux/cpu.h>
28 #include <linux/notifier.h>
29 #include <linux/kthread.h>
30 #include <linux/hardirq.h>
31 #include <linux/mempolicy.h>
32 #include <linux/freezer.h>
33 #include <linux/kallsyms.h>
34 #include <linux/debug_locks.h>
35 #include <linux/lockdep.h>
36 #include <linux/idr.h>
37
38 enum {
39         /* global_cwq flags */
40         GCWQ_FREEZING           = 1 << 3,       /* freeze in progress */
41
42         /* worker flags */
43         WORKER_STARTED          = 1 << 0,       /* started */
44         WORKER_DIE              = 1 << 1,       /* die die die */
45         WORKER_IDLE             = 1 << 2,       /* is idle */
46         WORKER_ROGUE            = 1 << 4,       /* not bound to any cpu */
47
48         /* gcwq->trustee_state */
49         TRUSTEE_START           = 0,            /* start */
50         TRUSTEE_IN_CHARGE       = 1,            /* trustee in charge of gcwq */
51         TRUSTEE_BUTCHER         = 2,            /* butcher workers */
52         TRUSTEE_RELEASE         = 3,            /* release workers */
53         TRUSTEE_DONE            = 4,            /* trustee is done */
54
55         BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
56         BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
57         BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
58
59         TRUSTEE_COOLDOWN        = HZ / 10,      /* for trustee draining */
60 };
61
62 /*
63  * Structure fields follow one of the following exclusion rules.
64  *
65  * I: Set during initialization and read-only afterwards.
66  *
67  * L: gcwq->lock protected.  Access with gcwq->lock held.
68  *
69  * F: wq->flush_mutex protected.
70  *
71  * W: workqueue_lock protected.
72  */
73
74 struct global_cwq;
75 struct cpu_workqueue_struct;
76
77 struct worker {
78         /* on idle list while idle, on busy hash table while busy */
79         union {
80                 struct list_head        entry;  /* L: while idle */
81                 struct hlist_node       hentry; /* L: while busy */
82         };
83
84         struct work_struct      *current_work;  /* L: work being processed */
85         struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
86         struct list_head        scheduled;      /* L: scheduled works */
87         struct task_struct      *task;          /* I: worker task */
88         struct global_cwq       *gcwq;          /* I: the associated gcwq */
89         struct cpu_workqueue_struct *cwq;       /* I: the associated cwq */
90         unsigned int            flags;          /* L: flags */
91         int                     id;             /* I: worker id */
92 };
93
94 /*
95  * Global per-cpu workqueue.
96  */
97 struct global_cwq {
98         spinlock_t              lock;           /* the gcwq lock */
99         unsigned int            cpu;            /* I: the associated cpu */
100         unsigned int            flags;          /* L: GCWQ_* flags */
101
102         int                     nr_workers;     /* L: total number of workers */
103         int                     nr_idle;        /* L: currently idle ones */
104
105         /* workers are chained either in the idle_list or busy_hash */
106         struct list_head        idle_list;      /* L: list of idle workers */
107         struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
108                                                 /* L: hash of busy workers */
109
110         struct ida              worker_ida;     /* L: for worker IDs */
111
112         struct task_struct      *trustee;       /* L: for gcwq shutdown */
113         unsigned int            trustee_state;  /* L: trustee state */
114         wait_queue_head_t       trustee_wait;   /* trustee wait */
115 } ____cacheline_aligned_in_smp;
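/*
 * For example, the "L:" fields above, such as nr_workers and nr_idle,
 * may only be touched between spin_lock_irq(&gcwq->lock) and
 * spin_unlock_irq(&gcwq->lock).  A minimal sketch (the helper name is
 * made up for illustration):
 */
#if 0	/* illustrative only */
static int example_nr_running_workers(struct global_cwq *gcwq)
{
	int nr;

	spin_lock_irq(&gcwq->lock);
	nr = gcwq->nr_workers - gcwq->nr_idle;	/* both are L: fields */
	spin_unlock_irq(&gcwq->lock);

	return nr;
}
#endif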
116
117 /*
118  * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
119  * work_struct->data are used for flags and thus cwqs need to be
120  * aligned to at least (1 << WORK_STRUCT_FLAG_BITS).
121  */
122 struct cpu_workqueue_struct {
123         struct global_cwq       *gcwq;          /* I: the associated gcwq */
124         struct list_head worklist;
125         struct worker           *worker;
126         struct workqueue_struct *wq;            /* I: the owning workqueue */
127         int                     work_color;     /* L: current color */
128         int                     flush_color;    /* L: flushing color */
129         int                     nr_in_flight[WORK_NR_COLORS];
130                                                 /* L: nr of in_flight works */
131         int                     nr_active;      /* L: nr of active works */
132         int                     max_active;     /* L: max active works */
133         struct list_head        delayed_works;  /* L: delayed works */
134 };
135
136 /*
137  * Structure used to wait for workqueue flush.
138  */
139 struct wq_flusher {
140         struct list_head        list;           /* F: list of flushers */
141         int                     flush_color;    /* F: flush color waiting for */
142         struct completion       done;           /* flush completion */
143 };
144
145 /*
146  * The externally visible workqueue abstraction is an array of
147  * per-CPU workqueues:
148  */
149 struct workqueue_struct {
150         unsigned int            flags;          /* I: WQ_* flags */
151         struct cpu_workqueue_struct *cpu_wq;    /* I: cwq's */
152         struct list_head        list;           /* W: list of all workqueues */
153
154         struct mutex            flush_mutex;    /* protects wq flushing */
155         int                     work_color;     /* F: current work color */
156         int                     flush_color;    /* F: current flush color */
157         atomic_t                nr_cwqs_to_flush; /* flush in progress */
158         struct wq_flusher       *first_flusher; /* F: first flusher */
159         struct list_head        flusher_queue;  /* F: flush waiters */
160         struct list_head        flusher_overflow; /* F: flush overflow list */
161
162         unsigned long           single_cpu;     /* cpu for single cpu wq */
163
164         int                     saved_max_active; /* I: saved cwq max_active */
165         const char              *name;          /* I: workqueue name */
166 #ifdef CONFIG_LOCKDEP
167         struct lockdep_map      lockdep_map;
168 #endif
169 };
170
171 #define for_each_busy_worker(worker, i, pos, gcwq)                      \
172         for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
173                 hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
174
175 #ifdef CONFIG_DEBUG_OBJECTS_WORK
176
177 static struct debug_obj_descr work_debug_descr;
178
179 /*
180  * fixup_init is called when:
181  * - an active object is initialized
182  */
183 static int work_fixup_init(void *addr, enum debug_obj_state state)
184 {
185         struct work_struct *work = addr;
186
187         switch (state) {
188         case ODEBUG_STATE_ACTIVE:
189                 cancel_work_sync(work);
190                 debug_object_init(work, &work_debug_descr);
191                 return 1;
192         default:
193                 return 0;
194         }
195 }
196
197 /*
198  * fixup_activate is called when:
199  * - an active object is activated
200  * - an unknown object is activated (might be a statically initialized object)
201  */
202 static int work_fixup_activate(void *addr, enum debug_obj_state state)
203 {
204         struct work_struct *work = addr;
205
206         switch (state) {
207
208         case ODEBUG_STATE_NOTAVAILABLE:
209                 /*
210                  * This is not really a fixup. The work struct was
211                  * statically initialized. We just make sure that it
212                  * is tracked in the object tracker.
213                  */
214                 if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
215                         debug_object_init(work, &work_debug_descr);
216                         debug_object_activate(work, &work_debug_descr);
217                         return 0;
218                 }
219                 WARN_ON_ONCE(1);
220                 return 0;
221
222         case ODEBUG_STATE_ACTIVE:
223                 WARN_ON(1);
224
225         default:
226                 return 0;
227         }
228 }
229
230 /*
231  * fixup_free is called when:
232  * - an active object is freed
233  */
234 static int work_fixup_free(void *addr, enum debug_obj_state state)
235 {
236         struct work_struct *work = addr;
237
238         switch (state) {
239         case ODEBUG_STATE_ACTIVE:
240                 cancel_work_sync(work);
241                 debug_object_free(work, &work_debug_descr);
242                 return 1;
243         default:
244                 return 0;
245         }
246 }
247
248 static struct debug_obj_descr work_debug_descr = {
249         .name           = "work_struct",
250         .fixup_init     = work_fixup_init,
251         .fixup_activate = work_fixup_activate,
252         .fixup_free     = work_fixup_free,
253 };
254
255 static inline void debug_work_activate(struct work_struct *work)
256 {
257         debug_object_activate(work, &work_debug_descr);
258 }
259
260 static inline void debug_work_deactivate(struct work_struct *work)
261 {
262         debug_object_deactivate(work, &work_debug_descr);
263 }
264
265 void __init_work(struct work_struct *work, int onstack)
266 {
267         if (onstack)
268                 debug_object_init_on_stack(work, &work_debug_descr);
269         else
270                 debug_object_init(work, &work_debug_descr);
271 }
272 EXPORT_SYMBOL_GPL(__init_work);
273
274 void destroy_work_on_stack(struct work_struct *work)
275 {
276         debug_object_free(work, &work_debug_descr);
277 }
278 EXPORT_SYMBOL_GPL(destroy_work_on_stack);
279
280 #else
281 static inline void debug_work_activate(struct work_struct *work) { }
282 static inline void debug_work_deactivate(struct work_struct *work) { }
283 #endif
284
285 /* Serializes the accesses to the list of workqueues. */
286 static DEFINE_SPINLOCK(workqueue_lock);
287 static LIST_HEAD(workqueues);
288 static bool workqueue_freezing;         /* W: have wqs started freezing? */
289
290 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
291
292 static int worker_thread(void *__worker);
293
294 static struct global_cwq *get_gcwq(unsigned int cpu)
295 {
296         return &per_cpu(global_cwq, cpu);
297 }
298
299 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
300                                             struct workqueue_struct *wq)
301 {
302         return per_cpu_ptr(wq->cpu_wq, cpu);
303 }
304
305 static unsigned int work_color_to_flags(int color)
306 {
307         return color << WORK_STRUCT_COLOR_SHIFT;
308 }
309
310 static int get_work_color(struct work_struct *work)
311 {
312         return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
313                 ((1 << WORK_STRUCT_COLOR_BITS) - 1);
314 }
315
316 static int work_next_color(int color)
317 {
318         return (color + 1) % WORK_NR_COLORS;
319 }
320
321 /*
322  * Work data points to the cwq while a work is on queue.  Once
323  * execution starts, it points to the cpu the work was last on.  This
324  * can be distinguished by comparing the data value against
325  * PAGE_OFFSET.
326  *
327  * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
328  * cwq, cpu or clear work->data.  These functions should only be
329  * called while the work is owned - ie. while the PENDING bit is set.
330  *
331  * get_work_[g]cwq() can be used to obtain the gcwq or cwq
332  * corresponding to a work.  gcwq is available once the work has been
333  * queued anywhere after initialization.  cwq is available only from
334  * queueing until execution starts.
335  */
336 static inline void set_work_data(struct work_struct *work, unsigned long data,
337                                  unsigned long flags)
338 {
339         BUG_ON(!work_pending(work));
340         atomic_long_set(&work->data, data | flags | work_static(work));
341 }
342
343 static void set_work_cwq(struct work_struct *work,
344                          struct cpu_workqueue_struct *cwq,
345                          unsigned long extra_flags)
346 {
347         set_work_data(work, (unsigned long)cwq,
348                       WORK_STRUCT_PENDING | extra_flags);
349 }
350
351 static void set_work_cpu(struct work_struct *work, unsigned int cpu)
352 {
353         set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
354 }
355
356 static void clear_work_data(struct work_struct *work)
357 {
358         set_work_data(work, WORK_STRUCT_NO_CPU, 0);
359 }
360
361 static inline unsigned long get_work_data(struct work_struct *work)
362 {
363         return atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK;
364 }
365
366 static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
367 {
368         unsigned long data = get_work_data(work);
369
370         return data >= PAGE_OFFSET ? (void *)data : NULL;
371 }
372
373 static struct global_cwq *get_work_gcwq(struct work_struct *work)
374 {
375         unsigned long data = get_work_data(work);
376         unsigned int cpu;
377
378         if (data >= PAGE_OFFSET)
379                 return ((struct cpu_workqueue_struct *)data)->gcwq;
380
381         cpu = data >> WORK_STRUCT_FLAG_BITS;
382         if (cpu == NR_CPUS)
383                 return NULL;
384
385         BUG_ON(cpu >= num_possible_cpus());
386         return get_gcwq(cpu);
387 }
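/*
 * A rough trace of the encoding described above (states, not literal
 * values):
 *
 *   while queued:     set_work_cwq()    -> data holds the cwq pointer,
 *                                          which is >= PAGE_OFFSET
 *   while executing:  set_work_cpu()    -> data holds
 *                                          cpu << WORK_STRUCT_FLAG_BITS,
 *                                          which is < PAGE_OFFSET
 *   after cancel:     clear_work_data() -> cpu field is WORK_STRUCT_NO_CPU
 *
 * Accordingly, get_work_cwq() is meaningful only in the first state
 * while get_work_gcwq() also works in the second.
 */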
388
389 /**
390  * busy_worker_head - return the busy hash head for a work
391  * @gcwq: gcwq of interest
392  * @work: work to be hashed
393  *
394  * Return hash head of @gcwq for @work.
395  *
396  * CONTEXT:
397  * spin_lock_irq(gcwq->lock).
398  *
399  * RETURNS:
400  * Pointer to the hash head.
401  */
402 static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
403                                            struct work_struct *work)
404 {
405         const int base_shift = ilog2(sizeof(struct work_struct));
406         unsigned long v = (unsigned long)work;
407
408         /* simple shift and fold hash, do we need something better? */
409         v >>= base_shift;
410         v += v >> BUSY_WORKER_HASH_ORDER;
411         v &= BUSY_WORKER_HASH_MASK;
412
413         return &gcwq->busy_hash[v];
414 }
415
416 /**
417  * __find_worker_executing_work - find worker which is executing a work
418  * @gcwq: gcwq of interest
419  * @bwh: hash head as returned by busy_worker_head()
420  * @work: work to find worker for
421  *
422  * Find a worker which is executing @work on @gcwq.  @bwh should be
423  * the hash head obtained by calling busy_worker_head() with the same
424  * work.
425  *
426  * CONTEXT:
427  * spin_lock_irq(gcwq->lock).
428  *
429  * RETURNS:
430  * Pointer to worker which is executing @work if found, NULL
431  * otherwise.
432  */
433 static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
434                                                    struct hlist_head *bwh,
435                                                    struct work_struct *work)
436 {
437         struct worker *worker;
438         struct hlist_node *tmp;
439
440         hlist_for_each_entry(worker, tmp, bwh, hentry)
441                 if (worker->current_work == work)
442                         return worker;
443         return NULL;
444 }
445
446 /**
447  * find_worker_executing_work - find worker which is executing a work
448  * @gcwq: gcwq of interest
449  * @work: work to find worker for
450  *
451  * Find a worker which is executing @work on @gcwq.  This function is
452  * identical to __find_worker_executing_work() except that this
453  * function calculates @bwh itself.
454  *
455  * CONTEXT:
456  * spin_lock_irq(gcwq->lock).
457  *
458  * RETURNS:
459  * Pointer to worker which is executing @work if found, NULL
460  * otherwise.
461  */
462 static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
463                                                  struct work_struct *work)
464 {
465         return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
466                                             work);
467 }
468
469 /**
470  * insert_work - insert a work into cwq
471  * @cwq: cwq @work belongs to
472  * @work: work to insert
473  * @head: insertion point
474  * @extra_flags: extra WORK_STRUCT_* flags to set
475  *
476  * Insert @work into @cwq after @head.
477  *
478  * CONTEXT:
479  * spin_lock_irq(gcwq->lock).
480  */
481 static void insert_work(struct cpu_workqueue_struct *cwq,
482                         struct work_struct *work, struct list_head *head,
483                         unsigned int extra_flags)
484 {
485         /* we own @work, set data and link */
486         set_work_cwq(work, cwq, extra_flags);
487
488         /*
489          * Ensure that we get the right work->data if we see the
490          * result of list_add() below, see try_to_grab_pending().
491          */
492         smp_wmb();
493
494         list_add_tail(&work->entry, head);
495         wake_up_process(cwq->worker->task);
496 }
497
498 /**
499  * cwq_unbind_single_cpu - unbind cwq from single cpu workqueue processing
500  * @cwq: cwq to unbind
501  *
502  * Try to unbind @cwq from single cpu workqueue processing.  If
503  * @cwq->wq is frozen, unbind is delayed till the workqueue is thawed.
504  *
505  * CONTEXT:
506  * spin_lock_irq(gcwq->lock).
507  */
508 static void cwq_unbind_single_cpu(struct cpu_workqueue_struct *cwq)
509 {
510         struct workqueue_struct *wq = cwq->wq;
511         struct global_cwq *gcwq = cwq->gcwq;
512
513         BUG_ON(wq->single_cpu != gcwq->cpu);
514         /*
515          * Unbind from workqueue if @cwq is not frozen.  If frozen,
516          * thaw_workqueues() will either restart processing on this
517          * cpu or unbind if empty.  This keeps works queued while
518          * frozen fully ordered and flushable.
519          */
520         if (likely(!(gcwq->flags & GCWQ_FREEZING))) {
521                 smp_wmb();      /* paired with cmpxchg() in __queue_work() */
522                 wq->single_cpu = NR_CPUS;
523         }
524 }
525
526 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
527                          struct work_struct *work)
528 {
529         struct global_cwq *gcwq;
530         struct cpu_workqueue_struct *cwq;
531         struct list_head *worklist;
532         unsigned long flags;
533         bool arbitrate;
534
535         debug_work_activate(work);
536
537         /*
538          * Determine gcwq to use.  SINGLE_CPU is inherently
539          * NON_REENTRANT, so test it first.
540          */
541         if (!(wq->flags & WQ_SINGLE_CPU)) {
542                 struct global_cwq *last_gcwq;
543
544                 /*
545                  * It's multi cpu.  If @wq is non-reentrant and @work
546                  * was previously on a different cpu, it might still
547                  * be running there, in which case the work needs to
548                  * be queued on that cpu to guarantee non-reentrance.
549                  */
550                 gcwq = get_gcwq(cpu);
551                 if (wq->flags & WQ_NON_REENTRANT &&
552                     (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
553                         struct worker *worker;
554
555                         spin_lock_irqsave(&last_gcwq->lock, flags);
556
557                         worker = find_worker_executing_work(last_gcwq, work);
558
559                         if (worker && worker->current_cwq->wq == wq)
560                                 gcwq = last_gcwq;
561                         else {
562                                 /* meh... not running there, queue here */
563                                 spin_unlock_irqrestore(&last_gcwq->lock, flags);
564                                 spin_lock_irqsave(&gcwq->lock, flags);
565                         }
566                 } else
567                         spin_lock_irqsave(&gcwq->lock, flags);
568         } else {
569                 unsigned int req_cpu = cpu;
570
571                 /*
572                  * It's a bit more complex for single cpu workqueues.
573                  * We first need to determine which cpu is going to be
574                  * used.  If no cpu is currently serving this
575                  * workqueue, arbitrate using atomic accesses to
576                  * wq->single_cpu; otherwise, use the current one.
577                  */
578         retry:
579                 cpu = wq->single_cpu;
580                 arbitrate = cpu == NR_CPUS;
581                 if (arbitrate)
582                         cpu = req_cpu;
583
584                 gcwq = get_gcwq(cpu);
585                 spin_lock_irqsave(&gcwq->lock, flags);
586
587                 /*
588                  * The following cmpxchg() is a full barrier paired
589                  * with smp_wmb() in cwq_unbind_single_cpu() and
590                  * guarantees that all changes to wq->st_* fields are
591                  * guarantees that all changes to wq->single_cpu are
592                  */
593                 if (arbitrate)
594                         cmpxchg(&wq->single_cpu, NR_CPUS, cpu);
595
596                 if (unlikely(wq->single_cpu != cpu)) {
597                         spin_unlock_irqrestore(&gcwq->lock, flags);
598                         goto retry;
599                 }
600         }
601
602         /* gcwq determined, get cwq and queue */
603         cwq = get_cwq(gcwq->cpu, wq);
604
605         BUG_ON(!list_empty(&work->entry));
606
607         cwq->nr_in_flight[cwq->work_color]++;
608
609         if (likely(cwq->nr_active < cwq->max_active)) {
610                 cwq->nr_active++;
611                 worklist = &cwq->worklist;
612         } else
613                 worklist = &cwq->delayed_works;
614
615         insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
616
617         spin_unlock_irqrestore(&gcwq->lock, flags);
618 }
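/*
 * The WQ_NON_REENTRANT handling above is what lets a work function
 * assume it never runs concurrently with itself even when it is
 * requeued from another CPU.  A sketch of a caller relying on this
 * (my_wq is assumed to have been created with WQ_NON_REENTRANT set;
 * my_work and my_refresh_state() are hypothetical):
 */
#if 0	/* illustrative only */
static void my_refresh_fn(struct work_struct *work)
{
	/*
	 * If this work is requeued from another CPU while still
	 * running, __queue_work() places it on the gcwq that is
	 * already executing it instead of the local one, so the
	 * function never races with itself.
	 */
	my_refresh_state();
}

void my_kick_refresh(void)	/* may be called from any CPU */
{
	queue_work(my_wq, &my_work);
}
#endif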
619
620 /**
621  * queue_work - queue work on a workqueue
622  * @wq: workqueue to use
623  * @work: work to queue
624  *
625  * Returns 0 if @work was already on a queue, non-zero otherwise.
626  *
627  * We queue the work to the CPU on which it was submitted, but if the CPU dies
628  * it can be processed by another CPU.
629  */
630 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
631 {
632         int ret;
633
634         ret = queue_work_on(get_cpu(), wq, work);
635         put_cpu();
636
637         return ret;
638 }
639 EXPORT_SYMBOL_GPL(queue_work);
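/*
 * Typical driver-side usage (a sketch; struct my_dev and the function
 * names are hypothetical):
 */
#if 0	/* illustrative only */
struct my_dev {
	struct workqueue_struct	*wq;
	struct work_struct	event_work;
};

static void my_event_work_fn(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, event_work);

	/* runs in process context and may sleep */
	my_dev_handle_events(dev);
}

static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	/* defer the heavy lifting; INIT_WORK() was done during probe */
	queue_work(dev->wq, &dev->event_work);
	return IRQ_HANDLED;
}
#endif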
640
641 /**
642  * queue_work_on - queue work on specific cpu
643  * @cpu: CPU number to execute work on
644  * @wq: workqueue to use
645  * @work: work to queue
646  *
647  * Returns 0 if @work was already on a queue, non-zero otherwise.
648  *
649  * We queue the work to a specific CPU, the caller must ensure it
650  * can't go away.
651  */
652 int
653 queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
654 {
655         int ret = 0;
656
657         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
658                 __queue_work(cpu, wq, work);
659                 ret = 1;
660         }
661         return ret;
662 }
663 EXPORT_SYMBOL_GPL(queue_work_on);
664
665 static void delayed_work_timer_fn(unsigned long __data)
666 {
667         struct delayed_work *dwork = (struct delayed_work *)__data;
668         struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
669
670         __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
671 }
672
673 /**
674  * queue_delayed_work - queue work on a workqueue after delay
675  * @wq: workqueue to use
676  * @dwork: delayable work to queue
677  * @delay: number of jiffies to wait before queueing
678  *
679  * Returns 0 if @work was already on a queue, non-zero otherwise.
680  */
681 int queue_delayed_work(struct workqueue_struct *wq,
682                         struct delayed_work *dwork, unsigned long delay)
683 {
684         if (delay == 0)
685                 return queue_work(wq, &dwork->work);
686
687         return queue_delayed_work_on(-1, wq, dwork, delay);
688 }
689 EXPORT_SYMBOL_GPL(queue_delayed_work);
690
691 /**
692  * queue_delayed_work_on - queue work on specific CPU after delay
693  * @cpu: CPU number to execute work on
694  * @wq: workqueue to use
695  * @dwork: work to queue
696  * @delay: number of jiffies to wait before queueing
697  *
698  * Returns 0 if @work was already on a queue, non-zero otherwise.
699  */
700 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
701                         struct delayed_work *dwork, unsigned long delay)
702 {
703         int ret = 0;
704         struct timer_list *timer = &dwork->timer;
705         struct work_struct *work = &dwork->work;
706
707         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
708                 struct global_cwq *gcwq = get_work_gcwq(work);
709                 unsigned int lcpu = gcwq ? gcwq->cpu : raw_smp_processor_id();
710
711                 BUG_ON(timer_pending(timer));
712                 BUG_ON(!list_empty(&work->entry));
713
714                 timer_stats_timer_set_start_info(&dwork->timer);
715                 /*
716                  * This stores cwq for the moment, for the timer_fn.
717                  * Note that the work's gcwq is preserved to allow
718                  * reentrance detection for delayed works.
719                  */
720                 set_work_cwq(work, get_cwq(lcpu, wq), 0);
721                 timer->expires = jiffies + delay;
722                 timer->data = (unsigned long)dwork;
723                 timer->function = delayed_work_timer_fn;
724
725                 if (unlikely(cpu >= 0))
726                         add_timer_on(timer, cpu);
727                 else
728                         add_timer(timer);
729                 ret = 1;
730         }
731         return ret;
732 }
733 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
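/*
 * A common pattern is a self-rearming poll built on a delayed work (a
 * sketch; my_wq and my_poll_hw() are hypothetical):
 */
#if 0	/* illustrative only */
static struct delayed_work my_poll_work;

static void my_poll_fn(struct work_struct *work)
{
	my_poll_hw();
	/* delayed_work_timer_fn() will later requeue on the wq whose
	 * cwq was stored by queue_delayed_work_on() above */
	queue_delayed_work(my_wq, &my_poll_work, HZ);
}

static void my_poll_start(void)
{
	INIT_DELAYED_WORK(&my_poll_work, my_poll_fn);
	queue_delayed_work(my_wq, &my_poll_work, HZ);
}
#endif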
734
735 /**
736  * worker_enter_idle - enter idle state
737  * @worker: worker which is entering idle state
738  *
739  * @worker is entering idle state.  Update stats and idle timer if
740  * necessary.
741  *
742  * LOCKING:
743  * spin_lock_irq(gcwq->lock).
744  */
745 static void worker_enter_idle(struct worker *worker)
746 {
747         struct global_cwq *gcwq = worker->gcwq;
748
749         BUG_ON(worker->flags & WORKER_IDLE);
750         BUG_ON(!list_empty(&worker->entry) &&
751                (worker->hentry.next || worker->hentry.pprev));
752
753         worker->flags |= WORKER_IDLE;
754         gcwq->nr_idle++;
755
756         /* idle_list is LIFO */
757         list_add(&worker->entry, &gcwq->idle_list);
758
759         if (unlikely(worker->flags & WORKER_ROGUE))
760                 wake_up_all(&gcwq->trustee_wait);
761 }
762
763 /**
764  * worker_leave_idle - leave idle state
765  * @worker: worker which is leaving idle state
766  *
767  * @worker is leaving idle state.  Update stats.
768  *
769  * LOCKING:
770  * spin_lock_irq(gcwq->lock).
771  */
772 static void worker_leave_idle(struct worker *worker)
773 {
774         struct global_cwq *gcwq = worker->gcwq;
775
776         BUG_ON(!(worker->flags & WORKER_IDLE));
777         worker->flags &= ~WORKER_IDLE;
778         gcwq->nr_idle--;
779         list_del_init(&worker->entry);
780 }
781
782 static struct worker *alloc_worker(void)
783 {
784         struct worker *worker;
785
786         worker = kzalloc(sizeof(*worker), GFP_KERNEL);
787         if (worker) {
788                 INIT_LIST_HEAD(&worker->entry);
789                 INIT_LIST_HEAD(&worker->scheduled);
790         }
791         return worker;
792 }
793
794 /**
795  * create_worker - create a new workqueue worker
796  * @cwq: cwq the new worker will belong to
797  * @bind: whether to bind the worker to the gcwq's cpu or not
798  *
799  * Create a new worker which is bound to @cwq.  The returned worker
800  * can be started by calling start_worker() or destroyed using
801  * destroy_worker().
802  *
803  * CONTEXT:
804  * Might sleep.  Does GFP_KERNEL allocations.
805  *
806  * RETURNS:
807  * Pointer to the newly created worker.
808  */
809 static struct worker *create_worker(struct cpu_workqueue_struct *cwq, bool bind)
810 {
811         struct global_cwq *gcwq = cwq->gcwq;
812         int id = -1;
813         struct worker *worker = NULL;
814
815         spin_lock_irq(&gcwq->lock);
816         while (ida_get_new(&gcwq->worker_ida, &id)) {
817                 spin_unlock_irq(&gcwq->lock);
818                 if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
819                         goto fail;
820                 spin_lock_irq(&gcwq->lock);
821         }
822         spin_unlock_irq(&gcwq->lock);
823
824         worker = alloc_worker();
825         if (!worker)
826                 goto fail;
827
828         worker->gcwq = gcwq;
829         worker->cwq = cwq;
830         worker->id = id;
831
832         worker->task = kthread_create(worker_thread, worker, "kworker/%u:%d",
833                                       gcwq->cpu, id);
834         if (IS_ERR(worker->task))
835                 goto fail;
836
837         /*
838          * A rogue worker will become a regular one if the CPU comes
839          * online later on.  Make sure every worker has
840          * PF_THREAD_BOUND set.
841          */
842         if (bind)
843                 kthread_bind(worker->task, gcwq->cpu);
844         else
845                 worker->task->flags |= PF_THREAD_BOUND;
846
847         return worker;
848 fail:
849         if (id >= 0) {
850                 spin_lock_irq(&gcwq->lock);
851                 ida_remove(&gcwq->worker_ida, id);
852                 spin_unlock_irq(&gcwq->lock);
853         }
854         kfree(worker);
855         return NULL;
856 }
857
858 /**
859  * start_worker - start a newly created worker
860  * @worker: worker to start
861  *
862  * Make the gcwq aware of @worker and start it.
863  *
864  * CONTEXT:
865  * spin_lock_irq(gcwq->lock).
866  */
867 static void start_worker(struct worker *worker)
868 {
869         worker->flags |= WORKER_STARTED;
870         worker->gcwq->nr_workers++;
871         worker_enter_idle(worker);
872         wake_up_process(worker->task);
873 }
874
875 /**
876  * destroy_worker - destroy a workqueue worker
877  * @worker: worker to be destroyed
878  *
879  * Destroy @worker and adjust @gcwq stats accordingly.
880  *
881  * CONTEXT:
882  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
883  */
884 static void destroy_worker(struct worker *worker)
885 {
886         struct global_cwq *gcwq = worker->gcwq;
887         int id = worker->id;
888
889         /* sanity check frenzy */
890         BUG_ON(worker->current_work);
891         BUG_ON(!list_empty(&worker->scheduled));
892
893         if (worker->flags & WORKER_STARTED)
894                 gcwq->nr_workers--;
895         if (worker->flags & WORKER_IDLE)
896                 gcwq->nr_idle--;
897
898         list_del_init(&worker->entry);
899         worker->flags |= WORKER_DIE;
900
901         spin_unlock_irq(&gcwq->lock);
902
903         kthread_stop(worker->task);
904         kfree(worker);
905
906         spin_lock_irq(&gcwq->lock);
907         ida_remove(&gcwq->worker_ida, id);
908 }
909
910 /**
911  * move_linked_works - move linked works to a list
912  * @work: start of series of works to be scheduled
913  * @head: target list to append @work to
914  * @nextp: out parameter for nested worklist walking
915  *
916  * Schedule linked works starting from @work to @head.  Work series to
917  * be scheduled starts at @work and includes any consecutive work with
918  * WORK_STRUCT_LINKED set in its predecessor.
919  *
920  * If @nextp is not NULL, it's updated to point to the next work of
921  * the last scheduled work.  This allows move_linked_works() to be
922  * nested inside outer list_for_each_entry_safe().
923  *
924  * CONTEXT:
925  * spin_lock_irq(gcwq->lock).
926  */
927 static void move_linked_works(struct work_struct *work, struct list_head *head,
928                               struct work_struct **nextp)
929 {
930         struct work_struct *n;
931
932         /*
933          * A linked worklist always ends before the end of the list,
934          * so NULL can be used as the list head.
935          */
936         list_for_each_entry_safe_from(work, n, NULL, entry) {
937                 list_move_tail(&work->entry, head);
938                 if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
939                         break;
940         }
941
942         /*
943          * If we're already inside safe list traversal and have moved
944          * multiple works to the scheduled queue, the next position
945          * needs to be updated.
946          */
947         if (nextp)
948                 *nextp = n;
949 }
950
951 static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
952 {
953         struct work_struct *work = list_first_entry(&cwq->delayed_works,
954                                                     struct work_struct, entry);
955
956         move_linked_works(work, &cwq->worklist, NULL);
957         cwq->nr_active++;
958 }
959
960 /**
961  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
962  * @cwq: cwq of interest
963  * @color: color of work which left the queue
964  *
965  * A work either has completed or is removed from pending queue,
966  * decrement nr_in_flight of its cwq and handle workqueue flushing.
967  *
968  * CONTEXT:
969  * spin_lock_irq(gcwq->lock).
970  */
971 static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
972 {
973         /* ignore uncolored works */
974         if (color == WORK_NO_COLOR)
975                 return;
976
977         cwq->nr_in_flight[color]--;
978         cwq->nr_active--;
979
980         if (!list_empty(&cwq->delayed_works)) {
981                 /* one down, submit a delayed one */
982                 if (cwq->nr_active < cwq->max_active)
983                         cwq_activate_first_delayed(cwq);
984         } else if (!cwq->nr_active && cwq->wq->flags & WQ_SINGLE_CPU) {
985                 /* this was the last work, unbind from single cpu */
986                 cwq_unbind_single_cpu(cwq);
987         }
988
989         /* is flush in progress and are we at the flushing tip? */
990         if (likely(cwq->flush_color != color))
991                 return;
992
993         /* are there still in-flight works? */
994         if (cwq->nr_in_flight[color])
995                 return;
996
997         /* this cwq is done, clear flush_color */
998         cwq->flush_color = -1;
999
1000         /*
1001          * If this was the last cwq, wake up the first flusher.  It
1002          * will handle the rest.
1003          */
1004         if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1005                 complete(&cwq->wq->first_flusher->done);
1006 }
1007
1008 /**
1009  * process_one_work - process single work
1010  * @worker: self
1011  * @work: work to process
1012  *
1013  * Process @work.  This function contains all the logic necessary to
1014  * process a single work including synchronization against and
1015  * interaction with other workers on the same cpu, queueing and
1016  * flushing.  As long as context requirement is met, any worker can
1017  * call this function to process a work.
1018  *
1019  * CONTEXT:
1020  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1021  */
1022 static void process_one_work(struct worker *worker, struct work_struct *work)
1023 {
1024         struct cpu_workqueue_struct *cwq = worker->cwq;
1025         struct global_cwq *gcwq = cwq->gcwq;
1026         struct hlist_head *bwh = busy_worker_head(gcwq, work);
1027         work_func_t f = work->func;
1028         int work_color;
1029 #ifdef CONFIG_LOCKDEP
1030         /*
1031          * It is permissible to free the struct work_struct from
1032          * inside the function that is called from it, this we need to
1033          * take into account for lockdep too.  To avoid bogus "held
1034          * lock freed" warnings as well as problems when looking into
1035          * work->lockdep_map, make a copy and use that here.
1036          */
1037         struct lockdep_map lockdep_map = work->lockdep_map;
1038 #endif
1039         /* claim and process */
1040         debug_work_deactivate(work);
1041         hlist_add_head(&worker->hentry, bwh);
1042         worker->current_work = work;
1043         worker->current_cwq = cwq;
1044         work_color = get_work_color(work);
1045
1046         BUG_ON(get_work_cwq(work) != cwq);
1047         /* record the current cpu number in the work data and dequeue */
1048         set_work_cpu(work, gcwq->cpu);
1049         list_del_init(&work->entry);
1050
1051         spin_unlock_irq(&gcwq->lock);
1052
1053         work_clear_pending(work);
1054         lock_map_acquire(&cwq->wq->lockdep_map);
1055         lock_map_acquire(&lockdep_map);
1056         f(work);
1057         lock_map_release(&lockdep_map);
1058         lock_map_release(&cwq->wq->lockdep_map);
1059
1060         if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1061                 printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1062                        "%s/0x%08x/%d\n",
1063                        current->comm, preempt_count(), task_pid_nr(current));
1064                 printk(KERN_ERR "    last function: ");
1065                 print_symbol("%s\n", (unsigned long)f);
1066                 debug_show_held_locks(current);
1067                 dump_stack();
1068         }
1069
1070         spin_lock_irq(&gcwq->lock);
1071
1072         /* we're done with it, release */
1073         hlist_del_init(&worker->hentry);
1074         worker->current_work = NULL;
1075         worker->current_cwq = NULL;
1076         cwq_dec_nr_in_flight(cwq, work_color);
1077 }
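/*
 * The check above catches work functions which return in atomic
 * context or with locks held.  A deliberately broken sketch which
 * would trigger the "leaked lock or atomic" message:
 */
#if 0	/* illustrative only */
static DEFINE_SPINLOCK(my_lock);

static void my_buggy_work_fn(struct work_struct *work)
{
	spin_lock(&my_lock);
	/* missing spin_unlock() - the worker returns with elevated
	 * preempt_count() and, with lockdep, a non-zero lock depth */
}
#endif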
1078
1079 /**
1080  * process_scheduled_works - process scheduled works
1081  * @worker: self
1082  *
1083  * Process all scheduled works.  Please note that the scheduled list
1084  * may change while processing a work, so this function repeatedly
1085  * fetches a work from the top and executes it.
1086  *
1087  * CONTEXT:
1088  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1089  * multiple times.
1090  */
1091 static void process_scheduled_works(struct worker *worker)
1092 {
1093         while (!list_empty(&worker->scheduled)) {
1094                 struct work_struct *work = list_first_entry(&worker->scheduled,
1095                                                 struct work_struct, entry);
1096                 process_one_work(worker, work);
1097         }
1098 }
1099
1100 /**
1101  * worker_thread - the worker thread function
1102  * @__worker: self
1103  *
1104  * The cwq worker thread function.
1105  */
1106 static int worker_thread(void *__worker)
1107 {
1108         struct worker *worker = __worker;
1109         struct global_cwq *gcwq = worker->gcwq;
1110         struct cpu_workqueue_struct *cwq = worker->cwq;
1111
1112 woke_up:
1113         spin_lock_irq(&gcwq->lock);
1114
1115         /* DIE can be set only while we're idle, so checking here is enough */
1116         if (worker->flags & WORKER_DIE) {
1117                 spin_unlock_irq(&gcwq->lock);
1118                 return 0;
1119         }
1120
1121         worker_leave_idle(worker);
1122 recheck:
1123         /*
1124          * ->scheduled list can only be filled while a worker is
1125          * preparing to process a work or actually processing it.
1126          * Make sure nobody diddled with it while I was sleeping.
1127          */
1128         BUG_ON(!list_empty(&worker->scheduled));
1129
1130         while (!list_empty(&cwq->worklist)) {
1131                 struct work_struct *work =
1132                         list_first_entry(&cwq->worklist,
1133                                          struct work_struct, entry);
1134
1135                 /*
1136                  * The following is a rather inefficient way to close
1137                  * the race window against cpu hotplug operations.  Will
1138                  * be replaced soon.
1139                  */
1140                 if (unlikely(!(worker->flags & WORKER_ROGUE) &&
1141                              !cpumask_equal(&worker->task->cpus_allowed,
1142                                             get_cpu_mask(gcwq->cpu)))) {
1143                         spin_unlock_irq(&gcwq->lock);
1144                         set_cpus_allowed_ptr(worker->task,
1145                                              get_cpu_mask(gcwq->cpu));
1146                         cpu_relax();
1147                         spin_lock_irq(&gcwq->lock);
1148                         goto recheck;
1149                 }
1150
1151                 if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1152                         /* optimization path, not strictly necessary */
1153                         process_one_work(worker, work);
1154                         if (unlikely(!list_empty(&worker->scheduled)))
1155                                 process_scheduled_works(worker);
1156                 } else {
1157                         move_linked_works(work, &worker->scheduled, NULL);
1158                         process_scheduled_works(worker);
1159                 }
1160         }
1161
1162         /*
1163          * gcwq->lock is held and there's no work to process, sleep.
1164          * Workers are woken up only while holding gcwq->lock, so
1165          * setting the current state before releasing gcwq->lock is
1166          * enough to prevent losing any event.
1167          */
1168         worker_enter_idle(worker);
1169         __set_current_state(TASK_INTERRUPTIBLE);
1170         spin_unlock_irq(&gcwq->lock);
1171         schedule();
1172         goto woke_up;
1173 }
1174
1175 struct wq_barrier {
1176         struct work_struct      work;
1177         struct completion       done;
1178 };
1179
1180 static void wq_barrier_func(struct work_struct *work)
1181 {
1182         struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
1183         complete(&barr->done);
1184 }
1185
1186 /**
1187  * insert_wq_barrier - insert a barrier work
1188  * @cwq: cwq to insert barrier into
1189  * @barr: wq_barrier to insert
1190  * @target: target work to attach @barr to
1191  * @worker: worker currently executing @target, NULL if @target is not executing
1192  *
1193  * @barr is linked to @target such that @barr is completed only after
1194  * @target finishes execution.  Please note that the ordering
1195  * guarantee is observed only with respect to @target and on the local
1196  * cpu.
1197  *
1198  * Currently, a queued barrier can't be canceled.  This is because
1199  * try_to_grab_pending() can't determine whether the work to be
1200  * grabbed is at the head of the queue and thus can't clear the
1201  * LINKED flag of the previous work; a work with the LINKED flag
1202  * set must always be followed by a valid next work.
1203  *
1204  * Note that when @worker is non-NULL, @target may be modified
1205  * underneath us, so we can't reliably determine cwq from @target.
1206  *
1207  * CONTEXT:
1208  * spin_lock_irq(gcwq->lock).
1209  */
1210 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
1211                               struct wq_barrier *barr,
1212                               struct work_struct *target, struct worker *worker)
1213 {
1214         struct list_head *head;
1215         unsigned int linked = 0;
1216
1217         /*
1218          * debugobject calls are safe here even with gcwq->lock locked
1219          * as we know for sure that this will not trigger any of the
1220          * checks and call back into the fixup functions where we
1221          * might deadlock.
1222          */
1223         INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
1224         __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
1225         init_completion(&barr->done);
1226
1227         /*
1228          * If @target is currently being executed, schedule the
1229          * barrier to the worker; otherwise, put it after @target.
1230          */
1231         if (worker)
1232                 head = worker->scheduled.next;
1233         else {
1234                 unsigned long *bits = work_data_bits(target);
1235
1236                 head = target->entry.next;
1237                 /* there can already be other linked works, inherit and set */
1238                 linked = *bits & WORK_STRUCT_LINKED;
1239                 __set_bit(WORK_STRUCT_LINKED_BIT, bits);
1240         }
1241
1242         debug_work_activate(&barr->work);
1243         insert_work(cwq, &barr->work, head,
1244                     work_color_to_flags(WORK_NO_COLOR) | linked);
1245 }
1246
1247 /**
1248  * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
1249  * @wq: workqueue being flushed
1250  * @flush_color: new flush color, < 0 for no-op
1251  * @work_color: new work color, < 0 for no-op
1252  *
1253  * Prepare cwqs for workqueue flushing.
1254  *
1255  * If @flush_color is non-negative, flush_color on all cwqs should be
1256  * -1.  If no cwq has in-flight commands at the specified color, all
1257  * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
1258  * has in flight commands, its cwq->flush_color is set to
1259  * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
1260  * wakeup logic is armed and %true is returned.
1261  *
1262  * The caller should have initialized @wq->first_flusher prior to
1263  * calling this function with non-negative @flush_color.  If
1264  * @flush_color is negative, no flush color update is done and %false
1265  * is returned.
1266  *
1267  * If @work_color is non-negative, all cwqs should have the same
1268  * work_color which is previous to @work_color and all will be
1269  * advanced to @work_color.
1270  *
1271  * CONTEXT:
1272  * mutex_lock(wq->flush_mutex).
1273  *
1274  * RETURNS:
1275  * %true if @flush_color >= 0 and there's something to flush.  %false
1276  * otherwise.
1277  */
1278 static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
1279                                       int flush_color, int work_color)
1280 {
1281         bool wait = false;
1282         unsigned int cpu;
1283
1284         if (flush_color >= 0) {
1285                 BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
1286                 atomic_set(&wq->nr_cwqs_to_flush, 1);
1287         }
1288
1289         for_each_possible_cpu(cpu) {
1290                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1291                 struct global_cwq *gcwq = cwq->gcwq;
1292
1293                 spin_lock_irq(&gcwq->lock);
1294
1295                 if (flush_color >= 0) {
1296                         BUG_ON(cwq->flush_color != -1);
1297
1298                         if (cwq->nr_in_flight[flush_color]) {
1299                                 cwq->flush_color = flush_color;
1300                                 atomic_inc(&wq->nr_cwqs_to_flush);
1301                                 wait = true;
1302                         }
1303                 }
1304
1305                 if (work_color >= 0) {
1306                         BUG_ON(work_color != work_next_color(cwq->work_color));
1307                         cwq->work_color = work_color;
1308                 }
1309
1310                 spin_unlock_irq(&gcwq->lock);
1311         }
1312
1313         if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
1314                 complete(&wq->first_flusher->done);
1315
1316         return wait;
1317 }
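/*
 * A short trace of how a flush proceeds through the colors (numbers
 * are illustrative):
 *
 *   initial:          wq->work_color == wq->flush_color == 0
 *   flusher arrives:  flush_workqueue() makes it the first flusher
 *                     with flush_color 0, advances wq->work_color to 1
 *                     and, via this function, sets cwq->flush_color to
 *                     0 on every cwq which still has color-0 works
 *   draining:         newly queued works carry color 1; as the last
 *                     color-0 work of a cwq retires,
 *                     cwq_dec_nr_in_flight() decrements
 *                     nr_cwqs_to_flush and the final decrement
 *                     completes first_flusher->done
 *   cascade:          the woken flusher advances wq->flush_color to 1
 *                     and repeats the cycle for any queued flushers
 */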
1318
1319 /**
1320  * flush_workqueue - ensure that any scheduled work has run to completion.
1321  * @wq: workqueue to flush
1322  *
1323  * Forces execution of the workqueue and blocks until its completion.
1324  * This is typically used in driver shutdown handlers.
1325  *
1326  * We sleep until all works which were queued on entry have been handled,
1327  * but we are not livelocked by new incoming ones.
1328  */
1329 void flush_workqueue(struct workqueue_struct *wq)
1330 {
1331         struct wq_flusher this_flusher = {
1332                 .list = LIST_HEAD_INIT(this_flusher.list),
1333                 .flush_color = -1,
1334                 .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
1335         };
1336         int next_color;
1337
1338         lock_map_acquire(&wq->lockdep_map);
1339         lock_map_release(&wq->lockdep_map);
1340
1341         mutex_lock(&wq->flush_mutex);
1342
1343         /*
1344          * Start-to-wait phase
1345          */
1346         next_color = work_next_color(wq->work_color);
1347
1348         if (next_color != wq->flush_color) {
1349                 /*
1350                  * Color space is not full.  The current work_color
1351                  * becomes our flush_color and work_color is advanced
1352                  * by one.
1353                  */
1354                 BUG_ON(!list_empty(&wq->flusher_overflow));
1355                 this_flusher.flush_color = wq->work_color;
1356                 wq->work_color = next_color;
1357
1358                 if (!wq->first_flusher) {
1359                         /* no flush in progress, become the first flusher */
1360                         BUG_ON(wq->flush_color != this_flusher.flush_color);
1361
1362                         wq->first_flusher = &this_flusher;
1363
1364                         if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
1365                                                        wq->work_color)) {
1366                                 /* nothing to flush, done */
1367                                 wq->flush_color = next_color;
1368                                 wq->first_flusher = NULL;
1369                                 goto out_unlock;
1370                         }
1371                 } else {
1372                         /* wait in queue */
1373                         BUG_ON(wq->flush_color == this_flusher.flush_color);
1374                         list_add_tail(&this_flusher.list, &wq->flusher_queue);
1375                         flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
1376                 }
1377         } else {
1378                 /*
1379                  * Oops, color space is full, wait on overflow queue.
1380                  * The next flush completion will assign us
1381                  * flush_color and transfer to flusher_queue.
1382                  */
1383                 list_add_tail(&this_flusher.list, &wq->flusher_overflow);
1384         }
1385
1386         mutex_unlock(&wq->flush_mutex);
1387
1388         wait_for_completion(&this_flusher.done);
1389
1390         /*
1391          * Wake-up-and-cascade phase
1392          *
1393          * First flushers are responsible for cascading flushes and
1394          * handling overflow.  Non-first flushers can simply return.
1395          */
1396         if (wq->first_flusher != &this_flusher)
1397                 return;
1398
1399         mutex_lock(&wq->flush_mutex);
1400
1401         wq->first_flusher = NULL;
1402
1403         BUG_ON(!list_empty(&this_flusher.list));
1404         BUG_ON(wq->flush_color != this_flusher.flush_color);
1405
1406         while (true) {
1407                 struct wq_flusher *next, *tmp;
1408
1409                 /* complete all the flushers sharing the current flush color */
1410                 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
1411                         if (next->flush_color != wq->flush_color)
1412                                 break;
1413                         list_del_init(&next->list);
1414                         complete(&next->done);
1415                 }
1416
1417                 BUG_ON(!list_empty(&wq->flusher_overflow) &&
1418                        wq->flush_color != work_next_color(wq->work_color));
1419
1420                 /* this flush_color is finished, advance by one */
1421                 wq->flush_color = work_next_color(wq->flush_color);
1422
1423                 /* one color has been freed, handle overflow queue */
1424                 if (!list_empty(&wq->flusher_overflow)) {
1425                         /*
1426                          * Assign the same color to all overflowed
1427                          * flushers, advance work_color and append to
1428                          * flusher_queue.  This is the start-to-wait
1429                          * phase for these overflowed flushers.
1430                          */
1431                         list_for_each_entry(tmp, &wq->flusher_overflow, list)
1432                                 tmp->flush_color = wq->work_color;
1433
1434                         wq->work_color = work_next_color(wq->work_color);
1435
1436                         list_splice_tail_init(&wq->flusher_overflow,
1437                                               &wq->flusher_queue);
1438                         flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
1439                 }
1440
1441                 if (list_empty(&wq->flusher_queue)) {
1442                         BUG_ON(wq->flush_color != wq->work_color);
1443                         break;
1444                 }
1445
1446                 /*
1447                  * Need to flush more colors.  Make the next flusher
1448                  * the new first flusher and arm cwqs.
1449                  */
1450                 BUG_ON(wq->flush_color == wq->work_color);
1451                 BUG_ON(wq->flush_color != next->flush_color);
1452
1453                 list_del_init(&next->list);
1454                 wq->first_flusher = next;
1455
1456                 if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
1457                         break;
1458
1459                 /*
1460                  * Meh... this color is already done, clear first
1461                  * flusher and repeat cascading.
1462                  */
1463                 wq->first_flusher = NULL;
1464         }
1465
1466 out_unlock:
1467         mutex_unlock(&wq->flush_mutex);
1468 }
1469 EXPORT_SYMBOL_GPL(flush_workqueue);
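/*
 * Typical use in a driver teardown path (a sketch; the names are
 * hypothetical):
 */
#if 0	/* illustrative only */
static void my_dev_shutdown(struct my_dev *dev)
{
	/* stop sources of new work first ... */
	disable_irq(dev->irq);
	/* ... then wait for everything already queued to finish */
	flush_workqueue(dev->wq);
}
#endif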
1470
1471 /**
1472  * flush_work - block until a work_struct's callback has terminated
1473  * @work: the work which is to be flushed
1474  *
1475  * Returns false if @work has already terminated.
1476  *
1477  * It is expected that, prior to calling flush_work(), the caller has
1478  * arranged for the work to not be requeued, otherwise it doesn't make
1479  * sense to use this function.
1480  */
1481 int flush_work(struct work_struct *work)
1482 {
1483         struct worker *worker = NULL;
1484         struct global_cwq *gcwq;
1485         struct cpu_workqueue_struct *cwq;
1486         struct wq_barrier barr;
1487
1488         might_sleep();
1489         gcwq = get_work_gcwq(work);
1490         if (!gcwq)
1491                 return 0;
1492
1493         spin_lock_irq(&gcwq->lock);
1494         if (!list_empty(&work->entry)) {
1495                 /*
1496                  * See the comment near try_to_grab_pending()->smp_rmb().
1497                  * If it was re-queued to a different gcwq under us, we
1498                  * are not going to wait.
1499                  */
1500                 smp_rmb();
1501                 cwq = get_work_cwq(work);
1502                 if (unlikely(!cwq || gcwq != cwq->gcwq))
1503                         goto already_gone;
1504         } else {
1505                 worker = find_worker_executing_work(gcwq, work);
1506                 if (!worker)
1507                         goto already_gone;
1508                 cwq = worker->current_cwq;
1509         }
1510
1511         insert_wq_barrier(cwq, &barr, work, worker);
1512         spin_unlock_irq(&gcwq->lock);
1513
1514         lock_map_acquire(&cwq->wq->lockdep_map);
1515         lock_map_release(&cwq->wq->lockdep_map);
1516
1517         wait_for_completion(&barr.done);
1518         destroy_work_on_stack(&barr.work);
1519         return 1;
1520 already_gone:
1521         spin_unlock_irq(&gcwq->lock);
1522         return 0;
1523 }
1524 EXPORT_SYMBOL_GPL(flush_work);
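
/*
 * Illustrative usage sketch, not part of the original file: once the
 * caller has made sure nothing requeues the item, flush_work() waits
 * only for that one item.  my_handler and my_work are hypothetical
 * names.
 *
 *        static void my_handler(struct work_struct *work)
 *        {
 *                pr_info("deferred processing done\n");
 *        }
 *        static DECLARE_WORK(my_work, my_handler);
 *
 *        schedule_work(&my_work);
 *        ...
 *        if (!flush_work(&my_work))
 *                pr_debug("my_work had already terminated\n");
 */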
1525
1526 /*
1527  * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
1528  * so this work can't be re-armed in any way.
1529  */
1530 static int try_to_grab_pending(struct work_struct *work)
1531 {
1532         struct global_cwq *gcwq;
1533         int ret = -1;
1534
1535         if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1536                 return 0;
1537
1538         /*
1539          * The queueing is in progress, or it is already queued. Try to
1540          * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1541          */
1542         gcwq = get_work_gcwq(work);
1543         if (!gcwq)
1544                 return ret;
1545
1546         spin_lock_irq(&gcwq->lock);
1547         if (!list_empty(&work->entry)) {
1548                 /*
1549                  * This work is queued, but perhaps we locked the wrong gcwq.
1550                  * In that case we must see the new value after rmb(), see
1551                  * insert_work()->wmb().
1552                  */
1553                 smp_rmb();
1554                 if (gcwq == get_work_gcwq(work)) {
1555                         debug_work_deactivate(work);
1556                         list_del_init(&work->entry);
1557                         cwq_dec_nr_in_flight(get_work_cwq(work),
1558                                              get_work_color(work));
1559                         ret = 1;
1560                 }
1561         }
1562         spin_unlock_irq(&gcwq->lock);
1563
1564         return ret;
1565 }
1566
1567 static void wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
1568 {
1569         struct wq_barrier barr;
1570         struct worker *worker;
1571
1572         spin_lock_irq(&gcwq->lock);
1573
1574         worker = find_worker_executing_work(gcwq, work);
1575         if (unlikely(worker))
1576                 insert_wq_barrier(worker->current_cwq, &barr, work, worker);
1577
1578         spin_unlock_irq(&gcwq->lock);
1579
1580         if (unlikely(worker)) {
1581                 wait_for_completion(&barr.done);
1582                 destroy_work_on_stack(&barr.work);
1583         }
1584 }
1585
1586 static void wait_on_work(struct work_struct *work)
1587 {
1588         int cpu;
1589
1590         might_sleep();
1591
1592         lock_map_acquire(&work->lockdep_map);
1593         lock_map_release(&work->lockdep_map);
1594
1595         for_each_possible_cpu(cpu)
1596                 wait_on_cpu_work(get_gcwq(cpu), work);
1597 }
1598
1599 static int __cancel_work_timer(struct work_struct *work,
1600                                 struct timer_list* timer)
1601 {
1602         int ret;
1603
1604         do {
1605                 ret = (timer && likely(del_timer(timer)));
1606                 if (!ret)
1607                         ret = try_to_grab_pending(work);
1608                 wait_on_work(work);
1609         } while (unlikely(ret < 0));
1610
1611         clear_work_data(work);
1612         return ret;
1613 }
1614
1615 /**
1616  * cancel_work_sync - block until a work_struct's callback has terminated
1617  * @work: the work which is to be flushed
1618  *
1619  * Returns true if @work was pending.
1620  *
1621  * cancel_work_sync() will cancel the work if it is queued. If the work's
1622  * callback appears to be running, cancel_work_sync() will block until it
1623  * has completed.
1624  *
1625  * It is possible to use this function if the work re-queues itself. It can
1626  * cancel the work even if it migrates to another workqueue, however in that
1627  * case it only guarantees that work->func() has completed on the last queued
1628  * workqueue.
1629  *
1630  * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
1631  * pending, otherwise it goes into a busy-wait loop until the timer expires.
1632  *
1633  * The caller must ensure that workqueue_struct on which this work was last
1634  * queued can't be destroyed before this function returns.
1635  */
1636 int cancel_work_sync(struct work_struct *work)
1637 {
1638         return __cancel_work_timer(work, NULL);
1639 }
1640 EXPORT_SYMBOL_GPL(cancel_work_sync);
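
/*
 * Illustrative sketch, not part of the original file: the usual teardown
 * pattern pairs each queue_work()/schedule_work() site with a
 * cancel_work_sync() before the backing data is freed.  my_dev and
 * my_dev_remove are hypothetical names.
 *
 *        struct my_dev {
 *                struct work_struct irq_work;
 *        };
 *
 *        static void my_dev_remove(struct my_dev *dev)
 *        {
 *                // after this returns, irq_work is neither queued nor running
 *                cancel_work_sync(&dev->irq_work);
 *                kfree(dev);
 *        }
 */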
1641
1642 /**
1643  * cancel_delayed_work_sync - reliably kill off a delayed work.
1644  * @dwork: the delayed work struct
1645  *
1646  * Returns true if @dwork was pending.
1647  *
1648  * It is possible to use this function if @dwork rearms itself via queue_work()
1649  * or queue_delayed_work(). See also the comment for cancel_work_sync().
1650  */
1651 int cancel_delayed_work_sync(struct delayed_work *dwork)
1652 {
1653         return __cancel_work_timer(&dwork->work, &dwork->timer);
1654 }
1655 EXPORT_SYMBOL(cancel_delayed_work_sync);
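
/*
 * Illustrative sketch, not part of the original file: a self-rearming
 * poll routine can be stopped reliably with cancel_delayed_work_sync()
 * even while its timer is pending.  my_poll and my_poll_work are
 * hypothetical names.
 *
 *        static void my_poll(struct work_struct *work)
 *        {
 *                struct delayed_work *dwork = to_delayed_work(work);
 *
 *                // ... sample hardware ...
 *                schedule_delayed_work(dwork, HZ);        // rearm
 *        }
 *        static DECLARE_DELAYED_WORK(my_poll_work, my_poll);
 *
 *        schedule_delayed_work(&my_poll_work, HZ);
 *        ...
 *        cancel_delayed_work_sync(&my_poll_work);         // teardown
 */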
1656
1657 static struct workqueue_struct *keventd_wq __read_mostly;
1658
1659 /**
1660  * schedule_work - put work task in global workqueue
1661  * @work: job to be done
1662  *
1663  * Returns zero if @work was already on the kernel-global workqueue and
1664  * non-zero otherwise.
1665  *
1666  * This puts a job in the kernel-global workqueue if it was not already
1667  * queued and leaves it in the same position on the kernel-global
1668  * workqueue otherwise.
1669  */
1670 int schedule_work(struct work_struct *work)
1671 {
1672         return queue_work(keventd_wq, work);
1673 }
1674 EXPORT_SYMBOL(schedule_work);
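
/*
 * Illustrative sketch, not part of the original file: schedule_work()
 * is typically used to push work out of interrupt context into the
 * kernel-global workqueue.  my_bh_work, my_work and my_irq_handler are
 * hypothetical names.
 *
 *        static void my_bh_work(struct work_struct *work)
 *        {
 *                // runs later in process context and may sleep
 *        }
 *        static DECLARE_WORK(my_work, my_bh_work);
 *
 *        static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *        {
 *                schedule_work(&my_work);  // returns 0 if already queued
 *                return IRQ_HANDLED;
 *        }
 */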
1675
1676 /**
1677  * schedule_work_on - put work task on a specific cpu
1678  * @cpu: cpu to put the work task on
1679  * @work: job to be done
1680  *
1681  * This puts a job on a specific cpu.
1682  */
1683 int schedule_work_on(int cpu, struct work_struct *work)
1684 {
1685         return queue_work_on(cpu, keventd_wq, work);
1686 }
1687 EXPORT_SYMBOL(schedule_work_on);
1688
1689 /**
1690  * schedule_delayed_work - put work task in global workqueue after delay
1691  * @dwork: job to be done
1692  * @delay: number of jiffies to wait or 0 for immediate execution
1693  *
1694  * After waiting for a given time this puts a job in the kernel-global
1695  * workqueue.
1696  */
1697 int schedule_delayed_work(struct delayed_work *dwork,
1698                                         unsigned long delay)
1699 {
1700         return queue_delayed_work(keventd_wq, dwork, delay);
1701 }
1702 EXPORT_SYMBOL(schedule_delayed_work);
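
/*
 * Illustrative sketch, not part of the original file: delays are given
 * in jiffies, so callers usually convert from milliseconds.
 * my_retry_work is a hypothetical delayed work item.
 *
 *        // run my_retry_work roughly 500ms from now on the global workqueue
 *        schedule_delayed_work(&my_retry_work, msecs_to_jiffies(500));
 *
 *        // a delay of 0 queues it for execution as soon as possible
 *        schedule_delayed_work(&my_retry_work, 0);
 */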
1703
1704 /**
1705  * flush_delayed_work - block until a delayed_work's callback has terminated
1706  * @dwork: the delayed work which is to be flushed
1707  *
1708  * Any timeout is cancelled, and any pending work is run immediately.
1709  */
1710 void flush_delayed_work(struct delayed_work *dwork)
1711 {
1712         if (del_timer_sync(&dwork->timer)) {
1713                 __queue_work(get_cpu(), get_work_cwq(&dwork->work)->wq,
1714                              &dwork->work);
1715                 put_cpu();
1716         }
1717         flush_work(&dwork->work);
1718 }
1719 EXPORT_SYMBOL(flush_delayed_work);
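
/*
 * Illustrative sketch, not part of the original file: flush_delayed_work()
 * is useful when work that may still be sitting on its timer should run
 * right away and the caller wants to wait for it.  my_sync_work is a
 * hypothetical delayed work item.
 *
 *        schedule_delayed_work(&my_sync_work, 10 * HZ);  // lazy writeback
 *        ...
 *        flush_delayed_work(&my_sync_work);  // e.g. on suspend: do it now
 */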
1720
1721 /**
1722  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
1723  * @cpu: cpu to use
1724  * @dwork: job to be done
1725  * @delay: number of jiffies to wait
1726  *
1727  * After waiting for a given time this puts a job in the kernel-global
1728  * workqueue on the specified CPU.
1729  */
1730 int schedule_delayed_work_on(int cpu,
1731                         struct delayed_work *dwork, unsigned long delay)
1732 {
1733         return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
1734 }
1735 EXPORT_SYMBOL(schedule_delayed_work_on);
1736
1737 /**
1738  * schedule_on_each_cpu - call a function on each online CPU from keventd
1739  * @func: the function to call
1740  *
1741  * Returns zero on success.
1742  * Returns a negative errno on failure.
1743  *
1744  * schedule_on_each_cpu() is very slow.
1745  */
1746 int schedule_on_each_cpu(work_func_t func)
1747 {
1748         int cpu;
1749         int orig = -1;
1750         struct work_struct *works;
1751
1752         works = alloc_percpu(struct work_struct);
1753         if (!works)
1754                 return -ENOMEM;
1755
1756         get_online_cpus();
1757
1758         /*
1759          * When running in keventd, don't schedule a work item on
1760          * itself.  We can call the function directly because the
1761          * workqueue is already bound to this CPU, which is also faster.
1762          */
1763         if (current_is_keventd())
1764                 orig = raw_smp_processor_id();
1765
1766         for_each_online_cpu(cpu) {
1767                 struct work_struct *work = per_cpu_ptr(works, cpu);
1768
1769                 INIT_WORK(work, func);
1770                 if (cpu != orig)
1771                         schedule_work_on(cpu, work);
1772         }
1773         if (orig >= 0)
1774                 func(per_cpu_ptr(works, orig));
1775
1776         for_each_online_cpu(cpu)
1777                 flush_work(per_cpu_ptr(works, cpu));
1778
1779         put_online_cpus();
1780         free_percpu(works);
1781         return 0;
1782 }
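
/*
 * Illustrative sketch, not part of the original file: schedule_on_each_cpu()
 * runs @func once on every online CPU and only returns after all of the
 * invocations have finished.  my_flush_local_cache is a hypothetical name.
 *
 *        static void my_flush_local_cache(struct work_struct *unused)
 *        {
 *                // runs on each online CPU in keventd context
 *        }
 *
 *        int err = schedule_on_each_cpu(my_flush_local_cache);
 *        if (err)
 *                pr_err("per-cpu flush failed: %d\n", err);
 */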
1783
1784 /**
1785  * flush_scheduled_work - ensure that any scheduled work has run to completion.
1786  *
1787  * Forces execution of the kernel-global workqueue and blocks until its
1788  * completion.
1789  *
1790  * Think twice before calling this function!  It's very easy to get into
1791  * trouble if you don't take great care.  Either of the following situations
1792  * will lead to deadlock:
1793  *
1794  *      One of the work items currently on the workqueue needs to acquire
1795  *      a lock held by your code or its caller.
1796  *
1797  *      Your code is running in the context of a work routine.
1798  *
1799  * They will be detected by lockdep when they occur, but the first might not
1800  * occur very often.  It depends on what work items are on the workqueue and
1801  * what locks they need, which you have no control over.
1802  *
1803  * In most situations flushing the entire workqueue is overkill; you merely
1804  * need to know that a particular work item isn't queued and isn't running.
1805  * In such cases you should use cancel_delayed_work_sync() or
1806  * cancel_work_sync() instead.
1807  */
1808 void flush_scheduled_work(void)
1809 {
1810         flush_workqueue(keventd_wq);
1811 }
1812 EXPORT_SYMBOL(flush_scheduled_work);
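
/*
 * Illustrative sketch, not part of the original file: per the warning
 * above, a driver that only cares about its own item should cancel that
 * item instead of flushing the whole global workqueue.  my_work is a
 * hypothetical work item.
 *
 *        // prefer this ...
 *        cancel_work_sync(&my_work);
 *
 *        // ... over this, which waits for every scheduled work item and
 *        // can deadlock if one of them needs a lock the caller holds
 *        flush_scheduled_work();
 */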
1813
1814 /**
1815  * execute_in_process_context - reliably execute the routine with user context
1816  * @fn:         the function to execute
1817  * @ew:         guaranteed storage for the execute work structure (must
1818  *              be available when the work executes)
1819  *
1820  * Executes the function immediately if process context is available,
1821  * otherwise schedules the function for delayed execution.
1822  *
1823  * Returns:     0 - function was executed
1824  *              1 - function was scheduled for execution
1825  */
1826 int execute_in_process_context(work_func_t fn, struct execute_work *ew)
1827 {
1828         if (!in_interrupt()) {
1829                 fn(&ew->work);
1830                 return 0;
1831         }
1832
1833         INIT_WORK(&ew->work, fn);
1834         schedule_work(&ew->work);
1835
1836         return 1;
1837 }
1838 EXPORT_SYMBOL_GPL(execute_in_process_context);
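
/*
 * Illustrative sketch, not part of the original file: the execute_work
 * storage must outlive a possible deferred run, so it normally lives in
 * a longer-lived object rather than on the stack.  my_obj and my_release
 * are hypothetical names.
 *
 *        struct my_obj {
 *                struct execute_work ew;
 *                // ...
 *        };
 *
 *        static void my_release(struct work_struct *work)
 *        {
 *                struct my_obj *obj = container_of(work, struct my_obj, ew.work);
 *
 *                kfree(obj);
 *        }
 *
 *        // runs my_release() immediately unless called from interrupt context
 *        execute_in_process_context(my_release, &obj->ew);
 */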
1839
1840 int keventd_up(void)
1841 {
1842         return keventd_wq != NULL;
1843 }
1844
1845 int current_is_keventd(void)
1846 {
1847         struct cpu_workqueue_struct *cwq;
1848         int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
1849         int ret = 0;
1850
1851         BUG_ON(!keventd_wq);
1852
1853         cwq = get_cwq(cpu, keventd_wq);
1854         if (current == cwq->worker->task)
1855                 ret = 1;
1856
1857         return ret;
1858
1859 }
1860
1861 static struct cpu_workqueue_struct *alloc_cwqs(void)
1862 {
1863         /*
1864          * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
1865          * Make sure that the alignment isn't lower than that of
1866          * unsigned long long.
1867          */
1868         const size_t size = sizeof(struct cpu_workqueue_struct);
1869         const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
1870                                    __alignof__(unsigned long long));
1871         struct cpu_workqueue_struct *cwqs;
1872 #ifndef CONFIG_SMP
1873         void *ptr;
1874
1875         /*
1876          * On UP, percpu allocator doesn't honor alignment parameter
1877          * and simply uses arch-dependent default.  Allocate enough
1878          * room to align cwq and put an extra pointer at the end
1879          * pointing back to the originally allocated pointer which
1880          * will be used for free.
1881          *
1882          * FIXME: This really belongs to UP percpu code.  Update UP
1883          * percpu code to honor alignment and remove this ugliness.
1884          */
1885         ptr = __alloc_percpu(size + align + sizeof(void *), 1);
1886         cwqs = PTR_ALIGN(ptr, align);
1887         *(void **)per_cpu_ptr(cwqs + 1, 0) = ptr;
1888 #else
1889         /* On SMP, percpu allocator can do it itself */
1890         cwqs = __alloc_percpu(size, align);
1891 #endif
1892         /* just in case, make sure it's actually aligned */
1893         BUG_ON(!IS_ALIGNED((unsigned long)cwqs, align));
1894         return cwqs;
1895 }
1896
1897 static void free_cwqs(struct cpu_workqueue_struct *cwqs)
1898 {
1899 #ifndef CONFIG_SMP
1900         /* on UP, the pointer to free is stored right after the cwq */
1901         if (cwqs)
1902                 free_percpu(*(void **)per_cpu_ptr(cwqs + 1, 0));
1903 #else
1904         free_percpu(cwqs);
1905 #endif
1906 }
1907
1908 struct workqueue_struct *__create_workqueue_key(const char *name,
1909                                                 unsigned int flags,
1910                                                 int max_active,
1911                                                 struct lock_class_key *key,
1912                                                 const char *lock_name)
1913 {
1914         struct workqueue_struct *wq;
1915         bool failed = false;
1916         unsigned int cpu;
1917
1918         max_active = clamp_val(max_active, 1, INT_MAX);
1919
1920         wq = kzalloc(sizeof(*wq), GFP_KERNEL);
1921         if (!wq)
1922                 goto err;
1923
1924         wq->cpu_wq = alloc_cwqs();
1925         if (!wq->cpu_wq)
1926                 goto err;
1927
1928         wq->flags = flags;
1929         wq->saved_max_active = max_active;
1930         mutex_init(&wq->flush_mutex);
1931         atomic_set(&wq->nr_cwqs_to_flush, 0);
1932         INIT_LIST_HEAD(&wq->flusher_queue);
1933         INIT_LIST_HEAD(&wq->flusher_overflow);
1934         wq->single_cpu = NR_CPUS;
1935
1936         wq->name = name;
1937         lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
1938         INIT_LIST_HEAD(&wq->list);
1939
1940         cpu_maps_update_begin();
1941         /*
1942          * We must initialize cwqs for each possible cpu even if we
1943          * are going to call destroy_workqueue() in the end.  Otherwise
1944          * cpu_up() can hit the uninitialized cwq once we drop the
1945          * lock.
1946          */
1947         for_each_possible_cpu(cpu) {
1948                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
1949                 struct global_cwq *gcwq = get_gcwq(cpu);
1950
1951                 BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
1952                 cwq->gcwq = gcwq;
1953                 cwq->wq = wq;
1954                 cwq->flush_color = -1;
1955                 cwq->max_active = max_active;
1956                 INIT_LIST_HEAD(&cwq->worklist);
1957                 INIT_LIST_HEAD(&cwq->delayed_works);
1958
1959                 if (failed)
1960                         continue;
1961                 cwq->worker = create_worker(cwq, cpu_online(cpu));
1962                 if (cwq->worker)
1963                         start_worker(cwq->worker);
1964                 else
1965                         failed = true;
1966         }
1967
1968         /*
1969          * workqueue_lock protects global freeze state and workqueues
1970          * list.  Grab it, set max_active accordingly and add the new
1971          * workqueue to workqueues list.
1972          */
1973         spin_lock(&workqueue_lock);
1974
1975         if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
1976                 for_each_possible_cpu(cpu)
1977                         get_cwq(cpu, wq)->max_active = 0;
1978
1979         list_add(&wq->list, &workqueues);
1980
1981         spin_unlock(&workqueue_lock);
1982
1983         cpu_maps_update_done();
1984
1985         if (failed) {
1986                 destroy_workqueue(wq);
1987                 wq = NULL;
1988         }
1989         return wq;
1990 err:
1991         if (wq) {
1992                 free_cwqs(wq->cpu_wq);
1993                 kfree(wq);
1994         }
1995         return NULL;
1996 }
1997 EXPORT_SYMBOL_GPL(__create_workqueue_key);
1998
1999 /**
2000  * destroy_workqueue - safely terminate a workqueue
2001  * @wq: target workqueue
2002  *
2003  * Safely destroy a workqueue. All work currently pending will be done first.
2004  */
2005 void destroy_workqueue(struct workqueue_struct *wq)
2006 {
2007         unsigned int cpu;
2008
2009         flush_workqueue(wq);
2010
2011         /*
2012          * The wq list is used to freeze the wq.  Remove it from the
2013          * list only after flushing is complete in case a freeze races us.
2014          */
2015         cpu_maps_update_begin();
2016         spin_lock(&workqueue_lock);
2017         list_del(&wq->list);
2018         spin_unlock(&workqueue_lock);
2019         cpu_maps_update_done();
2020
2021         for_each_possible_cpu(cpu) {
2022                 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2023                 int i;
2024
2025                 if (cwq->worker) {
2026                         spin_lock_irq(&cwq->gcwq->lock);
2027                         destroy_worker(cwq->worker);
2028                         cwq->worker = NULL;
2029                         spin_unlock_irq(&cwq->gcwq->lock);
2030                 }
2031
2032                 for (i = 0; i < WORK_NR_COLORS; i++)
2033                         BUG_ON(cwq->nr_in_flight[i]);
2034                 BUG_ON(cwq->nr_active);
2035                 BUG_ON(!list_empty(&cwq->delayed_works));
2036         }
2037
2038         free_cwqs(wq->cpu_wq);
2039         kfree(wq);
2040 }
2041 EXPORT_SYMBOL_GPL(destroy_workqueue);
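
/*
 * Illustrative sketch, not part of the original file: the usual lifetime
 * of a private workqueue created with create_workqueue().  my_wq and
 * my_work are hypothetical names.
 *
 *        static struct workqueue_struct *my_wq;
 *
 *        // e.g. in the driver's init path
 *        my_wq = create_workqueue("my_wq");
 *        if (!my_wq)
 *                return -ENOMEM;
 *
 *        queue_work(my_wq, &my_work);
 *        ...
 *        // waits for pending work, then frees all per-cpu resources
 *        destroy_workqueue(my_wq);
 */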
2042
2043 /*
2044  * CPU hotplug.
2045  *
2046  * CPU hotplug is implemented by allowing cwqs to be detached from
2047  * CPU, running with unbound workers and allowing them to be
2048  * reattached later if the cpu comes back online.  A separate thread
2049  * is created to govern cwqs in such state and is called the trustee.
2050  *
2051  * Trustee states and their descriptions.
2052  *
2053  * START        Command state used on startup.  On CPU_DOWN_PREPARE, a
2054  *              new trustee is started with this state.
2055  *
2056  * IN_CHARGE    Once started, trustee will enter this state after
2057  *              making all existing workers rogue.  DOWN_PREPARE waits
2058  *              for trustee to enter this state.  After reaching
2059  *              IN_CHARGE, trustee tries to execute the pending
2060  *              worklist until it's empty and the state is set to
2061  *              BUTCHER, or the state is set to RELEASE.
2062  *
2063  * BUTCHER      Command state which is set by the cpu callback after
2064  *              the cpu has gone down.  Once this state is set, trustee
2065  *              knows that there will be no new works on the worklist
2066  *              and once the worklist is empty it can proceed to
2067  *              killing idle workers.
2068  *
2069  * RELEASE      Command state which is set by the cpu callback if the
2070  *              cpu down has been canceled or it has come online
2071  *              again.  After recognizing this state, trustee stops
2072  *              trying to drain or butcher and transits to DONE.
2073  *
2074  * DONE         Trustee will enter this state after BUTCHER or RELEASE
2075  *              is complete.
2076  *
2077  *          trustee                 CPU                draining
2078  *         took over                down               complete
2079  * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
2080  *                        |                     |                  ^
2081  *                        | CPU is back online  v   return workers |
2082  *                         ----------------> RELEASE --------------
2083  */
2084
2085 /**
2086  * trustee_wait_event_timeout - timed event wait for trustee
2087  * @cond: condition to wait for
2088  * @timeout: timeout in jiffies
2089  *
2090  * wait_event_timeout() for trustee to use.  Handles locking and
2091  * checks for RELEASE request.
2092  *
2093  * CONTEXT:
2094  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2095  * multiple times.  To be used by trustee.
2096  *
2097  * RETURNS:
2098  * Positive value indicating the time left if @cond is satisfied,
2099  * 0 if timed out, -1 if canceled.
2100  */
2101 #define trustee_wait_event_timeout(cond, timeout) ({                    \
2102         long __ret = (timeout);                                         \
2103         while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
2104                __ret) {                                                 \
2105                 spin_unlock_irq(&gcwq->lock);                           \
2106                 __wait_event_timeout(gcwq->trustee_wait, (cond) ||      \
2107                         (gcwq->trustee_state == TRUSTEE_RELEASE),       \
2108                         __ret);                                         \
2109                 spin_lock_irq(&gcwq->lock);                             \
2110         }                                                               \
2111         gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);          \
2112 })
2113
2114 /**
2115  * trustee_wait_event - event wait for trustee
2116  * @cond: condition to wait for
2117  *
2118  * wait_event() for trustee to use.  Automatically handles locking and
2119  * checks for CANCEL request.
2120  *
2121  * CONTEXT:
2122  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2123  * multiple times.  To be used by trustee.
2124  *
2125  * RETURNS:
2126  * 0 if @cond is satisfied, -1 if canceled.
2127  */
2128 #define trustee_wait_event(cond) ({                                     \
2129         long __ret1;                                                    \
2130         __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
2131         __ret1 < 0 ? -1 : 0;                                            \
2132 })
2133
2134 static int __cpuinit trustee_thread(void *__gcwq)
2135 {
2136         struct global_cwq *gcwq = __gcwq;
2137         struct worker *worker;
2138         struct hlist_node *pos;
2139         int i;
2140
2141         BUG_ON(gcwq->cpu != smp_processor_id());
2142
2143         spin_lock_irq(&gcwq->lock);
2144         /*
2145          * Make all workers rogue.  Trustee must be bound to the
2146          * target cpu and can't be cancelled.
2147          */
2148         BUG_ON(gcwq->cpu != smp_processor_id());
2149
2150         list_for_each_entry(worker, &gcwq->idle_list, entry)
2151                 worker->flags |= WORKER_ROGUE;
2152
2153         for_each_busy_worker(worker, i, pos, gcwq)
2154                 worker->flags |= WORKER_ROGUE;
2155
2156         /*
2157          * We're now in charge.  Notify and proceed to drain.  We need
2158          * to keep the gcwq running during the whole CPU down
2159          * procedure as other cpu hotunplug callbacks may need to
2160          * flush currently running tasks.
2161          */
2162         gcwq->trustee_state = TRUSTEE_IN_CHARGE;
2163         wake_up_all(&gcwq->trustee_wait);
2164
2165         /*
2166          * The original cpu is in the process of dying and may go away
2167          * anytime now.  When that happens, we and all workers would
2168          * be migrated to other cpus.  Try draining any left work.
2169          * Note that if the gcwq is frozen, there may be frozen works
2170          * in freezeable cwqs.  Don't declare completion while frozen.
2171          */
2172         while (gcwq->nr_workers != gcwq->nr_idle ||
2173                gcwq->flags & GCWQ_FREEZING ||
2174                gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
2175                 /* give a breather */
2176                 if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
2177                         break;
2178         }
2179
2180         /* notify completion */
2181         gcwq->trustee = NULL;
2182         gcwq->trustee_state = TRUSTEE_DONE;
2183         wake_up_all(&gcwq->trustee_wait);
2184         spin_unlock_irq(&gcwq->lock);
2185         return 0;
2186 }
2187
2188 /**
2189  * wait_trustee_state - wait for trustee to enter the specified state
2190  * @gcwq: gcwq the trustee of interest belongs to
2191  * @state: target state to wait for
2192  *
2193  * Wait for the trustee to reach @state.  DONE is already matched.
2194  *
2195  * CONTEXT:
2196  * spin_lock_irq(gcwq->lock) which may be released and regrabbed
2197  * multiple times.  To be used by cpu_callback.
2198  */
2199 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
2200 {
2201         if (!(gcwq->trustee_state == state ||
2202               gcwq->trustee_state == TRUSTEE_DONE)) {
2203                 spin_unlock_irq(&gcwq->lock);
2204                 __wait_event(gcwq->trustee_wait,
2205                              gcwq->trustee_state == state ||
2206                              gcwq->trustee_state == TRUSTEE_DONE);
2207                 spin_lock_irq(&gcwq->lock);
2208         }
2209 }
2210
2211 static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
2212                                                 unsigned long action,
2213                                                 void *hcpu)
2214 {
2215         unsigned int cpu = (unsigned long)hcpu;
2216         struct global_cwq *gcwq = get_gcwq(cpu);
2217         struct task_struct *new_trustee = NULL;
2218         struct worker *worker;
2219         struct hlist_node *pos;
2220         unsigned long flags;
2221         int i;
2222
2223         action &= ~CPU_TASKS_FROZEN;
2224
2225         switch (action) {
2226         case CPU_DOWN_PREPARE:
2227                 new_trustee = kthread_create(trustee_thread, gcwq,
2228                                              "workqueue_trustee/%d\n", cpu);
2229                 if (IS_ERR(new_trustee))
2230                         return notifier_from_errno(PTR_ERR(new_trustee));
2231                 kthread_bind(new_trustee, cpu);
2232         }
2233
2234         /* some are called w/ irq disabled, don't disturb irq status */
2235         spin_lock_irqsave(&gcwq->lock, flags);
2236
2237         switch (action) {
2238         case CPU_DOWN_PREPARE:
2239                 /* initialize trustee and tell it to acquire the gcwq */
2240                 BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
2241                 gcwq->trustee = new_trustee;
2242                 gcwq->trustee_state = TRUSTEE_START;
2243                 wake_up_process(gcwq->trustee);
2244                 wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
2245                 break;
2246
2247         case CPU_POST_DEAD:
2248                 gcwq->trustee_state = TRUSTEE_BUTCHER;
2249                 break;
2250
2251         case CPU_DOWN_FAILED:
2252         case CPU_ONLINE:
2253                 if (gcwq->trustee_state != TRUSTEE_DONE) {
2254                         gcwq->trustee_state = TRUSTEE_RELEASE;
2255                         wake_up_process(gcwq->trustee);
2256                         wait_trustee_state(gcwq, TRUSTEE_DONE);
2257                 }
2258
2259                 /* clear ROGUE from all workers */
2260                 list_for_each_entry(worker, &gcwq->idle_list, entry)
2261                         worker->flags &= ~WORKER_ROGUE;
2262
2263                 for_each_busy_worker(worker, i, pos, gcwq)
2264                         worker->flags &= ~WORKER_ROGUE;
2265                 break;
2266         }
2267
2268         spin_unlock_irqrestore(&gcwq->lock, flags);
2269
2270         return notifier_from_errno(0);
2271 }
2272
2273 #ifdef CONFIG_SMP
2274
2275 struct work_for_cpu {
2276         struct completion completion;
2277         long (*fn)(void *);
2278         void *arg;
2279         long ret;
2280 };
2281
2282 static int do_work_for_cpu(void *_wfc)
2283 {
2284         struct work_for_cpu *wfc = _wfc;
2285         wfc->ret = wfc->fn(wfc->arg);
2286         complete(&wfc->completion);
2287         return 0;
2288 }
2289
2290 /**
2291  * work_on_cpu - run a function in user context on a particular cpu
2292  * @cpu: the cpu to run on
2293  * @fn: the function to run
2294  * @arg: the function arg
2295  *
2296  * This will return the value @fn returns.
2297  * It is up to the caller to ensure that the cpu doesn't go offline.
2298  * The caller must not hold any locks which would prevent @fn from completing.
2299  */
2300 long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
2301 {
2302         struct task_struct *sub_thread;
2303         struct work_for_cpu wfc = {
2304                 .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
2305                 .fn = fn,
2306                 .arg = arg,
2307         };
2308
2309         sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
2310         if (IS_ERR(sub_thread))
2311                 return PTR_ERR(sub_thread);
2312         kthread_bind(sub_thread, cpu);
2313         wake_up_process(sub_thread);
2314         wait_for_completion(&wfc.completion);
2315         return wfc.ret;
2316 }
2317 EXPORT_SYMBOL_GPL(work_on_cpu);
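
/*
 * Illustrative sketch, not part of the original file: work_on_cpu() is
 * synchronous, so the caller typically pins hotplug with
 * get_online_cpus() for the duration.  my_read_msr is a hypothetical
 * name.
 *
 *        static long my_read_msr(void *arg)
 *        {
 *                // runs in a kthread bound to the requested CPU
 *                return 0;
 *        }
 *
 *        get_online_cpus();
 *        if (cpu_online(cpu))
 *                ret = work_on_cpu(cpu, my_read_msr, NULL);
 *        put_online_cpus();
 */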
2318 #endif /* CONFIG_SMP */
2319
2320 #ifdef CONFIG_FREEZER
2321
2322 /**
2323  * freeze_workqueues_begin - begin freezing workqueues
2324  *
2325  * Start freezing workqueues.  After this function returns, all
2326  * freezeable workqueues will queue new works on their cwq's
2327  * delayed_works list instead of the worklist.
2328  *
2329  * CONTEXT:
2330  * Grabs and releases workqueue_lock and gcwq->lock's.
2331  */
2332 void freeze_workqueues_begin(void)
2333 {
2334         struct workqueue_struct *wq;
2335         unsigned int cpu;
2336
2337         spin_lock(&workqueue_lock);
2338
2339         BUG_ON(workqueue_freezing);
2340         workqueue_freezing = true;
2341
2342         for_each_possible_cpu(cpu) {
2343                 struct global_cwq *gcwq = get_gcwq(cpu);
2344
2345                 spin_lock_irq(&gcwq->lock);
2346
2347                 BUG_ON(gcwq->flags & GCWQ_FREEZING);
2348                 gcwq->flags |= GCWQ_FREEZING;
2349
2350                 list_for_each_entry(wq, &workqueues, list) {
2351                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2352
2353                         if (wq->flags & WQ_FREEZEABLE)
2354                                 cwq->max_active = 0;
2355                 }
2356
2357                 spin_unlock_irq(&gcwq->lock);
2358         }
2359
2360         spin_unlock(&workqueue_lock);
2361 }
2362
2363 /**
2364  * freeze_workqueues_busy - are freezeable workqueues still busy?
2365  *
2366  * Check whether freezing is complete.  This function must be called
2367  * between freeze_workqueues_begin() and thaw_workqueues().
2368  *
2369  * CONTEXT:
2370  * Grabs and releases workqueue_lock.
2371  *
2372  * RETURNS:
2373  * %true if some freezeable workqueues are still busy.  %false if
2374  * freezing is complete.
2375  */
2376 bool freeze_workqueues_busy(void)
2377 {
2378         struct workqueue_struct *wq;
2379         unsigned int cpu;
2380         bool busy = false;
2381
2382         spin_lock(&workqueue_lock);
2383
2384         BUG_ON(!workqueue_freezing);
2385
2386         for_each_possible_cpu(cpu) {
2387                 /*
2388                  * nr_active is monotonically decreasing.  It's safe
2389                  * to peek without lock.
2390                  */
2391                 list_for_each_entry(wq, &workqueues, list) {
2392                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2393
2394                         if (!(wq->flags & WQ_FREEZEABLE))
2395                                 continue;
2396
2397                         BUG_ON(cwq->nr_active < 0);
2398                         if (cwq->nr_active) {
2399                                 busy = true;
2400                                 goto out_unlock;
2401                         }
2402                 }
2403         }
2404 out_unlock:
2405         spin_unlock(&workqueue_lock);
2406         return busy;
2407 }
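
/*
 * Illustrative sketch, not part of the original file: the freezer is
 * expected to drive the freeze/thaw entry points in this file roughly
 * like this (simplified, without the real timeout and abort handling).
 *
 *        freeze_workqueues_begin();
 *
 *        // after tasks are frozen, wait for in-flight works to drain
 *        while (freeze_workqueues_busy())
 *                msleep(10);
 *
 *        // ... system is frozen: suspend, hibernate, etc. ...
 *
 *        thaw_workqueues();
 */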
2408
2409 /**
2410  * thaw_workqueues - thaw workqueues
2411  *
2412  * Thaw workqueues.  Normal queueing is restored and all collected
2413  * frozen works are transferred to their respective cwq worklists.
2414  *
2415  * CONTEXT:
2416  * Grabs and releases workqueue_lock and gcwq->lock's.
2417  */
2418 void thaw_workqueues(void)
2419 {
2420         struct workqueue_struct *wq;
2421         unsigned int cpu;
2422
2423         spin_lock(&workqueue_lock);
2424
2425         if (!workqueue_freezing)
2426                 goto out_unlock;
2427
2428         for_each_possible_cpu(cpu) {
2429                 struct global_cwq *gcwq = get_gcwq(cpu);
2430
2431                 spin_lock_irq(&gcwq->lock);
2432
2433                 BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
2434                 gcwq->flags &= ~GCWQ_FREEZING;
2435
2436                 list_for_each_entry(wq, &workqueues, list) {
2437                         struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2438
2439                         if (!(wq->flags & WQ_FREEZEABLE))
2440                                 continue;
2441
2442                         /* restore max_active and repopulate worklist */
2443                         cwq->max_active = wq->saved_max_active;
2444
2445                         while (!list_empty(&cwq->delayed_works) &&
2446                                cwq->nr_active < cwq->max_active)
2447                                 cwq_activate_first_delayed(cwq);
2448
2449                         /* perform delayed unbind from single cpu if empty */
2450                         if (wq->single_cpu == gcwq->cpu &&
2451                             !cwq->nr_active && list_empty(&cwq->delayed_works))
2452                                 cwq_unbind_single_cpu(cwq);
2453
2454                         wake_up_process(cwq->worker->task);
2455                 }
2456
2457                 spin_unlock_irq(&gcwq->lock);
2458         }
2459
2460         workqueue_freezing = false;
2461 out_unlock:
2462         spin_unlock(&workqueue_lock);
2463 }
2464 #endif /* CONFIG_FREEZER */
2465
2466 void __init init_workqueues(void)
2467 {
2468         unsigned int cpu;
2469         int i;
2470
2471         /*
2472          * The pointer part of work->data is either pointing to the
2473          * cwq or contains the cpu number the work ran last on.  Make
2474          * sure cpu number won't overflow into kernel pointer area so
2475          * that they can be distinguished.
2476          */
2477         BUILD_BUG_ON(NR_CPUS << WORK_STRUCT_FLAG_BITS >= PAGE_OFFSET);
2478
2479         hotcpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
2480
2481         /* initialize gcwqs */
2482         for_each_possible_cpu(cpu) {
2483                 struct global_cwq *gcwq = get_gcwq(cpu);
2484
2485                 spin_lock_init(&gcwq->lock);
2486                 gcwq->cpu = cpu;
2487
2488                 INIT_LIST_HEAD(&gcwq->idle_list);
2489                 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
2490                         INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
2491
2492                 ida_init(&gcwq->worker_ida);
2493
2494                 gcwq->trustee_state = TRUSTEE_DONE;
2495                 init_waitqueue_head(&gcwq->trustee_wait);
2496         }
2497
2498         keventd_wq = create_workqueue("events");
2499         BUG_ON(!keventd_wq);
2500 }