/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@redhat.com>
 *   Andrew Morton <andrewm@uow.edu.au>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/kthread.h>

/*
 * The per-CPU workqueue.
 *
 * The sequence counters are for flush_scheduled_work().  It wants to wait
 * until all currently-scheduled works are completed, but it doesn't want
 * to be livelocked by new, incoming ones.  So it waits until
 * remove_sequence is >= the insert_sequence which pertained when
 * flush_scheduled_work() was called.
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	long remove_sequence;	/* Least-recently added (next to run) */
	long insert_sequence;	/* Next to add */

	struct list_head worklist;
	wait_queue_head_t more_work;
	wait_queue_head_t work_done;

	struct workqueue_struct *wq;
	struct task_struct *thread;

} ____cacheline_aligned;

/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	struct cpu_workqueue_struct cpu_wq[NR_CPUS];
};

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq,
			 struct work_struct *work)
{
	unsigned long flags;

	spin_lock_irqsave(&cwq->lock, flags);
	work->wq_data = cwq;
	list_add_tail(&work->entry, &cwq->worklist);
	cwq->insert_sequence++;
	wake_up(&cwq->more_work);
	spin_unlock_irqrestore(&cwq->lock, flags);
}

/*
 * Queue work on a workqueue. Return non-zero if it was successfully
 * added.
 *
 * We queue the work to the CPU on which it was submitted, but there is
 * no guarantee that it will be processed by that CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0, cpu = get_cpu();

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(!list_empty(&work->entry));
		__queue_work(wq->cpu_wq + cpu, work);
		ret = 1;
	}
	put_cpu();
	return ret;
}

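/*
 * Illustrative usage sketch (not part of the original file): a driver
 * declares a work_struct once, binds it to a handler, and queues it,
 * e.g. from its interrupt handler, so the heavy lifting runs later in
 * a worker thread's process context, where sleeping is allowed.  The
 * example_* names and the caller-supplied workqueue are hypothetical.
 */
static void example_handler(void *data)
{
	/* Runs in a worker thread: process context, may sleep. */
}

static DECLARE_WORK(example_work, example_handler, NULL);

static void example_submit(struct workqueue_struct *example_wq)
{
	/* Returns non-zero if newly queued, 0 if it was already pending. */
	queue_work(example_wq, &example_work);
}
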
static void delayed_work_timer_fn(unsigned long __data)
{
	struct work_struct *work = (struct work_struct *)__data;
	struct workqueue_struct *wq = work->wq_data;

	__queue_work(wq->cpu_wq + smp_processor_id(), work);
}

int queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &work->timer;

	if (!test_and_set_bit(0, &work->pending)) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		/* This stores wq for the moment, for the timer_fn */
		work->wq_data = wq;
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)work;
		timer->function = delayed_work_timer_fn;
		add_timer(timer);
		ret = 1;
	}
	return ret;
}

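/*
 * Illustrative usage sketch (not part of the original file): arming a
 * work item to run roughly one second from now.  The timer callback,
 * not the caller, does the actual queueing once the delay expires, so
 * the handler still runs in a worker thread's process context rather
 * than in timer (softirq) context.  Names are hypothetical.
 */
static DECLARE_WORK(example_poll_work, example_handler, NULL);

static void example_poll_later(struct workqueue_struct *example_wq)
{
	/* HZ jiffies == one second; returns 0 if still pending. */
	queue_delayed_work(example_wq, &example_poll_work, HZ);
}
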
static inline void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	unsigned long flags;

	/*
	 * Keep taking off work from the queue until
	 * done.
	 */
	spin_lock_irqsave(&cwq->lock, flags);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		void (*f) (void *) = work->func;
		void *data = work->data;

		list_del_init(cwq->worklist.next);
		spin_unlock_irqrestore(&cwq->lock, flags);

		BUG_ON(work->wq_data != cwq);
		clear_bit(0, &work->pending);
		f(data);

		spin_lock_irqsave(&cwq->lock, flags);
		cwq->remove_sequence++;
		wake_up(&cwq->work_done);
	}
	spin_unlock_irqrestore(&cwq->lock, flags);
}

static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	int cpu = cwq - cwq->wq->cpu_wq;
	DECLARE_WAITQUEUE(wait, current);
	struct k_sigaction sa;
	sigset_t blocked;

	current->flags |= PF_IOTHREAD;

	set_user_nice(current, -10);
	BUG_ON(smp_processor_id() != cpu);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* SIG_IGN makes children autoreap: see do_notify_parent(). */
	sa.sa.sa_handler = SIG_IGN;
	sa.sa.sa_flags = 0;
	siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
	do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0);

	while (!kthread_should_stop()) {
		set_task_state(current, TASK_INTERRUPTIBLE);

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		else
			set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&cwq->more_work, &wait);

		if (!list_empty(&cwq->worklist))
			run_workqueue(cwq);
	}
	return 0;
}

/*
 * flush_workqueue - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * This function will sample each workqueue's current insert_sequence
 * number and will sleep until the remove_sequence is greater than or
 * equal to that.  This means that we sleep until all works which were
 * queued on entry have been handled, but we are not livelocked by new
 * incoming ones.
 *
 * This function used to run the workqueues itself.  Now we just wait for
 * the helper threads to do it.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	struct cpu_workqueue_struct *cwq;
	int cpu;

	might_sleep();

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		DEFINE_WAIT(wait);
		long sequence_needed;

		if (!cpu_online(cpu))
			continue;
		cwq = wq->cpu_wq + cpu;

		spin_lock_irq(&cwq->lock);
		sequence_needed = cwq->insert_sequence;

		while (sequence_needed - cwq->remove_sequence > 0) {
			prepare_to_wait(&cwq->work_done, &wait,
					TASK_UNINTERRUPTIBLE);
			spin_unlock_irq(&cwq->lock);
			schedule();
			spin_lock_irq(&cwq->lock);
		}
		finish_wait(&cwq->work_done, &wait);
		spin_unlock_irq(&cwq->lock);
	}
}

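/*
 * Illustrative usage sketch (not part of the original file): a driver
 * shutdown path.  Once the source of new work has been quiesced (for
 * example by disabling the device's interrupt), flush_workqueue() is
 * the barrier guaranteeing that every previously queued handler has
 * finished before shared state is torn down.  Names are hypothetical.
 */
static void example_shutdown(struct workqueue_struct *example_wq)
{
	/* ...stop queueing new work first (e.g. disable the irq)... */
	flush_workqueue(example_wq);
	/* ...now it is safe to free the data the handlers were using. */
}
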
static int create_workqueue_thread(struct workqueue_struct *wq,
				   const char *name,
				   int cpu)
{
	struct cpu_workqueue_struct *cwq = wq->cpu_wq + cpu;
	struct task_struct *p;

	spin_lock_init(&cwq->lock);
	cwq->wq = wq;
	cwq->thread = NULL;
	cwq->insert_sequence = 0;
	cwq->remove_sequence = 0;
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);
	init_waitqueue_head(&cwq->work_done);

	p = kthread_create(worker_thread, cwq, "%s/%d", name, cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	cwq->thread = p;
	kthread_bind(p, cpu);
	return 0;
}

struct workqueue_struct *create_workqueue(const char *name)
{
	int cpu, destroy = 0;
	struct workqueue_struct *wq;

	BUG_ON(strlen(name) > 10);

	wq = kmalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!cpu_online(cpu))
			continue;
		if (create_workqueue_thread(wq, name, cpu) < 0)
			destroy = 1;
		else
			wake_up_process(wq->cpu_wq[cpu].thread);
	}
	/*
	 * Was there any error during startup? If yes then clean up:
	 */
	if (destroy) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}

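/*
 * Illustrative usage sketch (not part of the original file): creating
 * a dedicated workqueue at module init time.  create_workqueue()
 * returns NULL on failure, so callers must check before queueing; the
 * name must stay short (at most 10 characters, see the BUG_ON above)
 * because it becomes the "name/cpu" thread name.  Names here are
 * hypothetical.
 */
static struct workqueue_struct *example_module_wq;

static int example_module_init(void)
{
	example_module_wq = create_workqueue("example");
	if (!example_module_wq)
		return -ENOMEM;
	return 0;
}
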
static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq;

	cwq = wq->cpu_wq + cpu;
	if (cwq->thread)
		kthread_stop(cwq->thread);
}

void destroy_workqueue(struct workqueue_struct *wq)
{
	int cpu;

	flush_workqueue(wq);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (cpu_online(cpu))
			cleanup_workqueue_thread(wq, cpu);
	}
	kfree(wq);
}

static struct workqueue_struct *keventd_wq;

int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}

int schedule_delayed_work(struct work_struct *work, unsigned long delay)
{
	return queue_delayed_work(keventd_wq, work, delay);
}

void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}

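/*
 * Illustrative usage sketch (not part of the original file): most code
 * borrows keventd's workqueue through the schedule_*() wrappers rather
 * than creating its own.  Note that flush_scheduled_work() only waits
 * for works already on the queue; a delayed work whose timer has not
 * fired yet is on no list and is not waited for.  Names (and the
 * example_work item from the sketch above) are hypothetical.
 */
static void example_use_keventd(void)
{
	schedule_work(&example_work);	/* run soon in a keventd thread */
	flush_scheduled_work();		/* wait for it (and its peers) */
}
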
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = smp_processor_id();	/* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = keventd_wq->cpu_wq + cpu;
	if (current == cwq->thread)
		ret = 1;

	return ret;
}

void init_workqueues(void)
{
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}

EXPORT_SYMBOL_GPL(create_workqueue);
EXPORT_SYMBOL_GPL(queue_work);
EXPORT_SYMBOL_GPL(queue_delayed_work);
EXPORT_SYMBOL_GPL(flush_workqueue);
EXPORT_SYMBOL_GPL(destroy_workqueue);

EXPORT_SYMBOL(schedule_work);
EXPORT_SYMBOL(schedule_delayed_work);
EXPORT_SYMBOL(flush_scheduled_work);