/*
 * padata.c - generic interface to process data streams in parallel
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#define MAX_SEQ_NR (INT_MAX - NR_CPUS)
#define MAX_OBJ_NUM 1000
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask);

	return target_cpu;
}
static int padata_cpu_hash(struct padata_priv *padata)
{
	int cpu_index;
	struct parallel_data *pd;

	pd = padata->pd;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
	cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);

	return padata_index_to_cpu(pd, cpu_index);
}
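/*
 * Worked example (illustrative only): with pd->cpumask = {0, 2, 3} there
 * are three cpus in use, so an object with seq_nr = 7 gets
 * cpu_index = 7 % 3 = 1, and padata_index_to_cpu() walks the mask to its
 * second set bit, hashing the object to cpu 2.
 */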
static void padata_parallel_worker(struct work_struct *work)
{
	struct padata_queue *queue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	queue = container_of(work, struct padata_queue, pwork);
	pd = queue->pd;
	pinst = pd->pinst;

	spin_lock(&queue->parallel.lock);
	list_replace_init(&queue->parallel.list, &local_list);
	spin_unlock(&queue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the cpumask of padata.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference(pinst->pd);

	err = 0;
	if (!(pinst->flags & PADATA_INIT))
		goto out;

	err = -EINVAL;
	if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = -EINPROGRESS;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
		atomic_set(&pd->seq_nr, -1);

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);

	target_cpu = padata_cpu_hash(padata);
	queue = per_cpu_ptr(pd->queue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->pwork);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
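/*
 * Usage sketch (illustrative only; my_request, my_parallel and my_serial
 * are hypothetical client-side names, not part of this file): a user
 * embeds struct padata_priv in its own request, submits it with
 * padata_do_parallel() and hands it back with padata_do_serial() once
 * the parallel part is done:
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						      struct my_request,
 *						      padata);
 *		... do the cpu intensive work ...
 *		padata_do_serial(padata);
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		... runs in submission order, on the requested cb_cpu ...
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 */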
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	int next_nr, next_index;
	struct padata_queue *queue, *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->queue, cpu);

	if (unlikely(next_nr > pd->max_seq_nr)) {
		next_nr = next_nr - pd->max_seq_nr - 1;
		next_index = next_nr % num_cpus;
		cpu = padata_index_to_cpu(pd, next_index);
		next_queue = per_cpu_ptr(pd->queue, cpu);
		pd->processed = 0;
	}

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		BUG_ON(next_nr != padata->seq_nr);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	queue = per_cpu_ptr(pd->queue, smp_processor_id());
	if (queue->cpu_index == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}
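/*
 * Illustrative walk, assuming two cpus {0, 1} in the cpumask: objects
 * with seq_nr 0, 2, 4, ... are hashed to cpu 0 and objects with seq_nr
 * 1, 3, 5, ... to cpu 1, so consecutive calls alternate between the two
 * reorder queues: pd->processed == 0 expects seq_nr 0 on cpu 0,
 * pd->processed == 1 expects seq_nr 1 on cpu 1, and so on.
 */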
static void padata_reorder(struct parallel_data *pd)
{
	struct padata_priv *padata;
	struct padata_queue *queue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		queue = per_cpu_ptr(pd->queue, padata->cb_cpu);

		spin_lock(&queue->serial.lock);
		list_add_tail(&padata->list, &queue->serial.list);
		spin_unlock(&queue->serial.lock);

		queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);
}
static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}
static void padata_serial_worker(struct work_struct *work)
{
	struct padata_queue *queue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	queue = container_of(work, struct padata_queue, swork);
	pd = queue->pd;

	spin_lock(&queue->serial.lock);
	list_replace_init(&queue->serial.list, &local_list);
	spin_unlock(&queue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_queue *queue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	queue = per_cpu_ptr(pd->queue, cpu);

	spin_lock(&queue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &queue->reorder.list);
	spin_unlock(&queue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *cpumask)
{
	int cpu, cpu_index, num_cpus;
	struct padata_queue *queue;
	struct parallel_data *pd;

	cpu_index = 0;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->queue = alloc_percpu(struct padata_queue);
	if (!pd->queue)
		goto err_free_pd;

	if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
		goto err_free_queue;

	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);

	for_each_cpu(cpu, pd->cpumask) {
		queue = per_cpu_ptr(pd->queue, cpu);

		queue->pd = pd;
		queue->cpu_index = cpu_index;
		cpu_index++;

		INIT_LIST_HEAD(&queue->reorder.list);
		INIT_LIST_HEAD(&queue->parallel.list);
		INIT_LIST_HEAD(&queue->serial.list);
		spin_lock_init(&queue->reorder.lock);
		spin_lock_init(&queue->parallel.lock);
		spin_lock_init(&queue->serial.lock);

		INIT_WORK(&queue->pwork, padata_parallel_worker);
		INIT_WORK(&queue->swork, padata_serial_worker);
	}

	num_cpus = cpumask_weight(pd->cpumask);
	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;

	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_queue:
	free_percpu(pd->queue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask);
	free_percpu(pd->queue);
	kfree(pd);
}
/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_queue *queue;

	for_each_cpu(cpu, pd->cpumask) {
		queue = per_cpu_ptr(pd->queue, cpu);
		flush_work(&queue->pwork);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask) {
		queue = per_cpu_ptr(pd->queue, cpu);
		flush_work(&queue->swork);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}
static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}
/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	/* pd_old is NULL if the instance was created with an invalid mask. */
	if (pd_old) {
		padata_flush_queues(pd_old);
		padata_free_pd(pd_old);
	}

	pinst->flags &= ~PADATA_RESET;
}
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_active_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}
/**
 * padata_set_cpumask - set the cpumask that padata should use
 *
 * @pinst: padata instance
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst,
		       cpumask_var_t cpumask)
{
	int valid;
	int err = 0;
	struct parallel_data *pd = NULL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	valid = padata_validate_cpumask(pinst, cpumask);
	if (!valid) {
		__padata_stop(pinst);
	} else {
		pd = padata_alloc_pd(pinst, cpumask);
		if (!pd) {
			err = -ENOMEM;
			goto out;
		}
	}

	cpumask_copy(pinst->cpumask, cpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
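/*
 * Example (illustrative only): restricting an instance to cpus 0 and 1.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	err = padata_set_cpumask(pinst, mask);
 *	free_cpumask_var(mask);
 */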
static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask))
			__padata_start(pinst);
	}

	return 0;
}
/**
 * padata_add_cpu - add a cpu to the padata cpumask
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	cpumask_set_cpu(cpu, pinst->cpumask);
	err = __padata_add_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask)) {
			__padata_stop(pinst);
			padata_replace(pinst, pd);
			goto out;
		}

		pd = padata_alloc_pd(pinst, pinst->cpumask);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

out:
	return 0;
}
/**
 * padata_remove_cpu - remove a cpu from the padata cpumask
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	cpumask_clear_cpu(cpu, pinst->cpumask);
	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
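/*
 * Example (illustrative only): taking cpu 3 out of an instance and
 * adding it back later.
 *
 *	err = padata_remove_cpu(pinst, 3);
 *	...
 *	err = padata_add_cpu(pinst, 3);
 */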
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;
	else
		__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);
/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
#ifdef CONFIG_HOTPLUG_CPU
static int padata_cpu_callback(struct notifier_block *nfb,
			       unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return notifier_from_errno(err);
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		__padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		break;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		__padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		break;
	}

	return NOTIFY_OK;
}
#endif
/**
 * padata_alloc - allocate and initialize a padata instance
 *
 * @cpumask: cpumask that padata uses for parallelization
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc(const struct cpumask *cpumask,
				     struct workqueue_struct *wq)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();

	if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL))
		goto err_free_inst;

	if (padata_validate_cpumask(pinst, cpumask)) {
		pd = padata_alloc_pd(pinst, cpumask);
		if (!pd)
			goto err_free_mask;
	}

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;
	cpumask_copy(pinst->cpumask, cpumask);
	pinst->flags = 0;

#ifdef CONFIG_HOTPLUG_CPU
	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	register_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	put_online_cpus();

	mutex_init(&pinst->lock);

	return pinst;

err_free_mask:
	free_cpumask_var(pinst->cpumask);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
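/*
 * Lifecycle sketch (illustrative only; "my_wq" and the error handling
 * are hypothetical):
 *
 *	struct workqueue_struct *my_wq;
 *	struct padata_instance *pinst;
 *
 *	my_wq = create_workqueue("my_padata");
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 *	pinst = padata_alloc(cpu_possible_mask, my_wq);
 *	if (!pinst)
 *		return -ENOMEM;
 *
 *	padata_start(pinst);
 *	... submit objects with padata_do_parallel() ...
 *	padata_stop(pinst);
 *	padata_free(pinst);
 *	destroy_workqueue(my_wq);
 */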
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	unregister_hotcpu_notifier(&pinst->cpu_notifier);
#endif

	padata_stop(pinst);
	if (pinst->pd)
		padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask);
	kfree(pinst);
}
EXPORT_SYMBOL(padata_free);