softirq: remove useless function __local_bh_enable
kernel/softirq.c
/*
 *      linux/kernel/softirq.c
 *
 *      Copyright (C) 1992 Linus Torvalds
 *
 *      Distribute under GPLv2.
 *
 *      Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *      Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution. Hence, we get a sort of weak cpu binding. It is still
     not clear whether this results in better locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */
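
/*
 * Illustrative sketch (not part of the original file): how a subsystem
 * typically wires up a softirq with the open_softirq()/raise_softirq()
 * API below.  A real softirq needs its own slot in the softirq enum in
 * <linux/interrupt.h>; BLOCK_SOFTIRQ is used here only as a stand-in,
 * and my_softirq_handler()/my_subsys_init() are hypothetical names.
 *
 *      static void my_softirq_handler(struct softirq_action *a)
 *      {
 *              // Runs with hardirqs enabled and bottom halves disabled,
 *              // always on the CPU that raised it.
 *      }
 *
 *      void my_subsys_init(void)
 *      {
 *              open_softirq(BLOCK_SOFTIRQ, my_softirq_handler);
 *      }
 *
 *      // Later, typically from interrupt or process context:
 *      raise_softirq(BLOCK_SOFTIRQ);
 */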

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        add_preempt_count(SOFTIRQ_OFFSET);
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
        add_preempt_count(SOFTIRQ_OFFSET);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
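
/*
 * Illustrative sketch (not part of the original file): the usual pattern
 * for the API above.  A caller that shares per-CPU state with a softirq
 * keeps bottom halves off across the critical section; local_bh_enable()
 * then runs any softirqs that became pending meanwhile.  my_stats is a
 * hypothetical per-CPU variable.
 *
 *      local_bh_disable();
 *      __get_cpu_var(my_stats).packets++;      // cannot race with softirqs
 *      local_bh_enable();                      // may call do_softirq()
 */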

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0));
        trace_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        int prev_count = preempt_count();

                        h->action(h);

                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %td %p "
                                       "with preempt_count %08x, "
                                       "exited with %08x?\n", h - softirq_vec,
                                       h->action, prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qsctr_inc(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        trace_softirq_exit();

        account_system_vtime(current);
        _local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
        } else
                __irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()       __do_softirq()
#else
# define invoke_softirq()       do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
                tick_nohz_stop_sched_tick(0);
        rcu_irq_exit();
#endif
        preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}
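
/*
 * Illustrative sketch (not part of the original file): in contexts where
 * interrupts are already off (for instance a hardirq handler registered
 * with IRQF_DISABLED), the _irqoff variant above can be used directly;
 * otherwise raise_softirq() saves and restores flags itself.  Because
 * in_interrupt() is true in a hardirq, ksoftirqd is not woken and the
 * softirq runs at irq_exit().  my_device_isr() is a hypothetical handler
 * and NET_RX_SOFTIRQ is only a stand-in.
 *
 *      static irqreturn_t my_device_isr(int irq, void *dev_id)
 *      {
 *              // ... acknowledge hardware, queue the work ...
 *              raise_softirq_irqoff(NET_RX_SOFTIRQ);   // irqs already off
 *              return IRQ_HANDLED;
 *      }
 */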

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}

/* Tasklets */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_vec).tail = t;
        __get_cpu_var(tasklet_vec).tail = &(t->next);
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_hi_vec).tail = t;
        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).head;
        __get_cpu_var(tasklet_vec).head = NULL;
        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_vec).tail = t;
                __get_cpu_var(tasklet_vec).tail = &(t->next);
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = NULL;
        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_hi_vec).tail = t;
                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}


void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
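
/*
 * Illustrative sketch (not part of the original file): a typical
 * driver-side tasklet lifecycle using the helpers above and below.
 * struct my_dev, my_tasklet_func() and the probe/remove/irq call sites
 * are hypothetical.
 *
 *      static void my_tasklet_func(unsigned long data)
 *      {
 *              struct my_dev *dev = (struct my_dev *)data;
 *              // Deferred work; runs in softirq context on this CPU.
 *      }
 *
 *      // Setup (e.g. in probe):
 *      tasklet_init(&dev->tasklet, my_tasklet_func, (unsigned long)dev);
 *
 *      // From the interrupt handler:
 *      tasklet_schedule(&dev->tasklet);
 *
 *      // Teardown (e.g. in remove), after the irq is quiesced:
 *      tasklet_kill(&dev->tasklet);
 */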

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do
                        yield();
                while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty.  */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = cp->priv;

        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = cp;
                cp->flags = 0;
                cp->priv = softirq;

                __smp_call_function_single(cpu, cp);
                return 0;
        }
        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
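
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * push completion work back to the submitting CPU with the helpers above.
 * struct my_request and my_complete() are hypothetical and BLOCK_SOFTIRQ
 * is only a stand-in; the call_single_data must stay valid until the
 * remote softirq has run, so it is embedded in the request.  The softirq's
 * action is then expected to drain its per-cpu softirq_work_list[] entry.
 *
 *      struct my_request {
 *              struct call_single_data csd;
 *              int submit_cpu;
 *              // ...
 *      };
 *
 *      static void my_complete(struct my_request *rq)
 *      {
 *              // Run the completion softirq on the CPU that submitted rq;
 *              // falls back to the local CPU if that CPU is offline.
 *              send_remote_softirq(&rq->csd, rq->submit_cpu, BLOCK_SOFTIRQ);
 *      }
 */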

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call  = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops the cpu going offline.
                           If it is already offline, we'll be on the wrong
                           CPU: don't process. */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which may already
 * be scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return NOTIFY_BAD;
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             any_online_cpu(cpu_online_map));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
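
/*
 * Illustrative sketch (not part of the original file): on_each_cpu() runs
 * the callback on every online CPU, including the caller's, and with
 * wait != 0 does not return until all of them have finished.  The callback
 * runs with interrupts disabled locally and from IPI context on remote
 * CPUs, so it must not sleep.  flush_my_percpu_cache() is a hypothetical
 * callback.
 *
 *      static void flush_my_percpu_cache(void *unused)
 *      {
 *              // Per-CPU work; keep it short and non-sleeping.
 *      }
 *
 *      on_each_cpu(flush_my_percpu_cache, NULL, 1);    // 1 == wait
 */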
#endif