/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/init.h>
/*
   - No shared variables; all the data are CPU-local.
   - If a softirq needs serialization, let it serialize itself
     with its own spinlocks.
   - Even if a softirq is serialized, only the local CPU is marked for
     execution, so we get a sort of weak CPU binding.  Whether this
     results in better locality is still unclear.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves (see the
     example sketch below).
 */
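/*
 * Example of the per-tasklet serialization described above.  This is
 * an illustrative sketch only (the handler, tasklet, and interrupt
 * routine names are hypothetical, not kernel APIs) and is kept
 * compiled out:
 */
#if 0
static void example_handler(unsigned long data)
{
	/* A tasklet never runs concurrently with itself, even on SMP,
	   so it needs no locking against other instances of itself. */
}

static DECLARE_TASKLET(example_tasklet, example_handler, 0);

static void example_interrupt(void)
{
	/* Defer the bulk of the work; it runs later in softirq context. */
	tasklet_schedule(&example_tasklet);
}
#endif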
/* No separate irq_stat for ia64, it is part of PSA */
#if !defined(CONFIG_IA64)
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
#endif	/* CONFIG_IA64 */
static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst-case 1/HZ latency
 * for the pending events, so we let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(unsigned cpu)
{
	struct task_struct *tsk = ksoftirqd_task(cpu);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}
asmlinkage void do_softirq(void)
{
	__u32 pending, mask;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending) {
		struct softirq_action *h;

		mask = ~pending;
		local_bh_disable();
restart:
		/* Reset the pending bitmask before enabling irqs */
		local_softirq_pending() = 0;

		local_irq_enable();

		h = softirq_vec;
		do {
			if (pending & 1)
				h->action(h);
			h++;
			pending >>= 1;
		} while (pending);

		local_irq_disable();

		/* Rerun softirqs raised meanwhile, but each slot at most
		   once more; anything still pending after that goes to
		   ksoftirqd instead of starving userspace. */
		pending = local_softirq_pending();
		if (pending & mask) {
			mask &= ~pending;
			goto restart;
		}
		__local_bh_enable();

		if (pending)
			wakeup_softirqd(smp_processor_id());
	}

	local_irq_restore(flags);
}
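/*
 * Sketch of the usual entry path (illustrative only; the real call
 * sites live in the per-arch interrupt-exit code, not here): on the
 * way out of a hardware interrupt, pending softirqs are run unless
 * we are still nested inside another interrupt.
 */
#if 0
static void example_irq_exit(void)
{
	if (!in_interrupt() && local_softirq_pending())
		do_softirq();
}
#endif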
void local_bh_enable(void)
{
	__local_bh_enable();
	WARN_ON(irqs_disabled());
	if (unlikely(!in_interrupt() &&
		     local_softirq_pending()))
		do_softirq();
	preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
/*
 * This function must run with irqs disabled!
 */
inline void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
{
	__cpu_raise_softirq(cpu, nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd(cpu);
}
void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	cpu_raise_softirq(smp_processor_id(), nr);
	local_irq_restore(flags);
}
void open_softirq(int nr, void (*action)(struct softirq_action *), void *data)
{
	softirq_vec[nr].data = data;
	softirq_vec[nr].action = action;
}
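/*
 * Example of wiring up and raising a softirq slot (a sketch only:
 * the action function is hypothetical, slot numbers come from the
 * fixed enum in <linux/interrupt.h>, and HI_SOFTIRQ here merely
 * stands in for whichever slot is meant):
 */
#if 0
static void example_action(struct softirq_action *h)
{
	/* Runs with irqs enabled and bottom halves disabled, on the
	   same CPU that raised the softirq. */
}

static void example_setup(void)
{
	open_softirq(HI_SOFTIRQ, example_action, NULL);	/* boot time */
}

static void example_trigger(void)
{
	raise_softirq(HI_SOFTIRQ);	/* mark pending, maybe wake ksoftirqd */
}
#endif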
/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *list;
};
/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = t;
	cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}
void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = t;
	cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ);
	local_irq_restore(flags);
}
static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).list;
	__get_cpu_var(tasklet_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Locked or disabled: requeue and rerun the softirq later */
		local_irq_disable();
		t->next = __get_cpu_var(tasklet_vec).list;
		__get_cpu_var(tasklet_vec).list = t;
		__cpu_raise_softirq(smp_processor_id(), TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}
static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).list;
	__get_cpu_var(tasklet_hi_vec).list = NULL;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		/* Locked or disabled: requeue and rerun the softirq later */
		local_irq_disable();
		t->next = __get_cpu_var(tasklet_hi_vec).list;
		__get_cpu_var(tasklet_hi_vec).list = t;
		__cpu_raise_softirq(smp_processor_id(), HI_SOFTIRQ);
		local_irq_enable();
	}
}
void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}
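/*
 * Example of run-time initialization, e.g. for a tasklet embedded in
 * a dynamically allocated per-device structure (all names here are
 * hypothetical; a static tasklet would use DECLARE_TASKLET instead):
 */
#if 0
struct example_dev {
	struct tasklet_struct rx_tasklet;
};

static void example_rx(unsigned long data)
{
	struct example_dev *dev = (struct example_dev *)data;
	/* process deferred receive work for dev here */
}

static void example_dev_init(struct example_dev *dev)
{
	tasklet_init(&dev->rx_tasklet, example_rx, (unsigned long)dev);
}
#endif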
void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}
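/*
 * Example teardown order (a sketch continuing the hypothetical
 * example_dev above): kill the tasklet from process context before
 * freeing the structure it points into.
 */
#if 0
static void example_dev_destroy(struct example_dev *dev)
{
	tasklet_kill(&dev->rx_tasklet);	/* waits out a running instance */
	kfree(dev);			/* now safe: no further callbacks */
}
#endif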
static void tasklet_init_cpu(int cpu)
{
	per_cpu(tasklet_vec, cpu).list = NULL;
	per_cpu(tasklet_hi_vec, cpu).list = NULL;
}
static int tasklet_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	if (action == CPU_UP_PREPARE)
		tasklet_init_cpu(cpu);
	return NOTIFY_OK;
}
static struct notifier_block tasklet_nb = {
	.notifier_call = tasklet_cpu_notify,
};
void __init softirq_init(void)
{
	open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
	tasklet_cpu_notify(&tasklet_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&tasklet_nb);
}
static int ksoftirqd(void *__bind_cpu)
{
	int cpu = (int) (long) __bind_cpu;

	daemonize("ksoftirqd/%d", cpu);
	set_user_nice(current, 19);
	current->flags |= PF_IOTHREAD;

	/* Migrate to the right CPU */
	set_cpus_allowed(current, 1UL << cpu);
	if (smp_processor_id() != cpu)
		BUG();

	__set_current_state(TASK_INTERRUPTIBLE);
	mb();

	local_ksoftirqd_task() = current;

	for (;;) {
		if (!local_softirq_pending())
			schedule();

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			do_softirq();
			cond_resched();
		}

		__set_current_state(TASK_INTERRUPTIBLE);
	}
}
static int __devinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE) {
		if (kernel_thread(ksoftirqd, hcpu, CLONE_KERNEL) < 0) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}

		/* Wait for the thread to record itself */
		while (!ksoftirqd_task(hotcpu))
			yield();
	}
	return NOTIFY_OK;
}
static struct notifier_block __devinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};
__init int spawn_ksoftirqd(void)
{
	cpu_callback(&cpu_nfb, CPU_ONLINE, (void *)(long)smp_processor_id());
	register_cpu_notifier(&cpu_nfb);
	return 0;
}