/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.lock = SPIN_LOCK_UNLOCKED,
	.cpumask = CPU_MASK_NONE,
};
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.cur = -300,
	.completed = -300,
	.lock = SPIN_LOCK_UNLOCKED,
	.cpumask = CPU_MASK_NONE,
};

DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
/*
 * Set the following to 1, 3, 7, 15, ... to slow down the rate at which RCU
 * callbacks are processed. WARNING - make sure the value is 2**n-1.
 */
static int rcu_mask = 0;

/* Is it time to process a batch on this cpu? */
static inline int rcu_time(int cpu)
{
	return (((jiffies - cpu) & rcu_mask) == 0);
}
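/*
 * Example: with rcu_mask = 3, ((jiffies - cpu) & 3) == 0 holds on every
 * fourth jiffy, and the "- cpu" term staggers batch processing across
 * cpus so they do not all run their batches on the same tick. The
 * default of 0 processes a batch on every tick.
 */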
/* Fake initialization required by compiler */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_tasklet) = {NULL};
/* Tasklet for processing rcu callbacks remotely */
static DEFINE_PER_CPU(struct tasklet_struct, rcu_remote_tasklet) = {NULL};
static int blimit = 10;
static int qhimark = 10000;
static int qlowmark = 100;
#ifdef CONFIG_SMP
static int rsinterval = 1000;
#endif

static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
#ifdef CONFIG_SMP
static void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	int cpu;
	cpumask_t cpumask;

	set_need_resched();
	if (unlikely(rdp->qlen - rdp->last_rs_qlen > rsinterval)) {
		rdp->last_rs_qlen = rdp->qlen;
		/*
		 * Don't send IPI to itself. With irqs disabled,
		 * rdp->cpu is the current cpu.
		 */
		cpumask = rcp->cpumask;
		cpu_clear(rdp->cpu, cpumask);
		for_each_cpu_mask(cpu, cpumask)
			smp_send_reschedule(cpu);
	}
}
#else
static inline void force_quiescent_state(struct rcu_data *rdp,
			struct rcu_ctrlblk *rcp)
{
	set_need_resched();
}
#endif
/*
 * Variables and routines for remote rcu callback processing
 *
 * Remote callback processing allows specified (configured) cpus to have
 * their list of rcu callbacks processed by other (non-configured) cpus,
 * thus reducing the amount of overhead and latency seen by configured
 * cpus.
 *
 * This is accomplished by having non-configured cpus process the donelist
 * of the configured cpus from tasklet context. The remote cpu donelists
 * are processed in a round-robin fashion, one list per cpu. Since only
 * donelist processing is affected, other rcu list and quiescent state
 * processing is unaffected.
 *
 * Configuration of a cpu for remote callback processing is done via the
 * rcu_set_remote_rcu() and rcu_clear_remote_rcu() routines, as sketched
 * below.
 */
#define RCU_BATCH_IDLE		0
#define RCU_BATCH_LOCAL		1
#define RCU_BATCH_REMOTE	2
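/*
 * Usage sketch (illustrative only): shielding cpu 3 from local rcu
 * callback processing, e.g. from code that manages cpu isolation. The
 * error handling shown is minimal.
 *
 *	if (rcu_set_remote_rcu(3))
 *		printk(KERN_WARNING "cpu 3 not online, not shielded\n");
 *	...
 *	rcu_clear_remote_rcu(3);	// revert to local processing
 */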
#if defined(CONFIG_NUMA) && defined(CONFIG_IA64)
#define remote_rcu_callbacks	1

/* cpus configured for remote callback processing, this rarely changes */
static cpumask_t __read_mostly cpu_remotercu_map = CPU_MASK_NONE;

/* next cpu for which we need to do remote callback processing */
static int cpu_remotercu_next = -1;

/* lock protecting cpu_remotercu_next and changes to cpu_remotercu_map */
static DEFINE_SPINLOCK(cpu_remotercu_lock);
/*
 * Return a mask of online cpus configured for remote rcu processing.
 */
static void rcu_remote_cpus(cpumask_t *mask)
{
	cpus_and(*mask, cpu_remotercu_map, cpu_online_map);
}

/*
 * Is this cpu configured for remote rcu callback processing?
 */
static int rcu_callbacks_processed_remotely(int cpu)
{
	cpumask_t mask;

	rcu_remote_cpus(&mask);
	return cpu_isset(cpu, mask);
}

/*
 * Should this cpu be processing rcu callbacks for cpus configured as such?
 */
static int rcu_process_remote(int cpu)
{
	cpumask_t mask;

	rcu_remote_cpus(&mask);
	/*
	 * If the system has some cpus configured for remote callbacks and
	 * this cpu is not one of those, then this cpu processes remote rcu
	 * callbacks.
	 */
	return (!cpus_empty(mask) && !cpu_isset(cpu, mask));
}

/*
 * Get the next cpu on which to do remote rcu callback processing.
 * We simply round-robin across all cpus configured for remote callbacks.
 */
static int rcu_next_remotercu(void)
{
	unsigned long flags;
	cpumask_t mask;
	int cpu;

	rcu_remote_cpus(&mask);
	if (unlikely(cpus_empty(mask)))
		return -1;
	spin_lock_irqsave(&cpu_remotercu_lock, flags);
	cpu_remotercu_next = next_cpu(cpu_remotercu_next, mask);
	if (cpu_remotercu_next >= NR_CPUS)
		cpu_remotercu_next = first_cpu(mask);
	cpu = cpu_remotercu_next;
	spin_unlock_irqrestore(&cpu_remotercu_lock, flags);

	return cpu;
}
/* Protect a cpu's donelist against remote callback processing */
static void rcu_rm_lock(struct rcu_data *rdp)
{
	spin_lock_irq(&rdp->rmlock);
}

static void rcu_rm_unlock(struct rcu_data *rdp)
{
	spin_unlock_irq(&rdp->rmlock);
}

static void rcu_set_batch_stat(struct rcu_data *rdp, short stat)
{
	rdp->batch_stat = stat;
}

/*
 * Update the batch processing status, but only if no callback processing
 * is currently in progress. Returns nonzero on success.
 */
static int rcu_setcmp_batch_stat(struct rcu_data *rdp, short stat)
{
	return cmpxchg(&rdp->batch_stat, RCU_BATCH_IDLE, stat) == RCU_BATCH_IDLE;
}

/*
 * Update qlen and return the new value.
 */
static long rcu_updated_qlen(struct rcu_data *rdp)
{
	long old, new;

	/* Update qlen safely if configured for remote callbacks */
	if (unlikely(rcu_callbacks_processed_remotely(smp_processor_id()))) {
		do {
			old = rdp->qlen;
			new = old + 1;
		} while (cmpxchg(&rdp->qlen, old, new) != old);
		return new;
	}
	return ++rdp->qlen;
}
/*
 * Configure a cpu for remote rcu callback processing.
 */
int rcu_set_remote_rcu(int cpu)
{
	unsigned long flags;

	if (cpu_online(cpu)) {
		spin_lock_irqsave(&cpu_remotercu_lock, flags);
		cpu_set(cpu, cpu_remotercu_map);
		spin_unlock_irqrestore(&cpu_remotercu_lock, flags);
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(rcu_set_remote_rcu);

/*
 * Configure a cpu for standard (local) rcu callback processing.
 */
int rcu_clear_remote_rcu(int cpu)
{
	unsigned long flags;

	if (cpu_online(cpu)) {
		spin_lock_irqsave(&cpu_remotercu_lock, flags);
		cpu_clear(cpu, cpu_remotercu_map);
		spin_unlock_irqrestore(&cpu_remotercu_lock, flags);
		return 0;
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(rcu_clear_remote_rcu);
#else
#define remote_rcu_callbacks	0
static int rcu_callbacks_processed_remotely(int cpu) { return 0; }
static int rcu_process_remote(int cpu) { return 0; }
static void rcu_rm_lock(struct rcu_data *rdp) {}
static void rcu_rm_unlock(struct rcu_data *rdp) {}
static void rcu_set_batch_stat(struct rcu_data *rdp, short stat) {}
static int rcu_setcmp_batch_stat(struct rcu_data *rdp, short stat) { return 1; }
static long rcu_updated_qlen(struct rcu_data *rdp)
{
	return ++rdp->qlen;
}
static void rcu_clear_remote_rcu(int cpu) {}
#endif
/**
 * call_rcu - Queue an RCU callback for invocation after a grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 */
void fastcall call_rcu(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;
	if (unlikely(rcu_updated_qlen(rdp) > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_ctrlblk);
	}
	local_irq_restore(flags);
}
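/*
 * Usage sketch for call_rcu() (illustrative only; struct foo and
 * foo_reclaim() are hypothetical):
 *
 *	struct foo {
 *		struct list_head list;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	// updater, with appropriate locking:
 *	list_del_rcu(&fp->list);
 *	call_rcu(&fp->rcu, foo_reclaim);
 */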
/**
 * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
 * @head: structure to be used for queueing the RCU updates.
 * @func: actual update function to be invoked after the grace period
 *
 * The update function will be invoked some time after a full grace
 * period elapses, in other words after all currently executing RCU
 * read-side critical sections have completed. call_rcu_bh() assumes
 * that the read-side critical sections end on completion of a softirq
 * handler. This means that read-side critical sections in process
 * context must not be interrupted by softirqs. This interface is to be
 * used when most of the read-side critical sections are in softirq context.
 * RCU read-side critical sections are delimited by rcu_read_lock() and
 * rcu_read_unlock(), if in interrupt context, or by rcu_read_lock_bh()
 * and rcu_read_unlock_bh(), if in process context. These may be nested.
 */
void fastcall call_rcu_bh(struct rcu_head *head,
				void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rdp;

	head->func = func;
	head->next = NULL;
	local_irq_save(flags);
	rdp = &__get_cpu_var(rcu_bh_data);
	*rdp->nxttail = head;
	rdp->nxttail = &head->next;

	if (unlikely(rcu_updated_qlen(rdp) > qhimark)) {
		rdp->blimit = INT_MAX;
		force_quiescent_state(rdp, &rcu_bh_ctrlblk);
	}

	local_irq_restore(flags);
}
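/*
 * Usage sketch for call_rcu_bh() (illustrative only): identical to the
 * call_rcu() sketch above, except that readers running in process
 * context bracket their accesses with rcu_read_lock_bh() and
 * rcu_read_unlock_bh(), and the updater queues the callback with
 * call_rcu_bh(&fp->rcu, foo_reclaim).
 */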
/*
 * Return the number of RCU batches processed thus far. Useful
 * for debug and statistics.
 */
long rcu_batches_completed(void)
{
	return rcu_ctrlblk.completed;
}

static void rcu_barrier_callback(struct rcu_head *notused)
{
	if (atomic_dec_and_test(&rcu_barrier_cpu_count))
		complete(&rcu_barrier_completion);
}
/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *notused)
{
	int cpu = smp_processor_id();
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_head *head;

	head = &rdp->barrier;
	atomic_inc(&rcu_barrier_cpu_count);
	call_rcu(head, rcu_barrier_callback);
}
/**
 * rcu_barrier - Wait until all the in-flight RCUs are complete.
 */
void rcu_barrier(void)
{
	BUG_ON(in_interrupt());
	/* Take cpucontrol mutex to protect against CPU hotplug */
	mutex_lock(&rcu_barrier_mutex);
	init_completion(&rcu_barrier_completion);
	atomic_set(&rcu_barrier_cpu_count, 0);
	on_each_cpu(rcu_barrier_func, NULL, 0, 1);
	wait_for_completion(&rcu_barrier_completion);
	mutex_unlock(&rcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
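/*
 * Usage sketch for rcu_barrier() (illustrative only; foo_exit() is
 * hypothetical): a module whose callbacks live in module text must wait
 * for them before unloading.
 *
 *	static void foo_exit(void)
 *	{
 *		...stop posting new call_rcu() callbacks...
 *		rcu_barrier();	// all queued callbacks have now run
 *		...free module resources...
 *	}
 */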
/*
 * Invoke the completed RCU callbacks. They are expected to be in
 * a per-cpu list.
 */
static void rcu_do_batch(struct rcu_data *rdp)
{
	struct rcu_head *next, *list;
	int count = 0;

	list = rdp->donelist;
	while (list) {
		next = rdp->donelist = list->next;
		list->func(list);
		list = next;
		rdp->qlen--;
		if (++count >= rdp->blimit)
			break;
	}
	if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
		rdp->blimit = blimit;
	if (!rdp->donelist)
		rdp->donetail = &rdp->donelist;
	else
		tasklet_schedule(&per_cpu(rcu_tasklet, rdp->cpu));
}
/*
 * Grace period handling:
 * The grace period handling consists of two steps:
 * - A new grace period is started.
 *   This is done by rcu_start_batch. The start is not broadcast to
 *   all cpus; they must pick this up by comparing rcp->cur with
 *   rdp->quiescbatch. All cpus are recorded in the
 *   rcu_ctrlblk.cpumask bitmap.
 * - All cpus must go through a quiescent state.
 *   Since the start of the grace period is not broadcast, at least two
 *   calls to rcu_check_quiescent_state are required:
 *   The first call just notices that a new grace period is running. The
 *   following calls check if there was a quiescent state since the beginning
 *   of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
 *   the bitmap is empty, then the grace period is completed.
 *   rcu_check_quiescent_state calls rcu_start_batch to start the next grace
 *   period (if necessary).
 */
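/*
 * Illustrative two-cpu timeline of the above:
 *
 *	cpu0: call_rcu() queues a callback; __rcu_process_callbacks()
 *	      sets rcp->next_pending, rcu_start_batch() bumps rcp->cur
 *	      and records {cpu0, cpu1} in rcp->cpumask.
 *	cpu0: context switch -> passed_quiesc -> cpu_quiet() clears cpu0.
 *	cpu1: context switch -> passed_quiesc -> cpu_quiet() clears cpu1;
 *	      cpumask is now empty, so rcp->completed = rcp->cur, and
 *	      cpu0's callback moves to its donelist for rcu_do_batch().
 */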
/*
 * Register a new batch of callbacks, and start it up if there is currently no
 * active batch and the batch to be registered has not already occurred.
 * Caller must hold rcu_ctrlblk.lock.
 */
static void rcu_start_batch(struct rcu_ctrlblk *rcp)
{
	if (rcp->next_pending &&
			rcp->completed == rcp->cur) {
		rcp->next_pending = 0;
		/*
		 * next_pending == 0 must be visible in
		 * __rcu_process_callbacks() before it can see new value of cur.
		 */
		smp_wmb();
		rcp->cur++;

		/*
		 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
		 * barrier. Otherwise it can cause tickless idle cpus to be
		 * included in rcp->cpumask, which will extend grace periods
		 * unnecessarily.
		 */
		smp_mb();
		cpus_andnot(rcp->cpumask, cpu_online_map, nohz_cpu_mask);
	}
}
/*
 * cpu went through a quiescent state since the beginning of the grace period.
 * Clear it from the cpu mask and complete the grace period if it was the last
 * cpu. Start another grace period if someone has further entries pending.
 */
static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
{
	cpu_clear(cpu, rcp->cpumask);
	if (cpus_empty(rcp->cpumask)) {
		/* batch completed! */
		rcp->completed = rcp->cur;
		rcu_start_batch(rcp);
	}
}
/*
 * Check if the cpu has gone through a quiescent state (say context
 * switch). If so, and if it hasn't already done so in this RCU
 * quiescent cycle, then indicate that it has done so.
 */
static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	if (rdp->quiescbatch != rcp->cur) {
		/* start new grace period: */
		rdp->qs_pending = 1;
		rdp->passed_quiesc = 0;
		rdp->quiescbatch = rcp->cur;
		return;
	}

	/* Grace period already completed for this cpu?
	 * qs_pending is checked instead of the actual bitmap to avoid
	 * cache line thrashing.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesc)
		return;
	rdp->qs_pending = 0;

	spin_lock(&rcp->lock);
	/*
	 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
	 * during cpu startup. Ignore the quiescent state.
	 */
	if (likely(rdp->quiescbatch == rcp->cur))
		cpu_quiet(rdp->cpu, rcp);

	spin_unlock(&rcp->lock);
}
#ifdef CONFIG_HOTPLUG_CPU

/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
 * the locking requirements; the list it's pulling from has to belong to a cpu
 * which is dead and hence not processing interrupts.
 */
static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
				struct rcu_head **tail)
{
	local_irq_disable();
	*this_rdp->nxttail = list;
	if (list)
		this_rdp->nxttail = tail;
	local_irq_enable();
}

static void __rcu_offline_cpu(struct rcu_data *this_rdp,
				struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* if the cpu going offline owns the grace period
	 * we can block indefinitely waiting for it, so flush
	 * it here
	 */
	spin_lock_bh(&rcp->lock);
	if (rcp->cur != rcp->completed)
		cpu_quiet(rdp->cpu, rcp);
	spin_unlock_bh(&rcp->lock);
	rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail);
	rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
	rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail);
}

static void rcu_offline_cpu(int cpu)
{
	struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
	struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);

	rcu_clear_remote_rcu(cpu);

	__rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
			  &per_cpu(rcu_data, cpu));
	__rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
			  &per_cpu(rcu_bh_data, cpu));
	put_cpu_var(rcu_data);
	put_cpu_var(rcu_bh_data);
	tasklet_kill_immediate(&per_cpu(rcu_tasklet, cpu), cpu);
	tasklet_kill_immediate(&per_cpu(rcu_remote_tasklet, cpu), cpu);
}
#else

static void rcu_offline_cpu(int cpu)
{
}

#endif
/*
 * This does the RCU processing work from tasklet context.
 */
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
					struct rcu_data *rdp)
{
	int cpu = smp_processor_id();

	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) {
		/*
		 * If this cpu is configured for remote rcu callback
		 * processing, grab the lock to protect donelist from
		 * changes done by remote callback processing.
		 *
		 * Remote callback processing should only try this lock,
		 * then move on, so contention should be minimal.
		 */
		if (unlikely(rcu_callbacks_processed_remotely(cpu))) {
			rcu_rm_lock(rdp);
			*rdp->donetail = rdp->curlist;
			rdp->donetail = rdp->curtail;
			rcu_rm_unlock(rdp);
		} else {
			*rdp->donetail = rdp->curlist;
			rdp->donetail = rdp->curtail;
		}
		rdp->curlist = NULL;
		rdp->curtail = &rdp->curlist;
	}

	if (rdp->nxtlist && !rdp->curlist) {
		local_irq_disable();
		rdp->curlist = rdp->nxtlist;
		rdp->curtail = rdp->nxttail;
		rdp->nxtlist = NULL;
		rdp->nxttail = &rdp->nxtlist;
		local_irq_enable();

		/*
		 * start the next batch of callbacks
		 */

		/* determine batch number */
		rdp->batch = rcp->cur + 1;
		/* see the comment and corresponding wmb() in
		 * rcu_start_batch()
		 */
		smp_rmb();

		if (!rcp->next_pending) {
			/* and start it/schedule start if it's a new batch */
			spin_lock(&rcp->lock);
			rcp->next_pending = 1;
			rcu_start_batch(rcp);
			spin_unlock(&rcp->lock);
		}
	}

	rcu_check_quiescent_state(rcp, rdp);
	if (remote_rcu_callbacks) {
		/*
		 * Process the donelist locally only if this cpu is not
		 * configured for remote processing; batch_stat keeps
		 * local and remote processing from overlapping.
		 */
		if (rdp->donelist && !rcu_callbacks_processed_remotely(cpu) &&
		    rcu_setcmp_batch_stat(rdp, RCU_BATCH_LOCAL)) {
			rcu_do_batch(rdp);
			rcu_set_batch_stat(rdp, RCU_BATCH_IDLE);
		}
	} else if (rdp->donelist) {
		rcu_do_batch(rdp);
	}
}

static void rcu_process_callbacks(unsigned long unused)
{
	__rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
	__rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
}
#if defined(CONFIG_NUMA) && defined(CONFIG_IA64)
/*
 * Do callback processing on behalf of cpus configured for remote
 * processing.
 *
 * This runs only on systems that have cpus configured for remote
 * callback processing, and only on cpus not configured as such.
 *
 * We process both regular and bh donelists for only one cpu at a
 * time.
 */
static void rcu_process_remote_callbacks(unsigned long unused)
{
	struct rcu_data *rdp, *rdp_bh;
	struct rcu_head *list = NULL;
	struct rcu_head *list_bh = NULL;
	long old, new;
	int count, cpu;

	/* Get the cpu for which we will process the donelists */
	cpu = rcu_next_remotercu();
	if (unlikely(cpu == -1))
		return;

	/*
	 * We process whatever remote callbacks we can at this moment for
	 * this cpu. If the list protection locks are held, we move on,
	 * as we don't want contention.
	 */
	rdp = &per_cpu(rcu_data, cpu);
	if (spin_trylock_irq(&rdp->rmlock)) {
		/*
		 * batch_stat ensures cpu isn't still running rcu_do_batch.
		 * This can happen if we've just configured on the fly.
		 */
		if (rcu_setcmp_batch_stat(rdp, RCU_BATCH_REMOTE)) {
			list = xchg(&rdp->donelist, NULL);
			if (list)
				rdp->donetail = &rdp->donelist;
		}
		spin_unlock_irq(&rdp->rmlock);
	}

	rdp_bh = &per_cpu(rcu_bh_data, cpu);
	if (spin_trylock_irq(&rdp_bh->rmlock)) {
		if (rcu_setcmp_batch_stat(rdp_bh, RCU_BATCH_REMOTE)) {
			list_bh = xchg(&rdp_bh->donelist, NULL);
			if (list_bh)
				rdp_bh->donetail = &rdp_bh->donelist;
		}
		spin_unlock_irq(&rdp_bh->rmlock);
	}

	/* Process the donelists */
	count = 0;
	while (list) {
		list->func(list);
		list = list->next;
		count++;
	}
	if (count) {
		/* Safely update qlen without lock contention */
		do {
			old = rdp->qlen;
			new = old - count;
		} while (cmpxchg(&rdp->qlen, old, new) != old);
	}
	if (rdp->batch_stat == RCU_BATCH_REMOTE)
		rcu_set_batch_stat(rdp, RCU_BATCH_IDLE);

	count = 0;
	while (list_bh) {
		list_bh->func(list_bh);
		list_bh = list_bh->next;
		count++;
	}
	if (count) {
		do {
			old = rdp_bh->qlen;
			new = old - count;
		} while (cmpxchg(&rdp_bh->qlen, old, new) != old);
	}
	if (rdp_bh->batch_stat == RCU_BATCH_REMOTE)
		rcu_set_batch_stat(rdp_bh, RCU_BATCH_IDLE);
}
#else
static void rcu_process_remote_callbacks(unsigned long unused) {}
#endif
static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
{
	/* This cpu has pending rcu entries and the grace period
	 * for them has completed.
	 */
	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
		return 1;

	/* This cpu has no pending entries, but there are new entries */
	if (!rdp->curlist && rdp->nxtlist)
		return 1;

	/* This cpu has finished callbacks to invoke */
	if (rdp->donelist)
		return 1;

	/* The rcu core waits for a quiescent state from the cpu */
	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
		return 1;

	/* nothing to do */
	return 0;
}
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so. This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
int rcu_pending(int cpu)
{
	/*
	 * Schedule remote callback processing on this cpu only if
	 * there are cpus set up for remote callback processing, and
	 * this cpu is not one of them.
	 */
	if (unlikely(rcu_process_remote(cpu)))
		tasklet_schedule(&per_cpu(rcu_remote_tasklet, cpu));

	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
}
/*
 * Check to see if any future RCU-related work will need to be done
 * by the current CPU, even if none need be done immediately, returning
 * 1 if so. This function is part of the RCU implementation; it is -not-
 * an exported member of the RCU API.
 */
int rcu_needs_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);

	return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu));
}
void rcu_check_callbacks(int cpu, int user)
{
	if (user ||
	    (idle_cpu(cpu) && !in_softirq() &&
				hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
		rcu_qsctr_inc(cpu);
		rcu_bh_qsctr_inc(cpu);
	} else if (!in_softirq())
		rcu_bh_qsctr_inc(cpu);
	tasklet_schedule(&per_cpu(rcu_tasklet, cpu));
}
static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
						struct rcu_data *rdp)
{
	memset(rdp, 0, sizeof(*rdp));
	rdp->curtail = &rdp->curlist;
	rdp->nxttail = &rdp->nxtlist;
	rdp->donetail = &rdp->donelist;
	rdp->quiescbatch = rcp->completed;
	rdp->qs_pending = 0;
	rdp->cpu = cpu;
	rdp->blimit = blimit;
#if defined(CONFIG_NUMA) && defined(CONFIG_IA64)
	spin_lock_init(&rdp->rmlock);
#endif
}
static void __devinit rcu_online_cpu(int cpu)
{
	struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
	struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);

	rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
	rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
	tasklet_init(&per_cpu(rcu_remote_tasklet, cpu),
			rcu_process_remote_callbacks, 0UL);
}
static int rcu_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		rcu_online_cpu(cpu);
		break;
	case CPU_DEAD:
		rcu_offline_cpu(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block rcu_nb = {
	.notifier_call	= rcu_cpu_notify,
};
/*
 * Initializes rcu mechanism. Assumed to be called early.
 * That is, before the local timer (SMP) or the jiffies timer (UP) is
 * set up. Note that rcu_qsctr and friends are implicitly
 * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
 */
void __init rcu_init(void)
{
	rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
			(void *)(long)smp_processor_id());
	/* Register notifier for non-boot CPUs */
	register_cpu_notifier(&rcu_nb);
}
struct rcu_synchronize {
	struct rcu_head head;
	struct completion completion;
};

/* Because of FASTCALL declaration of complete, we use this wrapper */
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
/**
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing RCU
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
 * and may be nested.
 *
 * If your read-side code is not protected by rcu_read_lock(), do -not-
 * use synchronize_rcu().
 */
void synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	init_completion(&rcu.completion);
	/* Will wake me after RCU finished */
	call_rcu(&rcu.head, wakeme_after_rcu);

	/* Wait for it */
	wait_for_completion(&rcu.completion);
}
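/*
 * Usage sketch for synchronize_rcu() (illustrative only; struct foo is
 * hypothetical): a blocking updater can free directly instead of using
 * call_rcu().
 *
 *	// updater, with appropriate locking:
 *	list_del_rcu(&fp->list);
 *	synchronize_rcu();	// all pre-existing readers are done
 *	kfree(fp);
 */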
/*
 * Deprecated, use synchronize_rcu() or synchronize_sched() instead.
 */
void synchronize_kernel(void)
{
	synchronize_rcu();
}
module_param(blimit, int, 0);
module_param(qhimark, int, 0);
module_param(qlowmark, int, 0);
#ifdef CONFIG_SMP
module_param(rsinterval, int, 0);
#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
EXPORT_SYMBOL_GPL_FUTURE(call_rcu);	/* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL_GPL_FUTURE(call_rcu_bh);	/* WARNING: GPL-only in April 2006. */
EXPORT_SYMBOL_GPL(synchronize_rcu);
EXPORT_SYMBOL_GPL_FUTURE(synchronize_kernel);	/* WARNING: GPL-only in April 2006. */