rcu: Permit call_rcu() from CPU_DYING notifiers
[linux-flexiantxendom0-3.2.10.git] / kernel / rcutree_plugin.h
index a21413d..c023464 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Read-Copy Update mechanism for mutual exclusion (tree-based version)
  * Internal non-public definitions that provide either classic
- * or preemptable semantics.
+ * or preemptible semantics.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  */
 
 #include <linux/delay.h>
-#include <linux/stop_machine.h>
+
+#define RCU_KTHREAD_PRIO 1
+
+#ifdef CONFIG_RCU_BOOST
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+#else
+#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
+#endif
 
 /*
  * Check the RCU kernel configuration parameters and print informative
@@ -55,7 +62,10 @@ static void __init rcu_bootup_announce_oddness(void)
        printk(KERN_INFO "\tRCU torture testing starts during boot.\n");
 #endif
 #if defined(CONFIG_TREE_PREEMPT_RCU) && !defined(CONFIG_RCU_CPU_STALL_VERBOSE)
-       printk(KERN_INFO "\tVerbose stalled-CPUs detection is disabled.\n");
+       printk(KERN_INFO "\tDump stacks of tasks blocking RCU-preempt GP.\n");
+#endif
+#if defined(CONFIG_RCU_CPU_STALL_INFO)
+       printk(KERN_INFO "\tAdditional per-CPU info printed with stalls.\n");
 #endif
 #if NUM_RCU_LVL_4 != 0
        printk(KERN_INFO "\tExperimental four-level hierarchy is enabled.\n");
@@ -64,10 +74,11 @@ static void __init rcu_bootup_announce_oddness(void)
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
+struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -75,7 +86,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp);
  */
 static void __init rcu_bootup_announce(void)
 {
-       printk(KERN_INFO "Preemptable hierarchical RCU implementation.\n");
+       printk(KERN_INFO "Preemptible hierarchical RCU implementation.\n");
        rcu_bootup_announce_oddness();
 }
 
@@ -108,7 +119,7 @@ void rcu_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
- * Record a preemptable-RCU quiescent state for the specified CPU.  Note
+ * Record a preemptible-RCU quiescent state for the specified CPU.  Note
  * that this just means that the task currently running on the CPU is
  * not in a quiescent state.  There might be any number of tasks blocked
  * while in an RCU read-side critical section.
@@ -121,9 +132,11 @@ static void rcu_preempt_qs(int cpu)
 {
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 
-       rdp->passed_quiesc_completed = rdp->gpnum - 1;
+       rdp->passed_quiesce_gpnum = rdp->gpnum;
        barrier();
-       rdp->passed_quiesc = 1;
+       if (rdp->passed_quiesce == 0)
+               trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+       rdp->passed_quiesce = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -147,7 +160,7 @@ static void rcu_preempt_note_context_switch(int cpu)
        struct rcu_data *rdp;
        struct rcu_node *rnp;
 
-       if (t->rcu_read_lock_nesting &&
+       if (t->rcu_read_lock_nesting > 0 &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
@@ -189,7 +202,20 @@ static void rcu_preempt_note_context_switch(int cpu)
                        if (rnp->qsmask & rdp->grpmask)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
+               trace_rcu_preempt_task(rdp->rsp->name,
+                                      t->pid,
+                                      (rnp->qsmask & rdp->grpmask)
+                                      ? rnp->gpnum
+                                      : rnp->gpnum + 1);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       } else if (t->rcu_read_lock_nesting < 0 &&
+                  t->rcu_read_unlock_special) {
+
+               /*
+                * Complete exit from RCU read-side critical section on
+                * behalf of preempted instance of __rcu_read_unlock().
+                */
+               rcu_read_unlock_special(t);
        }
 
        /*
@@ -207,7 +233,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 }
 
 /*
- * Tree-preemptable RCU implementation for rcu_read_lock().
+ * Tree-preemptible RCU implementation for rcu_read_lock().
  * Just increment ->rcu_read_lock_nesting, shared state will be updated
  * if we block.
  */
@@ -284,12 +310,16 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
        int empty;
        int empty_exp;
+       int empty_exp_now;
        unsigned long flags;
        struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+       struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        int special;
 
@@ -309,7 +339,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
        }
 
        /* Hardware IRQ handlers cannot block. */
-       if (in_irq()) {
+       if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }
@@ -335,6 +365,9 @@ static void rcu_read_unlock_special(struct task_struct *t)
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
+               t->rcu_blocked_node = NULL;
+               trace_rcu_unlock_preempted_task("rcu_preempt",
+                                               rnp->gpnum, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -342,41 +375,51 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
+               /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
+               if (t->rcu_boost_mutex) {
+                       rbmp = t->rcu_boost_mutex;
+                       t->rcu_boost_mutex = NULL;
+               }
 #endif /* #ifdef CONFIG_RCU_BOOST */
-               t->rcu_blocked_node = NULL;
 
                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
-                * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
+                * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
+                * so we must take a snapshot of the expedited state.
                 */
-               if (empty)
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               else
+               empty_exp_now = !rcu_preempted_readers_exp(rnp);
+               if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+                       trace_rcu_quiescent_state_report("preempt_rcu",
+                                                        rnp->gpnum,
+                                                        0, rnp->qsmask,
+                                                        rnp->level,
+                                                        rnp->grplo,
+                                                        rnp->grphi,
+                                                        !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rnp, flags);
+               } else
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
-               if (special & RCU_READ_UNLOCK_BOOSTED) {
-                       t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
-                       rt_mutex_unlock(t->rcu_boost_mutex);
-                       t->rcu_boost_mutex = NULL;
-               }
+               if (rbmp)
+                       rt_mutex_unlock(rbmp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
-               if (!empty_exp && !rcu_preempted_readers_exp(rnp))
-                       rcu_report_exp_rnp(&rcu_preempt_state, rnp);
+               if (!empty_exp && empty_exp_now)
+                       rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
        } else {
                local_irq_restore(flags);
        }
 }
 
 /*
- * Tree-preemptable RCU implementation for rcu_read_unlock().
+ * Tree-preemptible RCU implementation for rcu_read_unlock().
  * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
  * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
  * invoke rcu_read_unlock_special() to clean up after a context switch
@@ -386,14 +429,23 @@ void __rcu_read_unlock(void)
 {
        struct task_struct *t = current;
 
-       barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-       --t->rcu_read_lock_nesting;
-       barrier();  /* decrement before load of ->rcu_read_unlock_special */
-       if (t->rcu_read_lock_nesting == 0 &&
-           unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-               rcu_read_unlock_special(t);
+       if (t->rcu_read_lock_nesting != 1)
+               --t->rcu_read_lock_nesting;
+       else {
+               barrier();  /* critical section before exit code. */
+               t->rcu_read_lock_nesting = INT_MIN;
+               barrier();  /* assign before ->rcu_read_unlock_special load */
+               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+                       rcu_read_unlock_special(t);
+               barrier();  /* ->rcu_read_unlock_special load before assign */
+               t->rcu_read_lock_nesting = 0;
+       }
 #ifdef CONFIG_PROVE_LOCKING
-       WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+       {
+               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+               WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+       }
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
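To see what the INT_MIN handoff above buys, consider a minimal reader-side sketch; struct foo, foo_gp, and read_foo_val() are illustrative names, not from this file:

struct foo {
	int val;
};
struct foo __rcu *foo_gp;	/* Updated elsewhere via rcu_assign_pointer(). */

static int read_foo_val(void)
{
	int v;

	rcu_read_lock();	/* ->rcu_read_lock_nesting: 0 -> 1. */
	v = rcu_dereference(foo_gp)->val;	/* Assumes foo_gp is non-NULL. */
	rcu_read_unlock();	/* 1 -> INT_MIN -> 0.  If we are preempted
				 * mid-unlock, rcu_preempt_note_context_switch()
				 * invokes rcu_read_unlock_special() on our
				 * behalf, keyed off the negative nesting. */
	return v;
}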
@@ -440,20 +492,51 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 
 #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_VERBOSE */
 
+#ifdef CONFIG_RCU_CPU_STALL_INFO
+
+static void rcu_print_task_stall_begin(struct rcu_node *rnp)
+{
+       printk(KERN_ERR "\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
+              rnp->level, rnp->grplo, rnp->grphi);
+}
+
+static void rcu_print_task_stall_end(void)
+{
+       printk(KERN_CONT "\n");
+}
+
+#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
+
+static void rcu_print_task_stall_begin(struct rcu_node *rnp)
+{
+}
+
+static void rcu_print_task_stall_end(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */
+
 /*
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
        struct task_struct *t;
+       int ndetected = 0;
 
        if (!rcu_preempt_blocked_readers_cgp(rnp))
-               return;
+               return 0;
+       rcu_print_task_stall_begin(rnp);
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
-               printk(" P%d", t->pid);
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
+               printk(KERN_CONT " P%d", t->pid);
+               ndetected++;
+       }
+       rcu_print_task_stall_end();
+       return ndetected;
 }
 
 /*
@@ -527,7 +610,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
         * absolutely necessary, but this is a good performance/complexity
         * tradeoff.
         */
-       if (rcu_preempt_blocked_readers_cgp(rnp))
+       if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
@@ -564,16 +647,16 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
        return retval;
 }
 
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
 /*
- * Do CPU-offline processing for preemptable RCU.
+ * Do CPU-offline processing for preemptible RCU.
  */
-static void rcu_preempt_offline_cpu(int cpu)
+static void rcu_preempt_cleanup_dead_cpu(int cpu)
 {
-       __rcu_offline_cpu(cpu, &rcu_preempt_state);
+       rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
 }
 
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Check for a quiescent state from the current CPU.  When a task blocks,
  * the task is recorded in the corresponding CPU's rcu_node structure,
@@ -589,12 +672,13 @@ static void rcu_preempt_check_callbacks(int cpu)
                rcu_preempt_qs(cpu);
                return;
        }
-       if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+       if (t->rcu_read_lock_nesting > 0 &&
+           per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
 /*
- * Process callbacks for preemptable RCU.
+ * Process callbacks for preemptible RCU.
  */
 static void rcu_preempt_process_callbacks(void)
 {
@@ -602,15 +686,38 @@ static void rcu_preempt_process_callbacks(void)
                                &__get_cpu_var(rcu_preempt_data));
 }
 
+#ifdef CONFIG_RCU_BOOST
+
+static void rcu_preempt_do_callbacks(void)
+{
+       rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
+}
+
+#endif /* #ifdef CONFIG_RCU_BOOST */
+
 /*
- * Queue a preemptable-RCU callback for invocation after a grace period.
+ * Queue a preemptible-RCU callback for invocation after a grace period.
  */
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-       __call_rcu(head, func, &rcu_preempt_state);
+       __call_rcu(head, func, &rcu_preempt_state, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
+/*
+ * Queue an RCU callback for lazy invocation after a grace period.
+ * This will likely be later named something like "call_rcu_lazy()",
+ * but this change will require some way of tagging the lazy RCU
+ * callbacks in the list of pending callbacks.  Until then, this
+ * function may only be called from __kfree_rcu().
+ */
+void kfree_call_rcu(struct rcu_head *head,
+                   void (*func)(struct rcu_head *rcu))
+{
+       __call_rcu(head, func, &rcu_preempt_state, 1);
+}
+EXPORT_SYMBOL_GPL(kfree_call_rcu);
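Because kfree_call_rcu() may only be reached via __kfree_rcu(), typical callers go through the kfree_rcu() wrapper, which encodes the rcu_head offset into the callback pointer. A minimal usage sketch with a hypothetical structure:

struct foo {
	int a;
	struct rcu_head rh;
};

static void release_foo(struct foo *fp)
{
	/*
	 * kfree_rcu() passes offsetof(struct foo, rh) down through
	 * __kfree_rcu() to kfree_call_rcu(), so fp is kfree()ed only
	 * after a grace period elapses.
	 */
	kfree_rcu(fp, rh);
}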
+
 /**
  * synchronize_rcu - wait until a grace period has elapsed.
  *
@@ -624,18 +731,13 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
-       struct rcu_synchronize rcu;
-
+       rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
+                          !lock_is_held(&rcu_lock_map) &&
+                          !lock_is_held(&rcu_sched_lock_map),
+                          "Illegal synchronize_rcu() in RCU read-side critical section");
        if (!rcu_scheduler_active)
                return;
-
-       init_rcu_head_on_stack(&rcu.head);
-       init_completion(&rcu.completion);
-       /* Will wake me after RCU finished. */
-       call_rcu(&rcu.head, wakeme_after_rcu);
-       /* Wait for it. */
-       wait_for_completion(&rcu.completion);
-       destroy_rcu_head_on_stack(&rcu.head);
+       wait_rcu_gp(call_rcu);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
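The open-coded completion wait deleted above now lives in wait_rcu_gp() in kernel/rcupdate.c; reconstructed from the removed lines, the helper amounts to roughly:

void wait_rcu_gp(call_rcu_func_t crf)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	crf(&rcu.head, wakeme_after_rcu);	/* Will wake us after RCU finished. */
	wait_for_completion(&rcu.completion);	/* Wait for it. */
	destroy_rcu_head_on_stack(&rcu.head);
}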
 
@@ -677,19 +779,27 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
  *
+ * Most callers will set the "wake" flag, but the task initiating the
+ * expedited grace period need not wake itself.
+ *
  * Caller must hold sync_rcu_preempt_exp_mutex.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+                              bool wake)
 {
        unsigned long flags;
        unsigned long mask;
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
-               if (!sync_rcu_preempt_exp_done(rnp))
+               if (!sync_rcu_preempt_exp_done(rnp)) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
+               }
                if (rnp->parent == NULL) {
-                       wake_up(&sync_rcu_preempt_exp_wq);
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+                       if (wake)
+                               wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
@@ -698,7 +808,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
@@ -711,23 +820,37 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 static void
 sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
 {
+       unsigned long flags;
        int must_wait = 0;
 
-       raw_spin_lock(&rnp->lock); /* irqs already disabled */
-       if (!list_empty(&rnp->blkd_tasks)) {
+       raw_spin_lock_irqsave(&rnp->lock, flags);
+       if (list_empty(&rnp->blkd_tasks))
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       else {
                rnp->exp_tasks = rnp->blkd_tasks.next;
-               rcu_initiate_boost(rnp);
+               rcu_initiate_boost(rnp, flags);  /* releases rnp->lock */
                must_wait = 1;
        }
-       raw_spin_unlock(&rnp->lock); /* irqs remain disabled */
        if (!must_wait)
-               rcu_report_exp_rnp(rsp, rnp);
+               rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 }
 
-/*
- * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
- * is to invoke synchronize_sched_expedited() to push all the tasks to
- * the ->blkd_tasks lists and wait for this list to drain.
+/**
+ * synchronize_rcu_expedited - Brute-force RCU grace period
+ *
+ * Wait for an RCU-preempt grace period, but expedite it.  The basic
+ * idea is to invoke synchronize_sched_expedited() to push all the tasks to
+ * the ->blkd_tasks lists and wait for this list to drain.  This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * and is thus not recommended for any sort of common-case code.
+ * In fact, if you are using synchronize_rcu_expedited() in a loop,
+ * please restructure your code to batch your updates, and then use a
+ * single synchronize_rcu() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier.  Failing to observe
+ * these restrictions will result in deadlock.
  */
 void synchronize_rcu_expedited(void)
 {
@@ -795,7 +918,7 @@ mb_ret:
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
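To make the batching advice concrete: unlink every element under the update-side lock, then pay for a single grace period before freeing. A sketch with a hypothetical RCU-protected list (mylist_lock and struct foo are invented):

struct foo {
	struct list_head list;	/* Linked on an RCU-protected list. */
	/* ... payload ... */
};

static void remove_three(struct foo *a, struct foo *b, struct foo *c)
{
	spin_lock(&mylist_lock);
	list_del_rcu(&a->list);
	list_del_rcu(&b->list);
	list_del_rcu(&c->list);
	spin_unlock(&mylist_lock);
	synchronize_rcu();	/* One grace period covers all three removals. */
	kfree(a);
	kfree(b);
	kfree(c);
}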
 
 /*
- * Check to see if there is any immediate preemptable-RCU-related work
+ * Check to see if there is any immediate preemptible-RCU-related work
  * to be done.
  */
 static int rcu_preempt_pending(int cpu)
@@ -805,9 +928,9 @@ static int rcu_preempt_pending(int cpu)
 }
 
 /*
- * Does preemptable RCU need the CPU to stay out of dynticks mode?
+ * Does preemptible RCU have callbacks on this CPU?
  */
-static int rcu_preempt_needs_cpu(int cpu)
+static int rcu_preempt_cpu_has_callbacks(int cpu)
 {
        return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
 }
@@ -822,7 +945,7 @@ void rcu_barrier(void)
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Initialize preemptable RCU's per-CPU data.
+ * Initialize preemptible RCU's per-CPU data.
  */
 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 {
@@ -830,15 +953,16 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 }
 
 /*
- * Move preemptable RCU's callbacks from dying CPU to other online CPU.
+ * Move preemptible RCU's callbacks from the dying CPU to another online
+ * CPU and record a quiescent state.
  */
-static void rcu_preempt_send_cbs_to_online(void)
+static void rcu_preempt_cleanup_dying_cpu(void)
 {
-       rcu_send_cbs_to_online(&rcu_preempt_state);
+       rcu_cleanup_dying_cpu(&rcu_preempt_state);
 }
 
 /*
- * Initialize preemptable RCU's state structures.
+ * Initialize preemptible RCU's state structures.
  */
 static void __init __rcu_init_preempt(void)
 {
@@ -846,7 +970,7 @@ static void __init __rcu_init_preempt(void)
 }
 
 /*
- * Check for a task exiting while in a preemptable-RCU read-side
+ * Check for a task exiting while in a preemptible-RCU read-side
  * critical section, clean up if so.  No need to issue warnings,
  * as debug_check_no_locks_held() already does this if lockdep
  * is enabled.
@@ -858,7 +982,7 @@ void exit_rcu(void)
        if (t->rcu_read_lock_nesting == 0)
                return;
        t->rcu_read_lock_nesting = 1;
-       rcu_read_unlock();
+       __rcu_read_unlock();
 }
 
 #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
@@ -894,7 +1018,7 @@ void rcu_force_quiescent_state(void)
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
 /*
- * Because preemptable RCU does not exist, we never have to check for
+ * Because preemptible RCU does not exist, we never have to check for
  * CPUs being in quiescent states.
  */
 static void rcu_preempt_note_context_switch(int cpu)
@@ -902,7 +1026,7 @@ static void rcu_preempt_note_context_switch(int cpu)
 }
 
 /*
- * Because preemptable RCU does not exist, there are never any preempted
+ * Because preemptible RCU does not exist, there are never any preempted
  * RCU readers.
  */
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
@@ -921,7 +1045,7 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptable RCU does not exist, we never have to check for
+ * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp)
@@ -929,11 +1053,12 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
 }
 
 /*
- * Because preemptable RCU does not exist, we never have to check for
+ * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
+       return 0;
 }
 
 /*
@@ -945,7 +1070,7 @@ static void rcu_preempt_stall_reset(void)
 }
 
 /*
- * Because there is no preemptable RCU, there can be no readers blocked,
+ * Because there is no preemptible RCU, there can be no readers blocked,
  * so there is no need to check for blocked tasks.  So check only for
  * bogus qsmask values.
  */
@@ -957,7 +1082,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
- * Because preemptable RCU does not exist, it never needs to migrate
+ * Because preemptible RCU does not exist, it never needs to migrate
  * tasks that were blocked within RCU read-side critical sections, and
  * such non-existent tasks cannot possibly have been blocking the current
  * grace period.
@@ -969,18 +1094,18 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
        return 0;
 }
 
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+
 /*
- * Because preemptable RCU does not exist, it never needs CPU-offline
+ * Because preemptible RCU does not exist, it never needs CPU-offline
  * processing.
  */
-static void rcu_preempt_offline_cpu(int cpu)
+static void rcu_preempt_cleanup_dead_cpu(int cpu)
 {
 }
 
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
- * Because preemptable RCU does not exist, it never has any callbacks
+ * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
  */
 static void rcu_preempt_check_callbacks(int cpu)
@@ -988,7 +1113,7 @@ static void rcu_preempt_check_callbacks(int cpu)
 }
 
 /*
- * Because preemptable RCU does not exist, it never has any callbacks
+ * Because preemptible RCU does not exist, it never has any callbacks
  * to process.
  */
 static void rcu_preempt_process_callbacks(void)
@@ -996,8 +1121,24 @@ static void rcu_preempt_process_callbacks(void)
 }
 
 /*
+ * Queue an RCU callback for lazy invocation after a grace period.
+ * This will likely be later named something like "call_rcu_lazy()",
+ * but this change will require some way of tagging the lazy RCU
+ * callbacks in the list of pending callbacks.  Until then, this
+ * function may only be called from __kfree_rcu().
+ *
+ * Because there is no preemptible RCU, we use RCU-sched instead.
+ */
+void kfree_call_rcu(struct rcu_head *head,
+                   void (*func)(struct rcu_head *rcu))
+{
+       __call_rcu(head, func, &rcu_sched_state, 1);
+}
+EXPORT_SYMBOL_GPL(kfree_call_rcu);
+
+/*
  * Wait for an rcu-preempt grace period, but make it happen quickly.
- * But because preemptable RCU does not exist, map to rcu-sched.
+ * But because preemptible RCU does not exist, map to rcu-sched.
  */
 void synchronize_rcu_expedited(void)
 {
@@ -1008,19 +1149,19 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 #ifdef CONFIG_HOTPLUG_CPU
 
 /*
- * Because preemptable RCU does not exist, there is never any need to
+ * Because preemptible RCU does not exist, there is never any need to
  * report on tasks preempted in RCU read-side critical sections during
  * expedited RCU grace periods.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+                              bool wake)
 {
-       return;
 }
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
- * Because preemptable RCU does not exist, it never has any work to do.
+ * Because preemptible RCU does not exist, it never has any work to do.
  */
 static int rcu_preempt_pending(int cpu)
 {
@@ -1028,15 +1169,15 @@ static int rcu_preempt_pending(int cpu)
 }
 
 /*
- * Because preemptable RCU does not exist, it never needs any CPU.
+ * Because preemptible RCU does not exist, it never has callbacks.
  */
-static int rcu_preempt_needs_cpu(int cpu)
+static int rcu_preempt_cpu_has_callbacks(int cpu)
 {
        return 0;
 }
 
 /*
- * Because preemptable RCU does not exist, rcu_barrier() is just
+ * Because preemptible RCU does not exist, rcu_barrier() is just
  * another name for rcu_barrier_sched().
  */
 void rcu_barrier(void)
@@ -1046,7 +1187,7 @@ void rcu_barrier(void)
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
 /*
- * Because preemptable RCU does not exist, there is no per-CPU
+ * Because preemptible RCU does not exist, there is no per-CPU
  * data to initialize.
  */
 static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
@@ -1054,14 +1195,14 @@ static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
 }
 
 /*
- * Because there is no preemptable RCU, there are no callbacks to move.
+ * Because there is no preemptible RCU, there is no cleanup to do.
  */
-static void rcu_preempt_send_cbs_to_online(void)
+static void rcu_preempt_cleanup_dying_cpu(void)
 {
 }
 
 /*
- * Because preemptable RCU does not exist, it need not be initialized.
+ * Because preemptible RCU does not exist, it need not be initialized.
  */
 static void __init __rcu_init_preempt(void)
 {
@@ -1163,12 +1304,12 @@ static int rcu_boost(struct rcu_node *rnp)
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&mtx, t);
        t->rcu_boost_mutex = &mtx;
-       t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
 
-       return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
+       return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
+              ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
 
 /*
@@ -1179,12 +1320,7 @@ static int rcu_boost(struct rcu_node *rnp)
  */
 static void rcu_boost_kthread_timer(unsigned long arg)
 {
-       unsigned long flags;
-       struct rcu_node *rnp = (struct rcu_node *)arg;
-
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       invoke_rcu_node_kthread(rnp);
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       invoke_rcu_node_kthread((struct rcu_node *)arg);
 }
 
 /*
@@ -1197,13 +1333,12 @@ static int rcu_boost_kthread(void *arg)
        int spincnt = 0;
        int more2boost;
 
+       trace_rcu_utilization("Start boost kthread@init");
        for (;;) {
                rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
-               wait_event_interruptible(rnp->boost_wq, rnp->boost_tasks ||
-                                                       rnp->exp_tasks ||
-                                                       kthread_should_stop());
-               if (kthread_should_stop())
-                       break;
+               trace_rcu_utilization("End boost kthread@rcu_wait");
+               rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
+               trace_rcu_utilization("Start boost kthread@rcu_wait");
                rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                more2boost = rcu_boost(rnp);
                if (more2boost)
@@ -1211,11 +1346,14 @@ static int rcu_boost_kthread(void *arg)
                else
                        spincnt = 0;
                if (spincnt > 10) {
+                       trace_rcu_utilization("End boost kthread@rcu_yield");
                        rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+                       trace_rcu_utilization("Start boost kthread@rcu_yield");
                        spincnt = 0;
                }
        }
-       rnp->boost_kthread_status = RCU_KTHREAD_STOPPED;
+       /* NOTREACHED */
+       trace_rcu_utilization("End boost kthread@notreached");
        return 0;
 }
 
@@ -1225,14 +1363,17 @@ static int rcu_boost_kthread(void *arg)
  * kthread to start boosting them.  If there is an expedited grace
  * period in progress, it is always time to boost.
  *
- * The caller must hold rnp->lock.
+ * The caller must hold rnp->lock, which this function releases,
+ * but irqs remain disabled.  The ->boost_kthread_task is immortal,
+ * so we don't need to worry about it going away.
  */
-static void rcu_initiate_boost(struct rcu_node *rnp)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
        struct task_struct *t;
 
        if (!rcu_preempt_blocked_readers_cgp(rnp) && rnp->exp_tasks == NULL) {
                rnp->n_balk_exp_gp_tasks++;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
        if (rnp->exp_tasks != NULL ||
@@ -1242,11 +1383,38 @@ static void rcu_initiate_boost(struct rcu_node *rnp)
             ULONG_CMP_GE(jiffies, rnp->boost_time))) {
                if (rnp->exp_tasks == NULL)
                        rnp->boost_tasks = rnp->gp_tasks;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
                t = rnp->boost_kthread_task;
                if (t != NULL)
                        wake_up_process(t);
-       } else
+       } else {
                rcu_initiate_boost_trace(rnp);
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       }
+}
+
+/*
+ * Wake up the per-CPU kthread to invoke RCU callbacks.
+ */
+static void invoke_rcu_callbacks_kthread(void)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __this_cpu_write(rcu_cpu_has_work, 1);
+       if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+           current != __this_cpu_read(rcu_cpu_kthread_task))
+               wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+       local_irq_restore(flags);
+}
+
+/*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+       return __get_cpu_var(rcu_cpu_kthread_task) == current;
 }
 
 /*
@@ -1275,14 +1443,6 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 }
 
 /*
- * Initialize the RCU-boost waitqueue.
- */
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
-{
-       init_waitqueue_head(&rnp->boost_wq);
-}
-
-/*
  * Create an RCU-boost kthread for the specified node if one does not
  * already exist.  We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
@@ -1297,199 +1457,447 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 
        if (&rcu_preempt_state != rsp)
                return 0;
+       rsp->boost = 1;
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
-                          "rcub%d", rnp_index);
+                          "rcub/%d", rnp_index);
        if (IS_ERR(t))
                return PTR_ERR(t);
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       wake_up_process(t);
-       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+       wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
+/*
+ * Stop RCU's per-CPU kthread when its CPU goes offline.
+ */
+static void rcu_stop_cpu_kthread(int cpu)
 {
-       unsigned long flags;
        struct task_struct *t;
 
-       raw_spin_lock_irqsave(&rnp->lock, flags);
-       t = rnp->boost_kthread_task;
-       rnp->boost_kthread_task = NULL;
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       if (t != NULL)
+       /* Stop the CPU's kthread. */
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t != NULL) {
+               per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
                kthread_stop(t);
+       }
 }
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-#else /* #ifdef CONFIG_RCU_BOOST */
+static void rcu_kthread_do_work(void)
+{
+       rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
+       rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
+       rcu_preempt_do_callbacks();
+}
 
-static void rcu_initiate_boost(struct rcu_node *rnp)
+/*
+ * Wake up the specified per-rcu_node-structure kthread.
+ * Because the per-rcu_node kthreads are immortal, we don't need
+ * to do anything to keep them alive.
+ */
+static void invoke_rcu_node_kthread(struct rcu_node *rnp)
 {
+       struct task_struct *t;
+
+       t = rnp->node_kthread_task;
+       if (t != NULL)
+               wake_up_process(t);
 }
 
-static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
-                                         cpumask_var_t cm)
+/*
+ * Set the specified CPU's kthread to run RT or not, as specified by
+ * the to_rt argument.  The CPU-hotplug locks are held, so the task
+ * is not going away.
+ */
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
+       int policy;
+       struct sched_param sp;
+       struct task_struct *t;
+
+       t = per_cpu(rcu_cpu_kthread_task, cpu);
+       if (t == NULL)
+               return;
+       if (to_rt) {
+               policy = SCHED_FIFO;
+               sp.sched_priority = RCU_KTHREAD_PRIO;
+       } else {
+               policy = SCHED_NORMAL;
+               sp.sched_priority = 0;
+       }
+       sched_setscheduler_nocheck(t, policy, &sp);
 }
 
-static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
+/*
+ * Timer handler to initiate the waking up of per-CPU kthreads that
+ * have yielded the CPU due to excess numbers of RCU callbacks.
+ * We wake up the per-rcu_node kthread, which in turn will wake up
+ * the booster kthread.
+ */
+static void rcu_cpu_kthread_timer(unsigned long arg)
 {
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
+       struct rcu_node *rnp = rdp->mynode;
+
+       atomic_or(rdp->grpmask, &rnp->wakemask);
+       invoke_rcu_node_kthread(rnp);
 }
 
-static void __init rcu_init_boost_waitqueue(struct rcu_node *rnp)
+/*
+ * Drop to non-real-time priority and yield, but only after posting a
+ * timer that will cause us to regain our real-time priority if we
+ * remain preempted.  Either way, we restore our real-time priority
+ * before returning.
+ */
+static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
 {
+       struct sched_param sp;
+       struct timer_list yield_timer;
+       int prio = current->rt_priority;
+
+       setup_timer_on_stack(&yield_timer, f, arg);
+       mod_timer(&yield_timer, jiffies + 2);
+       sp.sched_priority = 0;
+       sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
+       set_user_nice(current, 19);
+       schedule();
+       set_user_nice(current, 0);
+       sp.sched_priority = prio;
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+       del_timer(&yield_timer);
+}
+
+/*
+ * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
+ * This can happen while the corresponding CPU is either coming online
+ * or going offline.  We cannot wait until the CPU is fully online
+ * before starting the kthread, because the various notifier functions
+ * can wait for RCU grace periods.  So we park rcu_cpu_kthread() until
+ * the corresponding CPU is online.
+ *
+ * Return 1 if the kthread needs to stop, 0 otherwise.
+ *
+ * Caller must disable bh.  This function can momentarily enable it.
+ */
+static int rcu_cpu_kthread_should_stop(int cpu)
+{
+       while (cpu_is_offline(cpu) ||
+              !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
+              smp_processor_id() != cpu) {
+               if (kthread_should_stop())
+                       return 1;
+               per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+               per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
+               local_bh_enable();
+               schedule_timeout_uninterruptible(1);
+               if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
+                       set_cpus_allowed_ptr(current, cpumask_of(cpu));
+               local_bh_disable();
+       }
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+       return 0;
 }
 
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
-                                                struct rcu_node *rnp,
-                                                int rnp_index)
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
+ */
+static int rcu_cpu_kthread(void *arg)
 {
+       int cpu = (int)(long)arg;
+       unsigned long flags;
+       int spincnt = 0;
+       unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
+       char work;
+       char *workp = &per_cpu(rcu_cpu_has_work, cpu);
+
+       trace_rcu_utilization("Start CPU kthread@init");
+       for (;;) {
+               *statusp = RCU_KTHREAD_WAITING;
+               trace_rcu_utilization("End CPU kthread@rcu_wait");
+               rcu_wait(*workp != 0 || kthread_should_stop());
+               trace_rcu_utilization("Start CPU kthread@rcu_wait");
+               local_bh_disable();
+               if (rcu_cpu_kthread_should_stop(cpu)) {
+                       local_bh_enable();
+                       break;
+               }
+               *statusp = RCU_KTHREAD_RUNNING;
+               per_cpu(rcu_cpu_kthread_loops, cpu)++;
+               local_irq_save(flags);
+               work = *workp;
+               *workp = 0;
+               local_irq_restore(flags);
+               if (work)
+                       rcu_kthread_do_work();
+               local_bh_enable();
+               if (*workp != 0)
+                       spincnt++;
+               else
+                       spincnt = 0;
+               if (spincnt > 10) {
+                       *statusp = RCU_KTHREAD_YIELDING;
+                       trace_rcu_utilization("End CPU kthread@rcu_yield");
+                       rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                       trace_rcu_utilization("Start CPU kthread@rcu_yield");
+                       spincnt = 0;
+               }
+       }
+       *statusp = RCU_KTHREAD_STOPPED;
+       trace_rcu_utilization("End CPU kthread@term");
        return 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Spawn a per-CPU kthread, setting up affinity and priority.
+ * Because the CPU hotplug lock is held, no other CPU will be attempting
+ * to manipulate rcu_cpu_kthread_task.  There might be another CPU
+ * attempting to access it during boot, but the locking in kthread_bind()
+ * will enforce sufficient ordering.
+ *
+ * Please note that we cannot simply refuse to wake up the per-CPU
+ * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
+ * which can result in softlockup complaints if the task ends up being
+ * idle for more than a couple of minutes.
+ *
+ * However, please note also that we cannot bind the per-CPU kthread to its
+ * CPU until that CPU is fully online.  We also cannot wait until the
+ * CPU is fully online before we create its per-CPU kthread, as this would
+ * deadlock the system when CPU notifiers tried waiting for grace
+ * periods.  So we bind the per-CPU kthread to its CPU only if the CPU
+ * is online.  If its CPU is not yet fully online, then the code in
+ * rcu_cpu_kthread() will wait until it is fully online, and then do
+ * the binding.
+ */
+static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
+{
+       struct sched_param sp;
+       struct task_struct *t;
+
+       if (!rcu_scheduler_fully_active ||
+           per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
+               return 0;
+       t = kthread_create_on_node(rcu_cpu_kthread,
+                                  (void *)(long)cpu,
+                                  cpu_to_node(cpu),
+                                  "rcuc/%d", cpu);
+       if (IS_ERR(t))
+               return PTR_ERR(t);
+       if (cpu_online(cpu))
+               kthread_bind(t, cpu);
+       per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
+       WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
+       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+       per_cpu(rcu_cpu_kthread_task, cpu) = t;
+       wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
+       return 0;
+}
 
-static void rcu_stop_boost_kthread(struct rcu_node *rnp)
+/*
+ * Per-rcu_node kthread, which is in charge of waking up the per-CPU
+ * kthreads when needed.  We ignore requests to wake up kthreads
+ * for offline CPUs, which is OK because force_quiescent_state()
+ * takes care of this case.
+ */
+static int rcu_node_kthread(void *arg)
 {
+       int cpu;
+       unsigned long flags;
+       unsigned long mask;
+       struct rcu_node *rnp = (struct rcu_node *)arg;
+       struct sched_param sp;
+       struct task_struct *t;
+
+       for (;;) {
+               rnp->node_kthread_status = RCU_KTHREAD_WAITING;
+               rcu_wait(atomic_read(&rnp->wakemask) != 0);
+               rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               mask = atomic_xchg(&rnp->wakemask, 0);
+               rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
+                       if ((mask & 0x1) == 0)
+                               continue;
+                       preempt_disable();
+                       t = per_cpu(rcu_cpu_kthread_task, cpu);
+                       if (!cpu_online(cpu) || t == NULL) {
+                               preempt_enable();
+                               continue;
+                       }
+                       per_cpu(rcu_cpu_has_work, cpu) = 1;
+                       sp.sched_priority = RCU_KTHREAD_PRIO;
+                       sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+                       preempt_enable();
+               }
+       }
+       /* NOTREACHED */
+       rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
+       return 0;
 }
 
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+/*
+ * Set the per-rcu_node kthread's affinity to cover all CPUs that are
+ * served by the rcu_node in question.  The CPU hotplug lock is still
+ * held, so the value of rnp->qsmaskinit will be stable.
+ *
+ * We don't include outgoingcpu in the affinity set; use -1 if there is
+ * no outgoing CPU.  If there are no CPUs left in the affinity set,
+ * this function allows the kthread to execute on any CPU.
+ */
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+       cpumask_var_t cm;
+       int cpu;
+       unsigned long mask = rnp->qsmaskinit;
 
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
+       if (rnp->node_kthread_task == NULL)
+               return;
+       if (!alloc_cpumask_var(&cm, GFP_KERNEL))
+               return;
+       cpumask_clear(cm);
+       for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
+               if ((mask & 0x1) && cpu != outgoingcpu)
+                       cpumask_set_cpu(cpu, cm);
+       if (cpumask_weight(cm) == 0) {
+               cpumask_setall(cm);
+               for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
+                       cpumask_clear_cpu(cpu, cm);
+               WARN_ON_ONCE(cpumask_weight(cm) == 0);
+       }
+       set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
+       rcu_boost_kthread_setaffinity(rnp, cm);
+       free_cpumask_var(cm);
+}
 
-#ifndef CONFIG_SMP
+/*
+ * Spawn a per-rcu_node kthread, setting priority and affinity.
+ * Called during boot before online/offline can happen, or, if
+ * during runtime, with the main CPU-hotplug locks held.  So only
+ * one of these can be executing at a time.
+ */
+static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
+                                               struct rcu_node *rnp)
+{
+       unsigned long flags;
+       int rnp_index = rnp - &rsp->node[0];
+       struct sched_param sp;
+       struct task_struct *t;
+
+       if (!rcu_scheduler_fully_active ||
+           rnp->qsmaskinit == 0)
+               return 0;
+       if (rnp->node_kthread_task == NULL) {
+               t = kthread_create(rcu_node_kthread, (void *)rnp,
+                                  "rcun/%d", rnp_index);
+               if (IS_ERR(t))
+                       return PTR_ERR(t);
+               raw_spin_lock_irqsave(&rnp->lock, flags);
+               rnp->node_kthread_task = t;
+               raw_spin_unlock_irqrestore(&rnp->lock, flags);
+               sp.sched_priority = 99;
+               sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
+               wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
+       }
+       return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
+}
 
-void synchronize_sched_expedited(void)
+/*
+ * Spawn all kthreads -- called as soon as the scheduler is running.
+ */
+static int __init rcu_spawn_kthreads(void)
 {
-       cond_resched();
+       int cpu;
+       struct rcu_node *rnp;
+
+       rcu_scheduler_fully_active = 1;
+       for_each_possible_cpu(cpu) {
+               per_cpu(rcu_cpu_has_work, cpu) = 0;
+               if (cpu_online(cpu))
+                       (void)rcu_spawn_one_cpu_kthread(cpu);
+       }
+       rnp = rcu_get_root(rcu_state);
+       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       if (NUM_RCU_NODES > 1) {
+               rcu_for_each_leaf_node(rcu_state, rnp)
+                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       }
+       return 0;
 }
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
+early_initcall(rcu_spawn_kthreads);
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+       struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
+       struct rcu_node *rnp = rdp->mynode;
 
-#else /* #ifndef CONFIG_SMP */
+       /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
+       if (rcu_scheduler_fully_active) {
+               (void)rcu_spawn_one_cpu_kthread(cpu);
+               if (rnp->node_kthread_task == NULL)
+                       (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
+       }
+}
 
-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
+#else /* #ifdef CONFIG_RCU_BOOST */
 
-static int synchronize_sched_expedited_cpu_stop(void *data)
+static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
 {
-       /*
-        * There must be a full memory barrier on each affected CPU
-        * between the time that try_stop_cpus() is called and the
-        * time that it returns.
-        *
-        * In the current initial implementation of cpu_stop, the
-        * above condition is already met when the control reaches
-        * this point and the following smp_mb() is not strictly
-        * necessary.  Do smp_mb() anyway for documentation and
-        * robustness against future implementation changes.
-        */
-       smp_mb(); /* See above comment block. */
-       return 0;
+       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-/*
- * Wait for an rcu-sched grace period to elapse, but use "big hammer"
- * approach to force grace period to end quickly.  This consumes
- * significant time on all CPUs, and is thus not recommended for
- * any sort of common-case code.
- *
- * Note that it is illegal to call this function while holding any
- * lock that is acquired by a CPU-hotplug notifier.  Failing to
- * observe this restriction will result in deadlock.
- *
- * This implementation can be thought of as an application of ticket
- * locking to RCU, with sync_sched_expedited_started and
- * sync_sched_expedited_done taking on the roles of the halves
- * of the ticket-lock word.  Each task atomically increments
- * sync_sched_expedited_started upon entry, snapshotting the old value,
- * then attempts to stop all the CPUs.  If this succeeds, then each
- * CPU will have executed a context switch, resulting in an RCU-sched
- * grace period.  We are then done, so we use atomic_cmpxchg() to
- * update sync_sched_expedited_done to match our snapshot -- but
- * only if someone else has not already advanced past our snapshot.
- *
- * On the other hand, if try_stop_cpus() fails, we check the value
- * of sync_sched_expedited_done.  If it has advanced past our
- * initial snapshot, then someone else must have forced a grace period
- * some time after we took our snapshot.  In this case, our work is
- * done for us, and we can simply return.  Otherwise, we try again,
- * but keep our initial snapshot for purposes of checking for someone
- * doing our work for us.
- *
- * If we fail too many times in a row, we fall back to synchronize_sched().
- */
-void synchronize_sched_expedited(void)
+static void invoke_rcu_callbacks_kthread(void)
 {
-       int firstsnap, s, snap, trycount = 0;
+       WARN_ON_ONCE(1);
+}
 
-       /* Note that atomic_inc_return() implies full memory barrier. */
-       firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
-       get_online_cpus();
+static bool rcu_is_callbacks_kthread(void)
+{
+       return false;
+}
 
-       /*
-        * Each pass through the following loop attempts to force a
-        * context switch on each CPU.
-        */
-       while (try_stop_cpus(cpu_online_mask,
-                            synchronize_sched_expedited_cpu_stop,
-                            NULL) == -EAGAIN) {
-               put_online_cpus();
+static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
+{
+}
 
-               /* No joy, try again later.  Or just synchronize_sched(). */
-               if (trycount++ < 10)
-                       udelay(trycount * num_online_cpus());
-               else {
-                       synchronize_sched();
-                       return;
-               }
+#ifdef CONFIG_HOTPLUG_CPU
 
-               /* Check to see if someone else did our work for us. */
-               s = atomic_read(&sync_sched_expedited_done);
-               if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
-                       smp_mb(); /* ensure test happens before caller kfree */
-                       return;
-               }
+static void rcu_stop_cpu_kthread(int cpu)
+{
+}
 
-               /*
-                * Refetching sync_sched_expedited_started allows later
-                * callers to piggyback on our grace period.  We subtract
-                * 1 to get the same token that the last incrementer got.
-                * We retry after they started, so our grace period works
-                * for them, and they started after our first try, so their
-                * grace period works for us.
-                */
-               get_online_cpus();
-               snap = atomic_read(&sync_sched_expedited_started) - 1;
-               smp_mb(); /* ensure read is before try_stop_cpus(). */
-       }
+#endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
-       /*
-        * Everyone up to our most recent fetch is covered by our grace
-        * period.  Update the counter, but only if our work is still
-        * relevant -- which it won't be if someone who started later
-        * than we did beat us to the punch.
-        */
-       do {
-               s = atomic_read(&sync_sched_expedited_done);
-               if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
-                       smp_mb(); /* ensure test happens before caller kfree */
-                       break;
-               }
-       } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
+static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
+{
+}
 
-       put_online_cpus();
+static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
+{
 }
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
 
-#endif /* #else #ifndef CONFIG_SMP */
+static int __init rcu_scheduler_really_started(void)
+{
+       rcu_scheduler_fully_active = 1;
+       return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
+static void __cpuinit rcu_prepare_kthreads(int cpu)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #if !defined(CONFIG_RCU_FAST_NO_HZ)
 
@@ -1499,113 +1907,395 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
  * 1 if so.  This function is part of the RCU implementation; it is -not-
  * an exported member of the RCU API.
  *
- * Because we have preemptible RCU, just check whether this CPU needs
- * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
- * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
+ * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
+ * any flavor of RCU.
  */
 int rcu_needs_cpu(int cpu)
 {
-       return rcu_needs_cpu_quick_check(cpu);
+       return rcu_cpu_has_callbacks(cpu);
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
+ * after it.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
 }
 
 /*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
- * entry is not configured, so we never do need to.
+ * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
+ * is nothing.
  */
-static void rcu_needs_cpu_flush(void)
+static void rcu_prepare_for_idle(int cpu)
 {
 }
 
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#define RCU_NEEDS_CPU_FLUSHES 5
+/*
+ * This code is invoked when a CPU goes idle, at which point we want
+ * to have the CPU do everything required for RCU so that it can enter
+ * the energy-efficient dyntick-idle mode.  This is handled by a
+ * state machine implemented by rcu_prepare_for_idle() below.
+ *
+ * The following four preprocessor symbols control this state machine:
+ *
+ * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
+ *     to satisfy RCU.  Beyond this point, it is better to incur a periodic
+ *     scheduling-clock interrupt than to loop through the state machine
+ *     at full power.
+ * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
+ *     optional if RCU does not need anything immediately from this
+ *     CPU, even if this CPU still has RCU callbacks queued.  The first
+ *     few passes through the state machine are mandatory: we need to give
+ *     the state machine a chance to communicate a quiescent state
+ *     to the RCU core.
+ * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
+ *     to sleep in dyntick-idle mode with RCU callbacks pending.  This
+ *     is sized to be roughly one RCU grace period.  Those energy-efficiency
+ *     benchmarkers who might otherwise be tempted to set this to a large
+ *     number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
+ *     system.  And if you are -that- concerned about energy efficiency,
+ *     just power the system down and be done with it!
+ * RCU_IDLE_LAZY_GP_DELAY gives the number of jiffies that a CPU is
+ *     permitted to sleep in dyntick-idle mode with only lazy RCU
+ *     callbacks pending.  Setting this too high can OOM your system.
+ *
+ * The values below work well in practice.  If future workloads require
+ * adjustment, they can be converted into kernel config parameters, though
+ * making the state machine smarter might be a better option.
+ */
+#define RCU_IDLE_FLUSHES 5             /* Number of dyntick-idle tries. */
+#define RCU_IDLE_OPT_FLUSHES 3         /* Optional dyntick-idle tries. */
+#define RCU_IDLE_GP_DELAY 6            /* Roughly one grace period. */
+#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ)        /* Roughly six seconds. */
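+
+/*
+ * To trace the flush counters concretely: the first pass through
+ * rcu_prepare_for_idle() (below) initializes rcu_dyntick_drain to
+ * RCU_IDLE_FLUSHES (5), and each later pass decrements it.  Passes that
+ * see the counter above RCU_IDLE_OPT_FLUSHES (3) always push callbacks
+ * through; once it is at 3 or below, a pass may instead enter
+ * dyntick-idle with callbacks still queued, provided RCU needs nothing
+ * immediate from this CPU.  When the decrement reaches zero, the CPU
+ * gives up and enters a one-jiffy holdoff period.
+ */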
+
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
+static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
+static ktime_t rcu_idle_gp_wait;       /* If some non-lazy callbacks. */
+static ktime_t rcu_idle_lazy_gp_wait;  /* If only lazy callbacks. */
 
 /*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so.  This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
+ * Allow the CPU to enter dyntick-idle mode if any of the following hold:
+ * (1) there are no callbacks on this CPU; (2) this CPU has not yet
+ * attempted to enter dyntick-idle mode; or (3) this CPU is in the process
+ * of attempting to enter dyntick-idle mode.  Otherwise, if we have
+ * recently tried and failed to enter dyntick-idle mode, we refuse to try
+ * again.  After all, it is better to incur scheduling-clock interrupts
+ * than to spin continuously for the same duration!
+ */
+int rcu_needs_cpu(int cpu)
+{
+       /* If no callbacks, RCU doesn't need the CPU. */
+       if (!rcu_cpu_has_callbacks(cpu))
+               return 0;
+       /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
+       return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
+}
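+
+/*
+ * Concretely: when rcu_prepare_for_idle() gives up on a CPU, it stores
+ * the current jiffies value in rcu_dyntick_holdoff, so the comparison
+ * above returns 1 and the scheduling-clock tick stays on.  One jiffy
+ * later the comparison fails, the holdoff lapses, and the CPU may again
+ * attempt to enter dyntick-idle mode.
+ */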
+
+/*
+ * Does the specified flavor of RCU have non-lazy callbacks pending on
+ * the specified CPU?  Both RCU flavor and CPU are specified by the
+ * rcu_data structure.
+ */
+static bool __rcu_cpu_has_nonlazy_callbacks(struct rcu_data *rdp)
+{
+       return rdp->qlen != rdp->qlen_lazy;
+}
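+
+/*
+ * For example, a CPU that has queued three callbacks through kfree_rcu()
+ * and one through call_rcu() has rdp->qlen == 4 but rdp->qlen_lazy == 3,
+ * so the test above reports non-lazy work pending.  The two counts match
+ * only when every queued callback does nothing but free memory.
+ */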
+
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+/*
+ * Are there non-lazy RCU-preempt callbacks?  (There cannot be if there
+ * is no RCU-preempt in the kernel.)
+ */
+static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
+{
+       struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
+
+       return __rcu_cpu_has_nonlazy_callbacks(rdp);
+}
+
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+static bool rcu_preempt_cpu_has_nonlazy_callbacks(int cpu)
+{
+       return false;
+}
+
+#endif /* else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
+/*
+ * Does any flavor of RCU have non-lazy callbacks on the specified CPU?
+ */
+static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
+{
+       return __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_sched_data, cpu)) ||
+              __rcu_cpu_has_nonlazy_callbacks(&per_cpu(rcu_bh_data, cpu)) ||
+              rcu_preempt_cpu_has_nonlazy_callbacks(cpu);
+}
+
+/*
+ * Timer handler used to force the CPU to start pushing its remaining RCU
+ * callbacks in the case where it entered dyntick-idle mode with callbacks
+ * pending.  The handler doesn't really need to do anything because the
+ * real work is done upon re-entry to idle, or by the next scheduling-clock
+ * interrupt should idle not be re-entered.
+ */
+static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+{
+       trace_rcu_prep_idle("Timer");
+       return HRTIMER_NORESTART;
+}
+
+/*
+ * Initialize the timer used to pull CPUs out of dyntick-idle mode.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+       static int firsttime = 1;
+       struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+
+       hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hrtp->function = rcu_idle_gp_timer_func;
+       if (firsttime) {
+               unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
+
+               rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
+               upj = jiffies_to_usecs(RCU_IDLE_LAZY_GP_DELAY);
+               rcu_idle_lazy_gp_wait = ns_to_ktime(upj * (u64)1000);
+               firsttime = 0;
+       }
+}
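+
+/*
+ * To make the conversion above concrete, assume HZ=1000 (HZ is
+ * configuration-dependent): jiffies_to_usecs(RCU_IDLE_GP_DELAY) yields
+ * 6000, so rcu_idle_gp_wait becomes ns_to_ktime(6000 * 1000), a
+ * six-millisecond relative timeout.  rcu_idle_lazy_gp_wait works out to
+ * six seconds regardless of HZ, because RCU_IDLE_LAZY_GP_DELAY is
+ * defined as 6 * HZ jiffies.
+ */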
+
+/*
+ * Clean up for exit from idle.  Because we are exiting from idle, there
+ * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * do nothing if this timer is not active, so just cancel it unconditionally.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
+       hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+}
+
+/*
+ * Check to see if any RCU-related work can be done by the current CPU,
+ * and if so, schedule a softirq to get it done.  This function is part
+ * of the RCU implementation; it is -not- an exported member of the RCU API.
  *
- * Because we are not supporting preemptible RCU, attempt to accelerate
- * any current grace periods so that RCU no longer needs this CPU, but
- * only if all other CPUs are already in dynticks-idle mode.  This will
- * allow the CPU cores to be powered down immediately, as opposed to after
- * waiting many milliseconds for grace periods to elapse.
+ * The idea is for the current CPU to clear out all work required by the
+ * RCU core for the current grace period, so that this CPU can be permitted
+ * to enter dyntick-idle mode.  In some cases, it will need to be awakened
+ * at the end of the grace period by whatever CPU ends the grace period.
+ * This allows CPUs to go dyntick-idle more quickly, and to reduce the
+ * number of wakeups by a modest integer factor.
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
  * disabled, we do one pass of force_quiescent_state(), then do a
- * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked
+ * call to invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ *
+ * The caller must have disabled interrupts.
  */
-int rcu_needs_cpu(int cpu)
+static void rcu_prepare_for_idle(int cpu)
 {
-       int c = 0;
-       int snap;
-       int thatcpu;
-
-       /* Check for being in the holdoff period. */
-       if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
-               return rcu_needs_cpu_quick_check(cpu);
-
-       /* Don't bother unless we are the last non-dyntick-idle CPU. */
-       for_each_online_cpu(thatcpu) {
-               if (thatcpu == cpu)
-                       continue;
-               snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
-                                                    thatcpu).dynticks);
-               smp_mb(); /* Order sampling of snap with end of grace period. */
-               if ((snap & 0x1) != 0) {
-                       per_cpu(rcu_dyntick_drain, cpu) = 0;
-                       per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-                       return rcu_needs_cpu_quick_check(cpu);
-               }
+       /*
+        * If there are no callbacks on this CPU, enter dyntick-idle mode.
+        * Also reset state to avoid prejudicing later attempts.
+        */
+       if (!rcu_cpu_has_callbacks(cpu)) {
+               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+               per_cpu(rcu_dyntick_drain, cpu) = 0;
+               trace_rcu_prep_idle("No callbacks");
+               return;
+       }
+
+       /*
+        * If in holdoff mode, just return.  We will presumably have
+        * refrained from disabling the scheduling-clock tick.
+        */
+       if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+               trace_rcu_prep_idle("In holdoff");
+               return;
        }
 
        /* Check and update the rcu_dyntick_drain sequencing. */
        if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* First time through, initialize the counter. */
-               per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+               per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
+       } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+                  !rcu_pending(cpu) &&
+                  !local_softirq_pending()) {
+               /* Can we go dyntick-idle despite still having callbacks? */
+               trace_rcu_prep_idle("Dyntick with callbacks");
+               per_cpu(rcu_dyntick_drain, cpu) = 0;
+               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
+               if (rcu_cpu_has_nonlazy_callbacks(cpu))
+                       hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+                                     rcu_idle_gp_wait, HRTIMER_MODE_REL);
+               else
+                       hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+                                     rcu_idle_lazy_gp_wait, HRTIMER_MODE_REL);
+               return; /* Nothing more to do immediately. */
        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* We have hit the limit, so time to give up. */
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-               return rcu_needs_cpu_quick_check(cpu);
+               trace_rcu_prep_idle("Begin holdoff");
+               invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
+               return;
        }
 
-       /* Do one step pushing remaining RCU callbacks through. */
+       /*
+        * Do one step of pushing the remaining RCU callbacks through
+        * the RCU core state machine.
+        */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+       if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
+               rcu_preempt_qs(cpu);
+               force_quiescent_state(&rcu_preempt_state, 0);
+       }
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
        if (per_cpu(rcu_sched_data, cpu).nxtlist) {
                rcu_sched_qs(cpu);
                force_quiescent_state(&rcu_sched_state, 0);
-               c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
        }
        if (per_cpu(rcu_bh_data, cpu).nxtlist) {
                rcu_bh_qs(cpu);
                force_quiescent_state(&rcu_bh_state, 0);
-               c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
        }
 
-       /* If RCU callbacks are still pending, RCU still needs this CPU. */
-       if (c)
-               invoke_rcu_cpu_kthread();
-       return c;
+       /*
+        * If RCU callbacks are still pending, RCU still needs this CPU.
+        * So try forcing the callbacks through the grace period.
+        */
+       if (rcu_cpu_has_callbacks(cpu)) {
+               trace_rcu_prep_idle("More callbacks");
+               invoke_rcu_core();
+       } else
+               trace_rcu_prep_idle("Callbacks drained");
+}
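+
+/*
+ * A rough sketch of how the pieces above fit together (caller names and
+ * ordering simplified; the real call sites live in the idle-entry and
+ * idle-exit paths, not in this file):
+ *
+ *	if (!rcu_needs_cpu(cpu))
+ *		... the scheduling-clock tick may be stopped ...
+ *	local_irq_disable();
+ *	rcu_prepare_for_idle(cpu);	... run the state machine ...
+ *	local_irq_enable();
+ *	... CPU sleeps; rcu_idle_gp_timer may fire to wake it ...
+ *	rcu_cleanup_after_idle(cpu);	... idle exit: cancel the timer ...
+ */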
+
+#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
+
+#ifdef CONFIG_RCU_CPU_STALL_INFO
+
+#ifdef CONFIG_RCU_FAST_NO_HZ
+
+static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
+{
+       struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+
+       sprintf(cp, "drain=%d %c timer=%lld",
+               per_cpu(rcu_dyntick_drain, cpu),
+               per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.',
+               hrtimer_active(hrtp)
+                       ? ktime_to_us(hrtimer_get_remaining(hrtp))
+                       : -1);
+}
+
+#else /* #ifdef CONFIG_RCU_FAST_NO_HZ */
+
+static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
+{
+       *cp = '\0';     /* Terminate the string: no RCU_FAST_NO_HZ state to report. */
+}
+
+#endif /* #else #ifdef CONFIG_RCU_FAST_NO_HZ */
+
+/* Initiate the stall-info list. */
+static void print_cpu_stall_info_begin(void)
+{
+       printk(KERN_CONT "\n");
 }
 
 /*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.
+ * Print out diagnostic information for the specified stalled CPU.
+ *
+ * If the specified CPU is aware of the current RCU grace period
+ * (flavor specified by rsp), then print the number of scheduling
+ * clock interrupts the CPU has taken during the time that it has
+ * been aware.  Otherwise, print the number of RCU grace periods
+ * that this CPU is ignorant of, for example, "1" if the CPU was
+ * aware of the previous grace period.
+ *
+ * Also print out idle and (if CONFIG_RCU_FAST_NO_HZ) idle-entry info.
  */
-static void rcu_needs_cpu_flush(void)
+static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
 {
-       int cpu = smp_processor_id();
-       unsigned long flags;
+       char fast_no_hz[72];
+       struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
+       struct rcu_dynticks *rdtp = rdp->dynticks;
+       char *ticks_title;
+       unsigned long ticks_value;
 
-       if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
-               return;
-       local_irq_save(flags);
-       (void)rcu_needs_cpu(cpu);
-       local_irq_restore(flags);
+       if (rsp->gpnum == rdp->gpnum) {
+               ticks_title = "ticks this GP";
+               ticks_value = rdp->ticks_this_gp;
+       } else {
+               ticks_title = "GPs behind";
+               ticks_value = rsp->gpnum - rdp->gpnum;
+       }
+       print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
+       printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
+              cpu, ticks_value, ticks_title,
+              atomic_read(&rdtp->dynticks) & 0xfff,
+              rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
+              fast_no_hz);
 }
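+
+/*
+ * The resulting line looks something like the following (all values
+ * illustrative), with the trailing drain/timer fields present only when
+ * CONFIG_RCU_FAST_NO_HZ=y:
+ *
+ *	0: (1 GPs behind) idle=a40/0/0 drain=0 . timer=-1
+ */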
 
-#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
+/* Terminate the stall-info list. */
+static void print_cpu_stall_info_end(void)
+{
+       printk(KERN_ERR "\t");
+}
+
+/* Zero ->ticks_this_gp for all flavors of RCU. */
+static void zero_cpu_stall_ticks(struct rcu_data *rdp)
+{
+       rdp->ticks_this_gp = 0;
+}
+
+/* Increment ->ticks_this_gp for all flavors of RCU. */
+static void increment_cpu_stall_ticks(void)
+{
+       __get_cpu_var(rcu_sched_data).ticks_this_gp++;
+       __get_cpu_var(rcu_bh_data).ticks_this_gp++;
+#ifdef CONFIG_TREE_PREEMPT_RCU
+       __get_cpu_var(rcu_preempt_data).ticks_this_gp++;
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
+}
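+
+/*
+ * These helpers are expected to be driven from rcutree.c rather than from
+ * this file: the scheduling-clock path increments the per-CPU tick counts
+ * once per interrupt, and the code that notices a new grace period zeroes
+ * them, so that "ticks this GP" reflects only the current grace period.
+ */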
+
+#else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */
+
+static void print_cpu_stall_info_begin(void)
+{
+       printk(KERN_CONT " {");
+}
+
+static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
+{
+       printk(KERN_CONT " %d", cpu);
+}
+
+static void print_cpu_stall_info_end(void)
+{
+       printk(KERN_CONT "} ");
+}
+
+static void zero_cpu_stall_ticks(struct rcu_data *rdp)
+{
+}
+
+static void increment_cpu_stall_ticks(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_INFO */