diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 3a0ae03..8bb35d7 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
 
+#define RCU_KTHREAD_PRIO 1
+
+#ifdef CONFIG_RCU_BOOST
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+#else
+#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
+#endif
+
 /*
  * Check the RCU kernel configuration parameters and print informative
  * messages about anything out of the ordinary.  If you like #ifdef, you
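
The RCU_KTHREAD_PRIO/RCU_BOOST_PRIO macros added above decouple the priority of the boost kthreads from the default RCU kthread priority: with CONFIG_RCU_BOOST the configured CONFIG_RCU_BOOST_PRIO is used, otherwise the boost priority falls back to 1. A minimal sketch of where such a priority ends up, mirroring the sched_setscheduler_nocheck() call in the kthread-spawn hunk later in this diff (the helper name and the task pointer t are illustrative only):

	/* Illustrative helper; assumes the surrounding rcutree_plugin.h context. */
	static void example_apply_rcu_prio(struct task_struct *t)
	{
		struct sched_param sp;

		sp.sched_priority = RCU_BOOST_PRIO;	/* CONFIG_RCU_BOOST_PRIO or 1 */
		sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
	}
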
@@ -64,10 +72,11 @@ static void __init rcu_bootup_announce_oddness(void)
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
+struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -121,9 +130,11 @@ static void rcu_preempt_qs(int cpu)
 {
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 
-       rdp->passed_quiesc_completed = rdp->gpnum - 1;
+       rdp->passed_quiesce_gpnum = rdp->gpnum;
        barrier();
-       rdp->passed_quiesc = 1;
+       if (rdp->passed_quiesce == 0)
+               trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+       rdp->passed_quiesce = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -147,7 +158,7 @@ static void rcu_preempt_note_context_switch(int cpu)
        struct rcu_data *rdp;
        struct rcu_node *rnp;
 
-       if (t->rcu_read_lock_nesting &&
+       if (t->rcu_read_lock_nesting > 0 &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
@@ -189,7 +200,20 @@ static void rcu_preempt_note_context_switch(int cpu)
                        if (rnp->qsmask & rdp->grpmask)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
+               trace_rcu_preempt_task(rdp->rsp->name,
+                                      t->pid,
+                                      (rnp->qsmask & rdp->grpmask)
+                                      ? rnp->gpnum
+                                      : rnp->gpnum + 1);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       } else if (t->rcu_read_lock_nesting < 0 &&
+                  t->rcu_read_unlock_special) {
+
+               /*
+                * Complete exit from RCU read-side critical section on
+                * behalf of preempted instance of __rcu_read_unlock().
+                */
+               rcu_read_unlock_special(t);
        }
 
        /*
@@ -284,12 +308,16 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
        int empty;
        int empty_exp;
+       int empty_exp_now;
        unsigned long flags;
        struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+       struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        int special;
 
@@ -309,7 +337,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
        }
 
        /* Hardware IRQ handlers cannot block. */
-       if (in_irq()) {
+       if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }
@@ -335,6 +363,9 @@ static void rcu_read_unlock_special(struct task_struct *t)
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
+               t->rcu_blocked_node = NULL;
+               trace_rcu_unlock_preempted_task("rcu_preempt",
+                                               rnp->gpnum, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -342,38 +373,44 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
-               /* Snapshot and clear ->rcu_boosted with rcu_node lock held. */
-               if (t->rcu_boosted) {
-                       special |= RCU_READ_UNLOCK_BOOSTED;
-                       t->rcu_boosted = 0;
+               /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
+               if (t->rcu_boost_mutex) {
+                       rbmp = t->rcu_boost_mutex;
+                       t->rcu_boost_mutex = NULL;
                }
 #endif /* #ifdef CONFIG_RCU_BOOST */
-               t->rcu_blocked_node = NULL;
 
                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
-                * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
+                * Note that rcu_report_unblock_qs_rnp() releases rnp->lock,
+                * so we must take a snapshot of the expedited state.
                 */
-               if (empty)
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               else
+               empty_exp_now = !rcu_preempted_readers_exp(rnp);
+               if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+                       trace_rcu_quiescent_state_report("preempt_rcu",
+                                                        rnp->gpnum,
+                                                        0, rnp->qsmask,
+                                                        rnp->level,
+                                                        rnp->grplo,
+                                                        rnp->grphi,
+                                                        !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rnp, flags);
+               } else
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
-               if (special & RCU_READ_UNLOCK_BOOSTED) {
-                       rt_mutex_unlock(t->rcu_boost_mutex);
-                       t->rcu_boost_mutex = NULL;
-               }
+               if (rbmp)
+                       rt_mutex_unlock(rbmp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
-               if (!empty_exp && !rcu_preempted_readers_exp(rnp))
-                       rcu_report_exp_rnp(&rcu_preempt_state, rnp);
+               if (!empty_exp && empty_exp_now)
+                       rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
        } else {
                local_irq_restore(flags);
        }
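
Two details in the hunk above follow the same discipline: anything that will be needed after rnp->lock is gone, namely whether the expedited blocked-reader list just became empty and which rt_mutex (if any) was used to boost this task, is snapshotted while the lock is still held, because rcu_report_unblock_qs_rnp() releases rnp->lock. A hypothetical distillation of that ordering, with the "!empty_exp" precondition for the expedited report omitted for brevity and all names simplified:

	static void snapshot_then_act(struct task_struct *t, struct rcu_node *rnp,
				      unsigned long flags)
	{
		struct rt_mutex *rbmp = NULL;
		int empty_exp_now;

		/* 1. Snapshot everything needed later, while rnp->lock is held. */
		empty_exp_now = !rcu_preempted_readers_exp(rnp);
	#ifdef CONFIG_RCU_BOOST
		rbmp = t->rcu_boost_mutex;
		t->rcu_boost_mutex = NULL;
	#endif

		/* 2. Drop the lock (rcu_report_unblock_qs_rnp() may do this for us). */
		raw_spin_unlock_irqrestore(&rnp->lock, flags);

		/* 3. Act only on the snapshots; do not re-read the protected state. */
	#ifdef CONFIG_RCU_BOOST
		if (rbmp)
			rt_mutex_unlock(rbmp);
	#endif
		if (empty_exp_now)
			rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
	}
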
@@ -390,14 +427,23 @@ void __rcu_read_unlock(void)
 {
        struct task_struct *t = current;
 
-       barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-       --t->rcu_read_lock_nesting;
-       barrier();  /* decrement before load of ->rcu_read_unlock_special */
-       if (t->rcu_read_lock_nesting == 0 &&
-           unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-               rcu_read_unlock_special(t);
+       if (t->rcu_read_lock_nesting != 1)
+               --t->rcu_read_lock_nesting;
+       else {
+               barrier();  /* critical section before exit code. */
+               t->rcu_read_lock_nesting = INT_MIN;
+               barrier();  /* assign before ->rcu_read_unlock_special load */
+               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+                       rcu_read_unlock_special(t);
+               barrier();  /* ->rcu_read_unlock_special load before assign */
+               t->rcu_read_lock_nesting = 0;
+       }
 #ifdef CONFIG_PROVE_LOCKING
-       WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+       {
+               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+               WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+       }
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
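
The rewritten __rcu_read_unlock() above no longer decrements straight to zero. On the outermost unlock it parks ->rcu_read_lock_nesting at INT_MIN, runs the slow path if ->rcu_read_unlock_special is set, and only then stores zero; the context-switch hunk earlier in this diff treats a negative nesting value as "preempted inside this window" and finishes rcu_read_unlock_special() on the task's behalf, and the PROVE_LOCKING check now tolerates the large negative sentinel. A minimal user-space model of the counter protocol (single task, barriers and the real slow path omitted; all names here are invented for illustration):

	#include <limits.h>
	#include <stdio.h>

	static int nesting;		/* models t->rcu_read_lock_nesting */
	static int unlock_special;	/* models t->rcu_read_unlock_special */

	static void model_rcu_read_lock(void)
	{
		nesting++;
	}

	static void model_rcu_read_unlock(void)
	{
		if (nesting != 1) {
			--nesting;		/* inner unlock: just decrement */
		} else {
			nesting = INT_MIN;	/* park the counter: unlock in progress */
			if (unlock_special)
				unlock_special = 0;	/* slow path would run here */
			nesting = 0;		/* only now truly outside the section */
		}
	}

	int main(void)
	{
		model_rcu_read_lock();
		model_rcu_read_lock();
		model_rcu_read_unlock();	/* nesting: 2 -> 1 */
		model_rcu_read_unlock();	/* outermost: INT_MIN window, then 0 */
		printf("final nesting = %d\n", nesting);
		return 0;
	}
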
@@ -448,16 +494,20 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
        struct task_struct *t;
+       int ndetected = 0;
 
        if (!rcu_preempt_blocked_readers_cgp(rnp))
-               return;
+               return 0;
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                printk(" P%d", t->pid);
+               ndetected++;
+       }
+       return ndetected;
 }
 
 /*
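
rcu_print_task_stall() now returns the number of blocked tasks it printed instead of void, so the stall-warning path can tell whether any preempted readers were actually identified. A hedged sketch of the kind of accumulation a caller might do (this caller is hypothetical, not the actual rcutree.c code):

	/* Hypothetical caller: total the stalled-task counts over all leaf nodes. */
	static int example_count_stalled_tasks(struct rcu_state *rsp)
	{
		struct rcu_node *rnp;
		int ndetected = 0;

		rcu_for_each_leaf_node(rsp, rnp)
			ndetected += rcu_print_task_stall(rnp);
		return ndetected;	/* zero hints that the stall lies elsewhere */
	}
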
@@ -593,7 +643,8 @@ static void rcu_preempt_check_callbacks(int cpu)
                rcu_preempt_qs(cpu);
                return;
        }
-       if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+       if (t->rcu_read_lock_nesting > 0 &&
+           per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -637,18 +688,9 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
-       struct rcu_synchronize rcu;
-
        if (!rcu_scheduler_active)
                return;
-
-       init_rcu_head_on_stack(&rcu.head);
-       init_completion(&rcu.completion);
-       /* Will wake me after RCU finished. */
-       call_rcu(&rcu.head, wakeme_after_rcu);
-       /* Wait for it. */
-       wait_for_completion(&rcu.completion);
-       destroy_rcu_head_on_stack(&rcu.head);
+       wait_rcu_gp(call_rcu);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
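
synchronize_rcu() now delegates the wait to wait_rcu_gp(call_rcu), factoring out the on-stack rcu_head/completion boilerplate that the deleted lines used to open-code. A sketch of what that helper presumably does, reconstructed from the removed code (the real implementation lives elsewhere in the tree and may differ in detail, e.g. in the exact function-pointer typedef it takes):

	/* Reconstructed from the lines deleted above; illustrative only. */
	void wait_rcu_gp(void (*crf)(struct rcu_head *head,
				     void (*func)(struct rcu_head *head)))
	{
		struct rcu_synchronize rcu;

		init_rcu_head_on_stack(&rcu.head);
		init_completion(&rcu.completion);
		crf(&rcu.head, wakeme_after_rcu);	/* will wake us after a GP */
		wait_for_completion(&rcu.completion);	/* wait for it */
		destroy_rcu_head_on_stack(&rcu.head);
	}
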
@@ -690,9 +732,13 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
  *
+ * Most callers will set the "wake" flag, but the task initiating the
+ * expedited grace period need not wake itself.
+ *
  * Caller must hold sync_rcu_preempt_exp_mutex.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+                              bool wake)
 {
        unsigned long flags;
        unsigned long mask;
@@ -705,7 +751,8 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
                }
                if (rnp->parent == NULL) {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                       wake_up(&sync_rcu_preempt_exp_wq);
+                       if (wake)
+                               wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
                mask = rnp->grpmask;
@@ -738,7 +785,7 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
                must_wait = 1;
        }
        if (!must_wait)
-               rcu_report_exp_rnp(rsp, rnp);
+               rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 }
 
 /*
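
The new "wake" argument lets rcu_report_exp_rnp() skip the wake_up() when the reporting task is the one that will itself sleep on sync_rcu_preempt_exp_wq, which is why sync_rcu_preempt_exp_init() above passes false while the read-side unlock path passes true. The two call sites in this diff reduce to the following (annotations added for illustration):

	/* A reader finishing its critical section: someone else may be waiting. */
	rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);	/* do wake the waiter */

	/* The task initiating the expedited GP, which will wait on the wq itself. */
	rcu_report_exp_rnp(rsp, rnp, false);			/* don't wake self */
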
@@ -949,8 +996,9 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
+       return 0;
 }
 
 /*
@@ -1029,9 +1077,9 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  * report on tasks preempted in RCU read-side critical sections during
  * expedited RCU grace periods.
  */
-static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
+static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
+                              bool wake)
 {
-       return;
 }
 
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
@@ -1180,12 +1228,12 @@ static int rcu_boost(struct rcu_node *rnp)
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&mtx, t);
        t->rcu_boost_mutex = &mtx;
-       t->rcu_boosted = 1;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
 
-       return rnp->exp_tasks != NULL || rnp->boost_tasks != NULL;
+       return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
+              ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
 
 /*
@@ -1209,9 +1257,12 @@ static int rcu_boost_kthread(void *arg)
        int spincnt = 0;
        int more2boost;
 
+       trace_rcu_utilization("Start boost kthread@init");
        for (;;) {
                rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
+               trace_rcu_utilization("End boost kthread@rcu_wait");
                rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
+               trace_rcu_utilization("Start boost kthread@rcu_wait");
                rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                more2boost = rcu_boost(rnp);
                if (more2boost)
@@ -1219,11 +1270,14 @@ static int rcu_boost_kthread(void *arg)
                else
                        spincnt = 0;
                if (spincnt > 10) {
+                       trace_rcu_utilization("End boost kthread@rcu_yield");
                        rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+                       trace_rcu_utilization("Start boost kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        /* NOTREACHED */
+       trace_rcu_utilization("End boost kthread@notreached");
        return 0;
 }
 
@@ -1272,15 +1326,22 @@ static void invoke_rcu_callbacks_kthread(void)
 
        local_irq_save(flags);
        __this_cpu_write(rcu_cpu_has_work, 1);
-       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-               local_irq_restore(flags);
-               return;
-       }
-       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+       if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+           current != __this_cpu_read(rcu_cpu_kthread_task))
+               wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
        local_irq_restore(flags);
 }
 
 /*
+ * Is the current CPU running the RCU-callbacks kthread?
+ * Caller must have preemption disabled.
+ */
+static bool rcu_is_callbacks_kthread(void)
+{
+       return __get_cpu_var(rcu_cpu_kthread_task) == current;
+}
+
+/*
  * Set the affinity of the boost kthread.  The CPU-hotplug locks are
  * held, so no one should be messing with the existence of the boost
  * kthread.
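
rcu_is_callbacks_kthread() gives preemption-disabled callers a cheap "am I the per-CPU rcuc kthread?" test, with a stub returning false added further down for !CONFIG_RCU_BOOST builds. A hedged sketch of the sort of caller this enables, such as callback-invocation code that only re-kicks the RCU core when it is not already running in the kthread (the caller below is hypothetical):

	/* Hypothetical caller; its own caller is assumed to have preemption off. */
	static void example_after_invoking_callbacks(long callbacks_left)
	{
		if (callbacks_left && !rcu_is_callbacks_kthread())
			invoke_rcu_core();	/* let softirq/kthread take another pass */
	}
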
@@ -1324,13 +1385,13 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
-                          "rcub%d", rnp_index);
+                          "rcub/%d", rnp_index);
        if (IS_ERR(t))
                return PTR_ERR(t);
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        return 0;
@@ -1425,6 +1486,7 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
 {
        struct sched_param sp;
        struct timer_list yield_timer;
+       int prio = current->rt_priority;
 
        setup_timer_on_stack(&yield_timer, f, arg);
        mod_timer(&yield_timer, jiffies + 2);
@@ -1432,7 +1494,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
        set_user_nice(current, 19);
        schedule();
-       sp.sched_priority = RCU_KTHREAD_PRIO;
+       set_user_nice(current, 0);
+       sp.sched_priority = prio;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
        del_timer(&yield_timer);
 }
@@ -1470,7 +1533,8 @@ static int rcu_cpu_kthread_should_stop(int cpu)
 
 /*
  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
  */
 static int rcu_cpu_kthread(void *arg)
 {
@@ -1481,9 +1545,12 @@ static int rcu_cpu_kthread(void *arg)
        char work;
        char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
+       trace_rcu_utilization("Start CPU kthread@init");
        for (;;) {
                *statusp = RCU_KTHREAD_WAITING;
+               trace_rcu_utilization("End CPU kthread@rcu_wait");
                rcu_wait(*workp != 0 || kthread_should_stop());
+               trace_rcu_utilization("Start CPU kthread@rcu_wait");
                local_bh_disable();
                if (rcu_cpu_kthread_should_stop(cpu)) {
                        local_bh_enable();
@@ -1504,11 +1571,14 @@ static int rcu_cpu_kthread(void *arg)
                        spincnt = 0;
                if (spincnt > 10) {
                        *statusp = RCU_KTHREAD_YIELDING;
+                       trace_rcu_utilization("End CPU kthread@rcu_yield");
                        rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                       trace_rcu_utilization("Start CPU kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        *statusp = RCU_KTHREAD_STOPPED;
+       trace_rcu_utilization("End CPU kthread@term");
        return 0;
 }
 
@@ -1541,7 +1611,10 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        if (!rcu_scheduler_fully_active ||
            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
                return 0;
-       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+       t = kthread_create_on_node(rcu_cpu_kthread,
+                                  (void *)(long)cpu,
+                                  cpu_to_node(cpu),
+                                  "rcuc/%d", cpu);
        if (IS_ERR(t))
                return PTR_ERR(t);
        if (cpu_online(cpu))
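
The per-CPU kthread is now created with kthread_create_on_node() so that its task_struct and stack land on the CPU's home NUMA node, and it is renamed "rcuc/%d" to match the other slash-separated RCU kthread names in this diff. A minimal sketch of the usual spawn/bind/wake sequence for such a per-CPU thread (simplified; priority setup and the per-CPU bookkeeping done by the real code are omitted, and thread_fn is assumed to be supplied by the caller):

	static int example_spawn_percpu_kthread(int cpu, int (*thread_fn)(void *))
	{
		struct task_struct *t;

		t = kthread_create_on_node(thread_fn, (void *)(long)cpu,
					   cpu_to_node(cpu), "rcuc/%d", cpu);
		if (IS_ERR(t))
			return PTR_ERR(t);
		if (cpu_online(cpu))
			kthread_bind(t, cpu);	/* keep it on its own CPU */
		wake_up_process(t);
		return 0;
	}
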
@@ -1650,7 +1723,7 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
                return 0;
        if (rnp->node_kthread_task == NULL) {
                t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                  "rcun%d", rnp_index);
+                                  "rcun/%d", rnp_index);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1712,6 +1785,11 @@ static void invoke_rcu_callbacks_kthread(void)
        WARN_ON_ONCE(1);
 }
 
+static bool rcu_is_callbacks_kthread(void)
+{
+       return false;
+}
+
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 {
 }
@@ -1847,7 +1925,7 @@ void synchronize_sched_expedited(void)
                 * grace period works for us.
                 */
                get_online_cpus();
-               snap = atomic_read(&sync_sched_expedited_started) - 1;
+               snap = atomic_read(&sync_sched_expedited_started);
                smp_mb(); /* ensure read is before try_stop_cpus(). */
        }
 
@@ -1879,113 +1957,243 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
  * 1 if so.  This function is part of the RCU implementation; it is -not-
  * an exported member of the RCU API.
  *
- * Because we have preemptible RCU, just check whether this CPU needs
- * any flavor of RCU.  Do not chew up lots of CPU cycles with preemption
- * disabled in a most-likely vain attempt to cause RCU not to need this CPU.
+ * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
+ * any flavor of RCU.
  */
 int rcu_needs_cpu(int cpu)
 {
-       return rcu_needs_cpu_quick_check(cpu);
+       return rcu_cpu_has_callbacks(cpu);
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother initializing for it.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+}
+
+/*
+ * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
+ * after it.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
 }
 
 /*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
- * entry is not configured, so we never do need to.
+ * Do the idle-entry grace-period work, which, because CONFIG_RCU_FAST_NO_HZ=n,
+ * is nothing.
  */
-static void rcu_needs_cpu_flush(void)
+static void rcu_prepare_for_idle(int cpu)
 {
 }
 
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
-#define RCU_NEEDS_CPU_FLUSHES 5
+/*
+ * This code is invoked when a CPU goes idle, at which point we want
+ * to have the CPU do everything required for RCU so that it can enter
+ * the energy-efficient dyntick-idle mode.  This is handled by a
+ * state machine implemented by rcu_prepare_for_idle() below.
+ *
+ * The following three preprocessor symbols control this state machine:
+ *
+ * RCU_IDLE_FLUSHES gives the maximum number of times that we will attempt
+ *     to satisfy RCU.  Beyond this point, it is better to incur a periodic
+ *     scheduling-clock interrupt than to loop through the state machine
+ *     at full power.
+ * RCU_IDLE_OPT_FLUSHES gives the number of RCU_IDLE_FLUSHES that are
+ *     optional if RCU does not need anything immediately from this
+ *     CPU, even if this CPU still has RCU callbacks queued.  The first
+ *     few times through the state machine are mandatory: we need to give
+ *     the state machine a chance to communicate a quiescent state
+ *     to the RCU core.
+ * RCU_IDLE_GP_DELAY gives the number of jiffies that a CPU is permitted
+ *     to sleep in dyntick-idle mode with RCU callbacks pending.  This
+ *     is sized to be roughly one RCU grace period.  Those energy-efficiency
+ *     benchmarkers who might otherwise be tempted to set this to a large
+ *     number, be warned: Setting RCU_IDLE_GP_DELAY too high can hang your
+ *     system.  And if you are -that- concerned about energy efficiency,
+ *     just power the system down and be done with it!
+ *
+ * The values below work well in practice.  If future workloads require
+ * adjustment, they can be converted into kernel config parameters, though
+ * making the state machine smarter might be a better option.
+ */
+#define RCU_IDLE_FLUSHES 5             /* Number of dyntick-idle tries. */
+#define RCU_IDLE_OPT_FLUSHES 3         /* Optional dyntick-idle tries. */
+#define RCU_IDLE_GP_DELAY 6            /* Roughly one grace period. */
+
 static DEFINE_PER_CPU(int, rcu_dyntick_drain);
 static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
+static DEFINE_PER_CPU(struct hrtimer, rcu_idle_gp_timer);
+static ktime_t rcu_idle_gp_wait;
 
 /*
- * Check to see if any future RCU-related work will need to be done
- * by the current CPU, even if none need be done immediately, returning
- * 1 if so.  This function is part of the RCU implementation; it is -not-
- * an exported member of the RCU API.
+ * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
+ * callbacks on this CPU, (2) this CPU has not yet attempted to enter
+ * dyntick-idle mode, or (3) this CPU is in the process of attempting to
+ * enter dyntick-idle mode.  Otherwise, if we have recently tried and failed
+ * to enter dyntick-idle mode, we refuse to try to enter it.  After all,
+ * it is better to incur scheduling-clock interrupts than to spin
+ * continuously for the same time duration!
+ */
+int rcu_needs_cpu(int cpu)
+{
+       /* If no callbacks, RCU doesn't need the CPU. */
+       if (!rcu_cpu_has_callbacks(cpu))
+               return 0;
+       /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
+       return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
+}
+
+/*
+ * Timer handler used to force the CPU to start pushing its remaining RCU
+ * callbacks in the case where it entered dyntick-idle mode with callbacks
+ * pending.  The handler doesn't really need to do anything because the
+ * real work is done upon re-entry to idle, or by the next scheduling-clock
+ * interrupt should idle not be re-entered.
+ */
+static enum hrtimer_restart rcu_idle_gp_timer_func(struct hrtimer *hrtp)
+{
+       trace_rcu_prep_idle("Timer");
+       return HRTIMER_NORESTART;
+}
+
+/*
+ * Initialize the timer used to pull CPUs out of dyntick-idle mode.
+ */
+static void rcu_prepare_for_idle_init(int cpu)
+{
+       static int firsttime = 1;
+       struct hrtimer *hrtp = &per_cpu(rcu_idle_gp_timer, cpu);
+
+       hrtimer_init(hrtp, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hrtp->function = rcu_idle_gp_timer_func;
+       if (firsttime) {
+               unsigned int upj = jiffies_to_usecs(RCU_IDLE_GP_DELAY);
+
+               rcu_idle_gp_wait = ns_to_ktime(upj * (u64)1000);
+               firsttime = 0;
+       }
+}
+
+/*
+ * Clean up for exit from idle.  Because we are exiting from idle, there
+ * is no longer any point to rcu_idle_gp_timer, so cancel it.  This will
+ * do nothing if this timer is not active, so just cancel it unconditionally.
+ */
+static void rcu_cleanup_after_idle(int cpu)
+{
+       hrtimer_cancel(&per_cpu(rcu_idle_gp_timer, cpu));
+}
+
+/*
+ * Check to see if any RCU-related work can be done by the current CPU,
+ * and if so, schedule a softirq to get it done.  This function is part
+ * of the RCU implementation; it is -not- an exported member of the RCU API.
  *
- * Because we are not supporting preemptible RCU, attempt to accelerate
- * any current grace periods so that RCU no longer needs this CPU, but
- * only if all other CPUs are already in dynticks-idle mode.  This will
- * allow the CPU cores to be powered down immediately, as opposed to after
- * waiting many milliseconds for grace periods to elapse.
+ * The idea is for the current CPU to clear out all work required by the
+ * RCU core for the current grace period, so that this CPU can be permitted
+ * to enter dyntick-idle mode.  In some cases, it will need to be awakened
+ * at the end of the grace period by whatever CPU ends the grace period.
+ * This allows CPUs to go dyntick-idle more quickly, and to reduce the
+ * number of wakeups by a modest integer factor.
  *
  * Because it is not legal to invoke rcu_process_callbacks() with irqs
 * disabled, we do one pass of force_quiescent_state(), then do an
  * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
  * later.  The per-cpu rcu_dyntick_drain variable controls the sequencing.
+ *
+ * The caller must have disabled interrupts.
  */
-int rcu_needs_cpu(int cpu)
+static void rcu_prepare_for_idle(int cpu)
 {
-       int c = 0;
-       int snap;
-       int thatcpu;
-
-       /* Check for being in the holdoff period. */
-       if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
-               return rcu_needs_cpu_quick_check(cpu);
-
-       /* Don't bother unless we are the last non-dyntick-idle CPU. */
-       for_each_online_cpu(thatcpu) {
-               if (thatcpu == cpu)
-                       continue;
-               snap = atomic_add_return(0, &per_cpu(rcu_dynticks,
-                                                    thatcpu).dynticks);
-               smp_mb(); /* Order sampling of snap with end of grace period. */
-               if ((snap & 0x1) != 0) {
-                       per_cpu(rcu_dyntick_drain, cpu) = 0;
-                       per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
-                       return rcu_needs_cpu_quick_check(cpu);
-               }
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       /*
+        * If there are no callbacks on this CPU, enter dyntick-idle mode.
+        * Also reset state to avoid prejudicing later attempts.
+        */
+       if (!rcu_cpu_has_callbacks(cpu)) {
+               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+               per_cpu(rcu_dyntick_drain, cpu) = 0;
+               local_irq_restore(flags);
+               trace_rcu_prep_idle("No callbacks");
+               return;
+       }
+
+       /*
+        * If in holdoff mode, just return.  We will presumably have
+        * refrained from disabling the scheduling-clock tick.
+        */
+       if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
+               local_irq_restore(flags);
+               trace_rcu_prep_idle("In holdoff");
+               return;
        }
 
        /* Check and update the rcu_dyntick_drain sequencing. */
        if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* First time through, initialize the counter. */
-               per_cpu(rcu_dyntick_drain, cpu) = RCU_NEEDS_CPU_FLUSHES;
+               per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
+       } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
+                  !rcu_pending(cpu)) {
+               /* Can we go dyntick-idle despite still having callbacks? */
+               trace_rcu_prep_idle("Dyntick with callbacks");
+               per_cpu(rcu_dyntick_drain, cpu) = 0;
+               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+               hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
+                             rcu_idle_gp_wait, HRTIMER_MODE_REL);
+               return; /* Nothing more to do immediately. */
        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* We have hit the limit, so time to give up. */
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-               return rcu_needs_cpu_quick_check(cpu);
+               local_irq_restore(flags);
+               trace_rcu_prep_idle("Begin holdoff");
+               invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
+               return;
        }
 
-       /* Do one step pushing remaining RCU callbacks through. */
+       /*
+        * Do one step of pushing the remaining RCU callbacks through
+        * the RCU core state machine.
+        */
+#ifdef CONFIG_TREE_PREEMPT_RCU
+       if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
+               local_irq_restore(flags);
+               rcu_preempt_qs(cpu);
+               force_quiescent_state(&rcu_preempt_state, 0);
+               local_irq_save(flags);
+       }
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
        if (per_cpu(rcu_sched_data, cpu).nxtlist) {
+               local_irq_restore(flags);
                rcu_sched_qs(cpu);
                force_quiescent_state(&rcu_sched_state, 0);
-               c = c || per_cpu(rcu_sched_data, cpu).nxtlist;
+               local_irq_save(flags);
        }
        if (per_cpu(rcu_bh_data, cpu).nxtlist) {
+               local_irq_restore(flags);
                rcu_bh_qs(cpu);
                force_quiescent_state(&rcu_bh_state, 0);
-               c = c || per_cpu(rcu_bh_data, cpu).nxtlist;
+               local_irq_save(flags);
        }
 
-       /* If RCU callbacks are still pending, RCU still needs this CPU. */
-       if (c)
+       /*
+        * If RCU callbacks are still pending, RCU still needs this CPU.
+        * So try forcing the callbacks through the grace period.
+        */
+       if (rcu_cpu_has_callbacks(cpu)) {
+               local_irq_restore(flags);
+               trace_rcu_prep_idle("More callbacks");
                invoke_rcu_core();
-       return c;
-}
-
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.
- */
-static void rcu_needs_cpu_flush(void)
-{
-       int cpu = smp_processor_id();
-       unsigned long flags;
-
-       if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
-               return;
-       local_irq_save(flags);
-       (void)rcu_needs_cpu(cpu);
-       local_irq_restore(flags);
+       } else {
+               local_irq_restore(flags);
+               trace_rcu_prep_idle("Callbacks drained");
+       }
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
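
Taken together, the CONFIG_RCU_FAST_NO_HZ side of the final hunk is a small per-CPU state machine driven by rcu_dyntick_drain and rcu_dyntick_holdoff. A condensed restatement of the decision flow of rcu_prepare_for_idle() as added above (trace calls, the interrupt enable/disable dance, and the per-flavor callback pushing are omitted; this is a summary, not a drop-in replacement):

	static void prepare_for_idle_sketch(int cpu)
	{
		if (!rcu_cpu_has_callbacks(cpu)) {
			/* Nothing queued: reset state and let the CPU go fully idle. */
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			return;
		}
		if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
			return;		/* recently tried and failed: in holdoff */
		if (per_cpu(rcu_dyntick_drain, cpu) <= 0) {
			/* First attempt this idle period: arm the flush counter. */
			per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
		} else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
			   !rcu_pending(cpu)) {
			/* Optional phase and RCU needs nothing right now:
			 * go dyntick-idle with callbacks, backed by a timer. */
			per_cpu(rcu_dyntick_drain, cpu) = 0;
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
			hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
				      rcu_idle_gp_wait, HRTIMER_MODE_REL);
			return;
		} else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
			/* Out of attempts: enter holdoff and punt to the RCU core. */
			per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
			invoke_rcu_core();
			return;
		}
		/*
		 * Otherwise: push callbacks through one step (quiescent state plus
		 * force_quiescent_state()) per flavor, then invoke_rcu_core() if
		 * callbacks are still pending.
		 */
	}
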