nohz: Fix stale jiffies update in tick_nohz_restart()
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 98ce17c..c023464 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -610,7 +610,7 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
         * absolutely necessary, but this is a good performance/complexity
         * tradeoff.
         */
-       if (rcu_preempt_blocked_readers_cgp(rnp))
+       if (rcu_preempt_blocked_readers_cgp(rnp) && rnp->qsmask == 0)
                retval |= RCU_OFL_TASKS_NORM_GP;
        if (rcu_preempted_readers_exp(rnp))
                retval |= RCU_OFL_TASKS_EXP_GP;
@@ -835,10 +835,22 @@ sync_rcu_preempt_exp_init(struct rcu_state *rsp, struct rcu_node *rnp)
                rcu_report_exp_rnp(rsp, rnp, false); /* Don't wake self. */
 }
 
-/*
- * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
- * is to invoke synchronize_sched_expedited() to push all the tasks to
- * the ->blkd_tasks lists and wait for this list to drain.
+/**
+ * synchronize_rcu_expedited - Brute-force RCU grace period
+ *
+ * Wait for an RCU-preempt grace period, but expedite it.  The basic
+ * idea is to invoke synchronize_sched_expedited() to push all the tasks to
+ * the ->blkd_tasks lists and wait for these lists to drain.  This consumes
+ * significant time on all CPUs and is unfriendly to real-time workloads,
+ * and is thus not recommended for any sort of common-case code.
+ * In fact, if you are using synchronize_rcu_expedited() in a loop,
+ * please restructure your code to batch your updates, and then use a
+ * single synchronize_rcu() instead.
+ *
+ * Note that it is illegal to call this function while holding any lock
+ * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
+ * to call this function from a CPU-hotplug notifier.  Failing to observe
+ * these restrictions will result in deadlock.
  */
 void synchronize_rcu_expedited(void)
 {
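
For illustration, a minimal sketch of the batching pattern that the new
kernel-doc above recommends in place of calling synchronize_rcu_expedited()
once per update.  The "pending" list, "mylock", and free_all() are invented
names for this example, not kernel APIs:

        LIST_HEAD(batch);

        spin_lock(&mylock);                  /* hypothetical update-side lock */
        list_splice_init(&pending, &batch);  /* collect all pending removals */
        spin_unlock(&mylock);

        synchronize_rcu();   /* one normal grace period covers the whole batch */
        free_all(&batch);    /* hypothetical helper: frees each batched element */
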
@@ -2096,10 +2108,6 @@ static void rcu_cleanup_after_idle(int cpu)
  */
 static void rcu_prepare_for_idle(int cpu)
 {
-       unsigned long flags;
-
-       local_irq_save(flags);
-
        /*
         * If there are no callbacks on this CPU, enter dyntick-idle mode.
         * Also reset state to avoid prejudicing later attempts.
@@ -2107,7 +2115,6 @@ static void rcu_prepare_for_idle(int cpu)
        if (!rcu_cpu_has_callbacks(cpu)) {
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
                per_cpu(rcu_dyntick_drain, cpu) = 0;
-               local_irq_restore(flags);
                trace_rcu_prep_idle("No callbacks");
                return;
        }
@@ -2117,7 +2124,6 @@ static void rcu_prepare_for_idle(int cpu)
         * refrained from disabling the scheduling-clock tick.
         */
        if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) {
-               local_irq_restore(flags);
                trace_rcu_prep_idle("In holdoff");
                return;
        }
@@ -2127,11 +2133,12 @@ static void rcu_prepare_for_idle(int cpu)
                /* First time through, initialize the counter. */
                per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES;
        } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES &&
-                  !rcu_pending(cpu)) {
+                  !rcu_pending(cpu) &&
+                  !local_softirq_pending()) {
                /* Can we go dyntick-idle despite still having callbacks? */
                trace_rcu_prep_idle("Dyntick with callbacks");
                per_cpu(rcu_dyntick_drain, cpu) = 0;
-               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1;
+               per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
                if (rcu_cpu_has_nonlazy_callbacks(cpu))
                        hrtimer_start(&per_cpu(rcu_idle_gp_timer, cpu),
                                      rcu_idle_gp_wait, HRTIMER_MODE_REL);
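
A hedged aside, not part of the patch: with the holdoff stamp now set to the
current jiffy rather than jiffies - 1, any re-entry into this function during
the same jiffy takes the early "In holdoff" exit above rather than re-running
the state machine and re-arming the timer:

        /* illustrative re-entry within the same jiffy */
        if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies)
                return;  /* stays in holdoff until jiffies advances */
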
@@ -2142,7 +2149,6 @@ static void rcu_prepare_for_idle(int cpu)
        } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) {
                /* We have hit the limit, so time to give up. */
                per_cpu(rcu_dyntick_holdoff, cpu) = jiffies;
-               local_irq_restore(flags);
                trace_rcu_prep_idle("Begin holdoff");
                invoke_rcu_core();  /* Force the CPU out of dyntick-idle. */
                return;
@@ -2154,23 +2160,17 @@ static void rcu_prepare_for_idle(int cpu)
         */
 #ifdef CONFIG_TREE_PREEMPT_RCU
        if (per_cpu(rcu_preempt_data, cpu).nxtlist) {
-               local_irq_restore(flags);
                rcu_preempt_qs(cpu);
                force_quiescent_state(&rcu_preempt_state, 0);
-               local_irq_save(flags);
        }
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
        if (per_cpu(rcu_sched_data, cpu).nxtlist) {
-               local_irq_restore(flags);
                rcu_sched_qs(cpu);
                force_quiescent_state(&rcu_sched_state, 0);
-               local_irq_save(flags);
        }
        if (per_cpu(rcu_bh_data, cpu).nxtlist) {
-               local_irq_restore(flags);
                rcu_bh_qs(cpu);
                force_quiescent_state(&rcu_bh_state, 0);
-               local_irq_save(flags);
        }
 
        /*
@@ -2178,13 +2178,10 @@ static void rcu_prepare_for_idle(int cpu)
         * So try forcing the callbacks through the grace period.
         */
        if (rcu_cpu_has_callbacks(cpu)) {
-               local_irq_restore(flags);
                trace_rcu_prep_idle("More callbacks");
                invoke_rcu_core();
-       } else {
-               local_irq_restore(flags);
+       } else
                trace_rcu_prep_idle("Callbacks drained");
-       }
 }
 
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
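
For orientation, a hedged summary in plain C comments (not kernel code) of the
decision ladder that rcu_prepare_for_idle() walks after this patch,
reconstructed only from the hunks above:

        /*
         * no callbacks             -> reset drain/holdoff, enter dyntick-idle
         * holdoff == jiffies       -> bail; retry once jiffies advances
         * first pass               -> drain = RCU_IDLE_FLUSHES
         * drain <= RCU_IDLE_OPT_FLUSHES && !rcu_pending() &&
         * !local_softirq_pending() -> enter dyntick-idle with callbacks,
         *                             arming rcu_idle_gp_timer if non-lazy
         * --drain <= 0             -> begin holdoff, invoke_rcu_core()
         * otherwise                -> note quiescent states and force grace
         *                             periods; invoke_rcu_core() if callbacks
         *                             remain, else "Callbacks drained"
         */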