diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 14dc7dd..4b9b9f8 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
 #include <linux/delay.h>
 #include <linux/stop_machine.h>
 
+#define RCU_KTHREAD_PRIO 1
+
+#ifdef CONFIG_RCU_BOOST
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+#else
+#define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
+#endif
+
 /*
  * Check the RCU kernel configuration parameters and print informative
  * messages about anything out of the ordinary.  If you like #ifdef, you
@@ -64,10 +72,11 @@ static void __init rcu_bootup_announce_oddness(void)
 
 #ifdef CONFIG_TREE_PREEMPT_RCU
 
-struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt_state);
+struct rcu_state rcu_preempt_state = RCU_STATE_INITIALIZER(rcu_preempt);
 DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
 static struct rcu_state *rcu_state = &rcu_preempt_state;
 
+static void rcu_read_unlock_special(struct task_struct *t);
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 
 /*
@@ -121,9 +130,11 @@ static void rcu_preempt_qs(int cpu)
 {
        struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
 
-       rdp->passed_quiesc_completed = rdp->gpnum - 1;
+       rdp->passed_quiesce_gpnum = rdp->gpnum;
        barrier();
-       rdp->passed_quiesc = 1;
+       if (rdp->passed_quiesce == 0)
+               trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+       rdp->passed_quiesce = 1;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -147,7 +158,7 @@ static void rcu_preempt_note_context_switch(int cpu)
        struct rcu_data *rdp;
        struct rcu_node *rnp;
 
-       if (t->rcu_read_lock_nesting &&
+       if (t->rcu_read_lock_nesting > 0 &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
 
                /* Possibly blocking in an RCU read-side critical section. */
@@ -189,7 +200,20 @@ static void rcu_preempt_note_context_switch(int cpu)
                        if (rnp->qsmask & rdp->grpmask)
                                rnp->gp_tasks = &t->rcu_node_entry;
                }
+               trace_rcu_preempt_task(rdp->rsp->name,
+                                      t->pid,
+                                      (rnp->qsmask & rdp->grpmask)
+                                      ? rnp->gpnum
+                                      : rnp->gpnum + 1);
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
+       } else if (t->rcu_read_lock_nesting < 0 &&
+                  t->rcu_read_unlock_special) {
+
+               /*
+                * Complete exit from RCU read-side critical section on
+                * behalf of preempted instance of __rcu_read_unlock().
+                */
+               rcu_read_unlock_special(t);
        }
 
        /*
@@ -284,12 +308,15 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
  * notify RCU core processing or task having blocked during the RCU
  * read-side critical section.
  */
-static void rcu_read_unlock_special(struct task_struct *t)
+static noinline void rcu_read_unlock_special(struct task_struct *t)
 {
        int empty;
        int empty_exp;
        unsigned long flags;
        struct list_head *np;
+#ifdef CONFIG_RCU_BOOST
+       struct rt_mutex *rbmp = NULL;
+#endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        int special;
 
@@ -309,7 +336,7 @@ static void rcu_read_unlock_special(struct task_struct *t)
        }
 
        /* Hardware IRQ handlers cannot block. */
-       if (in_irq()) {
+       if (in_irq() || in_serving_softirq()) {
                local_irq_restore(flags);
                return;
        }
@@ -335,6 +362,9 @@ static void rcu_read_unlock_special(struct task_struct *t)
                smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
                np = rcu_next_node_entry(t, rnp);
                list_del_init(&t->rcu_node_entry);
+               t->rcu_blocked_node = NULL;
+               trace_rcu_unlock_preempted_task("rcu_preempt",
+                                               rnp->gpnum, t->pid);
                if (&t->rcu_node_entry == rnp->gp_tasks)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
@@ -342,26 +372,34 @@ static void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
+               /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
+               if (t->rcu_boost_mutex) {
+                       rbmp = t->rcu_boost_mutex;
+                       t->rcu_boost_mutex = NULL;
+               }
 #endif /* #ifdef CONFIG_RCU_BOOST */
-               t->rcu_blocked_node = NULL;
 
                /*
                 * If this was the last task on the current list, and if
                 * we aren't waiting on any CPUs, report the quiescent state.
                 * Note that rcu_report_unblock_qs_rnp() releases rnp->lock.
                 */
-               if (empty)
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-               else
+               if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
+                       trace_rcu_quiescent_state_report("preempt_rcu",
+                                                        rnp->gpnum,
+                                                        0, rnp->qsmask,
+                                                        rnp->level,
+                                                        rnp->grplo,
+                                                        rnp->grphi,
+                                                        !!rnp->gp_tasks);
                        rcu_report_unblock_qs_rnp(rnp, flags);
+               } else
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
-               if (special & RCU_READ_UNLOCK_BOOSTED) {
-                       t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
-                       rt_mutex_unlock(t->rcu_boost_mutex);
-                       t->rcu_boost_mutex = NULL;
-               }
+               if (rbmp)
+                       rt_mutex_unlock(rbmp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
@@ -386,14 +424,23 @@ void __rcu_read_unlock(void)
 {
        struct task_struct *t = current;
 
-       barrier();  /* needed if we ever invoke rcu_read_unlock in rcutree.c */
-       --t->rcu_read_lock_nesting;
-       barrier();  /* decrement before load of ->rcu_read_unlock_special */
-       if (t->rcu_read_lock_nesting == 0 &&
-           unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
-               rcu_read_unlock_special(t);
+       if (t->rcu_read_lock_nesting != 1)
+               --t->rcu_read_lock_nesting;
+       else {
+               barrier();  /* critical section before exit code. */
+               t->rcu_read_lock_nesting = INT_MIN;
+               barrier();  /* assign before ->rcu_read_unlock_special load */
+               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
+                       rcu_read_unlock_special(t);
+               barrier();  /* ->rcu_read_unlock_special load before assign */
+               t->rcu_read_lock_nesting = 0;
+       }
 #ifdef CONFIG_PROVE_LOCKING
-       WARN_ON_ONCE(ACCESS_ONCE(t->rcu_read_lock_nesting) < 0);
+       {
+               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+
+               WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
+       }
 #endif /* #ifdef CONFIG_PROVE_LOCKING */
 }
 EXPORT_SYMBOL_GPL(__rcu_read_unlock);
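
The rewritten __rcu_read_unlock() above parks ->rcu_read_lock_nesting at INT_MIN
while the outermost unlock performs its special processing, so code running in
that window can distinguish "outermost unlock in progress" (large negative) from
"inside a critical section" (positive) and "fully outside" (zero); this is also
why rcu_preempt_note_context_switch() now calls rcu_read_unlock_special() for a
negative nesting count, and why the CONFIG_PROVE_LOCKING check only warns for
negative values that are not near INT_MIN.  The following is a minimal,
single-threaded userspace sketch of just that counter protocol; the names are
illustrative rather than kernel APIs, and the memory barriers and the
preemption/irq cases are deliberately omitted.

#include <limits.h>
#include <stdio.h>

static int nesting;          /* stand-in for t->rcu_read_lock_nesting */
static int special_pending;  /* stand-in for t->rcu_read_unlock_special */

static void unlock_special(void)
{
	/* Work deferred to the outermost unlock (queue removal, unboosting, ...). */
	printf("special processing, nesting = %d\n", nesting);
	special_pending = 0;
}

static void read_unlock(void)
{
	if (nesting != 1) {
		--nesting;              /* not the outermost unlock */
	} else {
		nesting = INT_MIN;      /* flag: outermost unlock in progress */
		if (special_pending)
			unlock_special();
		nesting = 0;            /* now fully outside the critical section */
	}
}

int main(void)
{
	nesting = 2;                    /* two nested read-side critical sections */
	special_pending = 1;            /* pretend we were preempted while inside */
	read_unlock();                  /* inner unlock: only decrements */
	read_unlock();                  /* outer unlock: runs the special processing */
	printf("final nesting = %d\n", nesting);
	return 0;
}
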
@@ -444,16 +491,20 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Scan the current list of tasks blocked within RCU read-side critical
  * sections, printing out the tid of each.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
        struct task_struct *t;
+       int ndetected = 0;
 
        if (!rcu_preempt_blocked_readers_cgp(rnp))
-               return;
+               return 0;
        t = list_entry(rnp->gp_tasks,
                       struct task_struct, rcu_node_entry);
-       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
+       list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                printk(" P%d", t->pid);
+               ndetected++;
+       }
+       return ndetected;
 }
 
 /*
@@ -589,7 +640,8 @@ static void rcu_preempt_check_callbacks(int cpu)
                rcu_preempt_qs(cpu);
                return;
        }
-       if (per_cpu(rcu_preempt_data, cpu).qs_pending)
+       if (t->rcu_read_lock_nesting > 0 &&
+           per_cpu(rcu_preempt_data, cpu).qs_pending)
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
 }
 
@@ -633,18 +685,9 @@ EXPORT_SYMBOL_GPL(call_rcu);
  */
 void synchronize_rcu(void)
 {
-       struct rcu_synchronize rcu;
-
        if (!rcu_scheduler_active)
                return;
-
-       init_rcu_head_on_stack(&rcu.head);
-       init_completion(&rcu.completion);
-       /* Will wake me after RCU finished. */
-       call_rcu(&rcu.head, wakeme_after_rcu);
-       /* Wait for it. */
-       wait_for_completion(&rcu.completion);
-       destroy_rcu_head_on_stack(&rcu.head);
+       wait_rcu_gp(call_rcu);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu);
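
synchronize_rcu() above now delegates to wait_rcu_gp(call_rcu) instead of
open-coding the callback-plus-completion sequence shown in the removed lines.
Reconstructed from those removed lines, a wait_rcu_gp()-style helper amounts to
roughly the sketch below; the structure and wakeup callback already exist
elsewhere in the tree and are spelled out here (under sketch names) only to keep
the example self-contained, so the real helper may differ in detail.

#include <linux/completion.h>
#include <linux/rcupdate.h>

struct rcu_synchronize_sketch {
	struct rcu_head head;
	struct completion completion;
};

/* Awaken the waiter once the grace period has elapsed. */
static void wakeme_after_gp(struct rcu_head *head)
{
	struct rcu_synchronize_sketch *rcu =
		container_of(head, struct rcu_synchronize_sketch, head);

	complete(&rcu->completion);
}

/* Wait for one grace period of the flavor selected by @crf, e.g. call_rcu. */
static void wait_rcu_gp_sketch(void (*crf)(struct rcu_head *head,
					    void (*func)(struct rcu_head *head)))
{
	struct rcu_synchronize_sketch rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	crf(&rcu.head, wakeme_after_gp);	/* will wake us after RCU finishes */
	wait_for_completion(&rcu.completion);	/* wait for it */
	destroy_rcu_head_on_stack(&rcu.head);
}
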
 
@@ -695,9 +738,12 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        for (;;) {
-               if (!sync_rcu_preempt_exp_done(rnp))
+               if (!sync_rcu_preempt_exp_done(rnp)) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        break;
+               }
                if (rnp->parent == NULL) {
+                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
                        wake_up(&sync_rcu_preempt_exp_wq);
                        break;
                }
@@ -707,7 +753,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp)
                raw_spin_lock(&rnp->lock); /* irqs already disabled */
                rnp->expmask &= ~mask;
        }
-       raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
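
In the rcu_report_exp_rnp() hunk above, the single raw_spin_unlock_irqrestore()
after the loop is gone; instead each exit path of the loop releases rnp->lock
itself, and in particular the lock is dropped before wake_up() is invoked.  A
minimal userspace analogue of that "unlock on every exit path" shape, using a
pthread mutex in place of the rcu_node spinlock (loop condition and names are
illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

static int work_remaining(int level)
{
	return level < 2;	/* stand-in for !sync_rcu_preempt_exp_done() */
}

static void report_up(void)
{
	int level = 0;

	pthread_mutex_lock(&node_lock);
	for (;;) {
		if (!work_remaining(level)) {
			pthread_mutex_unlock(&node_lock);	/* exit path 1 */
			break;
		}
		if (level == 1) {	/* topmost level reached */
			pthread_mutex_unlock(&node_lock);	/* exit path 2 */
			printf("wake_up() analogue at level %d\n", level);
			break;
		}
		level++;	/* the kernel code swaps to the parent's lock here */
	}
	/* No unlock after the loop: every break path already released the lock. */
}

int main(void)
{
	report_up();
	return 0;
}
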
@@ -943,8 +988,9 @@ static void rcu_print_detail_task_stall(struct rcu_state *rsp)
  * Because preemptible RCU does not exist, we never have to check for
  * tasks blocked within RCU read-side critical sections.
  */
-static void rcu_print_task_stall(struct rcu_node *rnp)
+static int rcu_print_task_stall(struct rcu_node *rnp)
 {
+       return 0;
 }
 
 /*
@@ -1111,6 +1157,8 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
+static struct lock_class_key rcu_boost_class;
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ -1173,8 +1221,10 @@ static int rcu_boost(struct rcu_node *rnp)
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
        rt_mutex_init_proxy_locked(&mtx, t);
+       /* Avoid lockdep false positives.  This rt_mutex is its own thing. */
+       lockdep_set_class_and_name(&mtx.wait_lock, &rcu_boost_class,
+                                  "rcu_boost_mutex");
        t->rcu_boost_mutex = &mtx;
-       t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
@@ -1203,9 +1253,12 @@ static int rcu_boost_kthread(void *arg)
        int spincnt = 0;
        int more2boost;
 
+       trace_rcu_utilization("Start boost kthread@init");
        for (;;) {
                rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
+               trace_rcu_utilization("End boost kthread@rcu_wait");
                rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
+               trace_rcu_utilization("Start boost kthread@rcu_wait");
                rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
                more2boost = rcu_boost(rnp);
                if (more2boost)
@@ -1213,11 +1266,14 @@ static int rcu_boost_kthread(void *arg)
                else
                        spincnt = 0;
                if (spincnt > 10) {
+                       trace_rcu_utilization("End boost kthread@rcu_yield");
                        rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp);
+                       trace_rcu_utilization("Start boost kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        /* NOTREACHED */
+       trace_rcu_utilization("End boost kthread@notreached");
        return 0;
 }
 
@@ -1266,11 +1322,9 @@ static void invoke_rcu_callbacks_kthread(void)
 
        local_irq_save(flags);
        __this_cpu_write(rcu_cpu_has_work, 1);
-       if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
-               local_irq_restore(flags);
-               return;
-       }
-       wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
+       if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
+           current != __this_cpu_read(rcu_cpu_kthread_task))
+               wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
        local_irq_restore(flags);
 }
 
@@ -1318,13 +1372,13 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        if (rnp->boost_kthread_task != NULL)
                return 0;
        t = kthread_create(rcu_boost_kthread, (void *)rnp,
-                          "rcub%d", rnp_index);
+                          "rcub/%d", rnp_index);
        if (IS_ERR(t))
                return PTR_ERR(t);
        raw_spin_lock_irqsave(&rnp->lock, flags);
        rnp->boost_kthread_task = t;
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       sp.sched_priority = RCU_KTHREAD_PRIO;
+       sp.sched_priority = RCU_BOOST_PRIO;
        sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
        wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
        return 0;
@@ -1419,6 +1473,7 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
 {
        struct sched_param sp;
        struct timer_list yield_timer;
+       int prio = current->rt_priority;
 
        setup_timer_on_stack(&yield_timer, f, arg);
        mod_timer(&yield_timer, jiffies + 2);
@@ -1426,7 +1481,8 @@ static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
        sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
        set_user_nice(current, 19);
        schedule();
-       sp.sched_priority = RCU_KTHREAD_PRIO;
+       set_user_nice(current, 0);
+       sp.sched_priority = prio;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
        del_timer(&yield_timer);
 }
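
rcu_yield() above now snapshots the caller's current->rt_priority before
dropping to SCHED_NORMAL and, after the yield, restores a nice value of 0 and
that saved priority rather than the fixed RCU_KTHREAD_PRIO.  A rough userspace
analogue of the same save/drop/restore pattern follows; it needs the usual
privileges for SCHED_FIFO, and the function name is illustrative only.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static void yield_briefly(void)
{
	struct sched_param sp;
	int prio = 0;

	/* Snapshot the current real-time priority before dropping it. */
	if (sched_getparam(0, &sp) == 0)
		prio = sp.sched_priority;

	sp.sched_priority = 0;
	if (sched_setscheduler(0, SCHED_OTHER, &sp) != 0)
		perror("sched_setscheduler(SCHED_OTHER)");
	setpriority(PRIO_PROCESS, 0, 19);	/* be maximally nice */
	sched_yield();				/* give everyone else a turn */

	/* Restore: nice 0 first, then the saved real-time priority, if any. */
	setpriority(PRIO_PROCESS, 0, 0);
	if (prio > 0) {
		sp.sched_priority = prio;
		if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0)
			perror("sched_setscheduler(SCHED_FIFO)");
	}
}

int main(void)
{
	yield_briefly();
	return 0;
}
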
@@ -1464,7 +1520,8 @@ static int rcu_cpu_kthread_should_stop(int cpu)
 
 /*
  * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * earlier RCU softirq.
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
  */
 static int rcu_cpu_kthread(void *arg)
 {
@@ -1475,9 +1532,12 @@ static int rcu_cpu_kthread(void *arg)
        char work;
        char *workp = &per_cpu(rcu_cpu_has_work, cpu);
 
+       trace_rcu_utilization("Start CPU kthread@init");
        for (;;) {
                *statusp = RCU_KTHREAD_WAITING;
+               trace_rcu_utilization("End CPU kthread@rcu_wait");
                rcu_wait(*workp != 0 || kthread_should_stop());
+               trace_rcu_utilization("Start CPU kthread@rcu_wait");
                local_bh_disable();
                if (rcu_cpu_kthread_should_stop(cpu)) {
                        local_bh_enable();
@@ -1498,11 +1558,14 @@ static int rcu_cpu_kthread(void *arg)
                        spincnt = 0;
                if (spincnt > 10) {
                        *statusp = RCU_KTHREAD_YIELDING;
+                       trace_rcu_utilization("End CPU kthread@rcu_yield");
                        rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
+                       trace_rcu_utilization("Start CPU kthread@rcu_yield");
                        spincnt = 0;
                }
        }
        *statusp = RCU_KTHREAD_STOPPED;
+       trace_rcu_utilization("End CPU kthread@term");
        return 0;
 }
 
@@ -1532,10 +1595,13 @@ static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
        struct sched_param sp;
        struct task_struct *t;
 
-       if (!rcu_kthreads_spawnable ||
+       if (!rcu_scheduler_fully_active ||
            per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
                return 0;
-       t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
+       t = kthread_create_on_node(rcu_cpu_kthread,
+                                  (void *)(long)cpu,
+                                  cpu_to_node(cpu),
+                                  "rcuc/%d", cpu);
        if (IS_ERR(t))
                return PTR_ERR(t);
        if (cpu_online(cpu))
@@ -1639,12 +1705,12 @@ static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
        struct sched_param sp;
        struct task_struct *t;
 
-       if (!rcu_kthreads_spawnable ||
+       if (!rcu_scheduler_fully_active ||
            rnp->qsmaskinit == 0)
                return 0;
        if (rnp->node_kthread_task == NULL) {
                t = kthread_create(rcu_node_kthread, (void *)rnp,
-                                  "rcun%d", rnp_index);
+                                  "rcun/%d", rnp_index);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1665,7 +1731,7 @@ static int __init rcu_spawn_kthreads(void)
        int cpu;
        struct rcu_node *rnp;
 
-       rcu_kthreads_spawnable = 1;
+       rcu_scheduler_fully_active = 1;
        for_each_possible_cpu(cpu) {
                per_cpu(rcu_cpu_has_work, cpu) = 0;
                if (cpu_online(cpu))
@@ -1687,7 +1753,7 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
        struct rcu_node *rnp = rdp->mynode;
 
        /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
-       if (rcu_kthreads_spawnable) {
+       if (rcu_scheduler_fully_active) {
                (void)rcu_spawn_one_cpu_kthread(cpu);
                if (rnp->node_kthread_task == NULL)
                        (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
@@ -1726,6 +1792,13 @@ static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
 {
 }
 
+static int __init rcu_scheduler_really_started(void)
+{
+       rcu_scheduler_fully_active = 1;
+       return 0;
+}
+early_initcall(rcu_scheduler_really_started);
+
 static void __cpuinit rcu_prepare_kthreads(int cpu)
 {
 }
@@ -1875,15 +1948,6 @@ int rcu_needs_cpu(int cpu)
        return rcu_needs_cpu_quick_check(cpu);
 }
 
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.  But fast dyntick-idle
- * entry is not configured, so we never do need to.
- */
-static void rcu_needs_cpu_flush(void)
-{
-}
-
 #else /* #if !defined(CONFIG_RCU_FAST_NO_HZ) */
 
 #define RCU_NEEDS_CPU_FLUSHES 5
@@ -1959,20 +2023,4 @@ int rcu_needs_cpu(int cpu)
        return c;
 }
 
-/*
- * Check to see if we need to continue a callback-flush operations to
- * allow the last CPU to enter dyntick-idle mode.
- */
-static void rcu_needs_cpu_flush(void)
-{
-       int cpu = smp_processor_id();
-       unsigned long flags;
-
-       if (per_cpu(rcu_dyntick_drain, cpu) <= 0)
-               return;
-       local_irq_save(flags);
-       (void)rcu_needs_cpu(cpu);
-       local_irq_restore(flags);
-}
-
 #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */