[linux-flexiantxendom0-3.2.10.git] kernel/lockdep.c

diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b2e08c9..ea9ee45 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -431,6 +431,7 @@ unsigned int max_lockdep_depth;
  * about it later on, in lockdep_info().
  */
 static int lockdep_init_error;
+static const char *lock_init_error;
 static unsigned long lockdep_init_trace_data[20];
 static struct stack_trace lockdep_init_trace = {
        .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
@@ -499,36 +500,32 @@ void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
        usage[i] = '\0';
 }
 
-static int __print_lock_name(struct lock_class *class)
+static void __print_lock_name(struct lock_class *class)
 {
        char str[KSYM_NAME_LEN];
        const char *name;
 
        name = class->name;
-       if (!name)
-               name = __get_key_name(class->key, str);
-
-       return printk("%s", name);
-}
-
-static void print_lock_name(struct lock_class *class)
-{
-       char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
-       const char *name;
-
-       get_usage_chars(class, usage);
-
-       name = class->name;
        if (!name) {
                name = __get_key_name(class->key, str);
-               printk(" (%s", name);
+               printk("%s", name);
        } else {
-               printk(" (%s", name);
+               printk("%s", name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
+}
+
+static void print_lock_name(struct lock_class *class)
+{
+       char usage[LOCK_USAGE_CHARS];
+
+       get_usage_chars(class, usage);
+
+       printk(" (");
+       __print_lock_name(class);
        printk("){%s}", usage);
 }
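
For illustration only (an editorial sketch, not part of the patch): the pieces printed by __print_lock_name() map onto annotations made at the lock call sites. The "#<name_version>" suffix distinguishes lock classes that happen to share a name, and the "/<subclass>" suffix comes from nested acquisitions such as mutex_lock_nested(). A hypothetical driver like the one below would therefore show up in a report roughly as " (&f->lock/1){...}", with the exact name taken from the mutex_init() call site.

/* Hypothetical example -- illustrates the name, #version and /subclass parts. */
#include <linux/mutex.h>
#include <linux/lockdep.h>

struct foo {
        struct mutex lock;
};

static void foo_init(struct foo *f)
{
        mutex_init(&f->lock);   /* lockdep records "&f->lock" as the class name */
}

static void foo_lock_parent_and_child(struct foo *parent, struct foo *child)
{
        mutex_lock(&parent->lock);
        /*
         * Subclass 1: __print_lock_name() prints this acquisition with a
         * trailing "/1", so the two nesting levels can be told apart.
         */
        mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

        mutex_unlock(&child->lock);
        mutex_unlock(&parent->lock);
}
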
 
@@ -568,11 +565,12 @@ static void lockdep_print_held_locks(struct task_struct *curr)
        }
 }
 
-static void print_kernel_version(void)
+static void print_kernel_ident(void)
 {
-       printk("%s %.*s\n", init_utsname()->release,
+       printk("%s %.*s %s\n", init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
-               init_utsname()->version);
+               init_utsname()->version,
+               print_tainted());
 }
 
 static int very_verbose(struct lock_class *class)
@@ -656,6 +654,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        if (unlikely(!lockdep_initialized)) {
                lockdep_init();
                lockdep_init_error = 1;
+               lock_init_error = lock->name;
                save_stack_trace(&lockdep_init_trace);
        }
 #endif
@@ -723,7 +722,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
        class = look_up_lock_class(lock, subclass);
        if (likely(class))
-               return class;
+               goto out_set_class_cache;
 
        /*
         * Debug-check: all keys must be persistent!
@@ -808,6 +807,7 @@ out_unlock_set:
        graph_unlock();
        raw_local_irq_restore(flags);
 
+out_set_class_cache:
        if (!subclass || force)
                lock->class_cache[0] = class;
        else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
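
The two hunks above show only fragments of register_lock_class(), so here is a rough stitched-together sketch of the resulting control flow (hypothetical function name, allocation/hashing slow path elided). The point of the change is that the early-return path now jumps to out_set_class_cache instead of returning, so the per-map class_cache is filled whether the class was found or freshly registered.

static struct lock_class *
register_lock_class_sketch(struct lockdep_map *lock, unsigned int subclass,
                           int force)
{
        struct lock_class *class;

        class = look_up_lock_class(lock, subclass);
        if (likely(class))
                goto out_set_class_cache;       /* previously: return class; */

        /* ... slow path: allocate a new lock_class and add it to the hash ... */

out_set_class_cache:
        /* Reached on both the fast and the slow path after this patch. */
        if (!subclass || force)
                lock->class_cache[0] = class;
        else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
                lock->class_cache[subclass] = class;

        return class;
}
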
@@ -1149,7 +1149,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
        printk("\n");
        printk("======================================================\n");
        printk("[ INFO: possible circular locking dependency detected ]\n");
-       print_kernel_version();
+       print_kernel_ident();
        printk("-------------------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, task_pid_nr(curr));
@@ -1488,7 +1488,7 @@ print_bad_irq_dependency(struct task_struct *curr,
        printk("======================================================\n");
        printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
                irqclass, irqclass);
-       print_kernel_version();
+       print_kernel_ident();
        printk("------------------------------------------------------\n");
        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
                curr->comm, task_pid_nr(curr),
@@ -1717,7 +1717,7 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
        printk("\n");
        printk("=============================================\n");
        printk("[ INFO: possible recursive locking detected ]\n");
-       print_kernel_version();
+       print_kernel_ident();
        printk("---------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, task_pid_nr(curr));
@@ -2224,7 +2224,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
        printk("\n");
        printk("=================================\n");
        printk("[ INFO: inconsistent lock state ]\n");
-       print_kernel_version();
+       print_kernel_ident();
        printk("---------------------------------\n");
 
        printk("inconsistent {%s} -> {%s} usage.\n",
@@ -2289,7 +2289,7 @@ print_irq_inversion_bug(struct task_struct *curr,
        printk("\n");
        printk("=========================================================\n");
        printk("[ INFO: possible irq lock inversion dependency detected ]\n");
-       print_kernel_version();
+       print_kernel_ident();
        printk("---------------------------------------------------------\n");
        printk("%s/%d just changed the state of lock:\n",
                curr->comm, task_pid_nr(curr));
@@ -3175,6 +3175,7 @@ print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
        printk("\n");
        printk("=====================================\n");
        printk("[ BUG: bad unlock balance detected! ]\n");
+       print_kernel_ident();
        printk("-------------------------------------\n");
        printk("%s/%d is trying to release lock (",
                curr->comm, task_pid_nr(curr));
@@ -3619,6 +3620,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
        printk("\n");
        printk("=================================\n");
        printk("[ BUG: bad contention detected! ]\n");
+       print_kernel_ident();
        printk("---------------------------------\n");
        printk("%s/%d is trying to contend lock (",
                curr->comm, task_pid_nr(curr));
@@ -3974,7 +3976,8 @@ void __init lockdep_info(void)
 
 #ifdef CONFIG_DEBUG_LOCKDEP
        if (lockdep_init_error) {
-               printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
+               printk("WARNING: lockdep init error! lock-%s was acquired"
+                       " before lockdep_init\n", lock_init_error);
                printk("Call stack leading to lockdep invocation was:\n");
                print_stack_trace(&lockdep_init_trace, 0);
        }
@@ -3993,6 +3996,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
        printk("\n");
        printk("=========================\n");
        printk("[ BUG: held lock freed! ]\n");
+       print_kernel_ident();
        printk("-------------------------\n");
        printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
                curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
@@ -4050,6 +4054,7 @@ static void print_held_locks_bug(struct task_struct *curr)
        printk("\n");
        printk("=====================================\n");
        printk("[ BUG: lock held at task exit time! ]\n");
+       print_kernel_ident();
        printk("-------------------------------------\n");
        printk("%s/%d is exiting with locks still held!\n",
                curr->comm, task_pid_nr(curr));
@@ -4147,6 +4152,7 @@ void lockdep_sys_exit(void)
                printk("\n");
                printk("================================================\n");
                printk("[ BUG: lock held when returning to user space! ]\n");
+               print_kernel_ident();
                printk("------------------------------------------------\n");
                printk("%s/%d is leaving the kernel with locks still held!\n",
                                curr->comm, curr->pid);
@@ -4166,10 +4172,39 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
        printk("\n");
        printk("===============================\n");
        printk("[ INFO: suspicious RCU usage. ]\n");
+       print_kernel_ident();
        printk("-------------------------------\n");
        printk("%s:%d %s!\n", file, line, s);
        printk("\nother info that might help us debug this:\n\n");
-       printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
+       printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
+              !rcu_lockdep_current_cpu_online()
+                       ? "RCU used illegally from offline CPU!\n"
+                       : rcu_is_cpu_idle()
+                               ? "RCU used illegally from idle CPU!\n"
+                               : "",
+              rcu_scheduler_active, debug_locks);
+
+       /*
+        * If a CPU is in the RCU-free window in idle (i.e. in the section
+        * between rcu_idle_enter() and rcu_idle_exit()), then RCU
+        * considers that CPU to be in an "extended quiescent state",
+        * which means that RCU will be completely ignoring that CPU.
+        * Therefore, rcu_read_lock() and friends have absolutely no
+        * effect on a CPU running in that state. In other words, even if
+        * such an RCU-idle CPU has called rcu_read_lock(), RCU might well
+        * delete data structures out from under it.  RCU really has no
+        * choice here: we need to keep an RCU-free window in idle where
+        * the CPU may possibly enter into low power mode, so that other
+        * CPUs which started a grace period can notice the extended
+        * quiescent state.  Otherwise we would delay every grace period
+        * for as long as we run in the idle task.
+        *
+        * So complain bitterly if someone does call rcu_read_lock(),
+        * rcu_read_lock_bh() and so on from extended quiescent states.
+        */
+       if (rcu_is_cpu_idle())
+               printk("RCU used illegally from extended quiescent state!\n");
+
        lockdep_print_held_locks(curr);
        printk("\nstack backtrace:\n");
        dump_stack();
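
To make the expanded diagnostics concrete, here is a hypothetical example (illustrative, not from the patch) of the kind of misuse that ends up in lockdep_rcu_suspicious() when CONFIG_PROVE_RCU is enabled: dereferencing an RCU-protected pointer without being inside any read-side critical section. The struct and function names are made up; rcu_dereference() and rcu_read_lock()/rcu_read_unlock() are the real APIs.

#include <linux/rcupdate.h>

struct item {
        int value;
};

static struct item __rcu *global_item;

/*
 * Buggy: no rcu_read_lock() around the dereference, so the PROVE_RCU
 * checks inside rcu_dereference() fail and lockdep_rcu_suspicious()
 * prints the report shown above.
 */
static int read_value_badly(void)
{
        struct item *p = rcu_dereference(global_item);

        return p ? p->value : -1;
}

/* Correct: the dereference and the use both happen under rcu_read_lock(). */
static int read_value_properly(void)
{
        struct item *p;
        int val = -1;

        rcu_read_lock();
        p = rcu_dereference(global_item);
        if (p)
                val = p->value;
        rcu_read_unlock();

        return val;
}
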