UBUNTU: Ubuntu-2.6.38-12.51
[linux-flexiantxendom0-natty.git] / kernel / lockdep.c
index 78325f8..0d2058d 100644
@@ -146,7 +146,7 @@ static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
 
 static inline u64 lockstat_clock(void)
 {
-       return cpu_clock(smp_processor_id());
+       return local_clock();
 }
 
 static int lock_point(unsigned long points[], unsigned long ip)
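
For context, not part of this hunk: local_clock() reads the scheduler clock for whichever CPU the caller happens to be on, so the explicit smp_processor_id() argument becomes unnecessary at the call site. A rough sketch of its shape around this kernel version (paraphrased from kernel/sched_clock.c, unstable-sched-clock case; the exact body may differ):

u64 local_clock(void)
{
        u64 clock;
        unsigned long flags;

        /* disable IRQs so the CPU cannot change underneath us */
        local_irq_save(flags);
        clock = sched_clock_cpu(smp_processor_id());
        local_irq_restore(flags);

        return clock;
}
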
@@ -639,6 +639,16 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
        }
 #endif
 
+       if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
+               debug_locks_off();
+               printk(KERN_ERR
+                       "BUG: looking up invalid subclass: %u\n", subclass);
+               printk(KERN_ERR
+                       "turning off the locking correctness validator.\n");
+               dump_stack();
+               return NULL;
+       }
+
        /*
         * Static locks do not have their class-keys yet - for them the key
         * is the lock object itself:
@@ -774,7 +784,9 @@ out_unlock_set:
        raw_local_irq_restore(flags);
 
        if (!subclass || force)
-               lock->class_cache = class;
+               lock->class_cache[0] = class;
+       else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+               lock->class_cache[subclass] = class;
 
        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
                return NULL;
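
The class_cache[] writes above depend on a companion change in include/linux/lockdep.h that is not shown in this excerpt: the single cache pointer becomes a small per-subclass array. A hedged paraphrase of the resulting structure (field layout and surrounding members may differ from the real header):

/* Paraphrased; only the class_cache member is the point here. */
#define NR_LOCKDEP_CACHING_CLASSES      2

struct lockdep_map {
        struct lock_class_key   *key;
        struct lock_class       *class_cache[NR_LOCKDEP_CACHING_CLASSES];
        const char              *name;
#ifdef CONFIG_LOCK_STAT
        int                     cpu;
        unsigned long           ip;
#endif
};

Only the first NR_LOCKDEP_CACHING_CLASSES subclasses are cached; higher subclasses fall back to a full look_up_lock_class()/register_lock_class() every time.
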
@@ -805,7 +817,8 @@ static struct lock_list *alloc_list_entry(void)
  * Add a new dependency to the head of the list:
  */
 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
-                           struct list_head *head, unsigned long ip, int distance)
+                           struct list_head *head, unsigned long ip,
+                           int distance, struct stack_trace *trace)
 {
        struct lock_list *entry;
        /*
@@ -816,11 +829,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
        if (!entry)
                return 0;
 
-       if (!save_trace(&entry->trace))
-               return 0;
-
        entry->class = this;
        entry->distance = distance;
+       entry->trace = *trace;
        /*
         * Since we never remove from the dependency list, the list can
         * be walked lockless by other CPUs, it's only allocation
@@ -1622,12 +1633,20 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-              struct held_lock *next, int distance)
+              struct held_lock *next, int distance, int trylock_loop)
 {
        struct lock_list *entry;
        int ret;
        struct lock_list this;
        struct lock_list *uninitialized_var(target_entry);
+       /*
+        * Static variable, serialized by the graph_lock().
+        *
+        * We use this static variable to save the stack trace in case
+        * we call into this function multiple times due to encountering
+        * trylocks in the held lock stack.
+        */
+       static struct stack_trace trace;
 
        /*
         * Prove that the new <prev> -> <next> dependency would not
@@ -1675,20 +1694,23 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
                }
        }
 
+       if (!trylock_loop && !save_trace(&trace))
+               return 0;
+
        /*
         * Ok, all validations passed, add the new lock
         * to the previous lock's dependency list:
         */
        ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
                               &hlock_class(prev)->locks_after,
-                              next->acquire_ip, distance);
+                              next->acquire_ip, distance, &trace);
 
        if (!ret)
                return 0;
 
        ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
                               &hlock_class(next)->locks_before,
-                              next->acquire_ip, distance);
+                              next->acquire_ip, distance, &trace);
        if (!ret)
                return 0;
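
Taken together with the add_lock_to_list() change earlier in this patch, the intent is that the costly stack unwind runs at most once per check_prevs_add() walk, and both dependency-list entries copy the same snapshot. A condensed, non-literal sketch of the flow:

/* Condensed illustration; identifiers follow the patch, control flow is
 * simplified.  "trace" is static and serialized by graph_lock(). */
if (!trylock_loop && !save_trace(&trace))       /* unwind once per walk */
        return 0;

/* forward entry: prev -> next */
if (!add_lock_to_list(hlock_class(prev), hlock_class(next),
                      &hlock_class(prev)->locks_after,
                      next->acquire_ip, distance, &trace))
        return 0;

/* backward entry: next -> prev, reusing the same saved trace */
if (!add_lock_to_list(hlock_class(next), hlock_class(prev),
                      &hlock_class(next)->locks_before,
                      next->acquire_ip, distance, &trace))
        return 0;
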
 
@@ -1718,6 +1740,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
        int depth = curr->lockdep_depth;
+       int trylock_loop = 0;
        struct held_lock *hlock;
 
        /*
@@ -1743,7 +1766,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                 * added:
                 */
                if (hlock->read != 2) {
-                       if (!check_prev_add(curr, hlock, next, distance))
+                       if (!check_prev_add(curr, hlock, next,
+                                               distance, trylock_loop))
                                return 0;
                        /*
                         * Stop after the first non-trylock entry,
@@ -1766,6 +1790,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
                if (curr->held_locks[depth].irq_context !=
                                curr->held_locks[depth-1].irq_context)
                        break;
+               trylock_loop = 1;
        }
        return 1;
 out_bug:
@@ -2267,22 +2292,6 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 }
 
 /*
- * Debugging helper: via this flag we know that we are in
- * 'early bootup code', and will warn about any invalid irqs-on event:
- */
-static int early_boot_irqs_enabled;
-
-void early_boot_irqs_off(void)
-{
-       early_boot_irqs_enabled = 0;
-}
-
-void early_boot_irqs_on(void)
-{
-       early_boot_irqs_enabled = 1;
-}
-
-/*
  * Hardirqs will be enabled:
  */
 void trace_hardirqs_on_caller(unsigned long ip)
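
The deleted helpers are superseded by a single flag maintained outside lockdep; the next hunk switches the warning to test it directly. A hedged paraphrase of the companion changes elsewhere in this series (declaration in include/linux/kernel.h, definition and use in init/main.c; exact wording may differ):

/* include/linux/kernel.h (paraphrased) */
extern bool early_boot_irqs_disabled;

/* init/main.c (paraphrased): starts out true and is cleared just before
 * start_kernel() enables interrupts for real. */
bool early_boot_irqs_disabled __read_mostly;
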
@@ -2294,11 +2303,16 @@ void trace_hardirqs_on_caller(unsigned long ip)
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
-       if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
+       if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
                return;
 
        if (unlikely(curr->hardirqs_enabled)) {
-               debug_atomic_inc(redundant_hardirqs_on);
+               /*
+                * Neither irq nor preemption are disabled here
+                * so this is racy by nature but losing one hit
+                * in a stat is not a big deal.
+                */
+               __debug_atomic_inc(redundant_hardirqs_on);
                return;
        }
        /* we'll do an OFF -> ON transition: */
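
The switch to __debug_atomic_inc() matters because trace_hardirqs_on_caller() runs with interrupts (and possibly preemption) enabled, as the new comment notes. A hedged paraphrase of the two helpers from kernel/lockdep_internals.h around this version (the real macro bodies may differ slightly):

/* Paraphrased: the plain variant asserts IRQs are off before touching
 * the per-cpu counter; the __ variant tolerates being preempted or
 * migrated and accepts the rare lost increment. */
#define __debug_atomic_inc(ptr)                                 \
        this_cpu_inc(lockdep_stats.ptr);

#define debug_atomic_inc(ptr)                   {               \
        WARN_ON_ONCE(!irqs_disabled());                         \
        __this_cpu_inc(lockdep_stats.ptr);                      \
}
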
@@ -2661,7 +2675,11 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                      struct lock_class_key *key, int subclass)
 {
-       lock->class_cache = NULL;
+       int i;
+
+       for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+               lock->class_cache[i] = NULL;
+
 #ifdef CONFIG_LOCK_STAT
        lock->cpu = raw_smp_processor_id();
 #endif
@@ -2693,6 +2711,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);
 
+struct lock_class_key __lockdep_no_validate__;
+
 /*
  * This gets called for every mutex_lock*()/spin_lock*() operation.
  * We maintain the dependency maps and validate the locking attempt:
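
The new __lockdep_no_validate__ key lets a lock opt out of full dependency validation; the next hunk downgrades such locks to check = 1 in __lock_acquire(). The header side of this series presumably adds a helper along these lines (paraphrased; the caller shown is hypothetical):

/* include/linux/lockdep.h (paraphrased) */
extern struct lock_class_key __lockdep_no_validate__;

#define lockdep_set_novalidate_class(lock) \
        lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)

/* hypothetical caller: exempt a lock that produces false positives */
lockdep_set_novalidate_class(&dev->mutex);
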
@@ -2719,18 +2739,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
        if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
                return 0;
 
-       if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
-               debug_locks_off();
-               printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
-               printk("turning off the locking correctness validator.\n");
-               dump_stack();
-               return 0;
-       }
+       if (lock->key == &__lockdep_no_validate__)
+               check = 1;
 
-       if (!subclass)
-               class = lock->class_cache;
+       if (subclass < NR_LOCKDEP_CACHING_CLASSES)
+               class = lock->class_cache[subclass];
        /*
-        * Not cached yet or subclass?
+        * Not cached?
         */
        if (unlikely(!class)) {
                class = register_lock_class(lock, subclass, 0);
@@ -2895,7 +2910,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
                return 1;
 
        if (hlock->references) {
-               struct lock_class *class = lock->class_cache;
+               struct lock_class *class = lock->class_cache[0];
 
                if (!class)
                        class = look_up_lock_class(lock, 0);
@@ -3214,7 +3229,7 @@ void lock_release(struct lockdep_map *lock, int nested,
        raw_local_irq_save(flags);
        check_flags(flags);
        current->lockdep_recursion = 1;
-       trace_lock_release(lock, nested, ip);
+       trace_lock_release(lock, ip);
        __lock_release(lock, nested, ip);
        current->lockdep_recursion = 0;
        raw_local_irq_restore(flags);
@@ -3367,7 +3382,7 @@ found_it:
                hlock->holdtime_stamp = now;
        }
 
-       trace_lock_acquired(lock, ip, waittime);
+       trace_lock_acquired(lock, ip);
 
        stats = get_lock_stats(hlock_class(hlock));
        if (waittime) {
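
The trace_lock_release()/trace_lock_acquired() calls lose their extra arguments because the corresponding tracepoints were slimmed down in a companion change to include/trace/events/lock.h. A hedged paraphrase of the new event definitions (the surrounding DECLARE_EVENT_CLASS boilerplate is omitted and may differ in detail):

/* Paraphrased; the point is the reduced argument lists.  Wait time and
 * nesting are now derived from lockdep state rather than passed in. */
DEFINE_EVENT(lock, lock_release,
        TP_PROTO(struct lockdep_map *lock, unsigned long ip),
        TP_ARGS(lock, ip)
);

DEFINE_EVENT(lock, lock_acquired,
        TP_PROTO(struct lockdep_map *lock, unsigned long ip),
        TP_ARGS(lock, ip)
);
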
@@ -3536,7 +3551,12 @@ void lockdep_reset_lock(struct lockdep_map *lock)
                if (list_empty(head))
                        continue;
                list_for_each_entry_safe(class, next, head, hash_entry) {
-                       if (unlikely(class == lock->class_cache)) {
+                       int match = 0;
+
+                       for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
+                               match |= class == lock->class_cache[j];
+
+                       if (unlikely(match)) {
                                if (debug_locks_off_graph_unlock())
                                        WARN_ON(1);
                                goto out_restore;
@@ -3752,7 +3772,7 @@ EXPORT_SYMBOL_GPL(debug_show_all_locks);
  * Careful: only use this function if you are sure that
  * the task cannot run in parallel!
  */
-void __debug_show_held_locks(struct task_struct *task)
+void debug_show_held_locks(struct task_struct *task)
 {
        if (unlikely(!debug_locks)) {
                printk("INFO: lockdep is turned off.\n");
@@ -3760,12 +3780,6 @@ void __debug_show_held_locks(struct task_struct *task)
        }
        lockdep_print_held_locks(task);
 }
-EXPORT_SYMBOL_GPL(__debug_show_held_locks);
-
-void debug_show_held_locks(struct task_struct *task)
-{
-               __debug_show_held_locks(task);
-}
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
 void lockdep_sys_exit(void)
@@ -3788,8 +3802,11 @@ void lockdep_rcu_dereference(const char *file, const int line)
 {
        struct task_struct *curr = current;
 
+#ifndef CONFIG_PROVE_RCU_REPEATEDLY
        if (!debug_locks_off())
                return;
+#endif /* #ifndef CONFIG_PROVE_RCU_REPEATEDLY */
+       /* Note: the following can be executed concurrently, so be careful. */
        printk("\n===================================================\n");
        printk(  "[ INFO: suspicious rcu_dereference_check() usage. ]\n");
        printk(  "---------------------------------------------------\n");