lockdep, rtmutex, bug: Show taint flags on error
/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * this code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different order,
 * even if it happened for another task, even if those were different
 * locks (but of the same class as this lock), this code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
#define DISABLE_BRANCH_PROFILING
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/stacktrace.h>
#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/utsname.h>
#include <linux/hash.h>
#include <linux/ftrace.h>
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/sections.h>

#include "lockdep_internals.h"

#define CREATE_TRACE_POINTS
#include <trace/events/lock.h>

#ifdef CONFIG_PROVE_LOCKING
int prove_locking = 1;
module_param(prove_locking, int, 0644);
#else
#define prove_locking 0
#endif

#ifdef CONFIG_LOCK_STAT
int lock_stat = 1;
module_param(lock_stat, int, 0644);
#else
#define lock_stat 0
#endif

/*
 * lockdep_lock: protects the lockdep graph, the hashes and the
 *               class/list/hash allocators.
 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
 */
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static int graph_lock(void)
{
        arch_spin_lock(&lockdep_lock);
        /*
         * Make sure that if another CPU detected a bug while
         * walking the graph we don't change it (while the other
         * CPU is busy printing out stuff with the graph lock
         * dropped already)
         */
        if (!debug_locks) {
                arch_spin_unlock(&lockdep_lock);
                return 0;
        }
        /* prevent any recursions within lockdep from causing deadlocks */
        current->lockdep_recursion++;
        return 1;
}

static inline int graph_unlock(void)
{
        if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) {
                /*
                 * The lockdep graph lock isn't locked while we expect it to
                 * be, we're confused now, bye!
                 */
                return DEBUG_LOCKS_WARN_ON(1);
        }

        current->lockdep_recursion--;
        arch_spin_unlock(&lockdep_lock);
        return 0;
}

/*
 * Turn lock debugging off and return with 0 if it was off already,
 * and also release the graph lock:
 */
static inline int debug_locks_off_graph_unlock(void)
{
        int ret = debug_locks_off();

        arch_spin_unlock(&lockdep_lock);

        return ret;
}

static int lockdep_initialized;

unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];

/*
 * All data structures here are protected by the global debug_lock.
 *
 * Mutex key structs only get allocated once, during bootup, and never
 * get freed - this significantly simplifies the debugging code.
 */
unsigned long nr_lock_classes;
static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

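/*
 * Map a held lock back to its lock class. The class index stored in
 * the held_lock is 1-based, so that 0 can denote "no class set".
 */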
static inline struct lock_class *hlock_class(struct held_lock *hlock)
{
        if (!hlock->class_idx) {
                /*
                 * Someone passed in garbage, we give up.
                 */
                DEBUG_LOCKS_WARN_ON(1);
                return NULL;
        }
        return lock_classes + hlock->class_idx - 1;
}

#ifdef CONFIG_LOCK_STAT
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
                      cpu_lock_stats);

static inline u64 lockstat_clock(void)
{
        return local_clock();
}

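/*
 * Record @ip in the first free slot of @points[], or find the slot
 * that already holds it. Returns the slot index, or LOCKSTAT_POINTS
 * if the array is full and @ip was not found.
 */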
static int lock_point(unsigned long points[], unsigned long ip)
{
        int i;

        for (i = 0; i < LOCKSTAT_POINTS; i++) {
                if (points[i] == 0) {
                        points[i] = ip;
                        break;
                }
                if (points[i] == ip)
                        break;
        }

        return i;
}

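/*
 * Fold one measured time interval into a lock_time accumulator,
 * tracking the min, max, total and number of samples.
 */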
static void lock_time_inc(struct lock_time *lt, u64 time)
{
        if (time > lt->max)
                lt->max = time;

        if (time < lt->min || !lt->nr)
                lt->min = time;

        lt->total += time;
        lt->nr++;
}

static inline void lock_time_add(struct lock_time *src, struct lock_time *dst)
{
        if (!src->nr)
                return;

        if (src->max > dst->max)
                dst->max = src->max;

        if (src->min < dst->min || !dst->nr)
                dst->min = src->min;

        dst->total += src->total;
        dst->nr += src->nr;
}

struct lock_class_stats lock_stats(struct lock_class *class)
{
        struct lock_class_stats stats;
        int cpu, i;

        memset(&stats, 0, sizeof(struct lock_class_stats));
        for_each_possible_cpu(cpu) {
                struct lock_class_stats *pcs =
                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

                for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
                        stats.contention_point[i] += pcs->contention_point[i];

                for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++)
                        stats.contending_point[i] += pcs->contending_point[i];

                lock_time_add(&pcs->read_waittime, &stats.read_waittime);
                lock_time_add(&pcs->write_waittime, &stats.write_waittime);

                lock_time_add(&pcs->read_holdtime, &stats.read_holdtime);
                lock_time_add(&pcs->write_holdtime, &stats.write_holdtime);

                for (i = 0; i < ARRAY_SIZE(stats.bounces); i++)
                        stats.bounces[i] += pcs->bounces[i];
        }

        return stats;
}

void clear_lock_stats(struct lock_class *class)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct lock_class_stats *cpu_stats =
                        &per_cpu(cpu_lock_stats, cpu)[class - lock_classes];

                memset(cpu_stats, 0, sizeof(struct lock_class_stats));
        }
        memset(class->contention_point, 0, sizeof(class->contention_point));
        memset(class->contending_point, 0, sizeof(class->contending_point));
}

static struct lock_class_stats *get_lock_stats(struct lock_class *class)
{
        return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
}

static void put_lock_stats(struct lock_class_stats *stats)
{
        put_cpu_var(cpu_lock_stats);
}

static void lock_release_holdtime(struct held_lock *hlock)
{
        struct lock_class_stats *stats;
        u64 holdtime;

        if (!lock_stat)
                return;

        holdtime = lockstat_clock() - hlock->holdtime_stamp;

        stats = get_lock_stats(hlock_class(hlock));
        if (hlock->read)
                lock_time_inc(&stats->read_holdtime, holdtime);
        else
                lock_time_inc(&stats->write_holdtime, holdtime);
        put_lock_stats(stats);
}
#else
static inline void lock_release_holdtime(struct held_lock *hlock)
{
}
#endif

/*
 * We keep a global list of all lock classes. The list only grows,
 * never shrinks. The list is only accessed with the lockdep
 * spinlock held.
 */
LIST_HEAD(all_lock_classes);

/*
 * The lockdep classes are in a hash-table as well, for fast lookup:
 */
#define CLASSHASH_BITS          (MAX_LOCKDEP_KEYS_BITS - 1)
#define CLASSHASH_SIZE          (1UL << CLASSHASH_BITS)
#define __classhashfn(key)      hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key)     (classhash_table + __classhashfn((key)))

static struct list_head classhash_table[CLASSHASH_SIZE];

/*
 * We put the lock dependency chains into a hash-table as well, to cache
 * their existence:
 */
#define CHAINHASH_BITS          (MAX_LOCKDEP_CHAINS_BITS-1)
#define CHAINHASH_SIZE          (1UL << CHAINHASH_BITS)
#define __chainhashfn(chain)    hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain)   (chainhash_table + __chainhashfn((chain)))

static struct list_head chainhash_table[CHAINHASH_SIZE];

/*
 * The hash key of the lock dependency chains is a hash itself too:
 * it's a hash of all locks taken up to that lock, including that lock.
 * It's a 64-bit hash, because it's important for the keys to be
 * unique.
 */
#define iterate_chain_key(key1, key2) \
        (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
        ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
        (key2))

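/*
 * lockdep_off/lockdep_on temporarily disable lock tracking for the
 * current task. The recursion counter makes the calls nestable.
 */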
void lockdep_off(void)
{
        current->lockdep_recursion++;
}
EXPORT_SYMBOL(lockdep_off);

void lockdep_on(void)
{
        current->lockdep_recursion--;
}
EXPORT_SYMBOL(lockdep_on);

/*
 * Debugging switches:
 */

#define VERBOSE                 0
#define VERY_VERBOSE            0

#if VERBOSE
# define HARDIRQ_VERBOSE        1
# define SOFTIRQ_VERBOSE        1
# define RECLAIM_VERBOSE        1
#else
# define HARDIRQ_VERBOSE        0
# define SOFTIRQ_VERBOSE        0
# define RECLAIM_VERBOSE        0
#endif

#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
/*
 * Quick filtering for interesting events:
 */
static int class_filter(struct lock_class *class)
{
#if 0
        /* Example */
        if (class->name_version == 1 &&
                        !strcmp(class->name, "lockname"))
                return 1;
        if (class->name_version == 1 &&
                        !strcmp(class->name, "&struct->lockfield"))
                return 1;
#endif
        /* Filter everything else. Returning 1 here would allow everything. */
        return 0;
}
#endif

static int verbose(struct lock_class *class)
{
#if VERBOSE
        return class_filter(class);
#endif
        return 0;
}

/*
 * Stack-trace: tightly packed array of stack backtrace
 * addresses. Protected by the graph_lock.
 */
unsigned long nr_stack_trace_entries;
static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];

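/*
 * Save the current stack trace into the next free slice of the global
 * stack_trace[] array. Returns 0 (turning lockdep off and dropping the
 * graph lock) once the array is exhausted.
 */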
static int save_trace(struct stack_trace *trace)
{
        trace->nr_entries = 0;
        trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
        trace->entries = stack_trace + nr_stack_trace_entries;

        trace->skip = 3;

        save_stack_trace(trace);

        /*
         * Some daft arches put -1 at the end to indicate it's a full trace.
         *
         * <rant> this is buggy anyway, since it takes a whole extra entry so a
         * complete trace that maxes out the entries provided will be reported
         * as incomplete, friggin useless </rant>
         */
        if (trace->nr_entries != 0 &&
            trace->entries[trace->nr_entries-1] == ULONG_MAX)
                trace->nr_entries--;

        trace->max_entries = trace->nr_entries;

        nr_stack_trace_entries += trace->nr_entries;

        if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
                if (!debug_locks_off_graph_unlock())
                        return 0;

                printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();

                return 0;
        }

        return 1;
}

unsigned int nr_hardirq_chains;
unsigned int nr_softirq_chains;
unsigned int nr_process_chains;
unsigned int max_lockdep_depth;

#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * might work. So we mark any initialization errors and printk
 * about it later on, in lockdep_info().
 */
static int lockdep_init_error;
static unsigned long lockdep_init_trace_data[20];
static struct stack_trace lockdep_init_trace = {
        .max_entries = ARRAY_SIZE(lockdep_init_trace_data),
        .entries = lockdep_init_trace_data,
};

/*
 * Various lockdep statistics:
 */
DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
#endif

/*
 * Locking printouts:
 */

#define __USAGE(__STATE)                                                \
        [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",       \
        [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",         \
        [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
        [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",

static const char *usage_str[] =
{
#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
#include "lockdep_states.h"
#undef LOCKDEP_STATE
        [LOCK_USED] = "INITIAL USE",
};

const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
{
        return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
}

static inline unsigned long lock_flag(enum lock_usage_bit bit)
{
        return 1UL << bit;
}

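/*
 * One usage character per irq state:
 *   '.'  neither used in this irq context nor enabled in it,
 *   '+'  irqs of this type were enabled while the lock was held,
 *   '-'  the lock was used in this irq context,
 *   '?'  both of the above.
 */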
static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
{
        char c = '.';

        if (class->usage_mask & lock_flag(bit + 2))
                c = '+';
        if (class->usage_mask & lock_flag(bit)) {
                c = '-';
                if (class->usage_mask & lock_flag(bit + 2))
                        c = '?';
        }

        return c;
}

void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
{
        int i = 0;

#define LOCKDEP_STATE(__STATE)                                          \
        usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);     \
        usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
#include "lockdep_states.h"
#undef LOCKDEP_STATE

        usage[i] = '\0';
}

static int __print_lock_name(struct lock_class *class)
{
        char str[KSYM_NAME_LEN];
        const char *name;

        name = class->name;
        if (!name)
                name = __get_key_name(class->key, str);

        return printk("%s", name);
}

static void print_lock_name(struct lock_class *class)
{
        char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
        const char *name;

        get_usage_chars(class, usage);

        name = class->name;
        if (!name) {
                name = __get_key_name(class->key, str);
                printk(" (%s", name);
        } else {
                printk(" (%s", name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                if (class->subclass)
                        printk("/%d", class->subclass);
        }
        printk("){%s}", usage);
}

static void print_lockdep_cache(struct lockdep_map *lock)
{
        const char *name;
        char str[KSYM_NAME_LEN];

        name = lock->name;
        if (!name)
                name = __get_key_name(lock->key->subkeys, str);

        printk("%s", name);
}

static void print_lock(struct held_lock *hlock)
{
        print_lock_name(hlock_class(hlock));
        printk(", at: ");
        print_ip_sym(hlock->acquire_ip);
}

static void lockdep_print_held_locks(struct task_struct *curr)
{
        int i, depth = curr->lockdep_depth;

        if (!depth) {
                printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
                return;
        }
        printk("%d lock%s held by %s/%d:\n",
                depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));

        for (i = 0; i < depth; i++) {
                printk(" #%d: ", i);
                print_lock(curr->held_locks + i);
        }
}

static void print_kernel_ident(void)
{
        printk("%s %.*s %s\n", init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version,
                print_tainted());
}

static int very_verbose(struct lock_class *class)
{
#if VERY_VERBOSE
        return class_filter(class);
#endif
        return 0;
}

/*
 * Is this the address of a static object:
 */
static int static_obj(void *obj)
{
        unsigned long start = (unsigned long) &_stext,
                      end   = (unsigned long) &_end,
                      addr  = (unsigned long) obj;

        /*
         * static variable?
         */
        if ((addr >= start) && (addr < end))
                return 1;

        if (arch_is_kernel_data(addr))
                return 1;

        /*
         * in-kernel percpu var?
         */
        if (is_kernel_percpu_address(addr))
                return 1;

        /*
         * module static or percpu var?
         */
        return is_module_address(addr) || is_module_percpu_address(addr);
}

/*
 * To make lock name printouts unique, we calculate a unique
 * class->name_version generation counter:
 */
static int count_matching_names(struct lock_class *new_class)
{
        struct lock_class *class;
        int count = 0;

        if (!new_class->name)
                return 0;

        list_for_each_entry(class, &all_lock_classes, lock_entry) {
                if (new_class->key - new_class->subclass == class->key)
                        return class->name_version;
                if (class->name && !strcmp(class->name, new_class->name))
                        count = max(count, class->name_version);
        }

        return count + 1;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
        struct lock_class *class;

#ifdef CONFIG_DEBUG_LOCKDEP
        /*
         * If the architecture calls into lockdep before initializing
         * the hashes then we'll warn about it later. (we cannot printk
         * right now)
         */
        if (unlikely(!lockdep_initialized)) {
                lockdep_init();
                lockdep_init_error = 1;
                save_stack_trace(&lockdep_init_trace);
        }
#endif

        if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
                debug_locks_off();
                printk(KERN_ERR
                        "BUG: looking up invalid subclass: %u\n", subclass);
                printk(KERN_ERR
                        "turning off the locking correctness validator.\n");
                dump_stack();
                return NULL;
        }

        /*
         * Static locks do not have their class-keys yet - for them the key
         * is the lock object itself:
         */
        if (unlikely(!lock->key))
                lock->key = (void *)lock;

        /*
         * NOTE: the class-key must be unique. For dynamic locks, a static
         * lock_class_key variable is passed in through the mutex_init()
         * (or spin_lock_init()) call - which acts as the key. For static
         * locks we use the lock object itself as the key.
         */
        BUILD_BUG_ON(sizeof(struct lock_class_key) >
                        sizeof(struct lockdep_map));

        key = lock->key->subkeys + subclass;

        hash_head = classhashentry(key);

        /*
         * We can walk the hash lockfree, because the hash only
         * grows, and we are careful when adding entries to the end:
         */
        list_for_each_entry(class, hash_head, hash_entry) {
                if (class->key == key) {
                        /*
                         * Huh! same key, different name? Did someone trample
                         * on some memory? We're most confused.
                         */
                        WARN_ON_ONCE(class->name != lock->name);
                        return class;
                }
        }

        return NULL;
}

/*
 * Register a lock's class in the hash-table, if the class is not present
 * yet. Otherwise we look it up. We cache the result in the lock object
 * itself, so actual lookup of the hash should be once per lock object.
 */
static inline struct lock_class *
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
        struct lockdep_subclass_key *key;
        struct list_head *hash_head;
        struct lock_class *class;
        unsigned long flags;

        class = look_up_lock_class(lock, subclass);
        if (likely(class))
                goto out_set_class_cache;

        /*
         * Debug-check: all keys must be persistent!
         */
        if (!static_obj(lock->key)) {
                debug_locks_off();
                printk("INFO: trying to register non-static key.\n");
                printk("the code is fine but needs lockdep annotation.\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();

                return NULL;
        }

        key = lock->key->subkeys + subclass;
        hash_head = classhashentry(key);

        raw_local_irq_save(flags);
        if (!graph_lock()) {
                raw_local_irq_restore(flags);
                return NULL;
        }
        /*
         * We have to do the hash-walk again, to avoid races
         * with another CPU:
         */
        list_for_each_entry(class, hash_head, hash_entry)
                if (class->key == key)
                        goto out_unlock_set;
        /*
         * Allocate a new key from the static array, and add it to
         * the hash:
         */
        if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
                if (!debug_locks_off_graph_unlock()) {
                        raw_local_irq_restore(flags);
                        return NULL;
                }
                raw_local_irq_restore(flags);

                printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();
                return NULL;
        }
        class = lock_classes + nr_lock_classes++;
        debug_atomic_inc(nr_unused_locks);
        class->key = key;
        class->name = lock->name;
        class->subclass = subclass;
        INIT_LIST_HEAD(&class->lock_entry);
        INIT_LIST_HEAD(&class->locks_before);
        INIT_LIST_HEAD(&class->locks_after);
        class->name_version = count_matching_names(class);
        /*
         * We use RCU's safe list-add method to make
         * parallel walking of the hash-list safe:
         */
        list_add_tail_rcu(&class->hash_entry, hash_head);
        /*
         * Add it to the global list of classes:
         */
        list_add_tail_rcu(&class->lock_entry, &all_lock_classes);

        if (verbose(class)) {
                graph_unlock();
                raw_local_irq_restore(flags);

                printk("\nnew class %p: %s", class->key, class->name);
                if (class->name_version > 1)
                        printk("#%d", class->name_version);
                printk("\n");
                dump_stack();

                raw_local_irq_save(flags);
                if (!graph_lock()) {
                        raw_local_irq_restore(flags);
                        return NULL;
                }
        }
out_unlock_set:
        graph_unlock();
        raw_local_irq_restore(flags);

out_set_class_cache:
        if (!subclass || force)
                lock->class_cache[0] = class;
        else if (subclass < NR_LOCKDEP_CACHING_CLASSES)
                lock->class_cache[subclass] = class;

        /*
         * Hash collision, did we smoke some? We found a class with a matching
         * hash but the subclass -- which is hashed in -- didn't match.
         */
        if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
                return NULL;

        return class;
}

#ifdef CONFIG_PROVE_LOCKING
/*
 * Allocate a lockdep entry. (assumes the graph_lock held, returns
 * with NULL on failure)
 */
static struct lock_list *alloc_list_entry(void)
{
        if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
                if (!debug_locks_off_graph_unlock())
                        return NULL;

                printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
                printk("turning off the locking correctness validator.\n");
                dump_stack();
                return NULL;
        }
        return list_entries + nr_list_entries++;
}

/*
 * Add a new dependency to the head of the list:
 */
static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
                            struct list_head *head, unsigned long ip,
                            int distance, struct stack_trace *trace)
{
        struct lock_list *entry;
        /*
         * Lock not present yet - get a new dependency struct and
         * add it to the list:
         */
        entry = alloc_list_entry();
        if (!entry)
                return 0;

        entry->class = this;
        entry->distance = distance;
        entry->trace = *trace;
        /*
         * Since we never remove from the dependency list, the list can
         * be walked lockless by other CPUs, it's only allocation
         * that must be protected by the spinlock. But this also means
         * we must make new entries visible only once writes to the
         * entry become visible - hence the RCU op:
         */
        list_add_tail_rcu(&entry->entry, head);

        return 1;
}

/*
 * For efficiency of the modulo operation we use a power of 2.
 */
#define MAX_CIRCULAR_QUEUE_SIZE         4096UL
#define CQ_MASK                         (MAX_CIRCULAR_QUEUE_SIZE-1)

/*
 * The circular_queue and its helpers are used to implement the
 * breadth-first search (BFS) algorithm, by which we can build the
 * shortest path from the next lock to be acquired back to a
 * previously held lock when there is a circular dependency between
 * them.
 */
struct circular_queue {
        unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
        unsigned int  front, rear;
};

static struct circular_queue lock_cq;

unsigned int max_bfs_queue_depth;

static unsigned int lockdep_dependency_gen_id;

static inline void __cq_init(struct circular_queue *cq)
{
        cq->front = cq->rear = 0;
        lockdep_dependency_gen_id++;
}

static inline int __cq_empty(struct circular_queue *cq)
{
        return (cq->front == cq->rear);
}

static inline int __cq_full(struct circular_queue *cq)
{
        return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
        if (__cq_full(cq))
                return -1;

        cq->element[cq->rear] = elem;
        cq->rear = (cq->rear + 1) & CQ_MASK;
        return 0;
}

static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
        if (__cq_empty(cq))
                return -1;

        *elem = cq->element[cq->front];
        cq->front = (cq->front + 1) & CQ_MASK;
        return 0;
}

static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
{
        return (cq->rear - cq->front) & CQ_MASK;
}

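/*
 * Mark a dependency entry as visited in the current BFS generation
 * and remember which entry we reached it from, so that the resulting
 * path can be walked backwards through ->parent.
 */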
static inline void mark_lock_accessed(struct lock_list *lock,
                                        struct lock_list *parent)
{
        unsigned long nr;

        nr = lock - list_entries;
        WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
        lock->parent = parent;
        lock->class->dep_gen_id = lockdep_dependency_gen_id;
}

static inline unsigned long lock_accessed(struct lock_list *lock)
{
        unsigned long nr;

        nr = lock - list_entries;
        WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
        return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

static inline struct lock_list *get_lock_parent(struct lock_list *child)
{
        return child->parent;
}

static inline int get_lock_depth(struct lock_list *child)
{
        int depth = 0;
        struct lock_list *parent;

        while ((parent = get_lock_parent(child))) {
                child = parent;
                depth++;
        }
        return depth;
}

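/*
 * Breadth-first search of the dependency graph, starting at
 * @source_entry and following locks_after (forward) or locks_before
 * (backward) edges. Returns 0 when @match succeeds (the matching
 * entry is stored in *@target_entry), 1 when nothing matches, and
 * <0 on internal error (corrupted graph or queue overflow).
 */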
static int __bfs(struct lock_list *source_entry,
                 void *data,
                 int (*match)(struct lock_list *entry, void *data),
                 struct lock_list **target_entry,
                 int forward)
{
        struct lock_list *entry;
        struct list_head *head;
        struct circular_queue *cq = &lock_cq;
        int ret = 1;

        if (match(source_entry, data)) {
                *target_entry = source_entry;
                ret = 0;
                goto exit;
        }

        if (forward)
                head = &source_entry->class->locks_after;
        else
                head = &source_entry->class->locks_before;

        if (list_empty(head))
                goto exit;

        __cq_init(cq);
        __cq_enqueue(cq, (unsigned long)source_entry);

        while (!__cq_empty(cq)) {
                struct lock_list *lock;

                __cq_dequeue(cq, (unsigned long *)&lock);

                if (!lock->class) {
                        ret = -2;
                        goto exit;
                }

                if (forward)
                        head = &lock->class->locks_after;
                else
                        head = &lock->class->locks_before;

                list_for_each_entry(entry, head, entry) {
                        if (!lock_accessed(entry)) {
                                unsigned int cq_depth;
                                mark_lock_accessed(entry, lock);
                                if (match(entry, data)) {
                                        *target_entry = entry;
                                        ret = 0;
                                        goto exit;
                                }

                                if (__cq_enqueue(cq, (unsigned long)entry)) {
                                        ret = -1;
                                        goto exit;
                                }
                                cq_depth = __cq_get_elem_count(cq);
                                if (max_bfs_queue_depth < cq_depth)
                                        max_bfs_queue_depth = cq_depth;
                        }
                }
        }
exit:
        return ret;
}

static inline int __bfs_forwards(struct lock_list *src_entry,
                        void *data,
                        int (*match)(struct lock_list *entry, void *data),
                        struct lock_list **target_entry)
{
        return __bfs(src_entry, data, match, target_entry, 1);

}

static inline int __bfs_backwards(struct lock_list *src_entry,
                        void *data,
                        int (*match)(struct lock_list *entry, void *data),
                        struct lock_list **target_entry)
{
        return __bfs(src_entry, data, match, target_entry, 0);

}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
        if (debug_locks_silent)
                return 0;
        printk("\n-> #%u", depth);
        print_lock_name(target->class);
        printk(":\n");
        print_stack_trace(&target->trace, 6);

        return 0;
}

static void
print_circular_lock_scenario(struct held_lock *src,
                             struct held_lock *tgt,
                             struct lock_list *prt)
{
        struct lock_class *source = hlock_class(src);
        struct lock_class *target = hlock_class(tgt);
        struct lock_class *parent = prt->class;

        /*
         * A direct locking problem where unsafe_class lock is taken
         * directly by safe_class lock, then all we need to show
         * is the deadlock scenario, as it is obvious that the
         * unsafe lock is taken under the safe lock.
         *
         * But if there is a chain instead, where the safe lock takes
         * an intermediate lock (middle_class) where this lock is
         * not the same as the safe lock, then the lock chain is
         * used to describe the problem. Otherwise we would need
         * to show a different CPU case for each link in the chain
         * from the safe_class lock to the unsafe_class lock.
         */
        if (parent != source) {
                printk("Chain exists of:\n  ");
                __print_lock_name(source);
                printk(" --> ");
                __print_lock_name(parent);
                printk(" --> ");
                __print_lock_name(target);
                printk("\n\n");
        }

        printk(" Possible unsafe locking scenario:\n\n");
        printk("       CPU0                    CPU1\n");
        printk("       ----                    ----\n");
        printk("  lock(");
        __print_lock_name(target);
        printk(");\n");
        printk("                               lock(");
        __print_lock_name(parent);
        printk(");\n");
        printk("                               lock(");
        __print_lock_name(target);
        printk(");\n");
        printk("  lock(");
        __print_lock_name(source);
        printk(");\n");
        printk("\n *** DEADLOCK ***\n\n");
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
                        struct held_lock *check_src,
                        struct held_lock *check_tgt)
{
        struct task_struct *curr = current;

        if (debug_locks_silent)
                return 0;

        printk("\n");
        printk("======================================================\n");
        printk("[ INFO: possible circular locking dependency detected ]\n");
        print_kernel_ident();
        printk("-------------------------------------------------------\n");
        printk("%s/%d is trying to acquire lock:\n",
                curr->comm, task_pid_nr(curr));
        print_lock(check_src);
        printk("\nbut task is already holding lock:\n");
        print_lock(check_tgt);
        printk("\nwhich lock already depends on the new lock.\n\n");
        printk("\nthe existing dependency chain (in reverse order) is:\n");

        print_circular_bug_entry(entry, depth);

        return 0;
}

static inline int class_equal(struct lock_list *entry, void *data)
{
        return entry->class == data;
}

static noinline int print_circular_bug(struct lock_list *this,
                                struct lock_list *target,
                                struct held_lock *check_src,
                                struct held_lock *check_tgt)
{
        struct task_struct *curr = current;
        struct lock_list *parent;
        struct lock_list *first_parent;
        int depth;

        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;

        if (!save_trace(&this->trace))
                return 0;

        depth = get_lock_depth(target);

        print_circular_bug_header(target, depth, check_src, check_tgt);

        parent = get_lock_parent(target);
        first_parent = parent;

        while (parent) {
                print_circular_bug_entry(parent, --depth);
                parent = get_lock_parent(parent);
        }

        printk("\nother info that might help us debug this:\n\n");
        print_circular_lock_scenario(check_src, check_tgt,
                                     first_parent);

        lockdep_print_held_locks(curr);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

static noinline int print_bfs_bug(int ret)
{
        if (!debug_locks_off_graph_unlock())
                return 0;

        /*
         * Breadth-first-search failed, graph got corrupted?
         */
        WARN(1, "lockdep bfs error:%d\n", ret);

        return 0;
}

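/*
 * BFS match callback that never matches: used to visit, and thereby
 * count, every entry reachable from a given node.
 */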
static int noop_count(struct lock_list *entry, void *data)
{
        (*(unsigned long *)data)++;
        return 0;
}

unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
        unsigned long count = 0;
        struct lock_list *uninitialized_var(target_entry);

        __bfs_forwards(this, (void *)&count, noop_count, &target_entry);

        return count;
}
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
        unsigned long ret, flags;
        struct lock_list this;

        this.parent = NULL;
        this.class = class;

        local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_forward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
        local_irq_restore(flags);

        return ret;
}

unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
        unsigned long count = 0;
        struct lock_list *uninitialized_var(target_entry);

        __bfs_backwards(this, (void *)&count, noop_count, &target_entry);

        return count;
}

unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
        unsigned long ret, flags;
        struct lock_list this;

        this.parent = NULL;
        this.class = class;

        local_irq_save(flags);
        arch_spin_lock(&lockdep_lock);
        ret = __lockdep_count_backward_deps(&this);
        arch_spin_unlock(&lockdep_lock);
        local_irq_restore(flags);

        return ret;
}

/*
 * Prove that the dependency graph starting at <root> cannot
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
                struct lock_list **target_entry)
{
        int result;

        debug_atomic_inc(nr_cyclic_checks);

        result = __bfs_forwards(root, target, class_equal, target_entry);

        return result;
}

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

static inline int usage_match(struct lock_list *entry, void *bit)
{
        return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
                        struct lock_list **target_entry)
{
        int result;

        debug_atomic_inc(nr_find_usage_forwards_checks);

        result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

        return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
                        struct lock_list **target_entry)
{
        int result;

        debug_atomic_inc(nr_find_usage_backwards_checks);

        result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

        return result;
}

static void print_lock_class_header(struct lock_class *class, int depth)
{
        int bit;

        printk("%*s->", depth, "");
        print_lock_name(class);
        printk(" ops: %lu", class->ops);
        printk(" {\n");

        for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
                if (class->usage_mask & (1 << bit)) {
                        int len = depth;

                        len += printk("%*s   %s", depth, "", usage_str[bit]);
                        len += printk(" at:\n");
                        print_stack_trace(class->usage_traces + bit, len);
                }
        }
        printk("%*s }\n", depth, "");

        printk("%*s ... key      at: ", depth, "");
        print_ip_sym((unsigned long)class->key);
}

/*
 * printk the shortest lock dependencies from @leaf to @root in reverse order:
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
                                struct lock_list *root)
{
        struct lock_list *entry = leaf;
        int depth;

        /* compute depth from the tree generated by the BFS */
        depth = get_lock_depth(leaf);

        do {
                print_lock_class_header(entry->class, depth);
                printk("%*s ... acquired at:\n", depth, "");
                print_stack_trace(&entry->trace, 2);
                printk("\n");

                if (depth == 0 && (entry != root)) {
                        printk("lockdep:%s bad path found in chain graph\n", __func__);
                        break;
                }

                entry = get_lock_parent(entry);
                depth--;
        } while (entry && (depth >= 0));

        return;
}

static void
print_irq_lock_scenario(struct lock_list *safe_entry,
                        struct lock_list *unsafe_entry,
                        struct lock_class *prev_class,
                        struct lock_class *next_class)
{
        struct lock_class *safe_class = safe_entry->class;
        struct lock_class *unsafe_class = unsafe_entry->class;
        struct lock_class *middle_class = prev_class;

        if (middle_class == safe_class)
                middle_class = next_class;

        /*
         * A direct locking problem where unsafe_class lock is taken
         * directly by safe_class lock, then all we need to show
         * is the deadlock scenario, as it is obvious that the
         * unsafe lock is taken under the safe lock.
         *
         * But if there is a chain instead, where the safe lock takes
         * an intermediate lock (middle_class) where this lock is
         * not the same as the safe lock, then the lock chain is
         * used to describe the problem. Otherwise we would need
         * to show a different CPU case for each link in the chain
         * from the safe_class lock to the unsafe_class lock.
         */
        if (middle_class != unsafe_class) {
                printk("Chain exists of:\n  ");
                __print_lock_name(safe_class);
                printk(" --> ");
                __print_lock_name(middle_class);
                printk(" --> ");
                __print_lock_name(unsafe_class);
                printk("\n\n");
        }

        printk(" Possible interrupt unsafe locking scenario:\n\n");
        printk("       CPU0                    CPU1\n");
        printk("       ----                    ----\n");
        printk("  lock(");
        __print_lock_name(unsafe_class);
        printk(");\n");
        printk("                               local_irq_disable();\n");
        printk("                               lock(");
        __print_lock_name(safe_class);
        printk(");\n");
        printk("                               lock(");
        __print_lock_name(middle_class);
        printk(");\n");
        printk("  <Interrupt>\n");
        printk("    lock(");
        __print_lock_name(safe_class);
        printk(");\n");
        printk("\n *** DEADLOCK ***\n\n");
}

static int
print_bad_irq_dependency(struct task_struct *curr,
                         struct lock_list *prev_root,
                         struct lock_list *next_root,
                         struct lock_list *backwards_entry,
                         struct lock_list *forwards_entry,
                         struct held_lock *prev,
                         struct held_lock *next,
                         enum lock_usage_bit bit1,
                         enum lock_usage_bit bit2,
                         const char *irqclass)
{
        if (!debug_locks_off_graph_unlock() || debug_locks_silent)
                return 0;

        printk("\n");
        printk("======================================================\n");
        printk("[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
                irqclass, irqclass);
        print_kernel_ident();
        printk("------------------------------------------------------\n");
        printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
                curr->comm, task_pid_nr(curr),
                curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
                curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
                curr->hardirqs_enabled,
                curr->softirqs_enabled);
        print_lock(next);

        printk("\nand this task is already holding:\n");
        print_lock(prev);
        printk("which would create a new lock dependency:\n");
        print_lock_name(hlock_class(prev));
        printk(" ->");
        print_lock_name(hlock_class(next));
        printk("\n");

        printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
                irqclass);
        print_lock_name(backwards_entry->class);
        printk("\n... which became %s-irq-safe at:\n", irqclass);

        print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);

        printk("\nto a %s-irq-unsafe lock:\n", irqclass);
        print_lock_name(forwards_entry->class);
        printk("\n... which became %s-irq-unsafe at:\n", irqclass);
        printk("...");

        print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);

        printk("\nother info that might help us debug this:\n\n");
        print_irq_lock_scenario(backwards_entry, forwards_entry,
                                hlock_class(prev), hlock_class(next));

        lockdep_print_held_locks(curr);

        printk("\nthe dependencies between %s-irq-safe lock", irqclass);
        printk(" and the holding lock:\n");
        if (!save_trace(&prev_root->trace))
                return 0;
        print_shortest_lock_dependencies(backwards_entry, prev_root);

        printk("\nthe dependencies between the lock to be acquired");
        printk(" and %s-irq-unsafe lock:\n", irqclass);
        if (!save_trace(&next_root->trace))
                return 0;
        print_shortest_lock_dependencies(forwards_entry, next_root);

        printk("\nstack backtrace:\n");
        dump_stack();

        return 0;
}

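/*
 * Prove that the new prev -> next dependency would not connect a lock
 * matching @bit_backwards somewhere behind @prev with a lock matching
 * @bit_forwards somewhere ahead of @next; print the bad IRQ dependency
 * and return 0 if it would.
 */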
1548 static int
1549 check_usage(struct task_struct *curr, struct held_lock *prev,
1550             struct held_lock *next, enum lock_usage_bit bit_backwards,
1551             enum lock_usage_bit bit_forwards, const char *irqclass)
1552 {
1553         int ret;
1554         struct lock_list this, that;
1555         struct lock_list *uninitialized_var(target_entry);
1556         struct lock_list *uninitialized_var(target_entry1);
1557
1558         this.parent = NULL;
1559
1560         this.class = hlock_class(prev);
1561         ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1562         if (ret < 0)
1563                 return print_bfs_bug(ret);
1564         if (ret == 1)
1565                 return ret;
1566
1567         that.parent = NULL;
1568         that.class = hlock_class(next);
1569         ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1570         if (ret < 0)
1571                 return print_bfs_bug(ret);
1572         if (ret == 1)
1573                 return ret;
1574
1575         return print_bad_irq_dependency(curr, &this, &that,
1576                         target_entry, target_entry1,
1577                         prev, next,
1578                         bit_backwards, bit_forwards, irqclass);
1579 }
1580
1581 static const char *state_names[] = {
1582 #define LOCKDEP_STATE(__STATE) \
1583         __stringify(__STATE),
1584 #include "lockdep_states.h"
1585 #undef LOCKDEP_STATE
1586 };
1587
1588 static const char *state_rnames[] = {
1589 #define LOCKDEP_STATE(__STATE) \
1590         __stringify(__STATE)"-READ",
1591 #include "lockdep_states.h"
1592 #undef LOCKDEP_STATE
1593 };
1594
1595 static inline const char *state_name(enum lock_usage_bit bit)
1596 {
1597         return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1598 }
1599
1600 static int exclusive_bit(int new_bit)
1601 {
1602         /*
1603          * USED_IN
1604          * USED_IN_READ
1605          * ENABLED
1606          * ENABLED_READ
1607          *
1608          * bit 0 - write/read
1609          * bit 1 - used_in/enabled
1610          * bit 2+  state
1611          */
1612
1613         int state = new_bit & ~3;
1614         int dir = new_bit & 2;
1615
1616         /*
1617          * keep state, bit flip the direction and strip read.
1618          */
1619         return state | (dir ^ 2);
1620 }
1621
1622 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1623                            struct held_lock *next, enum lock_usage_bit bit)
1624 {
1625         /*
1626          * Prove that the new dependency does not connect a hardirq-safe
1627          * lock with a hardirq-unsafe lock - to achieve this we search
1628          * the backwards-subgraph starting at <prev>, and the
1629          * forwards-subgraph starting at <next>:
1630          */
1631         if (!check_usage(curr, prev, next, bit,
1632                            exclusive_bit(bit), state_name(bit)))
1633                 return 0;
1634
1635         bit++; /* _READ */
1636
1637         /*
1638          * Prove that the new dependency does not connect a hardirq-safe-read
1639          * lock with a hardirq-unsafe lock - to achieve this we search
1640          * the backwards-subgraph starting at <prev>, and the
1641          * forwards-subgraph starting at <next>:
1642          */
1643         if (!check_usage(curr, prev, next, bit,
1644                            exclusive_bit(bit), state_name(bit)))
1645                 return 0;
1646
1647         return 1;
1648 }
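/*
 * The canonical scenario these checks catch (a sketch; the locks are
 * made up):
 *
 *	// CPU0, process context, hardirqs enabled:
 *	spin_lock(&B);			// B becomes hardirq-unsafe
 *
 *	// CPU1, hardirqs disabled:
 *	spin_lock(&A);
 *	spin_lock(&B);			// records the A -> B dependency
 *
 *	// hardirq handler:
 *	spin_lock(&A);			// A becomes hardirq-safe
 *
 * A hardirq interrupting CPU0 while it holds B spins on A, while CPU1
 * holds A and spins on B: deadlock. When the A -> B dependency is
 * added, find_usage_backwards() finds the USED_IN_HARDIRQ usage at (or
 * behind) A, find_usage_forwards() finds the ENABLED_HARDIRQ usage at
 * (or ahead of) B, and the dependency is reported.
 */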
1649
1650 static int
1651 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1652                 struct held_lock *next)
1653 {
1654 #define LOCKDEP_STATE(__STATE)                                          \
1655         if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1656                 return 0;
1657 #include "lockdep_states.h"
1658 #undef LOCKDEP_STATE
1659
1660         return 1;
1661 }
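/*
 * The LOCKDEP_STATE() x-macro above expands once per entry in
 * lockdep_states.h. A minimal sketch of the technique, assuming a
 * states file containing HARDIRQ, SOFTIRQ and RECLAIM_FS entries:
 *
 *	// lockdep_states.h (sketch)
 *	LOCKDEP_STATE(HARDIRQ)
 *	LOCKDEP_STATE(SOFTIRQ)
 *	LOCKDEP_STATE(RECLAIM_FS)
 *
 * so the body above expands to three consecutive check_irq_usage()
 * calls, for LOCK_USED_IN_HARDIRQ, LOCK_USED_IN_SOFTIRQ and
 * LOCK_USED_IN_RECLAIM_FS, keeping the state list in one place.
 */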
1662
1663 static void inc_chains(void)
1664 {
1665         if (current->hardirq_context)
1666                 nr_hardirq_chains++;
1667         else {
1668                 if (current->softirq_context)
1669                         nr_softirq_chains++;
1670                 else
1671                         nr_process_chains++;
1672         }
1673 }
1674
1675 #else
1676
1677 static inline int
1678 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1679                 struct held_lock *next)
1680 {
1681         return 1;
1682 }
1683
1684 static inline void inc_chains(void)
1685 {
1686         nr_process_chains++;
1687 }
1688
1689 #endif
1690
1691 static void
1692 print_deadlock_scenario(struct held_lock *nxt,
1693                              struct held_lock *prv)
1694 {
1695         struct lock_class *next = hlock_class(nxt);
1696         struct lock_class *prev = hlock_class(prv);
1697
1698         printk(" Possible unsafe locking scenario:\n\n");
1699         printk("       CPU0\n");
1700         printk("       ----\n");
1701         printk("  lock(");
1702         __print_lock_name(prev);
1703         printk(");\n");
1704         printk("  lock(");
1705         __print_lock_name(next);
1706         printk(");\n");
1707         printk("\n *** DEADLOCK ***\n\n");
1708         printk(" May be due to missing lock nesting notation\n\n");
1709 }
1710
1711 static int
1712 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1713                    struct held_lock *next)
1714 {
1715         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1716                 return 0;
1717
1718         printk("\n");
1719         printk("=============================================\n");
1720         printk("[ INFO: possible recursive locking detected ]\n");
1721         print_kernel_ident();
1722         printk("---------------------------------------------\n");
1723         printk("%s/%d is trying to acquire lock:\n",
1724                 curr->comm, task_pid_nr(curr));
1725         print_lock(next);
1726         printk("\nbut task is already holding lock:\n");
1727         print_lock(prev);
1728
1729         printk("\nother info that might help us debug this:\n");
1730         print_deadlock_scenario(next, prev);
1731         lockdep_print_held_locks(curr);
1732
1733         printk("\nstack backtrace:\n");
1734         dump_stack();
1735
1736         return 0;
1737 }
1738
1739 /*
1740  * Check whether we are holding such a class already.
1741  *
1742  * (Note that this has to be done separately, because the graph cannot
1743  * detect such classes of deadlocks.)
1744  *
1745  * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
1746  */
1747 static int
1748 check_deadlock(struct task_struct *curr, struct held_lock *next,
1749                struct lockdep_map *next_instance, int read)
1750 {
1751         struct held_lock *prev;
1752         struct held_lock *nest = NULL;
1753         int i;
1754
1755         for (i = 0; i < curr->lockdep_depth; i++) {
1756                 prev = curr->held_locks + i;
1757
1758                 if (prev->instance == next->nest_lock)
1759                         nest = prev;
1760
1761                 if (hlock_class(prev) != hlock_class(next))
1762                         continue;
1763
1764                 /*
1765                  * Allow read-after-read recursion of the same
1766                  * lock class (i.e. read_lock(lock)+read_lock(lock)):
1767                  */
1768                 if ((read == 2) && prev->read)
1769                         return 2;
1770
1771                 /*
1772                  * We're holding the nest_lock, which serializes this lock's
1773                  * nesting behaviour.
1774                  */
1775                 if (nest)
1776                         return 2;
1777
1778                 return print_deadlock_bug(curr, prev, next);
1779         }
1780         return 1;
1781 }
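/*
 * Two patterns deliberately allowed above (illustrative sketches; the
 * locks are made up):
 *
 *	rwlock_t r;
 *	read_lock(&r);
 *	read_lock(&r);		// recursive read (read == 2): returns 2
 *
 *	struct mutex table;	// serializes all locks of one class
 *	spinlock_t a, b;	// both in the same lock class
 *	mutex_lock(&table);
 *	spin_lock_nest_lock(&a, &table);
 *	spin_lock_nest_lock(&b, &table);  // same class, nest_lock held:
 *					  //   returns 2, no report
 *
 * Any other re-acquisition of an already-held class is reported via
 * print_deadlock_bug().
 */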
1782
1783 /*
1784  * There was a chain-cache miss, and we are about to add a new dependency
1785  * to a previous lock. We recursively validate the following rules:
1786  *
1787  *  - would the adding of the <prev> -> <next> dependency create a
1788  *    circular dependency in the graph? [== circular deadlock]
1789  *
1790  *  - does the new prev->next dependency connect any hardirq-safe lock
1791  *    (in the full backwards-subgraph starting at <prev>) with any
1792  *    hardirq-unsafe lock (in the full forwards-subgraph starting at
1793  *    <next>)? [== illegal lock inversion with hardirq contexts]
1794  *
1795  *  - does the new prev->next dependency connect any softirq-safe lock
1796  *    (in the full backwards-subgraph starting at <prev>) with any
1797  *    softirq-unsafe lock (in the full forwards-subgraph starting at
1798  *    <next>)? [== illegal lock inversion with softirq contexts]
1799  *
1800  * any of these scenarios could lead to a deadlock.
1801  *
1802  * Then if all the validations pass, we add the forwards and backwards
1803  * dependency.
1804  */
1805 static int
1806 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1807                struct held_lock *next, int distance, int trylock_loop)
1808 {
1809         struct lock_list *entry;
1810         int ret;
1811         struct lock_list this;
1812         struct lock_list *uninitialized_var(target_entry);
1813         /*
1814          * Static variable, serialized by the graph_lock().
1815          *
1816          * We use this static variable to save the stack trace in case
1817          * we call into this function multiple times due to encountering
1818          * trylocks in the held lock stack.
1819          */
1820         static struct stack_trace trace;
1821
1822         /*
1823          * Prove that the new <prev> -> <next> dependency would not
1824          * create a circular dependency in the graph. (We do this by
1825          * forward-recursing into the graph starting at <next>, and
1826          * checking whether we can reach <prev>.)
1827          *
1828          * We are using global variables to control the recursion, to
1829          * keep the stackframe size of the recursive functions low:
1830          */
1831         this.class = hlock_class(next);
1832         this.parent = NULL;
1833         ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1834         if (unlikely(!ret))
1835                 return print_circular_bug(&this, target_entry, next, prev);
1836         else if (unlikely(ret < 0))
1837                 return print_bfs_bug(ret);
1838
1839         if (!check_prev_add_irq(curr, prev, next))
1840                 return 0;
1841
1842         /*
1843          * For recursive read-locks we do all the dependency checks,
1844          * but we dont store read-triggered dependencies (only
1845          * write-triggered dependencies). This ensures that only the
1846          * write-side dependencies matter, and that if for example a
1847          * write-lock never takes any other locks, then the reads are
1848          * equivalent to a NOP.
1849          */
1850         if (next->read == 2 || prev->read == 2)
1851                 return 1;
1852         /*
1853          * Is the <prev> -> <next> dependency already present?
1854          *
1855          * (this may occur even though this is a new chain: consider
1856          *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
1857          *  chains - the second one will be new, but L1 already has
1858          *  L2 added to its dependency list, due to the first chain.)
1859          */
1860         list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
1861                 if (entry->class == hlock_class(next)) {
1862                         if (distance == 1)
1863                                 entry->distance = 1;
1864                         return 2;
1865                 }
1866         }
1867
1868         if (!trylock_loop && !save_trace(&trace))
1869                 return 0;
1870
1871         /*
1872          * Ok, all validations passed, add the new lock
1873          * to the previous lock's dependency list:
1874          */
1875         ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
1876                                &hlock_class(prev)->locks_after,
1877                                next->acquire_ip, distance, &trace);
1878
1879         if (!ret)
1880                 return 0;
1881
1882         ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
1883                                &hlock_class(next)->locks_before,
1884                                next->acquire_ip, distance, &trace);
1885         if (!ret)
1886                 return 0;
1887
1888         /*
1889          * Debugging printouts:
1890          */
1891         if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
1892                 graph_unlock();
1893                 printk("\n new dependency: ");
1894                 print_lock_name(hlock_class(prev));
1895                 printk(" => ");
1896                 print_lock_name(hlock_class(next));
1897                 printk("\n");
1898                 dump_stack();
1899                 return graph_lock();
1900         }
1901         return 1;
1902 }
1903
1904 /*
1905  * Add the dependency to all directly-previous locks that are 'relevant'.
1906  * The ones that are relevant are (in increasing distance from curr):
1907  * all consecutive trylock entries and the final non-trylock entry - or
1908  * the end of this context's lock-chain - whichever comes first.
1909  */
1910 static int
1911 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1912 {
1913         int depth = curr->lockdep_depth;
1914         int trylock_loop = 0;
1915         struct held_lock *hlock;
1916
1917         /*
1918          * Debugging checks.
1919          *
1920          * Depth must not be zero for a non-head lock:
1921          */
1922         if (!depth)
1923                 goto out_bug;
1924         /*
1925          * At least two relevant locks must exist for this
1926          * to be a head:
1927          */
1928         if (curr->held_locks[depth].irq_context !=
1929                         curr->held_locks[depth-1].irq_context)
1930                 goto out_bug;
1931
1932         for (;;) {
1933                 int distance = curr->lockdep_depth - depth + 1;
1934                 hlock = curr->held_locks + depth-1;
1935                 /*
1936                  * Only non-recursive-read entries get new dependencies
1937                  * added:
1938                  */
1939                 if (hlock->read != 2) {
1940                         if (!check_prev_add(curr, hlock, next,
1941                                                 distance, trylock_loop))
1942                                 return 0;
1943                         /*
1944                          * Stop after the first non-trylock entry,
1945                          * as non-trylock entries have added their
1946                          * own direct dependencies already, so this
1947                          * lock is connected to them indirectly:
1948                          */
1949                         if (!hlock->trylock)
1950                                 break;
1951                 }
1952                 depth--;
1953                 /*
1954                  * End of lock-stack?
1955                  */
1956                 if (!depth)
1957                         break;
1958                 /*
1959                  * Stop the search if we cross into another context:
1960                  */
1961                 if (curr->held_locks[depth].irq_context !=
1962                                 curr->held_locks[depth-1].irq_context)
1963                         break;
1964                 trylock_loop = 1;
1965         }
1966         return 1;
1967 out_bug:
1968         if (!debug_locks_off_graph_unlock())
1969                 return 0;
1970
1971         /*
1972          * Clearly we shouldn't be here at all, but since we made it we
1973          * can reliably say we messed up our state. See the above two
1974          * gotos for reasons why we could possibly end up here.
1975          */
1976         WARN_ON(1);
1977
1978         return 0;
1979 }
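/*
 * Example of the walk above (a sketch; the locks are made up). With
 * the held stack
 *
 *	spin_lock(&A);
 *	spin_trylock(&B);
 *	spin_trylock(&C);
 *
 * a new spin_lock(&D) links D after C (distance 1), after B (distance
 * 2) and after A (distance 3), then stops: A is the first non-trylock
 * entry, and everything below A already has a direct dependency to A,
 * so D is reachable from it transitively.
 */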
1980
1981 unsigned long nr_lock_chains;
1982 struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1983 int nr_chain_hlocks;
1984 static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];
1985
1986 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
1987 {
1988         return lock_classes + chain_hlocks[chain->base + i];
1989 }
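/*
 * Chain storage layout: each lock_chain covers chain->depth entries of
 * chain_hlocks[] starting at chain->base, every entry being an index
 * into lock_classes[]. An illustrative walk over one chain (not used
 * by this file):
 *
 *	int i;
 *
 *	for (i = 0; i < chain->depth; i++)
 *		printk("%s\n", lock_chain_get_class(chain, i)->name);
 */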
1990
1991 /*
1992  * Look up a dependency chain. If the key is not present yet then
1993  * add it and return 1 - in this case the new dependency chain is
1994  * validated. If the key is already hashed, return 0.
1995  * (On return with 1 graph_lock is held.)
1996  */
1997 static inline int lookup_chain_cache(struct task_struct *curr,
1998                                      struct held_lock *hlock,
1999                                      u64 chain_key)
2000 {
2001         struct lock_class *class = hlock_class(hlock);
2002         struct list_head *hash_head = chainhashentry(chain_key);
2003         struct lock_chain *chain;
2004         struct held_lock *hlock_curr, *hlock_next;
2005         int i, j;
2006
2007         /*
2008          * We might need to take the graph lock, ensure we've got IRQs
2009          * disabled to make this an IRQ-safe lock. For recursion reasons
2010          * lockdep won't complain about its own locking errors.
2011          */
2012         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2013                 return 0;
2014         /*
2015          * We can walk it lock-free, because entries only get added
2016          * to the hash:
2017          */
2018         list_for_each_entry(chain, hash_head, entry) {
2019                 if (chain->chain_key == chain_key) {
2020 cache_hit:
2021                         debug_atomic_inc(chain_lookup_hits);
2022                         if (very_verbose(class))
2023                                 printk("\nhash chain already cached, key: "
2024                                         "%016Lx tail class: [%p] %s\n",
2025                                         (unsigned long long)chain_key,
2026                                         class->key, class->name);
2027                         return 0;
2028                 }
2029         }
2030         if (very_verbose(class))
2031                 printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
2032                         (unsigned long long)chain_key, class->key, class->name);
2033         /*
2034          * Allocate a new chain entry from the static array, and add
2035          * it to the hash:
2036          */
2037         if (!graph_lock())
2038                 return 0;
2039         /*
2040          * We have to walk the chain again locked - to avoid duplicates:
2041          */
2042         list_for_each_entry(chain, hash_head, entry) {
2043                 if (chain->chain_key == chain_key) {
2044                         graph_unlock();
2045                         goto cache_hit;
2046                 }
2047         }
2048         if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
2049                 if (!debug_locks_off_graph_unlock())
2050                         return 0;
2051
2052                 printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
2053                 printk("turning off the locking correctness validator.\n");
2054                 dump_stack();
2055                 return 0;
2056         }
2057         chain = lock_chains + nr_lock_chains++;
2058         chain->chain_key = chain_key;
2059         chain->irq_context = hlock->irq_context;
2060         /* Find the first held_lock of current chain */
2061         hlock_next = hlock;
2062         for (i = curr->lockdep_depth - 1; i >= 0; i--) {
2063                 hlock_curr = curr->held_locks + i;
2064                 if (hlock_curr->irq_context != hlock_next->irq_context)
2065                         break;
2066                 hlock_next = hlock;
2067         }
2068         i++;
2069         chain->depth = curr->lockdep_depth + 1 - i;
2070         if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
2071                 chain->base = nr_chain_hlocks;
2072                 nr_chain_hlocks += chain->depth;
2073                 for (j = 0; j < chain->depth - 1; j++, i++) {
2074                         int lock_id = curr->held_locks[i].class_idx - 1;
2075                         chain_hlocks[chain->base + j] = lock_id;
2076                 }
2077                 chain_hlocks[chain->base + j] = class - lock_classes;
2078         }
2079         list_add_tail_rcu(&chain->entry, hash_head);
2080         debug_atomic_inc(chain_lookup_misses);
2081         inc_chains();
2082
2083         return 1;
2084 }
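/*
 * The lookup above is the classic "optimistic lookup, then re-check
 * under the lock" pattern: walk the hash lock-free (safe because
 * chains are only ever added, via list_add_tail_rcu()), and only if
 * the key is missing take graph_lock() and walk again before
 * inserting, so two CPUs racing on the same new chain_key cannot add
 * it twice. A generic sketch of the same idiom, with made-up names:
 *
 *	obj = lookup(key);		// lock-free
 *	if (obj)
 *		return obj;
 *	lock();
 *	obj = lookup(key);		// re-check under the lock
 *	if (!obj)
 *		obj = insert_new(key);
 *	unlock();
 *	return obj;
 */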
2085
2086 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
2087                 struct held_lock *hlock, int chain_head, u64 chain_key)
2088 {
2089         /*
2090          * Trylock needs to maintain the stack of held locks, but it
2091          * does not add new dependencies, because trylock can be done
2092          * in any order.
2093          *
2094          * We look up the chain_key and do the O(N^2) check and update of
2095          * the dependencies only if this is a new dependency chain.
2096          * (If lookup_chain_cache() returns with 1 it acquires
2097          * graph_lock for us)
2098          */
2099         if (!hlock->trylock && (hlock->check == 2) &&
2100             lookup_chain_cache(curr, hlock, chain_key)) {
2101                 /*
2102                  * Check whether last held lock:
2103                  *
2104                  * - is irq-safe, if this lock is irq-unsafe
2105                  * - is softirq-safe, if this lock is hardirq-unsafe
2106                  *
2107                  * And check whether the new lock's dependency graph
2108                  * could lead back to the previous lock.
2109                  *
2110                  * any of these scenarios could lead to a deadlock. If all
2111                  * of the validations pass, we cache the new dependency chain.
2112                  */
2113                 int ret = check_deadlock(curr, hlock, lock, hlock->read);
2114
2115                 if (!ret)
2116                         return 0;
2117                 /*
2118                  * Mark recursive read, as we jump over it when
2119                  * building dependencies (just like we jump over
2120                  * trylock entries):
2121                  */
2122                 if (ret == 2)
2123                         hlock->read = 2;
2124                 /*
2125                  * Add dependency only if this lock is not the head
2126                  * of the chain, and if it's not a secondary read-lock:
2127                  */
2128                 if (!chain_head && ret != 2)
2129                         if (!check_prevs_add(curr, hlock))
2130                                 return 0;
2131                 graph_unlock();
2132         } else
2133                 /* after lookup_chain_cache(): */
2134                 if (unlikely(!debug_locks))
2135                         return 0;
2136
2137         return 1;
2138 }
2139 #else
2140 static inline int validate_chain(struct task_struct *curr,
2141                 struct lockdep_map *lock, struct held_lock *hlock,
2142                 int chain_head, u64 chain_key)
2143 {
2144         return 1;
2145 }
2146 #endif
2147
2148 /*
2149  * We are building curr_chain_key incrementally, so double-check
2150  * it from scratch, to make sure that it's done correctly:
2151  */
2152 static void check_chain_key(struct task_struct *curr)
2153 {
2154 #ifdef CONFIG_DEBUG_LOCKDEP
2155         struct held_lock *hlock, *prev_hlock = NULL;
2156         unsigned int i, id;
2157         u64 chain_key = 0;
2158
2159         for (i = 0; i < curr->lockdep_depth; i++) {
2160                 hlock = curr->held_locks + i;
2161                 if (chain_key != hlock->prev_chain_key) {
2162                         debug_locks_off();
2163                         /*
2164                          * We got mighty confused, our chain keys don't match
2165                          * with what we expect. Did someone trample on our task state?
2166                          */
2167                         WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
2168                                 curr->lockdep_depth, i,
2169                                 (unsigned long long)chain_key,
2170                                 (unsigned long long)hlock->prev_chain_key);
2171                         return;
2172                 }
2173                 id = hlock->class_idx - 1;
2174                 /*
2175                  * Whoops ran out of static storage again?
2176                  */
2177                 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2178                         return;
2179
2180                 if (prev_hlock && (prev_hlock->irq_context !=
2181                                                         hlock->irq_context))
2182                         chain_key = 0;
2183                 chain_key = iterate_chain_key(chain_key, id);
2184                 prev_hlock = hlock;
2185         }
2186         if (chain_key != curr->curr_chain_key) {
2187                 debug_locks_off();
2188                 /*
2189                  * The incrementally built chain key doesn't match the one
2190                  * recomputed from scratch - something trampled on our memory.
2191                  */
2192                 WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
2193                         curr->lockdep_depth, i,
2194                         (unsigned long long)chain_key,
2195                         (unsigned long long)curr->curr_chain_key);
2196         }
2197 #endif
2198 }
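/*
 * The invariant verified above, restated (names as in this file): for
 * each held lock i, with the key reset to 0 at irq-context boundaries,
 *
 *	chain_key(0)   == 0
 *	chain_key(i+1) == iterate_chain_key(chain_key(i), class_id(i))
 *
 * and curr->curr_chain_key must equal the final value. Any mismatch
 * means held_locks[] or the cached key has been corrupted.
 */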
2199
2200 static void
2201 print_usage_bug_scenario(struct held_lock *lock)
2202 {
2203         struct lock_class *class = hlock_class(lock);
2204
2205         printk(" Possible unsafe locking scenario:\n\n");
2206         printk("       CPU0\n");
2207         printk("       ----\n");
2208         printk("  lock(");
2209         __print_lock_name(class);
2210         printk(");\n");
2211         printk("  <Interrupt>\n");
2212         printk("    lock(");
2213         __print_lock_name(class);
2214         printk(");\n");
2215         printk("\n *** DEADLOCK ***\n\n");
2216 }
2217
2218 static int
2219 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2220                 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
2221 {
2222         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2223                 return 0;
2224
2225         printk("\n");
2226         printk("=================================\n");
2227         printk("[ INFO: inconsistent lock state ]\n");
2228         print_kernel_ident();
2229         printk("---------------------------------\n");
2230
2231         printk("inconsistent {%s} -> {%s} usage.\n",
2232                 usage_str[prev_bit], usage_str[new_bit]);
2233
2234         printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
2235                 curr->comm, task_pid_nr(curr),
2236                 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2237                 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2238                 trace_hardirqs_enabled(curr),
2239                 trace_softirqs_enabled(curr));
2240         print_lock(this);
2241
2242         printk("{%s} state was registered at:\n", usage_str[prev_bit]);
2243         print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
2244
2245         print_irqtrace_events(curr);
2246         printk("\nother info that might help us debug this:\n");
2247         print_usage_bug_scenario(this);
2248
2249         lockdep_print_held_locks(curr);
2250
2251         printk("\nstack backtrace:\n");
2252         dump_stack();
2253
2254         return 0;
2255 }
2256
2257 /*
2258  * Print out an error if an invalid bit is set:
2259  */
2260 static inline int
2261 valid_state(struct task_struct *curr, struct held_lock *this,
2262             enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
2263 {
2264         if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
2265                 return print_usage_bug(curr, this, bad_bit, new_bit);
2266         return 1;
2267 }
2268
2269 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2270                      enum lock_usage_bit new_bit);
2271
2272 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
2273
2274 /*
2275  * print irq inversion bug:
2276  */
2277 static int
2278 print_irq_inversion_bug(struct task_struct *curr,
2279                         struct lock_list *root, struct lock_list *other,
2280                         struct held_lock *this, int forwards,
2281                         const char *irqclass)
2282 {
2283         struct lock_list *entry = other;
2284         struct lock_list *middle = NULL;
2285         int depth;
2286
2287         if (!debug_locks_off_graph_unlock() || debug_locks_silent)
2288                 return 0;
2289
2290         printk("\n");
2291         printk("=========================================================\n");
2292         printk("[ INFO: possible irq lock inversion dependency detected ]\n");
2293         print_kernel_ident();
2294         printk("---------------------------------------------------------\n");
2295         printk("%s/%d just changed the state of lock:\n",
2296                 curr->comm, task_pid_nr(curr));
2297         print_lock(this);
2298         if (forwards)
2299                 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
2300         else
2301                 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
2302         print_lock_name(other->class);
2303         printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
2304
2305         printk("\nother info that might help us debug this:\n");
2306
2307         /* Find a middle lock (if one exists) */
2308         depth = get_lock_depth(other);
2309         do {
2310                 if (depth == 0 && (entry != root)) {
2311                         printk("lockdep:%s bad path found in chain graph\n", __func__);
2312                         break;
2313                 }
2314                 middle = entry;
2315                 entry = get_lock_parent(entry);
2316                 depth--;
2317         } while (entry && entry != root && (depth >= 0));
2318         if (forwards)
2319                 print_irq_lock_scenario(root, other,
2320                         middle ? middle->class : root->class, other->class);
2321         else
2322                 print_irq_lock_scenario(other, root,
2323                         middle ? middle->class : other->class, root->class);
2324
2325         lockdep_print_held_locks(curr);
2326
2327         printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
2328         if (!save_trace(&root->trace))
2329                 return 0;
2330         print_shortest_lock_dependencies(other, root);
2331
2332         printk("\nstack backtrace:\n");
2333         dump_stack();
2334
2335         return 0;
2336 }
2337
2338 /*
2339  * Prove that in the forwards-direction subgraph starting at <this>
2340  * there is no lock matching <mask>:
2341  */
2342 static int
2343 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2344                      enum lock_usage_bit bit, const char *irqclass)
2345 {
2346         int ret;
2347         struct lock_list root;
2348         struct lock_list *uninitialized_var(target_entry);
2349
2350         root.parent = NULL;
2351         root.class = hlock_class(this);
2352         ret = find_usage_forwards(&root, bit, &target_entry);
2353         if (ret < 0)
2354                 return print_bfs_bug(ret);
2355         if (ret == 1)
2356                 return ret;
2357
2358         return print_irq_inversion_bug(curr, &root, target_entry,
2359                                         this, 1, irqclass);
2360 }
2361
2362 /*
2363  * Prove that in the backwards-direction subgraph starting at <this>
2364  * there is no lock matching <mask>:
2365  */
2366 static int
2367 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2368                       enum lock_usage_bit bit, const char *irqclass)
2369 {
2370         int ret;
2371         struct lock_list root;
2372         struct lock_list *uninitialized_var(target_entry);
2373
2374         root.parent = NULL;
2375         root.class = hlock_class(this);
2376         ret = find_usage_backwards(&root, bit, &target_entry);
2377         if (ret < 0)
2378                 return print_bfs_bug(ret);
2379         if (ret == 1)
2380                 return ret;
2381
2382         return print_irq_inversion_bug(curr, &root, target_entry,
2383                                         this, 0, irqclass);
2384 }
2385
2386 void print_irqtrace_events(struct task_struct *curr)
2387 {
2388         printk("irq event stamp: %u\n", curr->irq_events);
2389         printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
2390         print_ip_sym(curr->hardirq_enable_ip);
2391         printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
2392         print_ip_sym(curr->hardirq_disable_ip);
2393         printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
2394         print_ip_sym(curr->softirq_enable_ip);
2395         printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
2396         print_ip_sym(curr->softirq_disable_ip);
2397 }
2398
2399 static int HARDIRQ_verbose(struct lock_class *class)
2400 {
2401 #if HARDIRQ_VERBOSE
2402         return class_filter(class);
2403 #endif
2404         return 0;
2405 }
2406
2407 static int SOFTIRQ_verbose(struct lock_class *class)
2408 {
2409 #if SOFTIRQ_VERBOSE
2410         return class_filter(class);
2411 #endif
2412         return 0;
2413 }
2414
2415 static int RECLAIM_FS_verbose(struct lock_class *class)
2416 {
2417 #if RECLAIM_VERBOSE
2418         return class_filter(class);
2419 #endif
2420         return 0;
2421 }
2422
2423 #define STRICT_READ_CHECKS      1
2424
2425 static int (*state_verbose_f[])(struct lock_class *class) = {
2426 #define LOCKDEP_STATE(__STATE) \
2427         __STATE##_verbose,
2428 #include "lockdep_states.h"
2429 #undef LOCKDEP_STATE
2430 };
2431
2432 static inline int state_verbose(enum lock_usage_bit bit,
2433                                 struct lock_class *class)
2434 {
2435         return state_verbose_f[bit >> 2](class);
2436 }
2437
2438 typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2439                              enum lock_usage_bit bit, const char *name);
2440
2441 static int
2442 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2443                 enum lock_usage_bit new_bit)
2444 {
2445         int excl_bit = exclusive_bit(new_bit);
2446         int read = new_bit & 1;
2447         int dir = new_bit & 2;
2448
2449         /*
2450          * mark USED_IN has to look forwards -- to ensure no dependency
2451          * has ENABLED state, which would allow recursion deadlocks.
2452          *
2453          * mark ENABLED has to look backwards -- to ensure no dependee
2454          * has USED_IN state, which, again, would allow recursion deadlocks.
2455          */
2456         check_usage_f usage = dir ?
2457                 check_usage_backwards : check_usage_forwards;
2458
2459         /*
2460          * Validate that this particular lock does not have conflicting
2461          * usage states.
2462          */
2463         if (!valid_state(curr, this, new_bit, excl_bit))
2464                 return 0;
2465
2466         /*
2467          * Validate that the lock dependencies don't have conflicting usage
2468          * states.
2469          */
2470         if ((!read || !dir || STRICT_READ_CHECKS) &&
2471                         !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2472                 return 0;
2473
2474         /*
2475          * Check for read in write conflicts
2476          */
2477         if (!read) {
2478                 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2479                         return 0;
2480
2481                 if (STRICT_READ_CHECKS &&
2482                         !usage(curr, this, excl_bit + 1,
2483                                 state_name(new_bit + 1)))
2484                         return 0;
2485         }
2486
2487         if (state_verbose(new_bit, hlock_class(this)))
2488                 return 2;
2489
2490         return 1;
2491 }
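/*
 * Direction summary for mark_lock_irq() (a restatement of the logic
 * above, not new policy):
 *
 *	new_bit			conflicts with		searched
 *	USED_IN_<STATE>		ENABLED_<STATE>		forwards
 *	ENABLED_<STATE>		USED_IN_<STATE>		backwards
 *
 * and, for write usages, the same check is repeated against the _READ
 * variant of the exclusive bit (excl_bit + 1).
 */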
2492
2493 enum mark_type {
2494 #define LOCKDEP_STATE(__STATE)  __STATE,
2495 #include "lockdep_states.h"
2496 #undef LOCKDEP_STATE
2497 };
2498
2499 /*
2500  * Mark all held locks with a usage bit:
2501  */
2502 static int
2503 mark_held_locks(struct task_struct *curr, enum mark_type mark)
2504 {
2505         enum lock_usage_bit usage_bit;
2506         struct held_lock *hlock;
2507         int i;
2508
2509         for (i = 0; i < curr->lockdep_depth; i++) {
2510                 hlock = curr->held_locks + i;
2511
2512                 usage_bit = 2 + (mark << 2); /* ENABLED */
2513                 if (hlock->read)
2514                         usage_bit += 1; /* READ */
2515
2516                 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2517
2518                 if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
2519                         continue;
2520
2521                 if (!mark_lock(curr, hlock, usage_bit))
2522                         return 0;
2523         }
2524
2525         return 1;
2526 }
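/*
 * Worked example of the usage_bit computation above (illustrative;
 * the values depend on the state order in lockdep_states.h): with
 * mark == SOFTIRQ and a read-held lock,
 *
 *	usage_bit = 2 + (SOFTIRQ << 2) + 1;
 *
 * which is LOCK_ENABLED_SOFTIRQ_READ in the usual bit 0 = read,
 * bit 1 = enabled, bits 2+ = state encoding.
 */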
2527
2528 /*
2529  * Hardirqs will be enabled:
2530  */
2531 static void __trace_hardirqs_on_caller(unsigned long ip)
2532 {
2533         struct task_struct *curr = current;
2534
2535         /* we'll do an OFF -> ON transition: */
2536         curr->hardirqs_enabled = 1;
2537
2538         /*
2539          * We are going to turn hardirqs on, so set the
2540          * usage bit for all held locks:
2541          */
2542         if (!mark_held_locks(curr, HARDIRQ))
2543                 return;
2544         /*
2545          * If we have softirqs enabled, then set the usage
2546          * bit for all held locks. (disabled hardirqs prevented
2547          * this bit from being set before)
2548          */
2549         if (curr->softirqs_enabled)
2550                 if (!mark_held_locks(curr, SOFTIRQ))
2551                         return;
2552
2553         curr->hardirq_enable_ip = ip;
2554         curr->hardirq_enable_event = ++curr->irq_events;
2555         debug_atomic_inc(hardirqs_on_events);
2556 }
2557
2558 void trace_hardirqs_on_caller(unsigned long ip)
2559 {
2560         time_hardirqs_on(CALLER_ADDR0, ip);
2561
2562         if (unlikely(!debug_locks || current->lockdep_recursion))
2563                 return;
2564
2565         if (unlikely(current->hardirqs_enabled)) {
2566                 /*
2567                  * Neither irq nor preemption are disabled here
2568                  * so this is racy by nature but losing one hit
2569                  * in a stat is not a big deal.
2570                  */
2571                 __debug_atomic_inc(redundant_hardirqs_on);
2572                 return;
2573         }
2574
2575         /*
2576          * We're enabling irqs and according to our state above irqs weren't
2577          * already enabled, yet we find the hardware thinks they are in fact
2578          * enabled.. someone messed up their IRQ state tracing.
2579          */
2580         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2581                 return;
2582
2583         /*
2584          * See the fine text that goes along with this variable definition.
2585          */
2586         if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
2587                 return;
2588
2589         /*
2590          * Can't allow enabling interrupts while in an interrupt handler,
2591          * that's general bad form and such. Recursion, limited stack etc..
2592          */
2593         if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
2594                 return;
2595
2596         current->lockdep_recursion = 1;
2597         __trace_hardirqs_on_caller(ip);
2598         current->lockdep_recursion = 0;
2599 }
2600 EXPORT_SYMBOL(trace_hardirqs_on_caller);
2601
2602 void trace_hardirqs_on(void)
2603 {
2604         trace_hardirqs_on_caller(CALLER_ADDR0);
2605 }
2606 EXPORT_SYMBOL(trace_hardirqs_on);
2607
2608 /*
2609  * Hardirqs were disabled:
2610  */
2611 void trace_hardirqs_off_caller(unsigned long ip)
2612 {
2613         struct task_struct *curr = current;
2614
2615         time_hardirqs_off(CALLER_ADDR0, ip);
2616
2617         if (unlikely(!debug_locks || current->lockdep_recursion))
2618                 return;
2619
2620         /*
2621          * So we're supposed to get called after you mask local IRQs, but for
2622          * some reason the hardware doesn't quite think you did a proper job.
2623          */
2624         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2625                 return;
2626
2627         if (curr->hardirqs_enabled) {
2628                 /*
2629                  * We have done an ON -> OFF transition:
2630                  */
2631                 curr->hardirqs_enabled = 0;
2632                 curr->hardirq_disable_ip = ip;
2633                 curr->hardirq_disable_event = ++curr->irq_events;
2634                 debug_atomic_inc(hardirqs_off_events);
2635         } else
2636                 debug_atomic_inc(redundant_hardirqs_off);
2637 }
2638 EXPORT_SYMBOL(trace_hardirqs_off_caller);
2639
2640 void trace_hardirqs_off(void)
2641 {
2642         trace_hardirqs_off_caller(CALLER_ADDR0);
2643 }
2644 EXPORT_SYMBOL(trace_hardirqs_off);
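/*
 * These hooks are meant to be driven by the irq-flags tracing macros,
 * not called directly. A sketch of a typical call site (cf. the
 * local_irq_* wrappers in include/linux/irqflags.h; simplified):
 *
 *	#define local_irq_disable() \
 *		do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0)
 *
 *	#define local_irq_enable() \
 *		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)
 *
 * Note the ordering: "off" is traced after irqs are really off, "on"
 * is traced while they are still off, so lockdep never sees a window
 * where its bookkeeping disagrees with the hardware state.
 */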
2645
2646 /*
2647  * Softirqs will be enabled:
2648  */
2649 void trace_softirqs_on(unsigned long ip)
2650 {
2651         struct task_struct *curr = current;
2652
2653         if (unlikely(!debug_locks || current->lockdep_recursion))
2654                 return;
2655
2656         /*
2657          * We fancy IRQs being disabled here, see softirq.c, avoids
2658          * funny state and nesting things.
2659          */
2660         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2661                 return;
2662
2663         if (curr->softirqs_enabled) {
2664                 debug_atomic_inc(redundant_softirqs_on);
2665                 return;
2666         }
2667
2668         current->lockdep_recursion = 1;
2669         /*
2670          * We'll do an OFF -> ON transition:
2671          */
2672         curr->softirqs_enabled = 1;
2673         curr->softirq_enable_ip = ip;
2674         curr->softirq_enable_event = ++curr->irq_events;
2675         debug_atomic_inc(softirqs_on_events);
2676         /*
2677          * We are going to turn softirqs on, so set the
2678          * usage bit for all held locks, if hardirqs are
2679          * enabled too:
2680          */
2681         if (curr->hardirqs_enabled)
2682                 mark_held_locks(curr, SOFTIRQ);
2683         current->lockdep_recursion = 0;
2684 }
2685
2686 /*
2687  * Softirqs were disabled:
2688  */
2689 void trace_softirqs_off(unsigned long ip)
2690 {
2691         struct task_struct *curr = current;
2692
2693         if (unlikely(!debug_locks || current->lockdep_recursion))
2694                 return;
2695
2696         /*
2697          * We fancy IRQs being disabled here, see softirq.c
2698          */
2699         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2700                 return;
2701
2702         if (curr->softirqs_enabled) {
2703                 /*
2704                  * We have done an ON -> OFF transition:
2705                  */
2706                 curr->softirqs_enabled = 0;
2707                 curr->softirq_disable_ip = ip;
2708                 curr->softirq_disable_event = ++curr->irq_events;
2709                 debug_atomic_inc(softirqs_off_events);
2710                 /*
2711                  * Whoops, we wanted softirqs off, so why aren't they?
2712                  */
2713                 DEBUG_LOCKS_WARN_ON(!softirq_count());
2714         } else
2715                 debug_atomic_inc(redundant_softirqs_off);
2716 }
2717
2718 static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2719 {
2720         struct task_struct *curr = current;
2721
2722         if (unlikely(!debug_locks))
2723                 return;
2724
2725         /* no reclaim without waiting on it */
2726         if (!(gfp_mask & __GFP_WAIT))
2727                 return;
2728
2729         /* this guy won't enter reclaim */
2730         if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2731                 return;
2732
2733         /* We're only interested in __GFP_FS allocations for now */
2734         if (!(gfp_mask & __GFP_FS))
2735                 return;
2736
2737         /*
2738          * Oi! Can't be having __GFP_FS allocations with IRQs disabled.
2739          */
2740         if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2741                 return;
2742
2743         mark_held_locks(curr, RECLAIM_FS);
2744 }
2745
2746 static void check_flags(unsigned long flags);
2747
2748 void lockdep_trace_alloc(gfp_t gfp_mask)
2749 {
2750         unsigned long flags;
2751
2752         if (unlikely(current->lockdep_recursion))
2753                 return;
2754
2755         raw_local_irq_save(flags);
2756         check_flags(flags);
2757         current->lockdep_recursion = 1;
2758         __lockdep_trace_alloc(gfp_mask, flags);
2759         current->lockdep_recursion = 0;
2760         raw_local_irq_restore(flags);
2761 }
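/*
 * lockdep_trace_alloc() is the allocator-side entry point: the page
 * and slab allocators call it with the gfp mask of the current
 * allocation, so every lock held across a __GFP_FS allocation gets an
 * ENABLED_RECLAIM_FS-style usage mark. Sketch of a call site
 * (simplified; not the literal allocator code):
 *
 *	void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 *	{
 *		lockdep_trace_alloc(gfpflags);
 *		...
 *	}
 *
 * Together with the LOCK_USED_IN_RECLAIM_FS marks set on locks taken
 * during reclaim, this proves GFP_FS recursion deadlocks.
 */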
2762
2763 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2764 {
2765         /*
2766          * If non-trylock use in a hardirq or softirq context, then
2767          * mark the lock as used in these contexts:
2768          */
2769         if (!hlock->trylock) {
2770                 if (hlock->read) {
2771                         if (curr->hardirq_context)
2772                                 if (!mark_lock(curr, hlock,
2773                                                 LOCK_USED_IN_HARDIRQ_READ))
2774                                         return 0;
2775                         if (curr->softirq_context)
2776                                 if (!mark_lock(curr, hlock,
2777                                                 LOCK_USED_IN_SOFTIRQ_READ))
2778                                         return 0;
2779                 } else {
2780                         if (curr->hardirq_context)
2781                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2782                                         return 0;
2783                         if (curr->softirq_context)
2784                                 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2785                                         return 0;
2786                 }
2787         }
2788         if (!hlock->hardirqs_off) {
2789                 if (hlock->read) {
2790                         if (!mark_lock(curr, hlock,
2791                                         LOCK_ENABLED_HARDIRQ_READ))
2792                                 return 0;
2793                         if (curr->softirqs_enabled)
2794                                 if (!mark_lock(curr, hlock,
2795                                                 LOCK_ENABLED_SOFTIRQ_READ))
2796                                         return 0;
2797                 } else {
2798                         if (!mark_lock(curr, hlock,
2799                                         LOCK_ENABLED_HARDIRQ))
2800                                 return 0;
2801                         if (curr->softirqs_enabled)
2802                                 if (!mark_lock(curr, hlock,
2803                                                 LOCK_ENABLED_SOFTIRQ))
2804                                         return 0;
2805                 }
2806         }
2807
2808         /*
2809          * We reuse the irq context infrastructure more broadly as a general
2810          * context checking code. This tests GFP_FS recursion (a lock taken
2811          * during reclaim for a GFP_FS allocation is held over a GFP_FS
2812          * allocation).
2813          */
2814         if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2815                 if (hlock->read) {
2816                         if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2817                                         return 0;
2818                 } else {
2819                         if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2820                                         return 0;
2821                 }
2822         }
2823
2824         return 1;
2825 }
2826
2827 static int separate_irq_context(struct task_struct *curr,
2828                 struct held_lock *hlock)
2829 {
2830         unsigned int depth = curr->lockdep_depth;
2831
2832         /*
2833          * Keep track of points where we cross into an interrupt context:
2834          */
2835         hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2836                                 curr->softirq_context;
2837         if (depth) {
2838                 struct held_lock *prev_hlock;
2839
2840                 prev_hlock = curr->held_locks + depth-1;
2841                 /*
2842                  * If we cross into another context, reset the
2843                  * hash key (this also prevents the checking and the
2844                  * adding of the dependency to 'prev'):
2845                  */
2846                 if (prev_hlock->irq_context != hlock->irq_context)
2847                         return 1;
2848         }
2849         return 0;
2850 }
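/*
 * The irq_context value computed above is a small cookie:
 *
 *	0 - process context
 *	1 - softirq context
 *	2 - hardirq context
 *	3 - hardirq that interrupted a softirq
 *
 * e.g. a lock taken in a softirq that is interrupted by a hardirq
 * taking another lock yields different cookies, so the chain key is
 * reset and no false process-level dependency is recorded between
 * the two locks.
 */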
2851
2852 #else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2853
2854 static inline
2855 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2856                 enum lock_usage_bit new_bit)
2857 {
2858         WARN_ON(1); /* Impossible, innit? We don't have CONFIG_TRACE_IRQFLAGS */
2859         return 1;
2860 }
2861
2862 static inline int mark_irqflags(struct task_struct *curr,
2863                 struct held_lock *hlock)
2864 {
2865         return 1;
2866 }
2867
2868 static inline int separate_irq_context(struct task_struct *curr,
2869                 struct held_lock *hlock)
2870 {
2871         return 0;
2872 }
2873
2874 void lockdep_trace_alloc(gfp_t gfp_mask)
2875 {
2876 }
2877
2878 #endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
2879
2880 /*
2881  * Mark a lock with a usage bit, and validate the state transition:
2882  */
2883 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2884                              enum lock_usage_bit new_bit)
2885 {
2886         unsigned int new_mask = 1 << new_bit, ret = 1;
2887
2888         /*
2889          * If already set then do not dirty the cacheline,
2890          * nor do any checks:
2891          */
2892         if (likely(hlock_class(this)->usage_mask & new_mask))
2893                 return 1;
2894
2895         if (!graph_lock())
2896                 return 0;
2897         /*
2898          * Make sure we didn't race:
2899          */
2900         if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
2901                 graph_unlock();
2902                 return 1;
2903         }
2904
2905         hlock_class(this)->usage_mask |= new_mask;
2906
2907         if (!save_trace(hlock_class(this)->usage_traces + new_bit))
2908                 return 0;
2909
2910         switch (new_bit) {
2911 #define LOCKDEP_STATE(__STATE)                  \
2912         case LOCK_USED_IN_##__STATE:            \
2913         case LOCK_USED_IN_##__STATE##_READ:     \
2914         case LOCK_ENABLED_##__STATE:            \
2915         case LOCK_ENABLED_##__STATE##_READ:
2916 #include "lockdep_states.h"
2917 #undef LOCKDEP_STATE
2918                 ret = mark_lock_irq(curr, this, new_bit);
2919                 if (!ret)
2920                         return 0;
2921                 break;
2922         case LOCK_USED:
2923                 debug_atomic_dec(nr_unused_locks);
2924                 break;
2925         default:
2926                 if (!debug_locks_off_graph_unlock())
2927                         return 0;
2928                 WARN_ON(1);
2929                 return 0;
2930         }
2931
2932         graph_unlock();
2933
2934         /*
2935          * We must printk outside of the graph_lock:
2936          */
2937         if (ret == 2) {
2938                 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2939                 print_lock(this);
2940                 print_irqtrace_events(curr);
2941                 dump_stack();
2942         }
2943
2944         return ret;
2945 }
2946
2947 /*
2948  * Initialize a lock instance's lock-class mapping info:
2949  */
2950 void lockdep_init_map(struct lockdep_map *lock, const char *name,
2951                       struct lock_class_key *key, int subclass)
2952 {
2953         memset(lock, 0, sizeof(*lock));
2954
2955 #ifdef CONFIG_LOCK_STAT
2956         lock->cpu = raw_smp_processor_id();
2957 #endif
2958
2959         /*
2960          * Can't be having no nameless bastards around this place!
2961          */
2962         if (DEBUG_LOCKS_WARN_ON(!name)) {
2963                 lock->name = "NULL";
2964                 return;
2965         }
2966
2967         lock->name = name;
2968
2969         /*
2970          * No key, no joy, we need to hash something.
2971          */
2972         if (DEBUG_LOCKS_WARN_ON(!key))
2973                 return;
2974         /*
2975          * Sanity check, the lock-class key must be persistent:
2976          */
2977         if (!static_obj(key)) {
2978                 printk("BUG: key %p not in .data!\n", key);
2979                 /*
2980                  * What it says above ^^^^^, I suggest you read it.
2981                  */
2982                 DEBUG_LOCKS_WARN_ON(1);
2983                 return;
2984         }
2985         lock->key = key;
2986
2987         if (unlikely(!debug_locks))
2988                 return;
2989
2990         if (subclass)
2991                 register_lock_class(lock, subclass, 1);
2992 }
2993 EXPORT_SYMBOL_GPL(lockdep_init_map);
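/*
 * Typical use of lockdep_init_map() (a sketch of the idiom the locking
 * primitives use, cf. mutex_init()/__mutex_init(); "demo_lock_init" is
 * a made-up name). The static key makes every init site its own lock
 * class:
 *
 *	#define demo_lock_init(l)				\
 *	do {							\
 *		static struct lock_class_key __key;		\
 *								\
 *		lockdep_init_map(&(l)->dep_map, #l, &__key, 0);	\
 *	} while (0)
 *
 * The key has static storage duration, satisfying the static_obj()
 * check above.
 */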
2994
2995 struct lock_class_key __lockdep_no_validate__;
2996
2997 /*
2998  * This gets called for every mutex_lock*()/spin_lock*() operation.
2999  * We maintain the dependency maps and validate the locking attempt:
3000  */
3001 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3002                           int trylock, int read, int check, int hardirqs_off,
3003                           struct lockdep_map *nest_lock, unsigned long ip,
3004                           int references)
3005 {
3006         struct task_struct *curr = current;
3007         struct lock_class *class = NULL;
3008         struct held_lock *hlock;
3009         unsigned int depth, id;
3010         int chain_head = 0;
3011         int class_idx;
3012         u64 chain_key;
3013
3014         if (!prove_locking)
3015                 check = 1;
3016
3017         if (unlikely(!debug_locks))
3018                 return 0;
3019
3020         /*
3021          * Lockdep should run with IRQs disabled, otherwise we could
3022          * get an interrupt which would want to take locks, which would
3023          * end up in lockdep and have you got a head-ache already?
3024          */
3025         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3026                 return 0;
3027
3028         if (lock->key == &__lockdep_no_validate__)
3029                 check = 1;
3030
3031         if (subclass < NR_LOCKDEP_CACHING_CLASSES)
3032                 class = lock->class_cache[subclass];
3033         /*
3034          * Not cached?
3035          */
3036         if (unlikely(!class)) {
3037                 class = register_lock_class(lock, subclass, 0);
3038                 if (!class)
3039                         return 0;
3040         }
3041         atomic_inc((atomic_t *)&class->ops);
3042         if (very_verbose(class)) {
3043                 printk("\nacquire class [%p] %s", class->key, class->name);
3044                 if (class->name_version > 1)
3045                         printk("#%d", class->name_version);
3046                 printk("\n");
3047                 dump_stack();
3048         }
3049
3050         /*
3051          * Add the lock to the list of currently held locks.
3052          * (we dont increase the depth just yet, up until the
3053          * dependency checks are done)
3054          */
3055         depth = curr->lockdep_depth;
3056         /*
3057          * Ran out of static storage for our per-task lock stack again have we?
3058          */
3059         if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
3060                 return 0;
3061
3062         class_idx = class - lock_classes + 1;
3063
3064         if (depth) {
3065                 hlock = curr->held_locks + depth - 1;
3066                 if (hlock->class_idx == class_idx && nest_lock) {
3067                         if (hlock->references)
3068                                 hlock->references++;
3069                         else
3070                                 hlock->references = 2;
3071
3072                         return 1;
3073                 }
3074         }
3075
3076         hlock = curr->held_locks + depth;
3077         /*
3078          * Plain impossible, we just registered it and checked it weren't no
3079          * NULL like.. I bet this mushroom I ate was good!
3080          */
3081         if (DEBUG_LOCKS_WARN_ON(!class))
3082                 return 0;
3083         hlock->class_idx = class_idx;
3084         hlock->acquire_ip = ip;
3085         hlock->instance = lock;
3086         hlock->nest_lock = nest_lock;
3087         hlock->trylock = trylock;
3088         hlock->read = read;
3089         hlock->check = check;
3090         hlock->hardirqs_off = !!hardirqs_off;
3091         hlock->references = references;
3092 #ifdef CONFIG_LOCK_STAT
3093         hlock->waittime_stamp = 0;
3094         hlock->holdtime_stamp = lockstat_clock();
3095 #endif
3096
3097         if (check == 2 && !mark_irqflags(curr, hlock))
3098                 return 0;
3099
3100         /* mark it as used: */
3101         if (!mark_lock(curr, hlock, LOCK_USED))
3102                 return 0;
3103
3104         /*
3105          * Calculate the chain hash: it's the combined hash of all the
3106          * lock keys along the dependency chain. We save the hash value
3107          * at every step so that we can get the current hash easily
3108          * after unlock. The chain hash is then used to cache dependency
3109          * results.
3110          *
3111          * The 'key ID' (the class index), not class->key, is the most
3112          * compact key value we can use to drive the hash.
3113          */
3114         id = class - lock_classes;
3115         /*
3116          * Whoops, we did it again.. ran straight out of our static allocation.
3117          */
3118         if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
3119                 return 0;
3120
3121         chain_key = curr->curr_chain_key;
3122         if (!depth) {
3123                 /*
3124                  * How can we have a chain hash when we ain't got no keys?!
3125                  */
3126                 if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
3127                         return 0;
3128                 chain_head = 1;
3129         }
3130
3131         hlock->prev_chain_key = chain_key;
3132         if (separate_irq_context(curr, hlock)) {
3133                 chain_key = 0;
3134                 chain_head = 1;
3135         }
3136         chain_key = iterate_chain_key(chain_key, id);
3137
3138         if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
3139                 return 0;
3140
3141         curr->curr_chain_key = chain_key;
3142         curr->lockdep_depth++;
3143         check_chain_key(curr);
3144 #ifdef CONFIG_DEBUG_LOCKDEP
3145         if (unlikely(!debug_locks))
3146                 return 0;
3147 #endif
3148         if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
3149                 debug_locks_off();
3150                 printk("BUG: MAX_LOCK_DEPTH too low!\n");
3151                 printk("turning off the locking correctness validator.\n");
3152                 dump_stack();
3153                 return 0;
3154         }
3155
3156         if (unlikely(curr->lockdep_depth > max_lockdep_depth))
3157                 max_lockdep_depth = curr->lockdep_depth;
3158
3159         return 1;
3160 }
3161
3162 static int
3163 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
3164                            unsigned long ip)
3165 {
3166         if (!debug_locks_off())
3167                 return 0;
3168         if (debug_locks_silent)
3169                 return 0;
3170
3171         printk("\n");
3172         printk("=====================================\n");
3173         printk("[ BUG: bad unlock balance detected! ]\n");
3174         print_kernel_ident();
3175         printk("-------------------------------------\n");
3176         printk("%s/%d is trying to release lock (",
3177                 curr->comm, task_pid_nr(curr));
3178         print_lockdep_cache(lock);
3179         printk(") at:\n");
3180         print_ip_sym(ip);
3181         printk("but there are no more locks to release!\n");
3182         printk("\nother info that might help us debug this:\n");
3183         lockdep_print_held_locks(curr);
3184
3185         printk("\nstack backtrace:\n");
3186         dump_stack();
3187
3188         return 0;
3189 }
3190
3191 /*
3192  * Common debugging checks for both nested and non-nested unlock:
3193  */
3194 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
3195                         unsigned long ip)
3196 {
3197         if (unlikely(!debug_locks))
3198                 return 0;
3199         /*
3200          * We must run with IRQs disabled here, otherwise lockdep could recurse into itself (head-aches, etc.).
3201          */
3202         if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
3203                 return 0;
3204
3205         if (curr->lockdep_depth <= 0)
3206                 return print_unlock_imbalance_bug(curr, lock, ip);
3207
3208         return 1;
3209 }
3210
3211 static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
3212 {
3213         if (hlock->instance == lock)
3214                 return 1;
3215
3216         if (hlock->references) {
3217                 struct lock_class *class = lock->class_cache[0];
3218
3219                 if (!class)
3220                         class = look_up_lock_class(lock, 0);
3221
3222                 /*
3223                  * If look_up_lock_class() failed to find a class, we're trying
3224                  * to test if we hold a lock that has never yet been acquired.
3225                  * Clearly if the lock hasn't been acquired _ever_, we're not
3226                  * holding it either, so report failure.
3227                  */
3228                 if (!class)
3229                         return 0;
3230
3231                 /*
3232                  * References, but not a lock we're actually ref-counting?
3233                  * State got messed up, follow the sites that change ->references
3234                  * and try to make sense of it.
3235                  */
3236                 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
3237                         return 0;
3238
3239                 if (hlock->class_idx == class - lock_classes + 1)
3240                         return 1;
3241         }
3242
3243         return 0;
3244 }
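/*
 * A hedged sketch of the ->references scheme match_held_lock() relies on,
 * restated from the acquire path above: re-acquiring the same map with a
 * nest_lock does not push a second held_lock, it bumps the existing one:
 *
 *	if (hlock->class_idx == class_idx && nest_lock) {
 *		if (hlock->references)
 *			hlock->references++;
 *		else
 *			hlock->references = 2;
 *	}
 *
 * (2 == the original acquisition plus this one.) Hence a ref-counted
 * entry without ->nest_lock indicates corrupted state.
 */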
3245
3246 static int
3247 __lock_set_class(struct lockdep_map *lock, const char *name,
3248                  struct lock_class_key *key, unsigned int subclass,
3249                  unsigned long ip)
3250 {
3251         struct task_struct *curr = current;
3252         struct held_lock *hlock, *prev_hlock;
3253         struct lock_class *class;
3254         unsigned int depth;
3255         int i;
3256
3257         depth = curr->lockdep_depth;
3258         /*
3259          * This function is about (re)setting the class of a held lock,
3260          * yet we're not actually holding any locks. Naughty user!
3261          */
3262         if (DEBUG_LOCKS_WARN_ON(!depth))
3263                 return 0;
3264
3265         prev_hlock = NULL;
3266         for (i = depth-1; i >= 0; i--) {
3267                 hlock = curr->held_locks + i;
3268                 /*
3269                  * We must not cross into another context:
3270                  */
3271                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3272                         break;
3273                 if (match_held_lock(hlock, lock))
3274                         goto found_it;
3275                 prev_hlock = hlock;
3276         }
3277         return print_unlock_imbalance_bug(curr, lock, ip);
3278
3279 found_it:
3280         lockdep_init_map(lock, name, key, 0);
3281         class = register_lock_class(lock, subclass, 0);
3282         hlock->class_idx = class - lock_classes + 1;
3283
3284         curr->lockdep_depth = i;
3285         curr->curr_chain_key = hlock->prev_chain_key;
3286
3287         for (; i < depth; i++) {
3288                 hlock = curr->held_locks + i;
3289                 if (!__lock_acquire(hlock->instance,
3290                         hlock_class(hlock)->subclass, hlock->trylock,
3291                                 hlock->read, hlock->check, hlock->hardirqs_off,
3292                                 hlock->nest_lock, hlock->acquire_ip,
3293                                 hlock->references))
3294                         return 0;
3295         }
3296
3297         /*
3298          * I took it apart and put it back together again, except now I have
3299          * these 'spare' parts.. where shall I put them?
3300          */
3301         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
3302                 return 0;
3303         return 1;
3304 }
3305
3306 /*
3307  * Remove the lock from the list of currently held locks in a
3308  * potentially non-nested (out of order) manner. This is a
3309  * relatively rare operation, as all the unlock APIs default
3310  * to nested mode (which uses lock_release()):
3311  */
3312 static int
3313 lock_release_non_nested(struct task_struct *curr,
3314                         struct lockdep_map *lock, unsigned long ip)
3315 {
3316         struct held_lock *hlock, *prev_hlock;
3317         unsigned int depth;
3318         int i;
3319
3320         /*
3321          * Check whether the lock exists in the current stack
3322          * of held locks:
3323          */
3324         depth = curr->lockdep_depth;
3325         /*
3326          * So we're all set to release this lock.. wait what lock? We don't
3327          * own any locks, you've been drinking again?
3328          */
3329         if (DEBUG_LOCKS_WARN_ON(!depth))
3330                 return 0;
3331
3332         prev_hlock = NULL;
3333         for (i = depth-1; i >= 0; i--) {
3334                 hlock = curr->held_locks + i;
3335                 /*
3336                  * We must not cross into another context:
3337                  */
3338                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3339                         break;
3340                 if (match_held_lock(hlock, lock))
3341                         goto found_it;
3342                 prev_hlock = hlock;
3343         }
3344         return print_unlock_imbalance_bug(curr, lock, ip);
3345
3346 found_it:
3347         if (hlock->instance == lock)
3348                 lock_release_holdtime(hlock);
3349
3350         if (hlock->references) {
3351                 hlock->references--;
3352                 if (hlock->references) {
3353                         /*
3354                          * We had, and after removing one, still have
3355                          * references, the current lock stack is still
3356                          * valid. We're done!
3357                          */
3358                         return 1;
3359                 }
3360         }
3361
3362         /*
3363          * We have the right lock to unlock, 'hlock' points to it.
3364          * Now we remove it from the stack, and add back the other
3365          * entries (if any), recalculating the hash along the way:
3366          */
3367
3368         curr->lockdep_depth = i;
3369         curr->curr_chain_key = hlock->prev_chain_key;
3370
3371         for (i++; i < depth; i++) {
3372                 hlock = curr->held_locks + i;
3373                 if (!__lock_acquire(hlock->instance,
3374                         hlock_class(hlock)->subclass, hlock->trylock,
3375                                 hlock->read, hlock->check, hlock->hardirqs_off,
3376                                 hlock->nest_lock, hlock->acquire_ip,
3377                                 hlock->references))
3378                         return 0;
3379         }
3380
3381         /*
3382          * We had N bottles of beer on the wall, we drank one, but now
3383          * there aren't N-1 bottles of beer left on the wall...
3384          */
3385         if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3386                 return 0;
3387         return 1;
3388 }
3389
3390 /*
3391  * Remove the lock from the list of currently held locks - this gets
3392  * called on mutex_unlock()/spin_unlock*() (or on a failed
3393  * mutex_lock_interruptible()). This is done for unlocks that nest
3394  * perfectly. (i.e. the current top of the lock-stack is unlocked)
3395  */
3396 static int lock_release_nested(struct task_struct *curr,
3397                                struct lockdep_map *lock, unsigned long ip)
3398 {
3399         struct held_lock *hlock;
3400         unsigned int depth;
3401
3402         /*
3403          * Pop off the top of the lock stack:
3404          */
3405         depth = curr->lockdep_depth - 1;
3406         hlock = curr->held_locks + depth;
3407
3408         /*
3409          * Is the unlock non-nested:
3410          */
3411         if (hlock->instance != lock || hlock->references)
3412                 return lock_release_non_nested(curr, lock, ip);
3413         curr->lockdep_depth--;
3414
3415         /*
3416          * No more locks, but somehow we've still got chain hash left over, who left it?
3417          */
3418         if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
3419                 return 0;
3420
3421         curr->curr_chain_key = hlock->prev_chain_key;
3422
3423         lock_release_holdtime(hlock);
3424
3425 #ifdef CONFIG_DEBUG_LOCKDEP
3426         hlock->prev_chain_key = 0;
3427         hlock->class_idx = 0;
3428         hlock->acquire_ip = 0;
3429         hlock->irq_context = 0;
3430 #endif
3431         return 1;
3432 }
3433
3434 /*
3435  * Remove the lock from the list of currently held locks - this gets
3436  * called on mutex_unlock()/spin_unlock*() (or on a failed
3437  * mutex_lock_interruptible()). Dispatches to the nested or the
3438  * non-nested variant above, depending on the 'nested' argument.
3439  */
3440 static void
3441 __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
3442 {
3443         struct task_struct *curr = current;
3444
3445         if (!check_unlock(curr, lock, ip))
3446                 return;
3447
3448         if (nested) {
3449                 if (!lock_release_nested(curr, lock, ip))
3450                         return;
3451         } else {
3452                 if (!lock_release_non_nested(curr, lock, ip))
3453                         return;
3454         }
3455
3456         check_chain_key(curr);
3457 }
3458
3459 static int __lock_is_held(struct lockdep_map *lock)
3460 {
3461         struct task_struct *curr = current;
3462         int i;
3463
3464         for (i = 0; i < curr->lockdep_depth; i++) {
3465                 struct held_lock *hlock = curr->held_locks + i;
3466
3467                 if (match_held_lock(hlock, lock))
3468                         return 1;
3469         }
3470
3471         return 0;
3472 }
3473
3474 /*
3475  * Check whether we follow the irq-flags state precisely:
3476  */
3477 static void check_flags(unsigned long flags)
3478 {
3479 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
3480     defined(CONFIG_TRACE_IRQFLAGS)
3481         if (!debug_locks)
3482                 return;
3483
3484         if (irqs_disabled_flags(flags)) {
3485                 if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) {
3486                         printk("possible reason: unannotated irqs-off.\n");
3487                 }
3488         } else {
3489                 if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) {
3490                         printk("possible reason: unannotated irqs-on.\n");
3491                 }
3492         }
3493
3494         /*
3495          * We don't accurately track softirq state in e.g.
3496          * hardirq contexts (such as on 4KSTACKS), so only
3497          * check when not in hardirq context:
3498          */
3499         if (!hardirq_count()) {
3500                 if (softirq_count()) {
3501                         /* like the above, but with softirqs */
3502                         DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
3503                 } else {
3504                         /* lick the above, does it taste good? */
3505                         DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
3506                 }
3507         }
3508
3509         if (!debug_locks)
3510                 print_irqtrace_events(current);
3511 #endif
3512 }
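/*
 * Illustration of what check_flags() catches (a hedged sketch, not taken
 * from a real call site): disabling interrupts behind lockdep's back,
 *
 *	raw_local_irq_disable();	<- no trace_hardirqs_off() annotation
 *	spin_lock(&some_lock);		<- ->hardirqs_enabled still set
 *
 * produces the "unannotated irqs-off" warning above.
 */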
3513
3514 void lock_set_class(struct lockdep_map *lock, const char *name,
3515                     struct lock_class_key *key, unsigned int subclass,
3516                     unsigned long ip)
3517 {
3518         unsigned long flags;
3519
3520         if (unlikely(current->lockdep_recursion))
3521                 return;
3522
3523         raw_local_irq_save(flags);
3524         current->lockdep_recursion = 1;
3525         check_flags(flags);
3526         if (__lock_set_class(lock, name, key, subclass, ip))
3527                 check_chain_key(current);
3528         current->lockdep_recursion = 0;
3529         raw_local_irq_restore(flags);
3530 }
3531 EXPORT_SYMBOL_GPL(lock_set_class);
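/*
 * Hedged usage sketch (the object and key names are made up): re-key a
 * lock that is currently held, e.g. when an object moves to a context
 * where its old class would cause false positives:
 *
 *	static struct lock_class_key migrate_key;
 *
 *	lock_set_class(&obj->lock.dep_map, "obj-lock-migrated",
 *		       &migrate_key, 0, _RET_IP_);
 *
 * The lock must be held: __lock_set_class() re-registers the class and
 * replays every lock held above it in the stack.
 */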
3532
3533 /*
3534  * We are not always called with irqs disabled - do that here,
3535  * and also avoid lockdep recursion:
3536  */
3537 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3538                           int trylock, int read, int check,
3539                           struct lockdep_map *nest_lock, unsigned long ip)
3540 {
3541         unsigned long flags;
3542
3543         if (unlikely(current->lockdep_recursion))
3544                 return;
3545
3546         raw_local_irq_save(flags);
3547         check_flags(flags);
3548
3549         current->lockdep_recursion = 1;
3550         trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
3551         __lock_acquire(lock, subclass, trylock, read, check,
3552                        irqs_disabled_flags(flags), nest_lock, ip, 0);
3553         current->lockdep_recursion = 0;
3554         raw_local_irq_restore(flags);
3555 }
3556 EXPORT_SYMBOL_GPL(lock_acquire);
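/*
 * For orientation: the locking primitives reach this hook through thin
 * wrapper macros in lockdep.h; a simplified, config-dependent sketch:
 *
 *	#define spin_acquire(l, s, t, i) \
 *		lock_acquire(l, s, t, 0, 2, NULL, i)
 *
 * i.e. a plain spin_lock() arrives here as an exclusive (read == 0),
 * fully checked (check == 2) acquisition with no nest_lock.
 */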
3557
3558 void lock_release(struct lockdep_map *lock, int nested,
3559                           unsigned long ip)
3560 {
3561         unsigned long flags;
3562
3563         if (unlikely(current->lockdep_recursion))
3564                 return;
3565
3566         raw_local_irq_save(flags);
3567         check_flags(flags);
3568         current->lockdep_recursion = 1;
3569         trace_lock_release(lock, ip);
3570         __lock_release(lock, nested, ip);
3571         current->lockdep_recursion = 0;
3572         raw_local_irq_restore(flags);
3573 }
3574 EXPORT_SYMBOL_GPL(lock_release);
3575
3576 int lock_is_held(struct lockdep_map *lock)
3577 {
3578         unsigned long flags;
3579         int ret = 0;
3580
3581         if (unlikely(current->lockdep_recursion))
3582                 return 1; /* avoid false negative lockdep_assert_held() */
3583
3584         raw_local_irq_save(flags);
3585         check_flags(flags);
3586
3587         current->lockdep_recursion = 1;
3588         ret = __lock_is_held(lock);
3589         current->lockdep_recursion = 0;
3590         raw_local_irq_restore(flags);
3591
3592         return ret;
3593 }
3594 EXPORT_SYMBOL_GPL(lock_is_held);
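/*
 * Typical use is through lockdep_assert_held() from lockdep.h; a
 * simplified sketch of its shape:
 *
 *	#define lockdep_assert_held(l) \
 *		WARN_ON(debug_locks && !lockdep_is_held(l))
 *
 * The early 'return 1' on lockdep_recursion above keeps such asserts
 * from false-firing while lockdep itself is running.
 */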
3595
3596 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
3597 {
3598         current->lockdep_reclaim_gfp = gfp_mask;
3599 }
3600
3601 void lockdep_clear_current_reclaim_state(void)
3602 {
3603         current->lockdep_reclaim_gfp = 0;
3604 }
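/*
 * A hedged sketch of how the reclaim state is used (call-site shape
 * borrowed from the page allocator): direct reclaim is bracketed as
 *
 *	lockdep_set_current_reclaim_state(gfp_mask);
 *	... perform reclaim ...
 *	lockdep_clear_current_reclaim_state();
 *
 * so locks taken during reclaim can be checked against allocations made
 * while holding them (GFP_FS/GFP_IO inversions).
 */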
3605
3606 #ifdef CONFIG_LOCK_STAT
3607 static int
3608 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3609                            unsigned long ip)
3610 {
3611         if (!debug_locks_off())
3612                 return 0;
3613         if (debug_locks_silent)
3614                 return 0;
3615
3616         printk("\n");
3617         printk("=================================\n");
3618         printk("[ BUG: bad contention detected! ]\n");
3619         print_kernel_ident();
3620         printk("---------------------------------\n");
3621         printk("%s/%d is trying to contend lock (",
3622                 curr->comm, task_pid_nr(curr));
3623         print_lockdep_cache(lock);
3624         printk(") at:\n");
3625         print_ip_sym(ip);
3626         printk("but there are no locks held!\n");
3627         printk("\nother info that might help us debug this:\n");
3628         lockdep_print_held_locks(curr);
3629
3630         printk("\nstack backtrace:\n");
3631         dump_stack();
3632
3633         return 0;
3634 }
3635
3636 static void
3637 __lock_contended(struct lockdep_map *lock, unsigned long ip)
3638 {
3639         struct task_struct *curr = current;
3640         struct held_lock *hlock, *prev_hlock;
3641         struct lock_class_stats *stats;
3642         unsigned int depth;
3643         int i, contention_point, contending_point;
3644
3645         depth = curr->lockdep_depth;
3646         /*
3647          * Whee, we contended on this lock, except it seems we're not
3648          * actually trying to acquire anything much at all..
3649          */
3650         if (DEBUG_LOCKS_WARN_ON(!depth))
3651                 return;
3652
3653         prev_hlock = NULL;
3654         for (i = depth-1; i >= 0; i--) {
3655                 hlock = curr->held_locks + i;
3656                 /*
3657                  * We must not cross into another context:
3658                  */
3659                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3660                         break;
3661                 if (match_held_lock(hlock, lock))
3662                         goto found_it;
3663                 prev_hlock = hlock;
3664         }
3665         print_lock_contention_bug(curr, lock, ip);
3666         return;
3667
3668 found_it:
3669         if (hlock->instance != lock)
3670                 return;
3671
3672         hlock->waittime_stamp = lockstat_clock();
3673
3674         contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
3675         contending_point = lock_point(hlock_class(hlock)->contending_point,
3676                                       lock->ip);
3677
3678         stats = get_lock_stats(hlock_class(hlock));
3679         if (contention_point < LOCKSTAT_POINTS)
3680                 stats->contention_point[contention_point]++;
3681         if (contending_point < LOCKSTAT_POINTS)
3682                 stats->contending_point[contending_point]++;
3683         if (lock->cpu != smp_processor_id())
3684                 stats->bounces[bounce_contended + !!hlock->read]++;
3685         put_lock_stats(stats);
3686 }
3687
3688 static void
3689 __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3690 {
3691         struct task_struct *curr = current;
3692         struct held_lock *hlock, *prev_hlock;
3693         struct lock_class_stats *stats;
3694         unsigned int depth;
3695         u64 now, waittime = 0;
3696         int i, cpu;
3697
3698         depth = curr->lockdep_depth;
3699         /*
3700          * Yay, we acquired ownership of this lock we didn't try to
3701          * acquire, how the heck did that happen?
3702          */
3703         if (DEBUG_LOCKS_WARN_ON(!depth))
3704                 return;
3705
3706         prev_hlock = NULL;
3707         for (i = depth-1; i >= 0; i--) {
3708                 hlock = curr->held_locks + i;
3709                 /*
3710                  * We must not cross into another context:
3711                  */
3712                 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3713                         break;
3714                 if (match_held_lock(hlock, lock))
3715                         goto found_it;
3716                 prev_hlock = hlock;
3717         }
3718         print_lock_contention_bug(curr, lock, _RET_IP_);
3719         return;
3720
3721 found_it:
3722         if (hlock->instance != lock)
3723                 return;
3724
3725         cpu = smp_processor_id();
3726         if (hlock->waittime_stamp) {
3727                 now = lockstat_clock();
3728                 waittime = now - hlock->waittime_stamp;
3729                 hlock->holdtime_stamp = now;
3730         }
3731
3732         trace_lock_acquired(lock, ip);
3733
3734         stats = get_lock_stats(hlock_class(hlock));
3735         if (waittime) {
3736                 if (hlock->read)
3737                         lock_time_inc(&stats->read_waittime, waittime);
3738                 else
3739                         lock_time_inc(&stats->write_waittime, waittime);
3740         }
3741         if (lock->cpu != cpu)
3742                 stats->bounces[bounce_acquired + !!hlock->read]++;
3743         put_lock_stats(stats);
3744
3745         lock->cpu = cpu;
3746         lock->ip = ip;
3747 }
3748
3749 void lock_contended(struct lockdep_map *lock, unsigned long ip)
3750 {
3751         unsigned long flags;
3752
3753         if (unlikely(!lock_stat))
3754                 return;
3755
3756         if (unlikely(current->lockdep_recursion))
3757                 return;
3758
3759         raw_local_irq_save(flags);
3760         check_flags(flags);
3761         current->lockdep_recursion = 1;
3762         trace_lock_contended(lock, ip);
3763         __lock_contended(lock, ip);
3764         current->lockdep_recursion = 0;
3765         raw_local_irq_restore(flags);
3766 }
3767 EXPORT_SYMBOL_GPL(lock_contended);
3768
3769 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
3770 {
3771         unsigned long flags;
3772
3773         if (unlikely(!lock_stat))
3774                 return;
3775
3776         if (unlikely(current->lockdep_recursion))
3777                 return;
3778
3779         raw_local_irq_save(flags);
3780         check_flags(flags);
3781         current->lockdep_recursion = 1;
3782         __lock_acquired(lock, ip);
3783         current->lockdep_recursion = 0;
3784         raw_local_irq_restore(flags);
3785 }
3786 EXPORT_SYMBOL_GPL(lock_acquired);
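/*
 * For orientation, a hedged sketch of the call pattern in a sleeping
 * lock's slowpath (shape borrowed from the mutex code):
 *
 *	lock_contended(&lock->dep_map, ip);	<- about to block
 *	... wait for the owner to release ...
 *	lock_acquired(&lock->dep_map, ip);	<- got it, stop the clock
 *
 * __lock_contended() stamps ->waittime_stamp; __lock_acquired() converts
 * that into wait-time statistics plus cpu-bounce accounting.
 */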
3787 #endif
3788
3789 /*
3790  * Used by the testsuite, sanitize the validator state
3791  * after a simulated failure:
3792  */
3793
3794 void lockdep_reset(void)
3795 {
3796         unsigned long flags;
3797         int i;
3798
3799         raw_local_irq_save(flags);
3800         current->curr_chain_key = 0;
3801         current->lockdep_depth = 0;
3802         current->lockdep_recursion = 0;
3803         memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
3804         nr_hardirq_chains = 0;
3805         nr_softirq_chains = 0;
3806         nr_process_chains = 0;
3807         debug_locks = 1;
3808         for (i = 0; i < CHAINHASH_SIZE; i++)
3809                 INIT_LIST_HEAD(chainhash_table + i);
3810         raw_local_irq_restore(flags);
3811 }
3812
3813 static void zap_class(struct lock_class *class)
3814 {
3815         int i;
3816
3817         /*
3818          * Remove all dependencies this lock is
3819          * involved in:
3820          */
3821         for (i = 0; i < nr_list_entries; i++) {
3822                 if (list_entries[i].class == class)
3823                         list_del_rcu(&list_entries[i].entry);
3824         }
3825         /*
3826          * Unhash the class and remove it from the all_lock_classes list:
3827          */
3828         list_del_rcu(&class->hash_entry);
3829         list_del_rcu(&class->lock_entry);
3830
3831         class->key = NULL;
3832 }
3833
3834 static inline int within(const void *addr, void *start, unsigned long size)
3835 {
3836         return addr >= start && addr < start + size;
3837 }
3838
3839 void lockdep_free_key_range(void *start, unsigned long size)
3840 {
3841         struct lock_class *class, *next;
3842         struct list_head *head;
3843         unsigned long flags;
3844         int i;
3845         int locked;
3846
3847         raw_local_irq_save(flags);
3848         locked = graph_lock();
3849
3850         /*
3851          * Unhash all classes that were created by this module:
3852          */
3853         for (i = 0; i < CLASSHASH_SIZE; i++) {
3854                 head = classhash_table + i;
3855                 if (list_empty(head))
3856                         continue;
3857                 list_for_each_entry_safe(class, next, head, hash_entry) {
3858                         if (within(class->key, start, size))
3859                                 zap_class(class);
3860                         else if (within(class->name, start, size))
3861                                 zap_class(class);
3862                 }
3863         }
3864
3865         if (locked)
3866                 graph_unlock();
3867         raw_local_irq_restore(flags);
3868 }
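/*
 * Hedged usage note: module unload is the main caller, along the lines
 * of (simplified from kernel/module.c):
 *
 *	lockdep_free_key_range(mod->module_core, mod->core_size);
 *
 * so that lock keys and class names living inside the module image are
 * unhashed before that memory gets reused.
 */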
3869
3870 void lockdep_reset_lock(struct lockdep_map *lock)
3871 {
3872         struct lock_class *class, *next;
3873         struct list_head *head;
3874         unsigned long flags;
3875         int i, j;
3876         int locked;
3877
3878         raw_local_irq_save(flags);
3879
3880         /*
3881          * Remove all classes this lock might have:
3882          */
3883         for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
3884                 /*
3885                  * If the class exists we look it up and zap it:
3886                  */
3887                 class = look_up_lock_class(lock, j);
3888                 if (class)
3889                         zap_class(class);
3890         }
3891         /*
3892          * Debug check: in the end all mapped classes should
3893          * be gone.
3894          */
3895         locked = graph_lock();
3896         for (i = 0; i < CLASSHASH_SIZE; i++) {
3897                 head = classhash_table + i;
3898                 if (list_empty(head))
3899                         continue;
3900                 list_for_each_entry_safe(class, next, head, hash_entry) {
3901                         int match = 0;
3902
3903                         for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
3904                                 match |= class == lock->class_cache[j];
3905
3906                         if (unlikely(match)) {
3907                                 if (debug_locks_off_graph_unlock()) {
3908                                         /*
3909                                          * We just zapped this lock's classes, how did one still match?
3910                                          */
3911                                         WARN_ON(1);
3912                                 }
3913                                 goto out_restore;
3914                         }
3915                 }
3916         }
3917         if (locked)
3918                 graph_unlock();
3919
3920 out_restore:
3921         raw_local_irq_restore(flags);
3922 }
3923
3924 void lockdep_init(void)
3925 {
3926         int i;
3927
3928         /*
3929          * Some architectures have their own start_kernel()
3930          * code which calls lockdep_init(), while we also
3931  * call lockdep_init() from start_kernel() itself,
3932          * and we want to initialize the hashes only once:
3933          */
3934         if (lockdep_initialized)
3935                 return;
3936
3937         for (i = 0; i < CLASSHASH_SIZE; i++)
3938                 INIT_LIST_HEAD(classhash_table + i);
3939
3940         for (i = 0; i < CHAINHASH_SIZE; i++)
3941                 INIT_LIST_HEAD(chainhash_table + i);
3942
3943         lockdep_initialized = 1;
3944 }
3945
3946 void __init lockdep_info(void)
3947 {
3948         printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
3949
3950         printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
3951         printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
3952         printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
3953         printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
3954         printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
3955         printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
3956         printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
3957
3958         printk(" memory used by lock dependency info: %lu kB\n",
3959                 (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
3960                 sizeof(struct list_head) * CLASSHASH_SIZE +
3961                 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3962                 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3963                 sizeof(struct list_head) * CHAINHASH_SIZE
3964 #ifdef CONFIG_PROVE_LOCKING
3965                 + sizeof(struct circular_queue)
3966 #endif
3967                 ) / 1024
3968                 );
3969
3970         printk(" per task-struct memory footprint: %lu bytes\n",
3971                 sizeof(struct held_lock) * MAX_LOCK_DEPTH);
3972
3973 #ifdef CONFIG_DEBUG_LOCKDEP
3974         if (lockdep_init_error) {
3975                 printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
3976                 printk("Call stack leading to lockdep invocation was:\n");
3977                 print_stack_trace(&lockdep_init_trace, 0);
3978         }
3979 #endif
3980 }
3981
3982 static void
3983 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3984                      const void *mem_to, struct held_lock *hlock)
3985 {
3986         if (!debug_locks_off())
3987                 return;
3988         if (debug_locks_silent)
3989                 return;
3990
3991         printk("\n");
3992         printk("=========================\n");
3993         printk("[ BUG: held lock freed! ]\n");
3994         print_kernel_ident();
3995         printk("-------------------------\n");
3996         printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
3997                 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3998         print_lock(hlock);
3999         lockdep_print_held_locks(curr);
4000
4001         printk("\nstack backtrace:\n");
4002         dump_stack();
4003 }
4004
4005 static inline int not_in_range(const void* mem_from, unsigned long mem_len,
4006                                 const void* lock_from, unsigned long lock_len)
4007 {
4008         return lock_from + lock_len <= mem_from ||
4009                 mem_from + mem_len <= lock_from;
4010 }
4011
4012 /*
4013  * Called when kernel memory is freed (or unmapped), or if a lock
4014  * is destroyed or reinitialized - this code checks whether there is
4015  * any held lock in the memory range [mem_from, mem_from + mem_len):
4016  */
4017 void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
4018 {
4019         struct task_struct *curr = current;
4020         struct held_lock *hlock;
4021         unsigned long flags;
4022         int i;
4023
4024         if (unlikely(!debug_locks))
4025                 return;
4026
4027         local_irq_save(flags);
4028         for (i = 0; i < curr->lockdep_depth; i++) {
4029                 hlock = curr->held_locks + i;
4030
4031                 if (not_in_range(mem_from, mem_len, hlock->instance,
4032                                         sizeof(*hlock->instance)))
4033                         continue;
4034
4035                 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
4036                 break;
4037         }
4038         local_irq_restore(flags);
4039 }
4040 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
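/*
 * Hedged sketch of the call-site shape: the slab allocators invoke this
 * from their free paths, roughly
 *
 *	debug_check_no_locks_freed(objp, obj_size(cachep));
 *
 * so freeing an object that still contains a held lock trips the
 * "held lock freed!" report above.
 */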
4041
4042 static void print_held_locks_bug(struct task_struct *curr)
4043 {
4044         if (!debug_locks_off())
4045                 return;
4046         if (debug_locks_silent)
4047                 return;
4048
4049         printk("\n");
4050         printk("=====================================\n");
4051         printk("[ BUG: lock held at task exit time! ]\n");
4052         print_kernel_ident();
4053         printk("-------------------------------------\n");
4054         printk("%s/%d is exiting with locks still held!\n",
4055                 curr->comm, task_pid_nr(curr));
4056         lockdep_print_held_locks(curr);
4057
4058         printk("\nstack backtrace:\n");
4059         dump_stack();
4060 }
4061
4062 void debug_check_no_locks_held(struct task_struct *task)
4063 {
4064         if (unlikely(task->lockdep_depth > 0))
4065                 print_held_locks_bug(task);
4066 }
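/*
 * Called from the task-exit path, roughly (simplified from
 * kernel/exit.c):
 *
 *	do_exit()
 *		...
 *		debug_check_no_locks_held(tsk);
 *
 * so a task that exits with locks still held gets the report above.
 */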
4067
4068 void debug_show_all_locks(void)
4069 {
4070         struct task_struct *g, *p;
4071         int count = 10;
4072         int unlock = 1;
4073
4074         if (unlikely(!debug_locks)) {
4075                 printk("INFO: lockdep is turned off.\n");
4076                 return;
4077         }
4078         printk("\nShowing all locks held in the system:\n");
4079
4080         /*
4081          * Here we try to get the tasklist_lock as hard as possible,
4082          * if not successful after 2 seconds we ignore it (but keep
4083          * trying). This is to enable a debug printout even if a
4084          * tasklist_lock-holding task deadlocks or crashes.
4085          */
4086 retry:
4087         if (!read_trylock(&tasklist_lock)) {
4088                 if (count == 10)
4089                         printk("hm, tasklist_lock locked, retrying... ");
4090                 if (count) {
4091                         count--;
4092                         printk(" #%d", 10-count);
4093                         mdelay(200);
4094                         goto retry;
4095                 }
4096                 printk(" ignoring it.\n");
4097                 unlock = 0;
4098         } else {
4099                 if (count != 10)
4100                         printk(KERN_CONT " locked it.\n");
4101         }
4102
4103         do_each_thread(g, p) {
4104                 /*
4105                  * It's not reliable to print a running task's
4106                  * held locks, unless that task happens to be
4107                  * the current task:
4108                  */
4109                 if (p->state == TASK_RUNNING && p != current)
4110                         continue;
4111                 if (p->lockdep_depth)
4112                         lockdep_print_held_locks(p);
4113                 if (!unlock)
4114                         if (read_trylock(&tasklist_lock))
4115                                 unlock = 1;
4116         } while_each_thread(g, p);
4117
4118         printk("\n");
4119         printk("=============================================\n\n");
4120
4121         if (unlock)
4122                 read_unlock(&tasklist_lock);
4123 }
4124 EXPORT_SYMBOL_GPL(debug_show_all_locks);
4125
4126 /*
4127  * Careful: only use this function if you are sure that
4128  * the task cannot run in parallel!
4129  */
4130 void debug_show_held_locks(struct task_struct *task)
4131 {
4132         if (unlikely(!debug_locks)) {
4133                 printk("INFO: lockdep is turned off.\n");
4134                 return;
4135         }
4136         lockdep_print_held_locks(task);
4137 }
4138 EXPORT_SYMBOL_GPL(debug_show_held_locks);
4139
4140 void lockdep_sys_exit(void)
4141 {
4142         struct task_struct *curr = current;
4143
4144         if (unlikely(curr->lockdep_depth)) {
4145                 if (!debug_locks_off())
4146                         return;
4147                 printk("\n");
4148                 printk("================================================\n");
4149                 printk("[ BUG: lock held when returning to user space! ]\n");
4150                 print_kernel_ident();
4151                 printk("------------------------------------------------\n");
4152                 printk("%s/%d is leaving the kernel with locks still held!\n",
4153                                 curr->comm, curr->pid);
4154                 lockdep_print_held_locks(curr);
4155         }
4156 }
4157
4158 void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
4159 {
4160         struct task_struct *curr = current;
4161
4162 #ifndef CONFIG_PROVE_RCU_REPEATEDLY
4163         if (!debug_locks_off())
4164                 return;
4165 #endif /* #ifndef CONFIG_PROVE_RCU_REPEATEDLY */
4166         /* Note: the following can be executed concurrently, so be careful. */
4167         printk("\n");
4168         printk("===============================\n");
4169         printk("[ INFO: suspicious RCU usage. ]\n");
4170         print_kernel_ident();
4171         printk("-------------------------------\n");
4172         printk("%s:%d %s!\n", file, line, s);
4173         printk("\nother info that might help us debug this:\n\n");
4174         printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks);
4175         lockdep_print_held_locks(curr);
4176         printk("\nstack backtrace:\n");
4177         dump_stack();
4178 }
4179 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
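/*
 * Reached through the RCU debugging checks; a hedged, simplified sketch
 * of the usual entry point (from rcupdate.h of this era):
 *
 *	#define rcu_lockdep_assert(c, s)				\
 *		do {							\
 *			if (debug_lockdep_rcu_enabled() && !(c))	\
 *				lockdep_rcu_suspicious(__FILE__,	\
 *						       __LINE__, s);	\
 *		} while (0)
 *
 * e.g. rcu_dereference_check() passes a message such as "suspicious
 * rcu_dereference_check() usage" when its condition fails.
 */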