diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 8b528e3..45eb621 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
 
 #include <linux/kmemcheck.h>
 #include <linux/kmemleak.h>
+#include <linux/memory_hotplug.h>
 
 /*
  * Kmemleak configuration and common defines.
@@ -230,8 +231,10 @@ static int kmemleak_skip_disable;
 /* kmemleak operation type for early logging */
 enum {
        KMEMLEAK_ALLOC,
+       KMEMLEAK_ALLOC_PERCPU,
        KMEMLEAK_FREE,
        KMEMLEAK_FREE_PART,
+       KMEMLEAK_FREE_PERCPU,
        KMEMLEAK_NOT_LEAK,
        KMEMLEAK_IGNORE,
        KMEMLEAK_SCAN_AREA,
@@ -797,9 +800,13 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
        unsigned long flags;
        struct early_log *log;
 
+       if (atomic_read(&kmemleak_error)) {
+               /* kmemleak stopped recording, just count the requests */
+               crt_early_log++;
+               return;
+       }
+
        if (crt_early_log >= ARRAY_SIZE(early_log)) {
-               pr_warning("Early log buffer exceeded, "
-                          "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
                kmemleak_disable();
                return;
        }
@@ -848,6 +855,20 @@ out:
        rcu_read_unlock();
 }
 
+/*
+ * Log an early allocated __percpu block: replay the recorded log entry
+ * once for each possible CPU, pointing log->ptr at that CPU's copy.
+ */
+static void early_alloc_percpu(struct early_log *log)
+{
+       unsigned int cpu;
+       const void __percpu *ptr = log->ptr;
+
+       for_each_possible_cpu(cpu) {
+               log->ptr = per_cpu_ptr(ptr, cpu);
+               early_alloc(log);
+       }
+}
+
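
For orientation, early_alloc_percpu() above consumes a slot of the static early_log[] ring that log_early() fills before kmemleak is fully initialised, replaying the single recorded entry once per CPU by temporarily repointing log->ptr. A rough sketch of the entry layout in this version of the file (reproduced for reference, not part of the patch):

	struct early_log {
		int op_type;			/* kmemleak operation type */
		const void *ptr;		/* allocated/freed memory block */
		size_t size;			/* memory block size */
		int min_count;			/* minimum reference count */
		unsigned long trace[MAX_TRACE];	/* stack trace */
		unsigned int trace_len;		/* stack trace length */
	};
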
 /**
  * kmemleak_alloc - register a newly allocated object
  * @ptr:       pointer to beginning of the object
@@ -875,6 +896,34 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
 /**
+ * kmemleak_alloc_percpu - register a newly allocated __percpu object
+ * @ptr:       __percpu pointer to beginning of the object
+ * @size:      size of the object
+ *
+ * This function is called from the kernel percpu allocator when a new object
+ * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
+ * allocation.
+ */
+void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
+{
+       unsigned int cpu;
+
+       pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
+
+       /*
+        * Percpu allocations are only scanned and not reported as leaks
+        * (min_count is set to 0).
+        */
+       if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+               for_each_possible_cpu(cpu)
+                       create_object((unsigned long)per_cpu_ptr(ptr, cpu),
+                                     size, 0, GFP_KERNEL);
+       else if (atomic_read(&kmemleak_early_log))
+               log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
+
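
A minimal usage sketch: callers never invoke kmemleak_alloc_percpu() directly; it fires inside alloc_percpu(), and each CPU's copy is registered as a separate scan-only object (min_count 0, so the areas are scanned for pointers but never themselves reported as leaks). The foo_stats structure below is hypothetical:

	/* hypothetical caller; the kmemleak hook fires inside the percpu core */
	struct foo_stats {
		u64 rx;
		u64 tx;
	};
	struct foo_stats __percpu *stats;

	stats = alloc_percpu(struct foo_stats);	/* -> kmemleak_alloc_percpu() */
	if (stats)
		this_cpu_inc(stats->rx);	/* per-CPU copies are now scanned */
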
+/**
  * kmemleak_free - unregister a previously registered object
  * @ptr:       pointer to beginning of the object
  *
@@ -913,6 +962,28 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
 /**
+ * kmemleak_free_percpu - unregister a previously registered __percpu object
+ * @ptr:       __percpu pointer to beginning of the object
+ *
+ * This function is called from the kernel percpu allocator when an object
+ * (memory block) is freed (free_percpu).
+ */
+void __ref kmemleak_free_percpu(const void __percpu *ptr)
+{
+       unsigned int cpu;
+
+       pr_debug("%s(0x%p)\n", __func__, ptr);
+
+       if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+               for_each_possible_cpu(cpu)
+                       delete_object_full((unsigned long)per_cpu_ptr(ptr,
+                                                                     cpu));
+       else if (atomic_read(&kmemleak_early_log))
+               log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
+}
+EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
+
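
And the matching teardown for the sketch above: free_percpu() calls this hook, so every per-CPU object registered at allocation time is deleted again.

	free_percpu(stats);	/* -> kmemleak_free_percpu() drops all CPUs' objects */
	stats = NULL;
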
+/**
  * kmemleak_not_leak - mark an allocated object as false positive
  * @ptr:       pointer to beginning of the object
  *
@@ -965,7 +1036,7 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
        pr_debug("%s(0x%p)\n", __func__, ptr);
 
-       if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+       if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
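
The added size check matters because a zero-length scan area is useless to add_scan_area(); with this change such a request is simply dropped once kmemleak is enabled. For context, a hedged sketch of the intended use, with a hypothetical object layout:

	/* hypothetical: only the header holds pointers, so restrict
	 * scanning to it rather than the whole (mostly binary) object */
	struct blob {
		struct list_head link;	/* pointer-bearing header */
		u8 payload[4096];	/* no pointers in here */
	};

	struct blob *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (b)
		kmemleak_scan_area(&b->link, sizeof(b->link), GFP_KERNEL);
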
@@ -1222,9 +1293,9 @@ static void kmemleak_scan(void)
 #endif
 
        /*
-        * Struct page scanning for each node. The code below is not yet safe
-        * with MEMORY_HOTPLUG.
+        * Struct page scanning for each node.
         */
+       lock_memory_hotplug();
        for_each_online_node(i) {
                pg_data_t *pgdat = NODE_DATA(i);
                unsigned long start_pfn = pgdat->node_start_pfn;
@@ -1243,6 +1314,7 @@ static void kmemleak_scan(void)
                        scan_block(page, page + 1, NULL, 1);
                }
        }
+       unlock_memory_hotplug();
 
        /*
         * Scanning the task stacks (may introduce false negatives).
@@ -1469,9 +1541,6 @@ static const struct seq_operations kmemleak_seq_ops = {
 
 static int kmemleak_open(struct inode *inode, struct file *file)
 {
-       if (!atomic_read(&kmemleak_enabled))
-               return -EBUSY;
-
        return seq_open(file, &kmemleak_seq_ops);
 }
 
@@ -1545,6 +1614,9 @@ static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
        int buf_size;
        int ret;
 
+       if (!atomic_read(&kmemleak_enabled))
+               return -EBUSY;
+
        buf_size = min(size, (sizeof(buf) - 1));
        if (strncpy_from_user(buf, user_buf, buf_size) < 0)
                return -EFAULT;
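
Moving the -EBUSY check from kmemleak_open() down into kmemleak_write() means the leak report stays readable after kmemleak disables itself; only the control commands are refused. A hypothetical userspace probe of that behaviour:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		int fd = open("/sys/kernel/debug/kmemleak", O_RDWR);

		if (fd < 0)
			return 1;
		read(fd, buf, sizeof(buf));	/* works even after kmemleak is disabled */
		if (write(fd, "scan", 4) < 0)
			perror("scan");		/* -EBUSY once kmemleak is disabled */
		close(fd);
		return 0;
	}
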
@@ -1604,20 +1676,24 @@ static const struct file_operations kmemleak_fops = {
 };
 
 /*
- * Perform the freeing of the kmemleak internal objects after waiting for any
- * current memory scan to complete.
+ * Stop the memory scanning thread and, if no scan thread was active,
+ * free the kmemleak internal objects (otherwise kmemleak may still hold
+ * useful information about the memory leaks found so far).
  */
 static void kmemleak_do_cleanup(struct work_struct *work)
 {
        struct kmemleak_object *object;
+       bool cleanup = scan_thread == NULL;
 
        mutex_lock(&scan_mutex);
        stop_scan_thread();
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(object, &object_list, object_list)
-               delete_object_full(object->pointer);
-       rcu_read_unlock();
+       if (cleanup) {
+               rcu_read_lock();
+               list_for_each_entry_rcu(object, &object_list, object_list)
+                       delete_object_full(object->pointer);
+               rcu_read_unlock();
+       }
        mutex_unlock(&scan_mutex);
 }
 
@@ -1634,7 +1710,6 @@ static void kmemleak_disable(void)
                return;
 
        /* stop any memory operation tracing */
-       atomic_set(&kmemleak_early_log, 0);
        atomic_set(&kmemleak_enabled, 0);
 
        /* check whether it is too early for a kernel thread */
@@ -1682,6 +1757,7 @@ void __init kmemleak_init(void)
 
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
        if (!kmemleak_skip_disable) {
+               atomic_set(&kmemleak_early_log, 0);
                kmemleak_disable();
                return;
        }
@@ -1694,12 +1770,18 @@ void __init kmemleak_init(void)
        scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
        INIT_PRIO_TREE_ROOT(&object_tree_root);
 
+       if (crt_early_log >= ARRAY_SIZE(early_log))
+               pr_warning("Early log buffer exceeded (%d), please increase "
+                          "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
+
        /* the kernel is still in UP mode, so disabling the IRQs is enough */
        local_irq_save(flags);
-       if (!atomic_read(&kmemleak_error)) {
+       atomic_set(&kmemleak_early_log, 0);
+       if (atomic_read(&kmemleak_error)) {
+               local_irq_restore(flags);
+               return;
+       } else
                atomic_set(&kmemleak_enabled, 1);
-               atomic_set(&kmemleak_early_log, 0);
-       }
        local_irq_restore(flags);
 
        /*
@@ -1714,12 +1796,18 @@ void __init kmemleak_init(void)
                case KMEMLEAK_ALLOC:
                        early_alloc(log);
                        break;
+               case KMEMLEAK_ALLOC_PERCPU:
+                       early_alloc_percpu(log);
+                       break;
                case KMEMLEAK_FREE:
                        kmemleak_free(log->ptr);
                        break;
                case KMEMLEAK_FREE_PART:
                        kmemleak_free_part(log->ptr, log->size);
                        break;
+               case KMEMLEAK_FREE_PERCPU:
+                       kmemleak_free_percpu(log->ptr);
+                       break;
                case KMEMLEAK_NOT_LEAK:
                        kmemleak_not_leak(log->ptr);
                        break;
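
For orientation, the new switch cases sit in the early-log replay loop at the end of kmemleak_init(); abridged from this file, roughly:

	/* replay the operations recorded before kmemleak was initialised */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			early_alloc(log);
			break;
		/* ... the remaining cases shown in the hunk above ... */
		}
	}

Note the loop is never reached with an overflowed crt_early_log: when the buffer was exceeded, kmemleak_disable() set kmemleak_error, and kmemleak_init() already returned before the replay.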