Update to 3.4-final.
index c952fac..80848cd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2,10 +2,11 @@
  * SLUB: A slab allocator that limits cache line use instead of queuing
  * objects in per cpu and per node lists.
  *
- * The allocator synchronizes using per slab locks and only
- * uses a centralized lock to manage a pool of partial slabs.
+ * The allocator synchronizes using per slab locks or atomic operations
+ * and only uses a centralized lock to manage a pool of partial slabs.
  *
  * (C) 2007 SGI, Christoph Lameter
+ * (C) 2011 Linux Foundation, Christoph Lameter
  */
 
 #include <linux/mm.h>
 #include <linux/memory.h>
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
+#include <linux/stacktrace.h>
+#include <linux/prefetch.h>
 
 #include <trace/events/kmem.h>
 
 /*
  * Lock order:
- *   1. slab_lock(page)
- *   2. slab->list_lock
+ *   1. slub_lock (Global Semaphore)
+ *   2. node->list_lock
+ *   3. slab_lock(page) (Only on some arches and for debugging)
  *
- *   The slab_lock protects operations on the object of a particular
- *   slab and its metadata in the page struct. If the slab lock
- *   has been taken then no allocations nor frees can be performed
- *   on the objects in the slab nor can the slab be added or removed
- *   from the partial or full lists since this would mean modifying
- *   the page_struct of the slab.
+ *   slub_lock
+ *
+ *   The role of the slub_lock is to protect the list of all the slabs
+ *   and to synchronize major metadata changes to slab cache structures.
+ *
+ *   The slab_lock is only used for debugging and on arches that do not
+ *   have the ability to do a cmpxchg_double. It only protects the second
+ *   double word in the page struct. Meaning
+ *     A. page->freelist       -> List of free objects in a page
+ *     B. page->counters       -> Counters of objects
+ *     C. page->frozen         -> frozen state
+ *
+ *   If a slab is frozen then it is exempt from list management. It is not
+ *   on any list. The processor that froze the slab is the one who can
+ *   perform list operations on the page. Other processors may put objects
+ *   onto the freelist but the processor that froze the slab is the only
+ *   one that can retrieve the objects from the page's freelist.
  *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists, nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock.)
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
- *
- *   The lock order is sometimes inverted when we are trying to get a slab
- *   off a list. We take the list_lock and then look for a page on the list
- *   to use. While we do that objects in the slabs may be freed. We can
- *   only operate on the slab if we have also taken the slab_lock. So we use
- *   a slab_trylock() on the slab. If trylock was successful then no frees
- *   can occur anymore and we can use the slab for allocations etc. If the
- *   slab_trylock() does not succeed then frees are in progress in the slab and
- *   we must stay away from it for a while since we may cause a bouncing
- *   cacheline if we try to acquire the lock. So go onto the next slab.
- *   If all pages are busy then we may allocate a new slab instead of reusing
- *   a partial slab. A new slab has noone operating on it and thus there is
- *   no danger of cacheline contention.
- *
  *   Interrupts are disabled during allocation and deallocation in order to
  *   make the slab allocator safe to use in the context of an irq. In addition
  *   interrupts are disabled to ensure that the processor does not change
@@ -131,6 +132,9 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
+/* Enable to log cmpxchg failures */
+#undef SLUB_DEBUG_CMPXCHG
+
 /*
 * Minimum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -166,10 +170,11 @@ static inline int kmem_cache_debug(struct kmem_cache *s)
 
 #define OO_SHIFT       16
 #define OO_MASK                ((1 << OO_SHIFT) - 1)
-#define MAX_OBJS_PER_PAGE      65535 /* since page.objects is u16 */
+#define MAX_OBJS_PER_PAGE      32767 /* since page.objects is u15 */
 
 /* Internal SLUB flags */
 #define __OBJECT_POISON                0x80000000UL /* Poison object */
+#define __CMPXCHG_DOUBLE       0x40000000UL /* Use cmpxchg_double */
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -191,8 +196,12 @@ static LIST_HEAD(slab_caches);
 /*
  * Tracking user of a slab.
  */
+#define TRACK_ADDRS_COUNT 16
 struct track {
        unsigned long addr;     /* Called from address */
+#ifdef CONFIG_STACKTRACE
+       unsigned long addrs[TRACK_ADDRS_COUNT]; /* Called from address */
+#endif
        int cpu;                /* Was running on cpu */
        int pid;                /* Pid context */
        unsigned long when;     /* When did the operation occur */
@@ -261,6 +270,23 @@ static inline void *get_freepointer(struct kmem_cache *s, void *object)
        return *(void **)(object + s->offset);
 }
 
+static void prefetch_freepointer(const struct kmem_cache *s, void *object)
+{
+       prefetch(object + s->offset);
+}
+
+static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
+{
+       void *p;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+       probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
+#else
+       p = get_freepointer(s, object);
+#endif
+       return p;
+}
+
 static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 {
        *(void **)(object + s->offset) = fp;
@@ -327,9 +353,100 @@ static inline int oo_objects(struct kmem_cache_order_objects x)
 }
 
 /*
+ * Per slab locking using the pagelock
+ */
+static __always_inline void slab_lock(struct page *page)
+{
+       bit_spin_lock(PG_locked, &page->flags);
+}
+
+static __always_inline void slab_unlock(struct page *page)
+{
+       __bit_spin_unlock(PG_locked, &page->flags);
+}
+
+/* Interrupts must be disabled (for the fallback code to work right) */
+static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+               void *freelist_old, unsigned long counters_old,
+               void *freelist_new, unsigned long counters_new,
+               const char *n)
+{
+       VM_BUG_ON(!irqs_disabled());
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+       if (s->flags & __CMPXCHG_DOUBLE) {
+               if (cmpxchg_double(&page->freelist, &page->counters,
+                       freelist_old, counters_old,
+                       freelist_new, counters_new))
+               return 1;
+       } else
+#endif
+       {
+               slab_lock(page);
+               if (page->freelist == freelist_old && page->counters == counters_old) {
+                       page->freelist = freelist_new;
+                       page->counters = counters_new;
+                       slab_unlock(page);
+                       return 1;
+               }
+               slab_unlock(page);
+       }
+
+       cpu_relax();
+       stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+       printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+       return 0;
+}
+
+static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
+               void *freelist_old, unsigned long counters_old,
+               void *freelist_new, unsigned long counters_new,
+               const char *n)
+{
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+       if (s->flags & __CMPXCHG_DOUBLE) {
+               if (cmpxchg_double(&page->freelist, &page->counters,
+                       freelist_old, counters_old,
+                       freelist_new, counters_new))
+               return 1;
+       } else
+#endif
+       {
+               unsigned long flags;
+
+               local_irq_save(flags);
+               slab_lock(page);
+               if (page->freelist == freelist_old && page->counters == counters_old) {
+                       page->freelist = freelist_new;
+                       page->counters = counters_new;
+                       slab_unlock(page);
+                       local_irq_restore(flags);
+                       return 1;
+               }
+               slab_unlock(page);
+               local_irq_restore(flags);
+       }
+
+       cpu_relax();
+       stat(s, CMPXCHG_DOUBLE_FAIL);
+
+#ifdef SLUB_DEBUG_CMPXCHG
+       printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
+#endif
+
+       return 0;
+}
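
The two helpers above are the core synchronization primitive of this rework: page->freelist and page->counters (which packs inuse, objects and frozen, as the header comment describes) are treated as one double word that is either swapped atomically with cmpxchg_double or compared and replaced under the per-page bit lock. Below is a minimal userspace sketch of that compare-both/swap-both fallback and of the caller-side retry loop; the names are hypothetical and a pthread mutex stands in for slab_lock().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the double word in struct page: the freelist
 * pointer plus a counters word overlaying inuse/objects/frozen. */
struct fake_slab {
	void *freelist;
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	pthread_mutex_t lock;		/* plays the role of slab_lock(page) */
};

/* Fallback path: compare both words under the lock, replace both or fail. */
static bool fake_cmpxchg_double(struct fake_slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new)
{
	bool ok = false;

	pthread_mutex_lock(&slab->lock);
	if (slab->freelist == freelist_old && slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ok = true;
	}
	pthread_mutex_unlock(&slab->lock);
	return ok;
}

int main(void)
{
	static int object;		/* pretend this is one free object */
	struct fake_slab slab = { .freelist = &object,
				  .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_slab new;
	void *freelist;
	unsigned long counters;

	/* Caller pattern as in acquire_slab(): snapshot, compute, retry. */
	do {
		freelist = slab.freelist;
		counters = slab.counters;
		new.counters = counters;
		new.frozen = 1;		/* zap the freelist and freeze the slab */
	} while (!fake_cmpxchg_double(&slab, freelist, counters,
				      NULL, new.counters));

	printf("took freelist %p, slab frozen=%u\n", freelist, slab.frozen);
	return 0;
}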
+
+#ifdef CONFIG_SLUB_DEBUG
+/*
  * Determine a map of object in use on a page.
  *
- * Slab lock or node listlock must be held to guarantee that the page does
+ * Node listlock must be held to guarantee that the page does
  * not vanish from under us.
  */
 static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
@@ -341,7 +458,6 @@ static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
                set_bit(slab_index(p, s, addr), map);
 }
 
-#ifdef CONFIG_SLUB_DEBUG
 /*
  * Debug settings:
  */
@@ -359,34 +475,8 @@ static int disable_higher_order_debug;
  */
 static void print_section(char *text, u8 *addr, unsigned int length)
 {
-       int i, offset;
-       int newline = 1;
-       char ascii[17];
-
-       ascii[16] = 0;
-
-       for (i = 0; i < length; i++) {
-               if (newline) {
-                       printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
-                       newline = 0;
-               }
-               printk(KERN_CONT " %02x", addr[i]);
-               offset = i % 16;
-               ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
-               if (offset == 15) {
-                       printk(KERN_CONT " %s\n", ascii);
-                       newline = 1;
-               }
-       }
-       if (!newline) {
-               i %= 16;
-               while (i < 16) {
-                       printk(KERN_CONT "   ");
-                       ascii[i] = ' ';
-                       i++;
-               }
-               printk(KERN_CONT " %s\n", ascii);
-       }
+       print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+                       length, 1);
 }
 
 static struct track *get_track(struct kmem_cache *s, void *object,
@@ -408,6 +498,24 @@ static void set_track(struct kmem_cache *s, void *object,
        struct track *p = get_track(s, object, alloc);
 
        if (addr) {
+#ifdef CONFIG_STACKTRACE
+               struct stack_trace trace;
+               int i;
+
+               trace.nr_entries = 0;
+               trace.max_entries = TRACK_ADDRS_COUNT;
+               trace.entries = p->addrs;
+               trace.skip = 3;
+               save_stack_trace(&trace);
+
+               /* See rant in lockdep.c */
+               if (trace.nr_entries != 0 &&
+                   trace.entries[trace.nr_entries - 1] == ULONG_MAX)
+                       trace.nr_entries--;
+
+               for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
+                       p->addrs[i] = 0;
+#endif
                p->addr = addr;
                p->cpu = smp_processor_id();
                p->pid = current->pid;
@@ -432,6 +540,16 @@ static void print_track(const char *s, struct track *t)
 
        printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
                s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
+#ifdef CONFIG_STACKTRACE
+       {
+               int i;
+               for (i = 0; i < TRACK_ADDRS_COUNT; i++)
+                       if (t->addrs[i])
+                               printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
+                       else
+                               break;
+       }
+#endif
 }
 
 static void print_tracking(struct kmem_cache *s, void *object)
@@ -460,7 +578,7 @@ static void slab_bug(struct kmem_cache *s, char *fmt, ...)
        va_end(args);
        printk(KERN_ERR "========================================"
                        "=====================================\n");
-       printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
+       printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
        printk(KERN_ERR "----------------------------------------"
                        "-------------------------------------\n\n");
 }
@@ -489,12 +607,12 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
                        p, p - addr, get_freepointer(s, p));
 
        if (p > addr + 16)
-               print_section("Bytes b4", p - 16, 16);
-
-       print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
+               print_section("Bytes b4 ", p - 16, 16);
 
+       print_section("Object ", p, min_t(unsigned long, s->objsize,
+                               PAGE_SIZE));
        if (s->flags & SLAB_RED_ZONE)
-               print_section("Redzone", p + s->objsize,
+               print_section("Redzone ", p + s->objsize,
                        s->inuse - s->objsize);
 
        if (s->offset)
@@ -507,7 +625,7 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
 
        if (off != s->size)
                /* Beginning of the filler is the free pointer */
-               print_section("Padding", p + off, s->size - off);
+               print_section("Padding ", p + off, s->size - off);
 
        dump_stack();
 }
@@ -545,17 +663,6 @@ static void init_object(struct kmem_cache *s, void *object, u8 val)
                memset(p + s->objsize, val, s->inuse - s->objsize);
 }
 
-static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
-{
-       while (bytes) {
-               if (*start != (u8)value)
-                       return start;
-               start++;
-               bytes--;
-       }
-       return NULL;
-}
-
 static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
                                                void *from, void *to)
 {
@@ -570,7 +677,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
        u8 *fault;
        u8 *end;
 
-       fault = check_bytes(start, value, bytes);
+       fault = memchr_inv(start, value, bytes);
        if (!fault)
                return 1;
 
@@ -663,14 +770,14 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
        if (!remainder)
                return 1;
 
-       fault = check_bytes(end - remainder, POISON_INUSE, remainder);
+       fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
        if (!fault)
                return 1;
        while (end > fault && end[-1] == POISON_INUSE)
                end--;
 
        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-       print_section("Padding", end - remainder, remainder);
+       print_section("Padding ", end - remainder, remainder);
 
        restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
        return 0;
@@ -761,10 +868,11 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 {
        int nr = 0;
-       void *fp = page->freelist;
+       void *fp;
        void *object = NULL;
        unsigned long max_objects;
 
+       fp = page->freelist;
        while (fp && nr <= page->objects) {
                if (fp == search)
                        return 1;
@@ -818,7 +926,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
                        page->freelist);
 
                if (!alloc)
-                       print_section("Object", (void *)object, s->objsize);
+                       print_section("Object ", (void *)object, s->objsize);
 
                dump_stack();
        }
@@ -869,26 +977,27 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
 
 /*
  * Tracking of fully allocated slabs for debugging purposes.
+ *
+ * list_lock must be held.
  */
-static void add_full(struct kmem_cache_node *n, struct page *page)
+static void add_full(struct kmem_cache *s,
+       struct kmem_cache_node *n, struct page *page)
 {
-       spin_lock(&n->list_lock);
+       if (!(s->flags & SLAB_STORE_USER))
+               return;
+
        list_add(&page->lru, &n->full);
-       spin_unlock(&n->list_lock);
 }
 
+/*
+ * list_lock must be held.
+ */
 static void remove_full(struct kmem_cache *s, struct page *page)
 {
-       struct kmem_cache_node *n;
-
        if (!(s->flags & SLAB_STORE_USER))
                return;
 
-       n = get_node(s, page_to_nid(page));
-
-       spin_lock(&n->list_lock);
        list_del(&page->lru);
-       spin_unlock(&n->list_lock);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -944,11 +1053,6 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *pa
        if (!check_slab(s, page))
                goto bad;
 
-       if (!on_freelist(s, page, object)) {
-               object_err(s, page, object, "Object already allocated");
-               goto bad;
-       }
-
        if (!check_valid_pointer(s, page, object)) {
                object_err(s, page, object, "Freelist Pointer check fails");
                goto bad;
@@ -981,6 +1085,12 @@ bad:
 static noinline int free_debug_processing(struct kmem_cache *s,
                 struct page *page, void *object, unsigned long addr)
 {
+       unsigned long flags;
+       int rc = 0;
+
+       local_irq_save(flags);
+       slab_lock(page);
+
        if (!check_slab(s, page))
                goto fail;
 
@@ -995,7 +1105,7 @@ static noinline int free_debug_processing(struct kmem_cache *s,
        }
 
        if (!check_object(s, page, object, SLUB_RED_ACTIVE))
-               return 0;
+               goto out;
 
        if (unlikely(s != page->slab)) {
                if (!PageSlab(page)) {
@@ -1012,18 +1122,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
                goto fail;
        }
 
-       /* Special debug activities for freeing objects */
-       if (!PageSlubFrozen(page) && !page->freelist)
-               remove_full(s, page);
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
        trace(s, page, object, 0);
        init_object(s, object, SLUB_RED_INACTIVE);
-       return 1;
+       rc = 1;
+out:
+       slab_unlock(page);
+       local_irq_restore(flags);
+       return rc;
 
 fail:
        slab_fix(s, "Object at 0x%p not freed", object);
-       return 0;
+       goto out;
 }
 
 static int __init setup_slub_debug(char *str)
@@ -1123,7 +1234,9 @@ static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                        { return 1; }
 static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, u8 val) { return 1; }
-static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
+static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
+                                       struct page *page) {}
+static inline void remove_full(struct kmem_cache *s, struct page *page) {}
 static inline unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
@@ -1175,6 +1288,11 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        struct kmem_cache_order_objects oo = s->oo;
        gfp_t alloc_gfp;
 
+       flags &= gfp_allowed_mask;
+
+       if (flags & __GFP_WAIT)
+               local_irq_enable();
+
        flags |= s->allocflags;
 
        /*
@@ -1191,12 +1309,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
                 * Try a lower order alloc if possible
                 */
                page = alloc_slab_page(flags, node, oo);
-               if (!page)
-                       return NULL;
 
-               stat(s, ORDER_FALLBACK);
+               if (page)
+                       stat(s, ORDER_FALLBACK);
        }
 
+       if (flags & __GFP_WAIT)
+               local_irq_disable();
+
+       if (!page)
+               return NULL;
+
        if (kmemcheck_enabled
                && !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
                int pages = 1 << oo_order(oo);
@@ -1263,7 +1386,8 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        set_freepointer(s, last, NULL);
 
        page->freelist = start;
-       page->inuse = 0;
+       page->inuse = page->objects;
+       page->frozen = 1;
 out:
        return page;
 }
@@ -1341,79 +1465,80 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
 }
 
 /*
- * Per slab locking using the pagelock
- */
-static __always_inline void slab_lock(struct page *page)
-{
-       bit_spin_lock(PG_locked, &page->flags);
-}
-
-static __always_inline void slab_unlock(struct page *page)
-{
-       __bit_spin_unlock(PG_locked, &page->flags);
-}
-
-static __always_inline int slab_trylock(struct page *page)
-{
-       int rc = 1;
-
-       rc = bit_spin_trylock(PG_locked, &page->flags);
-       return rc;
-}
-
-/*
- * Management of partially allocated slabs
+ * Management of partially allocated slabs.
+ *
+ * list_lock must be held.
  */
-static void add_partial(struct kmem_cache_node *n,
+static inline void add_partial(struct kmem_cache_node *n,
                                struct page *page, int tail)
 {
-       spin_lock(&n->list_lock);
        n->nr_partial++;
-       if (tail)
+       if (tail == DEACTIVATE_TO_TAIL)
                list_add_tail(&page->lru, &n->partial);
        else
                list_add(&page->lru, &n->partial);
-       spin_unlock(&n->list_lock);
 }
 
-static inline void __remove_partial(struct kmem_cache_node *n,
+/*
+ * list_lock must be held.
+ */
+static inline void remove_partial(struct kmem_cache_node *n,
                                        struct page *page)
 {
        list_del(&page->lru);
        n->nr_partial--;
 }
 
-static void remove_partial(struct kmem_cache *s, struct page *page)
-{
-       struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-       spin_lock(&n->list_lock);
-       __remove_partial(n, page);
-       spin_unlock(&n->list_lock);
-}
-
 /*
- * Lock slab and remove from the partial list.
+ * Lock slab, remove from the partial list and put the object into the
+ * per cpu freelist.
+ *
+ * Returns a list of objects or NULL if it fails.
  *
  * Must hold list_lock.
  */
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
-                                                       struct page *page)
+static inline void *acquire_slab(struct kmem_cache *s,
+               struct kmem_cache_node *n, struct page *page,
+               int mode)
 {
-       if (slab_trylock(page)) {
-               __remove_partial(n, page);
-               __SetPageSlubFrozen(page);
-               return 1;
-       }
-       return 0;
+       void *freelist;
+       unsigned long counters;
+       struct page new;
+
+       /*
+        * Zap the freelist and set the frozen bit.
+        * The old freelist is the list of objects for the
+        * per cpu allocation list.
+        */
+       do {
+               freelist = page->freelist;
+               counters = page->counters;
+               new.counters = counters;
+               if (mode)
+                       new.inuse = page->objects;
+
+               VM_BUG_ON(new.frozen);
+               new.frozen = 1;
+
+       } while (!__cmpxchg_double_slab(s, page,
+                       freelist, counters,
+                       NULL, new.counters,
+                       "lock and freeze"));
+
+       remove_partial(n, page);
+       return freelist;
 }
 
+static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+
 /*
  * Try to allocate a partial slab from a specific node.
  */
-static struct page *get_partial_node(struct kmem_cache_node *n)
+static void *get_partial_node(struct kmem_cache *s,
+               struct kmem_cache_node *n, struct kmem_cache_cpu *c)
 {
-       struct page *page;
+       struct page *page, *page2;
+       void *object = NULL;
 
        /*
         * Racy check. If we mistakenly see no partial slabs then we
@@ -1425,26 +1550,45 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
                return NULL;
 
        spin_lock(&n->list_lock);
-       list_for_each_entry(page, &n->partial, lru)
-               if (lock_and_freeze_slab(n, page))
-                       goto out;
-       page = NULL;
-out:
+       list_for_each_entry_safe(page, page2, &n->partial, lru) {
+               void *t = acquire_slab(s, n, page, object == NULL);
+               int available;
+
+               if (!t)
+                       break;
+
+               if (!object) {
+                       c->page = page;
+                       c->node = page_to_nid(page);
+                       stat(s, ALLOC_FROM_PARTIAL);
+                       object = t;
+                       available =  page->objects - page->inuse;
+               } else {
+                       page->freelist = t;
+                       available = put_cpu_partial(s, page, 0);
+                       stat(s, CPU_PARTIAL_NODE);
+               }
+               if (kmem_cache_debug(s) || available > s->cpu_partial / 2)
+                       break;
+
+       }
        spin_unlock(&n->list_lock);
-       return page;
+       return object;
 }
 
 /*
  * Get a page from somewhere. Search in increasing NUMA distances.
  */
-static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
+static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags,
+               struct kmem_cache_cpu *c)
 {
 #ifdef CONFIG_NUMA
        struct zonelist *zonelist;
        struct zoneref *z;
        struct zone *zone;
        enum zone_type high_zoneidx = gfp_zone(flags);
-       struct page *page;
+       void *object;
+       unsigned int cpuset_mems_cookie;
 
        /*
         * The defrag ratio allows a configuration of the tradeoffs between
@@ -1468,23 +1612,32 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
                return NULL;
 
-       get_mems_allowed();
-       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-               struct kmem_cache_node *n;
-
-               n = get_node(s, zone_to_nid(zone));
-
-               if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-                               n->nr_partial > s->min_partial) {
-                       page = get_partial_node(n);
-                       if (page) {
-                               put_mems_allowed();
-                               return page;
+       do {
+               cpuset_mems_cookie = get_mems_allowed();
+               zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+                       struct kmem_cache_node *n;
+
+                       n = get_node(s, zone_to_nid(zone));
+
+                       if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+                                       n->nr_partial > s->min_partial) {
+                               object = get_partial_node(s, n, c);
+                               if (object) {
+                                       /*
+                                        * Return the object even if
+                                        * put_mems_allowed indicated that
+                                        * the cpuset mems_allowed was
+                                        * updated in parallel. It's a
+                                        * harmless race between the alloc
+                                        * and the cpuset update.
+                                        */
+                                       put_mems_allowed(cpuset_mems_cookie);
+                                       return object;
+                               }
                        }
                }
-       }
-       put_mems_allowed();
+       } while (!put_mems_allowed(cpuset_mems_cookie));
 #endif
        return NULL;
 }
@@ -1492,66 +1645,19 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 /*
  * Get a partial page, lock it and return it.
  */
-static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
+static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
+               struct kmem_cache_cpu *c)
 {
-       struct page *page;
+       void *object;
        int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
 
-       page = get_partial_node(get_node(s, searchnode));
-       if (page || node != NUMA_NO_NODE)
-               return page;
+       object = get_partial_node(s, get_node(s, searchnode), c);
+       if (object || node != NUMA_NO_NODE)
+               return object;
 
-       return get_any_partial(s, flags);
+       return get_any_partial(s, flags, c);
 }
 
-/*
- * Move a page back to the lists.
- *
- * Must be called with the slab lock held.
- *
- * On exit the slab lock will have been dropped.
- */
-static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
-       __releases(bitlock)
-{
-       struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-
-       __ClearPageSlubFrozen(page);
-       if (page->inuse) {
-
-               if (page->freelist) {
-                       add_partial(n, page, tail);
-                       stat(s, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
-               } else {
-                       stat(s, DEACTIVATE_FULL);
-                       if (kmem_cache_debug(s) && (s->flags & SLAB_STORE_USER))
-                               add_full(n, page);
-               }
-               slab_unlock(page);
-       } else {
-               stat(s, DEACTIVATE_EMPTY);
-               if (n->nr_partial < s->min_partial) {
-                       /*
-                        * Adding an empty slab to the partial slabs in order
-                        * to avoid page allocator overhead. This slab needs
-                        * to come after the other slabs with objects in
-                        * so that the others get filled first. That way the
-                        * size of the partial list stays small.
-                        *
-                        * kmem_cache_shrink can reclaim any empty slabs from
-                        * the partial list.
-                        */
-                       add_partial(n, page, 1);
-                       slab_unlock(page);
-               } else {
-                       slab_unlock(page);
-                       stat(s, FREE_SLAB);
-                       discard_slab(s, page);
-               }
-       }
-}
-
-#ifdef CONFIG_CMPXCHG_LOCAL
 #ifdef CONFIG_PREEMPT
 /*
 * Calculate the next globally unique transaction for disambiguation
@@ -1611,59 +1717,297 @@ static inline void note_cmpxchg_failure(const char *n,
        stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
 }
 
-#endif
-
 void init_kmem_cache_cpus(struct kmem_cache *s)
 {
-#ifdef CONFIG_CMPXCHG_LOCAL
        int cpu;
 
        for_each_possible_cpu(cpu)
                per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
-#endif
-
 }
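
The tid initialized here is the second half of the lockless fastpath: pairing the freelist pointer with a transaction id lets the double cmpxchg also detect an intervening allocation or free, even after preemption moved the task to another cpu. A sketch of the id scheme mirroring the init_tid()/next_tid() helpers follows; the TID_STEP value is an assumption (the kernel derives it from CONFIG_NR_CPUS under CONFIG_PREEMPT and uses 1 otherwise), and the last two helpers are debug-style conveniences.

#define TID_STEP 64	/* assumed: power of two >= number of possible cpus */

/* Each cpu starts at its own number ... */
static inline unsigned long init_tid(int cpu)
{
	return cpu;
}

/* ... and every fastpath operation bumps the id by TID_STEP, so a tid stays
 * congruent to its owning cpu and never repeats on that cpu. */
static inline unsigned long next_tid(unsigned long tid)
{
	return tid + TID_STEP;
}

static inline unsigned int tid_to_cpu(unsigned long tid)
{
	return tid % TID_STEP;
}

static inline unsigned long tid_to_event(unsigned long tid)
{
	return tid / TID_STEP;
}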
+
 /*
  * Remove the cpu slab
  */
 static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
-       __releases(bitlock)
 {
+       enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
        struct page *page = c->page;
-       int tail = 1;
-
-       if (page->freelist)
+       struct kmem_cache_node *n = get_node(s, page_to_nid(page));
+       int lock = 0;
+       enum slab_modes l = M_NONE, m = M_NONE;
+       void *freelist;
+       void *nextfree;
+       int tail = DEACTIVATE_TO_HEAD;
+       struct page new;
+       struct page old;
+
+       if (page->freelist) {
                stat(s, DEACTIVATE_REMOTE_FREES);
+               tail = DEACTIVATE_TO_TAIL;
+       }
+
+       c->tid = next_tid(c->tid);
+       c->page = NULL;
+       freelist = c->freelist;
+       c->freelist = NULL;
+
        /*
-        * Merge cpu freelist into slab freelist. Typically we get here
-        * because both freelists are empty. So this is unlikely
-        * to occur.
+        * Stage one: Free all available per cpu objects back
+        * to the page freelist while it is still frozen. Leave the
+        * last one.
+        *
+        * There is no need to take the list->lock because the page
+        * is still frozen.
         */
-       while (unlikely(c->freelist)) {
-               void **object;
+       while (freelist && (nextfree = get_freepointer(s, freelist))) {
+               void *prior;
+               unsigned long counters;
+
+               do {
+                       prior = page->freelist;
+                       counters = page->counters;
+                       set_freepointer(s, freelist, prior);
+                       new.counters = counters;
+                       new.inuse--;
+                       VM_BUG_ON(!new.frozen);
+
+               } while (!__cmpxchg_double_slab(s, page,
+                       prior, counters,
+                       freelist, new.counters,
+                       "drain percpu freelist"));
+
+               freelist = nextfree;
+       }
+
+       /*
+        * Stage two: Ensure that the page is unfrozen while the
+        * list presence reflects the actual number of objects
+        * during unfreeze.
+        *
+        * We setup the list membership and then perform a cmpxchg
+        * with the count. If there is a mismatch then the page
+        * is not unfrozen but the page is on the wrong list.
+        *
+        * Then we restart the process which may have to remove
+        * the page from the list that we just put it on again
+        * because the number of objects in the slab may have
+        * changed.
+        */
+redo:
+
+       old.freelist = page->freelist;
+       old.counters = page->counters;
+       VM_BUG_ON(!old.frozen);
 
-               tail = 0;       /* Hot objects. Put the slab first */
+       /* Determine target state of the slab */
+       new.counters = old.counters;
+       if (freelist) {
+               new.inuse--;
+               set_freepointer(s, freelist, old.freelist);
+               new.freelist = freelist;
+       } else
+               new.freelist = old.freelist;
 
-               /* Retrieve object from cpu_freelist */
-               object = c->freelist;
-               c->freelist = get_freepointer(s, c->freelist);
+       new.frozen = 0;
 
-               /* And put onto the regular freelist */
-               set_freepointer(s, object, page->freelist);
-               page->freelist = object;
-               page->inuse--;
+       if (!new.inuse && n->nr_partial > s->min_partial)
+               m = M_FREE;
+       else if (new.freelist) {
+               m = M_PARTIAL;
+               if (!lock) {
+                       lock = 1;
+                       /*
+                        * Taking the spinlock removes the possibility
+                        * that acquire_slab() will see a slab page that
+                        * is frozen
+                        */
+                       spin_lock(&n->list_lock);
+               }
+       } else {
+               m = M_FULL;
+               if (kmem_cache_debug(s) && !lock) {
+                       lock = 1;
+                       /*
+                        * This also ensures that the scanning of full
+                        * slabs from diagnostic functions will not see
+                        * any frozen slabs.
+                        */
+                       spin_lock(&n->list_lock);
+               }
+       }
+
+       if (l != m) {
+
+               if (l == M_PARTIAL)
+
+                       remove_partial(n, page);
+
+               else if (l == M_FULL)
+
+                       remove_full(s, page);
+
+               if (m == M_PARTIAL) {
+
+                       add_partial(n, page, tail);
+                       stat(s, tail);
+
+               } else if (m == M_FULL) {
+
+                       stat(s, DEACTIVATE_FULL);
+                       add_full(s, n, page);
+
+               }
+       }
+
+       l = m;
+       if (!__cmpxchg_double_slab(s, page,
+                               old.freelist, old.counters,
+                               new.freelist, new.counters,
+                               "unfreezing slab"))
+               goto redo;
+
+       if (lock)
+               spin_unlock(&n->list_lock);
+
+       if (m == M_FREE) {
+               stat(s, DEACTIVATE_EMPTY);
+               discard_slab(s, page);
+               stat(s, FREE_SLAB);
        }
-       c->page = NULL;
-#ifdef CONFIG_CMPXCHG_LOCAL
-       c->tid = next_tid(c->tid);
-#endif
-       unfreeze_slab(s, page, tail);
+}
+
+/* Unfreeze all the cpu partial slabs */
+static void unfreeze_partials(struct kmem_cache *s)
+{
+       struct kmem_cache_node *n = NULL;
+       struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
+       struct page *page, *discard_page = NULL;
+
+       while ((page = c->partial)) {
+               enum slab_modes { M_PARTIAL, M_FREE };
+               enum slab_modes l, m;
+               struct page new;
+               struct page old;
+
+               c->partial = page->next;
+               l = M_FREE;
+
+               do {
+
+                       old.freelist = page->freelist;
+                       old.counters = page->counters;
+                       VM_BUG_ON(!old.frozen);
+
+                       new.counters = old.counters;
+                       new.freelist = old.freelist;
+
+                       new.frozen = 0;
+
+                       if (!new.inuse && (!n || n->nr_partial > s->min_partial))
+                               m = M_FREE;
+                       else {
+                               struct kmem_cache_node *n2 = get_node(s,
+                                                       page_to_nid(page));
+
+                               m = M_PARTIAL;
+                               if (n != n2) {
+                                       if (n)
+                                               spin_unlock(&n->list_lock);
+
+                                       n = n2;
+                                       spin_lock(&n->list_lock);
+                               }
+                       }
+
+                       if (l != m) {
+                               if (l == M_PARTIAL) {
+                                       remove_partial(n, page);
+                                       stat(s, FREE_REMOVE_PARTIAL);
+                               } else {
+                                       add_partial(n, page,
+                                               DEACTIVATE_TO_TAIL);
+                                       stat(s, FREE_ADD_PARTIAL);
+                               }
+
+                               l = m;
+                       }
+
+               } while (!cmpxchg_double_slab(s, page,
+                               old.freelist, old.counters,
+                               new.freelist, new.counters,
+                               "unfreezing slab"));
+
+               if (m == M_FREE) {
+                       page->next = discard_page;
+                       discard_page = page;
+               }
+       }
+
+       if (n)
+               spin_unlock(&n->list_lock);
+
+       while (discard_page) {
+               page = discard_page;
+               discard_page = discard_page->next;
+
+               stat(s, DEACTIVATE_EMPTY);
+               discard_slab(s, page);
+               stat(s, FREE_SLAB);
+       }
+}
+
+/*
+ * Put a page that was just frozen (in __slab_free) into a partial page
+ * slot if available. This is done without interrupts disabled and without
+ * preemption disabled. The cmpxchg is racy and may put the partial page
+ * onto a random cpu's partial slot.
+ *
+ * If we did not find a slot then simply move all the partials to the
+ * per node partial list.
+ */
+int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
+{
+       struct page *oldpage;
+       int pages;
+       int pobjects;
+
+       do {
+               pages = 0;
+               pobjects = 0;
+               oldpage = this_cpu_read(s->cpu_slab->partial);
+
+               if (oldpage) {
+                       pobjects = oldpage->pobjects;
+                       pages = oldpage->pages;
+                       if (drain && pobjects > s->cpu_partial) {
+                               unsigned long flags;
+                               /*
+                                * partial array is full. Move the existing
+                                * set to the per node partial list.
+                                */
+                               local_irq_save(flags);
+                               unfreeze_partials(s);
+                               local_irq_restore(flags);
+                               pobjects = 0;
+                               pages = 0;
+                               stat(s, CPU_PARTIAL_DRAIN);
+                       }
+               }
+
+               pages++;
+               pobjects += page->objects - page->inuse;
+
+               page->pages = pages;
+               page->pobjects = pobjects;
+               page->next = oldpage;
+
+       } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage);
+       return pobjects;
 }
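
Because put_cpu_partial() runs with interrupts and preemption enabled, the per cpu partial list is held together purely by the this_cpu_cmpxchg() retry loop above: chain onto the current head and roll up the page/object counts speculatively, then publish only if the head has not changed. Here is a userspace sketch of that publish step, using a plain global pointer and GCC __atomic builtins in place of the per cpu operations (both assumptions).

#include <stdio.h>

struct fake_page {
	struct fake_page *next;
	int pages;
	int pobjects;
	int free_objects;	/* stands in for page->objects - page->inuse */
};

static struct fake_page *partial_head;	/* stands in for s->cpu_slab->partial */

static int push_partial(struct fake_page *page)
{
	struct fake_page *oldpage;

	do {
		oldpage = __atomic_load_n(&partial_head, __ATOMIC_ACQUIRE);

		/* Build the new head speculatively: nothing becomes visible
		 * to other threads until the compare-and-swap succeeds. */
		page->next = oldpage;
		page->pages = (oldpage ? oldpage->pages : 0) + 1;
		page->pobjects = (oldpage ? oldpage->pobjects : 0) +
				 page->free_objects;

	} while (!__atomic_compare_exchange_n(&partial_head, &oldpage, page,
				0, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE));

	return page->pobjects;
}

int main(void)
{
	struct fake_page a = { .free_objects = 3 };
	struct fake_page b = { .free_objects = 5 };

	push_partial(&a);
	printf("pobjects after second push: %d\n", push_partial(&b));
	return 0;
}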
 
 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 {
        stat(s, CPUSLAB_FLUSH);
-       slab_lock(c->page);
        deactivate_slab(s, c);
 }
 
@@ -1676,8 +2020,12 @@ static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
 {
        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
 
-       if (likely(c && c->page))
-               flush_slab(s, c);
+       if (likely(c)) {
+               if (c->page)
+                       flush_slab(s, c);
+
+               unfreeze_partials(s);
+       }
 }
 
 static void flush_cpu_slab(void *d)
@@ -1687,9 +2035,17 @@ static void flush_cpu_slab(void *d)
        __flush_cpu_slab(s, smp_processor_id());
 }
 
+static bool has_cpu_slab(int cpu, void *info)
+{
+       struct kmem_cache *s = info;
+       struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+
+       return c->page || c->partial;
+}
+
 static void flush_all(struct kmem_cache *s)
 {
-       on_each_cpu(flush_cpu_slab, s, 1);
+       on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
 }
 
 /*
@@ -1755,25 +2111,83 @@ slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
                unsigned long nr_objs;
                unsigned long nr_free;
 
-               if (!n)
-                       continue;
+               if (!n)
+                       continue;
+
+               nr_free  = count_partial(n, count_free);
+               nr_slabs = node_nr_slabs(n);
+               nr_objs  = node_nr_objs(n);
+
+               printk(KERN_WARNING
+                       "  node %d: slabs: %ld, objs: %ld, free: %ld\n",
+                       node, nr_slabs, nr_objs, nr_free);
+       }
+}
+
+static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
+                       int node, struct kmem_cache_cpu **pc)
+{
+       void *object;
+       struct kmem_cache_cpu *c;
+       struct page *page = new_slab(s, flags, node);
+
+       if (page) {
+               c = __this_cpu_ptr(s->cpu_slab);
+               if (c->page)
+                       flush_slab(s, c);
+
+               /*
+                * No other reference to the page yet so we can
+                * muck around with it freely without cmpxchg
+                */
+               object = page->freelist;
+               page->freelist = NULL;
+
+               stat(s, ALLOC_SLAB);
+               c->node = page_to_nid(page);
+               c->page = page;
+               *pc = c;
+       } else
+               object = NULL;
+
+       return object;
+}
+
+/*
+ * Check the page->freelist of a page and either transfer the freelist to
+ * the per cpu freelist or deactivate the page.
+ *
+ * The page is still frozen if the return value is not NULL.
+ *
+ * If this function returns NULL then the page has been unfrozen.
+ */
+static inline void *get_freelist(struct kmem_cache *s, struct page *page)
+{
+       struct page new;
+       unsigned long counters;
+       void *freelist;
+
+       do {
+               freelist = page->freelist;
+               counters = page->counters;
+               new.counters = counters;
+               VM_BUG_ON(!new.frozen);
 
-               nr_free  = count_partial(n, count_free);
-               nr_slabs = node_nr_slabs(n);
-               nr_objs  = node_nr_objs(n);
+               new.inuse = page->objects;
+               new.frozen = freelist != NULL;
 
-               printk(KERN_WARNING
-                       "  node %d: slabs: %ld, objs: %ld, free: %ld\n",
-                       node, nr_slabs, nr_objs, nr_free);
-       }
+       } while (!cmpxchg_double_slab(s, page,
+               freelist, counters,
+               NULL, new.counters,
+               "get_freelist"));
+
+       return freelist;
 }
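
get_freelist() is the "take over the regular freelist" step referred to in the slow-path comment below: the whole chain is detached from page->freelist in one atomic operation, with the inuse/frozen update folded into the same cmpxchg, after which it can be consumed without any locking. Reduced to just the detach, the idea is an atomic exchange of the list head, sketched here with a GCC builtin (an assumption; the counters bookkeeping is deliberately omitted).

#include <stddef.h>

/* Detach an entire singly linked freelist in one shot: readers of *head see
 * an empty list afterwards, while the caller owns the old chain exclusively.
 * The real get_freelist() folds the inuse/frozen update into the same step. */
static void *take_over_freelist(void **head)
{
	return __atomic_exchange_n(head, NULL, __ATOMIC_ACQUIRE);
}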
 
 /*
  * Slow path. The lockless freelist is empty or we need to perform
  * debugging duties.
  *
- * Interrupts are disabled.
- *
  * Processing is still very fast if new objects have been freed to the
  * regular freelist. In that case we simply take over the regular freelist
  * as the lockless freelist and zap the regular freelist.
@@ -1790,8 +2204,6 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
                          unsigned long addr, struct kmem_cache_cpu *c)
 {
        void **object;
-       struct page *page;
-#ifdef CONFIG_CMPXCHG_LOCAL
        unsigned long flags;
 
        local_irq_save(flags);
@@ -1803,88 +2215,78 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
         */
        c = this_cpu_ptr(s->cpu_slab);
 #endif
-#endif
 
-       /* We handle __GFP_ZERO in the caller */
-       gfpflags &= ~__GFP_ZERO;
-
-       page = c->page;
-       if (!page)
+       if (!c->page)
+               goto new_slab;
+redo:
+       if (unlikely(!node_match(c, node))) {
+               stat(s, ALLOC_NODE_MISMATCH);
+               deactivate_slab(s, c);
                goto new_slab;
+       }
 
-       slab_lock(page);
-       if (unlikely(!node_match(c, node)))
-               goto another_slab;
+       /* must check again c->freelist in case of cpu migration or IRQ */
+       object = c->freelist;
+       if (object)
+               goto load_freelist;
+
+       stat(s, ALLOC_SLOWPATH);
+
+       object = get_freelist(s, c->page);
+
+       if (!object) {
+               c->page = NULL;
+               stat(s, DEACTIVATE_BYPASS);
+               goto new_slab;
+       }
 
        stat(s, ALLOC_REFILL);
 
 load_freelist:
-       object = page->freelist;
-       if (unlikely(!object))
-               goto another_slab;
-       if (kmem_cache_debug(s))
-               goto debug;
-
        c->freelist = get_freepointer(s, object);
-       page->inuse = page->objects;
-       page->freelist = NULL;
-
-unlock_out:
-       slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
        c->tid = next_tid(c->tid);
        local_irq_restore(flags);
-#endif
-       stat(s, ALLOC_SLOWPATH);
        return object;
 
-another_slab:
-       deactivate_slab(s, c);
-
 new_slab:
-       page = get_partial(s, gfpflags, node);
-       if (page) {
-               stat(s, ALLOC_FROM_PARTIAL);
-load_from_page:
-               c->node = page_to_nid(page);
-               c->page = page;
-               goto load_freelist;
-       }
 
-       gfpflags &= gfp_allowed_mask;
-       if (gfpflags & __GFP_WAIT)
-               local_irq_enable();
+       if (c->partial) {
+               c->page = c->partial;
+               c->partial = c->page->next;
+               c->node = page_to_nid(c->page);
+               stat(s, CPU_PARTIAL_ALLOC);
+               c->freelist = NULL;
+               goto redo;
+       }
 
-       page = new_slab(s, gfpflags, node);
+       /* Then do expensive stuff like retrieving pages from the partial lists */
+       object = get_partial(s, gfpflags, node, c);
 
-       if (gfpflags & __GFP_WAIT)
-               local_irq_disable();
+       if (unlikely(!object)) {
 
-       if (page) {
-               c = __this_cpu_ptr(s->cpu_slab);
-               stat(s, ALLOC_SLAB);
-               if (c->page)
-                       flush_slab(s, c);
+               object = new_slab_objects(s, gfpflags, node, &c);
 
-               slab_lock(page);
-               __SetPageSlubFrozen(page);
+               if (unlikely(!object)) {
+                       if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
+                               slab_out_of_memory(s, gfpflags, node);
 
-               goto load_from_page;
+                       local_irq_restore(flags);
+                       return NULL;
+               }
        }
-       if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
-               slab_out_of_memory(s, gfpflags, node);
-#ifdef CONFIG_CMPXCHG_LOCAL
-       local_irq_restore(flags);
-#endif
-       return NULL;
-debug:
-       if (!alloc_debug_processing(s, page, object, addr))
-               goto another_slab;
 
-       page->inuse++;
-       page->freelist = get_freepointer(s, object);
+       if (likely(!kmem_cache_debug(s)))
+               goto load_freelist;
+
+       /* Only entered in the debug case */
+       if (!alloc_debug_processing(s, c->page, object, addr))
+               goto new_slab;  /* Slab failed checks. Next slab needed */
+
+       c->freelist = get_freepointer(s, object);
+       deactivate_slab(s, c);
        c->node = NUMA_NO_NODE;
-       goto unlock_out;
+       local_irq_restore(flags);
+       return object;
 }
 
 /*
@@ -1902,20 +2304,12 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
        void **object;
        struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
        unsigned long tid;
-#else
-       unsigned long flags;
-#endif
 
        if (slab_pre_alloc_hook(s, gfpflags))
                return NULL;
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-       local_irq_save(flags);
-#else
 redo:
-#endif
 
        /*
         * Must read kmem_cache cpu data via this cpu ptr. Preemption is
@@ -1925,7 +2319,6 @@ redo:
         */
        c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
        /*
         * The transaction ids are globally unique per cpu and per operation on
	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
@@ -1934,7 +2327,6 @@ redo:
         */
        tid = c->tid;
        barrier();
-#endif
 
        object = c->freelist;
        if (unlikely(!object || !node_match(c, node)))
@@ -1942,9 +2334,10 @@ redo:
                object = __slab_alloc(s, gfpflags, node, addr, c);
 
        else {
-#ifdef CONFIG_CMPXCHG_LOCAL
+               void *next_object = get_freepointer_safe(s, object);
+
                /*
-                * The cmpxchg will only match if there was no additonal
+                * The cmpxchg will only match if there was no additional
                 * operation and if we are on the right processor.
                 *
                 * The cmpxchg does the following atomically (without lock semantics!)
@@ -1958,21 +2351,15 @@ redo:
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                object, tid,
-                               get_freepointer(s, object), next_tid(tid)))) {
+                               next_object, next_tid(tid)))) {
 
                        note_cmpxchg_failure("slab_alloc", s, tid);
                        goto redo;
                }
-#else
-               c->freelist = get_freepointer(s, object);
-#endif
+               prefetch_freepointer(s, next_object);
                stat(s, ALLOC_FASTPATH);
        }
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-       local_irq_restore(flags);
-#endif
-
        if (unlikely(gfpflags & __GFP_ZERO) && object)
                memset(object, 0, s->objsize);
 
@@ -2049,58 +2436,111 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 {
        void *prior;
        void **object = (void *)x;
-#ifdef CONFIG_CMPXCHG_LOCAL
-       unsigned long flags;
+       int was_frozen;
+       int inuse;
+       struct page new;
+       unsigned long counters;
+       struct kmem_cache_node *n = NULL;
+       unsigned long uninitialized_var(flags);
 
-       local_irq_save(flags);
-#endif
-       slab_lock(page);
        stat(s, FREE_SLOWPATH);
 
        if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
-               goto out_unlock;
+               return;
 
-       prior = page->freelist;
-       set_freepointer(s, object, prior);
-       page->freelist = object;
-       page->inuse--;
+       do {
+               prior = page->freelist;
+               counters = page->counters;
+               set_freepointer(s, object, prior);
+               new.counters = counters;
+               was_frozen = new.frozen;
+               new.inuse--;
+               if ((!new.inuse || !prior) && !was_frozen && !n) {
 
-       if (unlikely(PageSlubFrozen(page))) {
-               stat(s, FREE_FROZEN);
-               goto out_unlock;
-       }
+                       if (!kmem_cache_debug(s) && !prior)
+
+                               /*
+                                * Slab was on no list before and will be partially empty
+                                * We can defer the list move and instead freeze it.
+                                */
+                               new.frozen = 1;
+
+                       else { /* Needs to be taken off a list */
+
+                               n = get_node(s, page_to_nid(page));
+                               /*
+                                * Speculatively acquire the list_lock.
+                                * If the cmpxchg does not succeed then we may
+                                * drop the list_lock without any processing.
+                                *
+                                * Otherwise the list_lock will synchronize with
+                                * other processors updating the list of slabs.
+                                */
+                               spin_lock_irqsave(&n->list_lock, flags);
+
+                       }
+               }
+               inuse = new.inuse;
+
+       } while (!cmpxchg_double_slab(s, page,
+               prior, counters,
+               object, new.counters,
+               "__slab_free"));
+
+       if (likely(!n)) {
 
-       if (unlikely(!page->inuse))
-               goto slab_empty;
+               /*
+                * If we just froze the page then put it onto the
+                * per cpu partial list.
+                */
+               if (new.frozen && !was_frozen) {
+                       put_cpu_partial(s, page, 1);
+                       stat(s, CPU_PARTIAL_FREE);
+               }
+               /*
+                * The list lock was not taken therefore no list
+                * activity can be necessary.
+                */
+                if (was_frozen)
+                        stat(s, FREE_FROZEN);
+                return;
+        }
 
        /*
-        * Objects left in the slab. If it was not on the partial list before
-        * then add it.
+        * was_frozen may have been set after we acquired the list_lock in
+        * an earlier loop. So we need to check it here again.
         */
-       if (unlikely(!prior)) {
-               add_partial(get_node(s, page_to_nid(page)), page, 1);
-               stat(s, FREE_ADD_PARTIAL);
-       }
+       if (was_frozen)
+               stat(s, FREE_FROZEN);
+       else {
+               if (unlikely(!inuse && n->nr_partial > s->min_partial))
+                        goto slab_empty;
 
-out_unlock:
-       slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
-       local_irq_restore(flags);
-#endif
+               /*
+                * Objects left in the slab. If it was not on the partial list before
+                * then add it.
+                */
+               if (unlikely(!prior)) {
+                       remove_full(s, page);
+                       add_partial(n, page, DEACTIVATE_TO_TAIL);
+                       stat(s, FREE_ADD_PARTIAL);
+               }
+       }
+       spin_unlock_irqrestore(&n->list_lock, flags);
        return;
 
 slab_empty:
        if (prior) {
                /*
-                * Slab still on the partial list.
+                * Slab on the partial list.
                 */
-               remove_partial(s, page);
+               remove_partial(n, page);
                stat(s, FREE_REMOVE_PARTIAL);
-       }
-       slab_unlock(page);
-#ifdef CONFIG_CMPXCHG_LOCAL
-       local_irq_restore(flags);
-#endif
+       } else
+               /* Slab must be on the full list */
+               remove_full(s, page);
+
+       spin_unlock_irqrestore(&n->list_lock, flags);
        stat(s, FREE_SLAB);
        discard_slab(s, page);
 }
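
The slowpath above follows the usual lock-free shape: read page->freelist and page->counters, compute the new values, publish both with a single cmpxchg_double_slab() and retry if another cpu got there first. A minimal user-space sketch of that retry loop, reduced to a single-word freelist push with C11 atomics (the names and the single-word simplification are illustrative, not kernel API):

#include <stdatomic.h>
#include <stddef.h>

struct object {
        struct object *next;
};

/* Head of a shared LIFO freelist. */
static _Atomic(struct object *) freelist = NULL;

/*
 * Push one object: the analogue of linking it in front of page->freelist,
 * minus the counters word that the kernel updates in the same
 * double-word cmpxchg.
 */
static void freelist_push(struct object *obj)
{
        struct object *old = atomic_load_explicit(&freelist, memory_order_relaxed);

        do {
                obj->next = old;        /* set_freepointer() equivalent */
        } while (!atomic_compare_exchange_weak_explicit(&freelist, &old, obj,
                        memory_order_release, memory_order_relaxed));
}

int main(void)
{
        struct object o1, o2;

        freelist_push(&o1);
        freelist_push(&o2);     /* list head is now o2 -> o1 */
        return 0;
}

The kernel version compare-exchanges two adjacent words, so inuse and frozen change atomically together with the list head and the loop can decide about list moves before publishing anything.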
@@ -2121,21 +2561,11 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
        void **object = (void *)x;
        struct kmem_cache_cpu *c;
-#ifdef CONFIG_CMPXCHG_LOCAL
        unsigned long tid;
-#else
-       unsigned long flags;
-#endif
 
        slab_free_hook(s, x);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-       local_irq_save(flags);
-
-#else
 redo:
-#endif
-
        /*
         * Determine the current cpu's per cpu slab.
         * The cpu may change afterward. However, that does not matter since
@@ -2144,15 +2574,12 @@ redo:
         */
        c = __this_cpu_ptr(s->cpu_slab);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
        tid = c->tid;
        barrier();
-#endif
 
-       if (likely(page == c->page && c->node != NUMA_NO_NODE)) {
+       if (likely(page == c->page)) {
                set_freepointer(s, object, c->freelist);
 
-#ifdef CONFIG_CMPXCHG_LOCAL
                if (unlikely(!this_cpu_cmpxchg_double(
                                s->cpu_slab->freelist, s->cpu_slab->tid,
                                c->freelist, tid,
@@ -2161,16 +2588,10 @@ redo:
                        note_cmpxchg_failure("slab_free", s, tid);
                        goto redo;
                }
-#else
-               c->freelist = object;
-#endif
                stat(s, FREE_FASTPATH);
        } else
                __slab_free(s, page, x, addr);
 
-#ifndef CONFIG_CMPXCHG_LOCAL
-       local_irq_restore(flags);
-#endif
 }
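
The fastpath above pairs the per cpu freelist with a transaction id, so this_cpu_cmpxchg_double() fails and the operation restarts from redo whenever the thread was migrated or interrupted between reading c->freelist and publishing the new head. A small standalone sketch of the tid encoding, loosely mirroring the init_tid()/next_tid() helpers earlier in slub.c (the rounded NR_CPUS value is an assumption made for the demo):

#include <assert.h>
#include <stdio.h>

/*
 * Illustrative stand-ins for the tid helpers: the low bits identify the
 * cpu, the high bits count completed operations on that cpu.
 */
#define NR_CPUS_ROUNDED 64UL            /* assumed roundup_pow_of_two(NR_CPUS) */
#define TID_STEP        NR_CPUS_ROUNDED

static unsigned long init_tid(int cpu)               { return cpu; }
static unsigned long next_tid(unsigned long tid)     { return tid + TID_STEP; }
static unsigned long tid_to_cpu(unsigned long tid)   { return tid % TID_STEP; }
static unsigned long tid_to_event(unsigned long tid) { return tid / TID_STEP; }

int main(void)
{
        unsigned long tid = init_tid(3);        /* snapshot taken on cpu 3 */

        tid = next_tid(tid);                    /* one alloc/free completed */
        tid = next_tid(tid);

        assert(tid_to_cpu(tid) == 3);           /* still names the cpu */
        assert(tid_to_event(tid) == 2);         /* and counts the events */

        /*
         * If the cmpxchg sees any tid other than the one read at the start,
         * the cpu or the freelist changed underneath us and the fastpath
         * must retry.
         */
        printf("tid=%lu cpu=%lu events=%lu\n",
               tid, tid_to_cpu(tid), tid_to_event(tid));
        return 0;
}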
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
@@ -2362,16 +2783,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
        BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
                        SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
-#ifdef CONFIG_CMPXCHG_LOCAL
        /*
-        * Must align to double word boundary for the double cmpxchg instructions
-        * to work.
+        * Must align to double word boundary for the double cmpxchg
+        * instructions to work; see __pcpu_double_call_return_bool().
         */
-       s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *));
-#else
-       /* Regular alignment is sufficient */
-       s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
-#endif
+       s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
+                                    2 * sizeof(void *));
 
        if (!s->cpu_slab)
                return 0;
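
The double-word compare-and-exchange used by the fastpaths requires the freelist/tid pair to be aligned as a unit, which is why the per cpu data is allocated with an explicit 2 * sizeof(void *) alignment above. A minimal C11 sketch of stating that constraint (the struct is illustrative, not the real kmem_cache_cpu layout):

#include <stdalign.h>
#include <assert.h>
#include <stdio.h>

/*
 * The freelist pointer and tid must occupy adjacent words that are
 * aligned as a pair, so a double-word cmpxchg can update both at once.
 */
struct cpu_slab_like {
        alignas(2 * sizeof(void *)) void *freelist;
        unsigned long tid;
};

static_assert(alignof(struct cpu_slab_like) == 2 * sizeof(void *),
              "double-word cmpxchg needs double-word alignment");

int main(void)
{
        printf("alignment = %zu bytes\n", alignof(struct cpu_slab_like));
        return 0;
}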
@@ -2396,7 +2813,6 @@ static void early_kmem_cache_node_alloc(int node)
 {
        struct page *page;
        struct kmem_cache_node *n;
-       unsigned long flags;
 
        BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
 
@@ -2413,7 +2829,8 @@ static void early_kmem_cache_node_alloc(int node)
        n = page->freelist;
        BUG_ON(!n);
        page->freelist = get_freepointer(kmem_cache_node, n);
-       page->inuse++;
+       page->inuse = 1;
+       page->frozen = 0;
        kmem_cache_node->node[node] = n;
 #ifdef CONFIG_SLUB_DEBUG
        init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
@@ -2422,14 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
        init_kmem_cache_node(n, kmem_cache_node);
        inc_slabs_node(kmem_cache_node, node, page->objects);
 
-       /*
-        * lockdep requires consistent irq usage for each lock
-        * so even though there cannot be a race this early in
-        * the boot sequence, we still disable irqs.
-        */
-       local_irq_save(flags);
-       add_partial(n, page, 0);
-       local_irq_restore(flags);
+       add_partial(n, page, DEACTIVATE_TO_HEAD);
 }
 
 static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -2635,11 +3045,47 @@ static int kmem_cache_open(struct kmem_cache *s,
                }
        }
 
+#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
+    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
+       if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
+               /* Enable fast mode */
+               s->flags |= __CMPXCHG_DOUBLE;
+#endif
+
        /*
         * The larger the object size is, the more pages we want on the partial
         * list to avoid pounding the page allocator excessively.
         */
-       set_min_partial(s, ilog2(s->size));
+       set_min_partial(s, ilog2(s->size) / 2);
+
+       /*
+        * cpu_partial determines the maximum number of objects kept in the
+        * per cpu partial lists of a processor.
+        *
+        * Per cpu partial lists mainly contain slabs that just have one
+        * object freed. If they are used for allocation then they can be
+        * filled up again with minimal effort. The slab will never hit the
+        * per node partial lists and therefore no locking will be required.
+        *
+        * This setting also determines
+        *
+        * A) The number of objects from per cpu partial slabs dumped to the
+        *    per node list when we reach the limit.
+        * B) The number of objects in cpu partial slabs to extract from the
+        *    per node list when we run out of per cpu objects. We only fetch 50%
+        *    to keep some capacity around for frees.
+        */
+       if (kmem_cache_debug(s))
+               s->cpu_partial = 0;
+       else if (s->size >= PAGE_SIZE)
+               s->cpu_partial = 2;
+       else if (s->size >= 1024)
+               s->cpu_partial = 6;
+       else if (s->size >= 256)
+               s->cpu_partial = 13;
+       else
+               s->cpu_partial = 30;
+
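
Read as a table, the branches above give each cache a per cpu partial budget that shrinks as the object size grows, so a cpu never pins more than a few pages worth of spare objects. A standalone restatement of the same thresholds (PAGE_SIZE is assumed to be 4096 for the demo):

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* assumed page size for the example */

/*
 * Same thresholds as kmem_cache_open() above: none for debug caches,
 * fewer spare objects per cpu as objects get larger.
 */
static unsigned int size_to_cpu_partial(unsigned long size, int debug)
{
        if (debug)
                return 0;
        if (size >= PAGE_SIZE)
                return 2;
        if (size >= 1024)
                return 6;
        if (size >= 256)
                return 13;
        return 30;
}

int main(void)
{
        printf("size   64 -> %u\n", size_to_cpu_partial(64, 0));       /* 30 */
        printf("size  512 -> %u\n", size_to_cpu_partial(512, 0));      /* 13 */
        printf("size 2048 -> %u\n", size_to_cpu_partial(2048, 0));     /*  6 */
        return 0;
}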
        s->refcount = 1;
 #ifdef CONFIG_NUMA
        s->remote_node_defrag_ratio = 1000;
@@ -2698,23 +3144,22 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 
 /*
  * Attempt to free all partial slabs on a node.
+ * This is called from kmem_cache_close(). We must be the last thread
+ * using the cache and therefore we do not need to lock anymore.
  */
 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-       unsigned long flags;
        struct page *page, *h;
 
-       spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry_safe(page, h, &n->partial, lru) {
                if (!page->inuse) {
-                       __remove_partial(n, page);
+                       remove_partial(n, page);
                        discard_slab(s, page);
                } else {
                        list_slab_objects(s, page,
                                "Objects remaining on kmem_cache_close()");
                }
        }
-       spin_unlock_irqrestore(&n->list_lock, flags);
 }
 
 /*
@@ -2748,6 +3193,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
        s->refcount--;
        if (!s->refcount) {
                list_del(&s->list);
+               up_write(&slub_lock);
                if (kmem_cache_close(s)) {
                        printk(KERN_ERR "SLUB %s: %s called for cache that "
                                "still has objects.\n", s->name, __func__);
@@ -2756,8 +3202,8 @@ void kmem_cache_destroy(struct kmem_cache *s)
                if (s->flags & SLAB_DESTROY_BY_RCU)
                        rcu_barrier();
                sysfs_slab_remove(s);
-       }
-       up_write(&slub_lock);
+       } else
+               up_write(&slub_lock);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
@@ -2974,6 +3420,42 @@ size_t ksize(const void *object)
 }
 EXPORT_SYMBOL(ksize);
 
+#ifdef CONFIG_SLUB_DEBUG
+bool verify_mem_not_deleted(const void *x)
+{
+       struct page *page;
+       void *object = (void *)x;
+       unsigned long flags;
+       bool rv;
+
+       if (unlikely(ZERO_OR_NULL_PTR(x)))
+               return false;
+
+       local_irq_save(flags);
+
+       page = virt_to_head_page(x);
+       if (unlikely(!PageSlab(page))) {
+               /* maybe it was from stack? */
+               rv = true;
+               goto out_unlock;
+       }
+
+       slab_lock(page);
+       if (on_freelist(page->slab, page, object)) {
+               object_err(page->slab, page, object, "Object is on free-list");
+               rv = false;
+       } else {
+               rv = true;
+       }
+       slab_unlock(page);
+
+out_unlock:
+       local_irq_restore(flags);
+       return rv;
+}
+EXPORT_SYMBOL(verify_mem_not_deleted);
+#endif
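
A hedged sketch of a defensive caller for the helper above; the function and buffer names are invented, and it assumes a CONFIG_SLUB_DEBUG build where verify_mem_not_deleted() is compiled in:

/* Sketch only: refuse to reuse a pointer whose object was already freed. */
static int reuse_buffer(void *buf)
{
        if (!verify_mem_not_deleted(buf))
                return -EINVAL;         /* object found on a freelist */

        /* buf still looks live, safe to keep using it */
        return 0;
}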
+
 void kfree(const void *x)
 {
        struct page *page;
@@ -3039,29 +3521,23 @@ int kmem_cache_shrink(struct kmem_cache *s)
                 * list_lock. page->inuse here is the upper limit.
                 */
                list_for_each_entry_safe(page, t, &n->partial, lru) {
-                       if (!page->inuse && slab_trylock(page)) {
-                               /*
-                                * Must hold slab lock here because slab_free
-                                * may have freed the last object and be
-                                * waiting to release the slab.
-                                */
-                               __remove_partial(n, page);
-                               slab_unlock(page);
-                               discard_slab(s, page);
-                       } else {
-                               list_move(&page->lru,
-                               slabs_by_inuse + page->inuse);
-                       }
+                       list_move(&page->lru, slabs_by_inuse + page->inuse);
+                       if (!page->inuse)
+                               n->nr_partial--;
                }
 
                /*
                 * Rebuild the partial list with the slabs filled up most
                 * first and the least used slabs at the end.
                 */
-               for (i = objects - 1; i >= 0; i--)
+               for (i = objects - 1; i > 0; i--)
                        list_splice(slabs_by_inuse + i, n->partial.prev);
 
                spin_unlock_irqrestore(&n->list_lock, flags);
+
+               /* Release empty slabs */
+               list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
+                       discard_slab(s, page);
        }
 
        kfree(slabs_by_inuse);
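
kmem_cache_shrink() above sorts each node's partial slabs into buckets indexed by their inuse count, splices the buckets back fullest first, and then discards whatever landed in bucket zero. The same idea in a self-contained form, with plain arrays standing in for the list_head buckets (purely illustrative):

#include <stdio.h>

#define MAX_OBJECTS 8   /* objects per slab in this toy example */

/*
 * Reorder the inuse counts so fuller slabs come first and report how
 * many empty slabs could be discarded -- the shape of kmem_cache_shrink().
 */
static int shrink(const int *inuse, int nr, int *out)
{
        int buckets[MAX_OBJECTS + 1] = { 0 };
        int i, b, n = 0;

        for (i = 0; i < nr; i++)                /* bucket by inuse count */
                buckets[inuse[i]]++;

        for (b = MAX_OBJECTS; b > 0; b--)       /* fullest buckets first */
                for (i = 0; i < buckets[b]; i++)
                        out[n++] = b;

        return buckets[0];                      /* bucket 0: discard_slab() */
}

int main(void)
{
        int inuse[] = { 3, 0, 7, 1, 0, 5 };
        int out[6];
        int i, empty = shrink(inuse, 6, out);

        for (i = 0; i < 6 - empty; i++)
                printf("%d ", out[i]);          /* prints: 7 5 3 1 */
        printf("(+%d empty slabs discarded)\n", empty);
        return 0;
}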
@@ -3227,6 +3703,9 @@ void __init kmem_cache_init(void)
        struct kmem_cache *temp_kmem_cache_node;
        unsigned long kmalloc_size;
 
+       if (debug_guardpage_minorder())
+               slub_max_order = 0;
+
        kmem_size = offsetof(struct kmem_cache, node) +
                                nr_node_ids * sizeof(struct kmem_cache_node *);
 
@@ -3479,13 +3958,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
                if (kmem_cache_open(s, n,
                                size, align, flags, ctor)) {
                        list_add(&s->list, &slab_caches);
+                       up_write(&slub_lock);
                        if (sysfs_slab_add(s)) {
+                               down_write(&slub_lock);
                                list_del(&s->list);
                                kfree(n);
                                kfree(s);
                                goto err;
                        }
-                       up_write(&slub_lock);
                        return s;
                }
                kfree(n);
@@ -3554,7 +4034,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 
        ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, caller);
 
-       /* Honor the call site pointer we recieved. */
+       /* Honor the call site pointer we received. */
        trace_kmalloc(caller, ret, size, s->size, gfpflags);
 
        return ret;
@@ -3584,7 +4064,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 
        ret = slab_alloc(s, gfpflags, node, caller);
 
-       /* Honor the call site pointer we recieved. */
+       /* Honor the call site pointer we received. */
        trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
 
        return ret;
@@ -3634,12 +4114,9 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
                                                unsigned long *map)
 {
-       if (slab_trylock(page)) {
-               validate_slab(s, page, map);
-               slab_unlock(page);
-       } else
-               printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
-                       s->name, page);
+       slab_lock(page);
+       validate_slab(s, page, map);
+       slab_unlock(page);
 }
 
 static int validate_slab_node(struct kmem_cache *s,
@@ -4020,22 +4497,31 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 
                for_each_possible_cpu(cpu) {
                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
+                       int node = ACCESS_ONCE(c->node);
+                       struct page *page;
 
-                       if (!c || c->node < 0)
+                       if (node < 0)
                                continue;
-
-                       if (c->page) {
-                                       if (flags & SO_TOTAL)
-                                               x = c->page->objects;
+                       page = ACCESS_ONCE(c->page);
+                       if (page) {
+                               if (flags & SO_TOTAL)
+                                       x = page->objects;
                                else if (flags & SO_OBJECTS)
-                                       x = c->page->inuse;
+                                       x = page->inuse;
                                else
                                        x = 1;
 
                                total += x;
-                               nodes[c->node] += x;
+                               nodes[node] += x;
+                       }
+                       page = c->partial;
+
+                       if (page) {
+                               x = page->pobjects;
+                               total += x;
+                               nodes[node] += x;
                        }
-                       per_cpu[c->node]++;
+                       per_cpu[node]++;
                }
        }
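
The statistics walk above reads c->node and c->page from other cpus without taking any lock; ACCESS_ONCE() only guarantees that each field is loaded exactly once, so the totals are a best-effort snapshot. A user-space analogue using C11 relaxed atomic loads in place of ACCESS_ONCE() (the structure and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct percpu_stats {
        _Atomic(int)  node;             /* written by the owning thread */
        _Atomic(long) objects;
};

/*
 * Load each field exactly once; the two values may still be mutually
 * stale, just like the lockless walk in show_slab_objects().
 */
static void snapshot(struct percpu_stats *c, int *node, long *objects)
{
        *node = atomic_load_explicit(&c->node, memory_order_relaxed);
        *objects = atomic_load_explicit(&c->objects, memory_order_relaxed);
}

int main(void)
{
        struct percpu_stats c = { .node = 0, .objects = 42 };
        int node;
        long objects;

        snapshot(&c, &node, &objects);
        printf("node=%d objects=%ld\n", node, objects);
        return 0;
}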
 
@@ -4104,7 +4590,7 @@ static int any_slab_objects(struct kmem_cache *s)
 #endif
 
 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
-#define to_slab(n) container_of(n, struct kmem_cache, kobj);
+#define to_slab(n) container_of(n, struct kmem_cache, kobj)
 
 struct slab_attribute {
        struct attribute attr;
@@ -4113,11 +4599,12 @@ struct slab_attribute {
 };
 
 #define SLAB_ATTR_RO(_name) \
-       static struct slab_attribute _name##_attr = __ATTR_RO(_name)
+       static struct slab_attribute _name##_attr = \
+       __ATTR(_name, 0400, _name##_show, NULL)
 
 #define SLAB_ATTR(_name) \
        static struct slab_attribute _name##_attr =  \
-       __ATTR(_name, 0644, _name##_show, _name##_store)
+       __ATTR(_name, 0600, _name##_show, _name##_store)
 
 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
 {
@@ -4186,6 +4673,29 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
 }
 SLAB_ATTR(min_partial);
 
+static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
+{
+       return sprintf(buf, "%u\n", s->cpu_partial);
+}
+
+static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
+                                size_t length)
+{
+       unsigned long objects;
+       int err;
+
+       err = strict_strtoul(buf, 10, &objects);
+       if (err)
+               return err;
+       if (objects && kmem_cache_debug(s))
+               return -EINVAL;
+
+       s->cpu_partial = objects;
+       flush_all(s);
+       return length;
+}
+SLAB_ATTR(cpu_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
        if (!s->ctor)
@@ -4224,6 +4734,37 @@ static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(objects_partial);
 
+static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
+{
+       int objects = 0;
+       int pages = 0;
+       int cpu;
+       int len;
+
+       for_each_online_cpu(cpu) {
+               struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+
+               if (page) {
+                       pages += page->pages;
+                       objects += page->pobjects;
+               }
+       }
+
+       len = sprintf(buf, "%d(%d)", objects, pages);
+
+#ifdef CONFIG_SMP
+       for_each_online_cpu(cpu) {
+               struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
+
+               if (page && len < PAGE_SIZE - 20)
+                       len += sprintf(buf + len, " C%d=%d(%d)", cpu,
+                               page->pobjects, page->pages);
+       }
+#endif
+       return len + sprintf(buf + len, "\n");
+}
+SLAB_ATTR_RO(slabs_cpu_partial);
+
 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
 {
        return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
@@ -4287,8 +4828,10 @@ static ssize_t sanity_checks_store(struct kmem_cache *s,
                                const char *buf, size_t length)
 {
        s->flags &= ~SLAB_DEBUG_FREE;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_DEBUG_FREE;
+       }
        return length;
 }
 SLAB_ATTR(sanity_checks);
@@ -4302,8 +4845,10 @@ static ssize_t trace_store(struct kmem_cache *s, const char *buf,
                                                        size_t length)
 {
        s->flags &= ~SLAB_TRACE;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_TRACE;
+       }
        return length;
 }
 SLAB_ATTR(trace);
@@ -4320,8 +4865,10 @@ static ssize_t red_zone_store(struct kmem_cache *s,
                return -EBUSY;
 
        s->flags &= ~SLAB_RED_ZONE;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_RED_ZONE;
+       }
        calculate_sizes(s, -1);
        return length;
 }
@@ -4339,8 +4886,10 @@ static ssize_t poison_store(struct kmem_cache *s,
                return -EBUSY;
 
        s->flags &= ~SLAB_POISON;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_POISON;
+       }
        calculate_sizes(s, -1);
        return length;
 }
@@ -4358,8 +4907,10 @@ static ssize_t store_user_store(struct kmem_cache *s,
                return -EBUSY;
 
        s->flags &= ~SLAB_STORE_USER;
-       if (buf[0] == '1')
+       if (buf[0] == '1') {
+               s->flags &= ~__CMPXCHG_DOUBLE;
                s->flags |= SLAB_STORE_USER;
+       }
        calculate_sizes(s, -1);
        return length;
 }
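
The four debug toggles above share one pattern: enabling any of them clears __CMPXCHG_DOUBLE so that the cache falls back to the slab_lock() based slow paths where the extra checks can run. A sketch of that pattern factored into a hypothetical helper (not part of the patch):

/* Hypothetical helper equivalent to the pattern repeated above. */
static void slab_enable_debug_flag(struct kmem_cache *s, unsigned long flag)
{
        /* Debug processing needs the locked slow paths. */
        s->flags &= ~__CMPXCHG_DOUBLE;
        s->flags |= flag;
}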
@@ -4524,6 +5075,7 @@ STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
 STAT_ATTR(ALLOC_SLAB, alloc_slab);
 STAT_ATTR(ALLOC_REFILL, alloc_refill);
+STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
 STAT_ATTR(FREE_SLAB, free_slab);
 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
@@ -4531,7 +5083,14 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
+STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
 STAT_ATTR(ORDER_FALLBACK, order_fallback);
+STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
+STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
+STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
+STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
+STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
+STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4540,6 +5099,7 @@ static struct attribute *slab_attrs[] = {
        &objs_per_slab_attr.attr,
        &order_attr.attr,
        &min_partial_attr.attr,
+       &cpu_partial_attr.attr,
        &objects_attr.attr,
        &objects_partial_attr.attr,
        &partial_attr.attr,
@@ -4552,6 +5112,7 @@ static struct attribute *slab_attrs[] = {
        &destroy_by_rcu_attr.attr,
        &shrink_attr.attr,
        &reserved_attr.attr,
+       &slabs_cpu_partial_attr.attr,
 #ifdef CONFIG_SLUB_DEBUG
        &total_objects_attr.attr,
        &slabs_attr.attr,
@@ -4581,6 +5142,7 @@ static struct attribute *slab_attrs[] = {
        &alloc_from_partial_attr.attr,
        &alloc_slab_attr.attr,
        &alloc_refill_attr.attr,
+       &alloc_node_mismatch_attr.attr,
        &free_slab_attr.attr,
        &cpuslab_flush_attr.attr,
        &deactivate_full_attr.attr,
@@ -4588,7 +5150,14 @@ static struct attribute *slab_attrs[] = {
        &deactivate_to_head_attr.attr,
        &deactivate_to_tail_attr.attr,
        &deactivate_remote_frees_attr.attr,
+       &deactivate_bypass_attr.attr,
        &order_fallback_attr.attr,
+       &cmpxchg_double_fail_attr.attr,
+       &cmpxchg_double_cpu_fail_attr.attr,
+       &cpu_partial_alloc_attr.attr,
+       &cpu_partial_free_attr.attr,
+       &cpu_partial_node_attr.attr,
+       &cpu_partial_drain_attr.attr,
 #endif
 #ifdef CONFIG_FAILSLAB
        &failslab_attr.attr,
@@ -4940,7 +5509,7 @@ static const struct file_operations proc_slabinfo_operations = {
 
 static int __init slab_proc_init(void)
 {
-       proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
+       proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
        return 0;
 }
 module_init(slab_proc_init);