#include <linux/swap.h> /* struct reclaim_state */
#include <linux/cache.h>
#include <linux/init.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
-#include <linux/kmemtrace.h>
#include <linux/kmemleak.h>
-#include <asm/atomic.h>
-#include "internal.h"
+
+#include <trace/events/kmem.h>
+
+#include <linux/atomic.h>
/*
* slob_block has a field 'units', which indicates size of block if +ve,
static DEFINE_SPINLOCK(slob_lock);
/*
- * tracks the reserve state for the allocator.
- */
-static int slob_reserve;
-
-/*
* Encode the given size and next info into a free slob block s.
*/
static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
static void *slob_new_pages(gfp_t gfp, int order, int node)
{
- struct page *page;
+ void *page;
#ifdef CONFIG_NUMA
if (node != -1)
if (!page)
return NULL;
- slob_reserve = page->reserve;
-
return page_address(page);
}
slob_t *b = NULL;
unsigned long flags;
- if (unlikely(slob_reserve)) {
- if (!(gfp_to_alloc_flags(gfp) & ALLOC_NO_WATERMARKS))
- goto grow;
- }
-
if (size < SLOB_BREAK1)
slob_list = &free_slob_small;
else if (size < SLOB_BREAK2)
}
spin_unlock_irqrestore(&slob_lock, flags);
-grow:
/* Not enough space: must allocate a new page */
if (!b) {
b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
slob_t *prev, *next, *b = (slob_t *)block;
slobidx_t units;
unsigned long flags;
+ struct list_head *slob_list;
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
set_slob(b, units,
(void *)((unsigned long)(b +
SLOB_UNITS(PAGE_SIZE)) & PAGE_MASK));
- set_slob_page_free(sp, &free_slob_small);
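+ /* Put the page back on the free list matching its size class (cf. slob_alloc). */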
+ if (size < SLOB_BREAK1)
+ slob_list = &free_slob_small;
+ else if (size < SLOB_BREAK2)
+ slob_list = &free_slob_medium;
+ else
+ slob_list = &free_slob_large;
+ set_slob_page_free(sp, slob_list);
goto out;
}
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
void *ret;
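+ /* Honor gfp_allowed_mask: some GFP flags are forbidden during early boot and suspend. */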
+ gfp &= gfp_allowed_mask;
+
lockdep_trace_alloc(gfp);
if (size < PAGE_SIZE - align) {
} else {
unsigned int order = get_order(size);
- ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
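+ /* Only order > 0 allocations need __GFP_COMP to be tracked as a compound page. */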
+ if (likely(order))
+ gfp |= __GFP_COMP;
+ ret = slob_new_pages(gfp, order, node);
if (ret) {
struct page *page;
page = virt_to_page(ret);
{
void *b;
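+ /* Apply the same gfp filtering and lockdep annotation as the kmalloc path. */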
+ flags &= gfp_allowed_mask;
+
+ lockdep_trace_alloc(flags);
+
if (c->size < PAGE_SIZE) {
b = slob_alloc(c->size, flags, c->align, node);
trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
struct slob_rcu *slob_rcu;
slob_rcu = b + (c->size - sizeof(struct slob_rcu));
- INIT_RCU_HEAD(&slob_rcu->head);
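+ /* No INIT_RCU_HEAD needed: call_rcu() accepts an uninitialized rcu_head. */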
slob_rcu->size = c->size;
call_rcu(&slob_rcu->head, kmem_rcu_free);
} else {
}
EXPORT_SYMBOL(kmem_cache_size);
-const char *kmem_cache_name(struct kmem_cache *c)
-{
- return c->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
int kmem_cache_shrink(struct kmem_cache *d)
{
return 0;
}
EXPORT_SYMBOL(kmem_cache_shrink);
-int kmem_ptr_validate(struct kmem_cache *a, const void *b)
-{
- return 0;
-}
-
static unsigned int slob_ready __read_mostly;
int slab_is_available(void)
return slob_ready;
}
-static unsigned __slob_estimate(unsigned size, unsigned align, unsigned objects)
-{
- unsigned nr_pages;
-
- size = SLOB_UNIT * SLOB_UNITS(size + align - 1);
-
- if (size <= PAGE_SIZE) {
- nr_pages = DIV_ROUND_UP(objects, PAGE_SIZE / size);
- } else {
- nr_pages = objects << get_order(size);
- }
-
- return nr_pages;
-}
-
-/*
- * Calculate the upper bound of pages required to sequentially allocate
- * @objects objects from @cachep.
- */
-unsigned kmem_alloc_estimate(struct kmem_cache *c, gfp_t flags, int objects)
-{
- unsigned size = c->size;
-
- if (c->flags & SLAB_DESTROY_BY_RCU)
- size += sizeof(struct slob_rcu);
-
- return __slob_estimate(size, c->align, objects);
-}
-
-/*
- * Calculate the upper bound of pages required to sequentially allocate
- * @count objects of @size bytes from kmalloc given @flags.
- */
-unsigned kmalloc_estimate_objs(size_t size, gfp_t flags, int count)
-{
- unsigned align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
-
- return __slob_estimate(size, align, count);
-}
-EXPORT_SYMBOL_GPL(kmalloc_estimate_objs);
-
-/*
- * Calculate the upper bound of pages required to sequentially allocate @bytes
- * from kmalloc in an unspecified number of allocations of nonuniform size.
- */
-unsigned kmalloc_estimate_bytes(gfp_t flags, size_t bytes)
-{
- unsigned long pages;
-
- /*
- * Multiply by two, in order to account for the worst case slack space
- * due to the power-of-two allocation sizes.
- *
- * While not true for slob, it cannot do worse than that for sequential
- * allocations.
- */
- pages = DIV_ROUND_UP(2 * bytes, PAGE_SIZE);
-
- /*
- * Our power of two series starts at PAGE_SIZE, so add one page.
- */
- pages++;
-
- return pages;
-}
-EXPORT_SYMBOL_GPL(kmalloc_estimate_bytes);
-
void __init kmem_cache_init(void)
{
slob_ready = 1;