drivers/md/dm-memcache.c
/*
 * Copyright (C) 2006-2008 Red Hat, Inc. All rights reserved.
 *
 * Module Author: Heinz Mauelshagen <heinzm@redhat.com>
 *
 * Device-mapper memory object handling:
 *
 * o allocate/free total_pages in a per-client page pool.
 *
 * o allocate/free memory objects with chunks (1..n) of
 *   pages_per_chunk pages hanging off.
 *
 * This file is released under the GPL.
 */
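
/*
 * Illustrative sizing sketch (not part of the original code, sizes are
 * arbitrary): a client created as
 *
 *	cl = dm_mem_cache_client_create(16, 2, 4);
 *
 * pre-allocates 16 * 2 * 4 = 128 pages up front; each subsequent
 * dm_mem_cache_alloc(cl) hands out one object carrying 2 chunks of
 * 4 pages each, all taken from that per-client pool.
 */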

#define DM_MEM_CACHE_VERSION    "0.2"

#include "dm.h"
#include "dm-memcache.h"
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/module.h>

struct dm_mem_cache_client {
        spinlock_t lock;                /* Protects free_list and the counters below */
        mempool_t *objs_pool;           /* Pool of dm_mem_cache_object arrays */
        struct page_list *free_list;    /* Pre-allocated pages not handed out yet */
        unsigned objects;               /* Objects the pool is sized for */
        unsigned chunks;                /* Chunks per object */
        unsigned pages_per_chunk;       /* Pages per chunk */
        unsigned free_pages;            /* Pages currently available from free_list */
        unsigned total_pages;           /* objects * chunks * pages_per_chunk */
};

/*
 * Free the pages and page_list elements of a client.
 */
static void free_cache_pages(struct page_list *list)
{
        while (list) {
                struct page_list *pl = list;

                list = pl->next;
                BUG_ON(!pl->page);
                __free_page(pl->page);
                kfree(pl);
        }
}

/*
 * Allocate the number of pages and page_list elements required by a client.
 */
static struct page_list *alloc_cache_pages(unsigned pages)
{
        struct page_list *pl, *ret = NULL;
        struct page *page;

        while (pages--) {
                page = alloc_page(GFP_NOIO);
                if (!page)
                        goto err;

                pl = kmalloc(sizeof(*pl), GFP_NOIO);
                if (!pl) {
                        __free_page(page);
                        goto err;
                }

                pl->page = page;
                pl->next = ret;
                ret = pl;
        }

        return ret;

err:
        free_cache_pages(ret);
        return NULL;
}

/*
 * Allocate page_list elements from the pool onto the chunks of a memory object.
 */
static void alloc_chunks(struct dm_mem_cache_client *cl,
                         struct dm_mem_cache_object *obj)
{
        unsigned chunks = cl->chunks;
        unsigned long flags;

        /*
         * The free list is also manipulated from interrupt context, so keep
         * local interrupts disabled while taking the lock.
         */
        local_irq_save(flags);
        while (chunks--) {
                unsigned p = cl->pages_per_chunk;

                obj[chunks].pl = NULL;

                while (p--) {
                        struct page_list *pl;

                        /* Take next element from free list */
                        spin_lock(&cl->lock);
                        pl = cl->free_list;
                        BUG_ON(!pl);
                        cl->free_list = pl->next;
                        spin_unlock(&cl->lock);

                        pl->next = obj[chunks].pl;
                        obj[chunks].pl = pl;
                }
        }

        local_irq_restore(flags);
}

/*
 * Free page_list elements, putting them back onto the free list.
 */
static void free_chunks(struct dm_mem_cache_client *cl,
                        struct dm_mem_cache_object *obj)
{
        unsigned chunks = cl->chunks;
        unsigned long flags;
        struct page_list *next, *pl;

        /* Run with local interrupts disabled; see alloc_chunks(). */
        local_irq_save(flags);
        while (chunks--) {
                for (pl = obj[chunks].pl; pl; pl = next) {
                        next = pl->next;

                        spin_lock(&cl->lock);
                        pl->next = cl->free_list;
                        cl->free_list = pl;
                        cl->free_pages++;
                        spin_unlock(&cl->lock);
                }
        }

        local_irq_restore(flags);
}

/*
 * Create/destroy dm memory cache client resources.
 */
struct dm_mem_cache_client *
dm_mem_cache_client_create(unsigned objects, unsigned chunks,
                           unsigned pages_per_chunk)
{
        unsigned total_pages = objects * chunks * pages_per_chunk;
        struct dm_mem_cache_client *client;

        BUG_ON(!total_pages);
        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->objs_pool = mempool_create_kmalloc_pool(objects,
                                chunks * sizeof(struct dm_mem_cache_object));
        if (!client->objs_pool)
                goto err;

        client->free_list = alloc_cache_pages(total_pages);
        if (!client->free_list)
                goto err1;

        spin_lock_init(&client->lock);
        client->objects = objects;
        client->chunks = chunks;
        client->pages_per_chunk = pages_per_chunk;
        client->free_pages = client->total_pages = total_pages;
        return client;

err1:
        mempool_destroy(client->objs_pool);
err:
        kfree(client);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_mem_cache_client_create);

void dm_mem_cache_client_destroy(struct dm_mem_cache_client *cl)
{
        BUG_ON(cl->free_pages != cl->total_pages);
        free_cache_pages(cl->free_list);
        mempool_destroy(cl->objs_pool);
        kfree(cl);
}
EXPORT_SYMBOL(dm_mem_cache_client_destroy);
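
/*
 * Illustrative usage sketch (not part of the original module): how a
 * hypothetical caller might create and later destroy a client.  The sizes
 * below are arbitrary.
 *
 *	struct dm_mem_cache_client *cl;
 *
 *	cl = dm_mem_cache_client_create(16, 2, 4);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);	// -ENOMEM
 *
 *	// ... allocate and free objects with dm_mem_cache_alloc/free ...
 *
 *	// Every object must have been freed again by now, otherwise the
 *	// BUG_ON() in dm_mem_cache_client_destroy() triggers.
 *	dm_mem_cache_client_destroy(cl);
 */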

/*
 * Grow a client's cache by a number of objects.
 *
 * Don't call from interrupt context!
 */
int dm_mem_cache_grow(struct dm_mem_cache_client *cl, unsigned objects)
{
        unsigned pages = objects * cl->chunks * cl->pages_per_chunk;
        struct page_list *pl, *last;

        BUG_ON(!pages);
        pl = alloc_cache_pages(pages);
        if (!pl)
                return -ENOMEM;

        last = pl;
        while (last->next)
                last = last->next;

        spin_lock_irq(&cl->lock);
        last->next = cl->free_list;
        cl->free_list = pl;
        cl->free_pages += pages;
        cl->total_pages += pages;
        cl->objects += objects;
        spin_unlock_irq(&cl->lock);

        mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO);
        return 0;
}
EXPORT_SYMBOL(dm_mem_cache_grow);
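
/*
 * Illustrative only (not in the original code): growing a client from
 * process context, e.g. while handling a constructor or message.  The
 * object count is arbitrary.
 *
 *	int r = dm_mem_cache_grow(cl, 8);	// room for 8 more objects
 *	if (r)
 *		return r;	// -ENOMEM, cache left unchanged
 */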

/* Shrink a client's cache by a number of objects */
int dm_mem_cache_shrink(struct dm_mem_cache_client *cl, unsigned objects)
{
        int r;
        unsigned pages = objects * cl->chunks * cl->pages_per_chunk, p = pages;
        unsigned long flags;
        struct page_list *last = NULL, *pl, *pos;

        BUG_ON(!pages);

        spin_lock_irqsave(&cl->lock, flags);
        pl = pos = cl->free_list;

        /*
         * Walk the free list; more than 'pages' entries must be free for
         * the shrink to succeed, so the list is never emptied completely.
         */
        while (p-- && pos && pos->next) {
                last = pos;
                pos = pos->next;
        }

        if (++p)
                r = -ENOMEM;
        else {
                r = 0;
                cl->free_list = pos;
                cl->free_pages -= pages;
                cl->total_pages -= pages;
                cl->objects -= objects;
                last->next = NULL;
        }
        spin_unlock_irqrestore(&cl->lock, flags);

        if (!r) {
                free_cache_pages(pl);
                mempool_resize(cl->objs_pool, cl->objects, GFP_NOIO);
        }

        return r;
}
EXPORT_SYMBOL(dm_mem_cache_shrink);
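
/*
 * Illustrative only (not in the original code): shrinking only succeeds
 * if the pages to drop are actually unused, i.e. sitting on the free
 * list; otherwise -ENOMEM is returned and nothing changes.
 *
 *	if (dm_mem_cache_shrink(cl, 8))
 *		DMWARN("cannot shrink: pages still in use");
 */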

/*
 * Allocate/free a memory object
 *
 * Can be called from interrupt context
 */
struct dm_mem_cache_object *dm_mem_cache_alloc(struct dm_mem_cache_client *cl)
{
        int r = 0;
        unsigned pages = cl->chunks * cl->pages_per_chunk;
        unsigned long flags;
        struct dm_mem_cache_object *obj;

        obj = mempool_alloc(cl->objs_pool, GFP_NOIO);
        if (!obj)
                return ERR_PTR(-ENOMEM);

        /*
         * Reserve this object's pages up front; alloc_chunks() detaches
         * them from the free list afterwards.
         */
        spin_lock_irqsave(&cl->lock, flags);
        if (pages > cl->free_pages)
                r = -ENOMEM;
        else
                cl->free_pages -= pages;
        spin_unlock_irqrestore(&cl->lock, flags);

        if (r) {
                mempool_free(obj, cl->objs_pool);
                return ERR_PTR(r);
        }

        alloc_chunks(cl, obj);
        return obj;
}
EXPORT_SYMBOL(dm_mem_cache_alloc);

void dm_mem_cache_free(struct dm_mem_cache_client *cl,
                       struct dm_mem_cache_object *obj)
{
        free_chunks(cl, obj);
        mempool_free(obj, cl->objs_pool);
}
EXPORT_SYMBOL(dm_mem_cache_free);
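
/*
 * Illustrative only (not in the original code): allocating an object,
 * walking the pages of each chunk and releasing the object again.
 * NR_CHUNKS stands for whatever 'chunks' value the caller passed to
 * dm_mem_cache_client_create(); the names are invented for illustration.
 *
 *	struct dm_mem_cache_object *obj;
 *	struct page_list *pl;
 *	unsigned chunk;
 *
 *	obj = dm_mem_cache_alloc(cl);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	// obj is an array of NR_CHUNKS entries; each entry carries its
 *	// chunk's pages as a page_list hanging off obj[chunk].pl.
 *	for (chunk = 0; chunk < NR_CHUNKS; chunk++)
 *		for (pl = obj[chunk].pl; pl; pl = pl->next)
 *			clear_highpage(pl->page);	// e.g. zero the pages
 *
 *	dm_mem_cache_free(cl, obj);
 */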

MODULE_DESCRIPTION(DM_NAME " dm memory cache");
MODULE_AUTHOR("Heinz Mauelshagen <heinzm@redhat.com>");
MODULE_LICENSE("GPL");