Update to 3.4-final.
[linux-flexiantxendom0-3.2.10.git] drivers/xen/blktap2/request.c
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <xen/balloon.h>

#include "blktap.h"

#define MAX_BUCKETS                      8
#define BUCKET_SIZE                      MAX_PENDING_REQS

#define BLKTAP_POOL_CLOSING              1

struct blktap_request_bucket;

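/*
 * Requests are pre-allocated in buckets.  Each bucket holds BUCKET_SIZE
 * request handles plus one vector of empty (ballooned-out) pages shared by
 * all requests in the bucket for foreign grant mappings; free requests are
 * chained on the global pool's free_list.
 */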
struct blktap_request_handle {
        int                              slot;
        uint8_t                          inuse;
        struct blktap_request            request;
        struct blktap_request_bucket    *bucket;
};

struct blktap_request_bucket {
        atomic_t                         reqs_in_use;
        struct blktap_request_handle     handles[BUCKET_SIZE];
        struct page                    **foreign_pages;
};

struct blktap_request_pool {
        spinlock_t                       lock;
        uint8_t                          status;
        struct list_head                 free_list;
        atomic_t                         reqs_in_use;
        wait_queue_head_t                wait_queue;
        struct blktap_request_bucket    *buckets[MAX_BUCKETS];
};

static struct blktap_request_pool pool;

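/* Recover the handle wrapping an embedded request. */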
static inline struct blktap_request_handle *
blktap_request_to_handle(struct blktap_request *req)
{
        return container_of(req, struct blktap_request_handle, request);
}

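/* Reset a request to its free state: no user slot, no pages, grants invalid. */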
static void
blktap_request_pool_init_request(struct blktap_request *request)
{
        int i;

        request->usr_idx  = -1;
        request->nr_pages = 0;
        request->status   = BLKTAP_REQUEST_FREE;
        INIT_LIST_HEAD(&request->free_list);
        for (i = 0; i < ARRAY_SIZE(request->handles); i++) {
                request->handles[i].user   = INVALID_GRANT_HANDLE;
                request->handles[i].kernel = INVALID_GRANT_HANDLE;
        }
}

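/*
 * Allocate a new bucket, back it with MMAP_PAGES empty pages, claim a free
 * slot in pool.buckets and add the bucket's requests to the free list.
 * Returns -ENOMEM on failure (including when all bucket slots are taken).
 */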
static int
blktap_request_pool_allocate_bucket(void)
{
        int i, idx;
        unsigned long flags;
        struct blktap_request *request;
        struct blktap_request_handle *handle;
        struct blktap_request_bucket *bucket;

        bucket = kzalloc(sizeof(struct blktap_request_bucket), GFP_KERNEL);
        if (!bucket)
                goto fail;

        bucket->foreign_pages = alloc_empty_pages_and_pagevec(MMAP_PAGES);
        if (!bucket->foreign_pages)
                goto fail;

        spin_lock_irqsave(&pool.lock, flags);

        idx = -1;
        for (i = 0; i < MAX_BUCKETS; i++) {
                if (!pool.buckets[i]) {
                        idx = i;
                        pool.buckets[idx] = bucket;
                        break;
                }
        }

        if (idx == -1) {
                spin_unlock_irqrestore(&pool.lock, flags);
                goto fail;
        }

        for (i = 0; i < BUCKET_SIZE; i++) {
                handle  = bucket->handles + i;
                request = &handle->request;

                handle->slot   = i;
                handle->inuse  = 0;
                handle->bucket = bucket;

                blktap_request_pool_init_request(request);
                list_add_tail(&request->free_list, &pool.free_list);
        }

        spin_unlock_irqrestore(&pool.lock, flags);

        return 0;

fail:
        if (bucket && bucket->foreign_pages)
                free_empty_pages_and_pagevec(bucket->foreign_pages, MMAP_PAGES);
        kfree(bucket);
        return -ENOMEM;
}

static void
blktap_request_pool_free_bucket(struct blktap_request_bucket *bucket)
{
        if (!bucket)
                return;

        BTDBG("freeing bucket %p\n", bucket);

        free_empty_pages_and_pagevec(bucket->foreign_pages, MMAP_PAGES);
        kfree(bucket);
}

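/* Look up the foreign page backing segment @seg of @req. */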
struct page *
request_to_page(struct blktap_request *req, int seg)
{
        struct blktap_request_handle *handle = blktap_request_to_handle(req);
        int idx = handle->slot * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
        return handle->bucket->foreign_pages[idx];
}

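/*
 * Free one bucket that currently has no requests in use, always keeping
 * bucket 0.  Returns 0 if a bucket was freed, -EAGAIN otherwise.
 */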
int
blktap_request_pool_shrink(void)
{
        int i, err;
        unsigned long flags;
        struct blktap_request_bucket *bucket;

        err = -EAGAIN;

        spin_lock_irqsave(&pool.lock, flags);

        /* always keep at least one bucket */
        for (i = 1; i < MAX_BUCKETS; i++) {
                bucket = pool.buckets[i];
                if (!bucket)
                        continue;

                if (atomic_read(&bucket->reqs_in_use))
                        continue;

                blktap_request_pool_free_bucket(bucket);
                pool.buckets[i] = NULL;
                err = 0;
                break;
        }

        spin_unlock_irqrestore(&pool.lock, flags);

        return err;
}

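/* Grow the pool by one bucket of BUCKET_SIZE requests. */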
int
blktap_request_pool_grow(void)
{
        return blktap_request_pool_allocate_bucket();
}

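/*
 * Take a request off the free list and bind it to the first free slot in
 * tap->pending_requests.  Returns NULL if the pool is closing, no slot is
 * free, or the free list is empty.
 */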
struct blktap_request *
blktap_request_allocate(struct blktap *tap)
{
        int i;
        uint16_t usr_idx;
        unsigned long flags;
        struct blktap_request *request;

        usr_idx = -1;
        request = NULL;

        spin_lock_irqsave(&pool.lock, flags);

        if (pool.status == BLKTAP_POOL_CLOSING)
                goto out;

        for (i = 0; i < ARRAY_SIZE(tap->pending_requests); i++)
                if (!tap->pending_requests[i]) {
                        usr_idx = i;
                        break;
                }

        if (usr_idx == (uint16_t)-1)
                goto out;

        if (!list_empty(&pool.free_list)) {
                request = list_entry(pool.free_list.next,
                                     struct blktap_request, free_list);
                list_del(&request->free_list);
        }

        if (request) {
                struct blktap_request_handle *handle;

                atomic_inc(&pool.reqs_in_use);

                handle = blktap_request_to_handle(request);
                atomic_inc(&handle->bucket->reqs_in_use);
                handle->inuse = 1;

                request->usr_idx = usr_idx;

                tap->pending_requests[usr_idx] = request;
                tap->pending_cnt++;
        }

out:
        spin_unlock_irqrestore(&pool.lock, flags);
        return request;
}

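/*
 * Return @request to the free list, release @tap's pending slot, and wake
 * up anyone waiting for the pool to drain or for @tap to go idle.
 */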
void
blktap_request_free(struct blktap *tap, struct blktap_request *request)
{
        int free;
        unsigned long flags;
        struct blktap_request_handle *handle;

        BUG_ON(request->usr_idx >= ARRAY_SIZE(tap->pending_requests));
        handle = blktap_request_to_handle(request);

        spin_lock_irqsave(&pool.lock, flags);

        handle->inuse = 0;
        tap->pending_requests[request->usr_idx] = NULL;
        blktap_request_pool_init_request(request);
        list_add(&request->free_list, &pool.free_list);
        atomic_dec(&handle->bucket->reqs_in_use);
        free = atomic_dec_and_test(&pool.reqs_in_use);

        spin_unlock_irqrestore(&pool.lock, flags);

        if (--tap->pending_cnt == 0)
                wake_up_interruptible(&tap->wq);

        if (free)
                wake_up(&pool.wait_queue);
}

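/*
 * Tear down the pool: mark it closing, wait for all outstanding requests
 * to be freed, then release every bucket.
 */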
void
blktap_request_pool_free(void)
{
        int i;
        unsigned long flags;

        spin_lock_irqsave(&pool.lock, flags);

        pool.status = BLKTAP_POOL_CLOSING;
        while (atomic_read(&pool.reqs_in_use)) {
                spin_unlock_irqrestore(&pool.lock, flags);
                wait_event(pool.wait_queue, !atomic_read(&pool.reqs_in_use));
                spin_lock_irqsave(&pool.lock, flags);
        }

        for (i = 0; i < MAX_BUCKETS; i++) {
                blktap_request_pool_free_bucket(pool.buckets[i]);
                pool.buckets[i] = NULL;
        }

        spin_unlock_irqrestore(&pool.lock, flags);
}

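/* Initialise the pool and pre-allocate two buckets of requests. */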
int __init
blktap_request_pool_init(void)
{
        int i, err;

        memset(&pool, 0, sizeof(pool));

        spin_lock_init(&pool.lock);
        INIT_LIST_HEAD(&pool.free_list);
        atomic_set(&pool.reqs_in_use, 0);
        init_waitqueue_head(&pool.wait_queue);

        for (i = 0; i < 2; i++) {
                err = blktap_request_pool_allocate_bucket();
                if (err)
                        goto fail;
        }

        return 0;

fail:
        blktap_request_pool_free();
        return err;
}