#include <linux/spinlock.h>
#include <xen/balloon.h>
#include <linux/sched.h>

#include "blktap.h"

#define MAX_BUCKETS      8  /* assumed: the original value is elided */
#define BUCKET_SIZE      MAX_PENDING_REQS

#define BLKTAP_POOL_CLOSING 1

struct blktap_request_bucket;
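/*
 * Pool layout: a fixed table of buckets, each carrying BUCKET_SIZE
 * request handles plus the pre-allocated foreign page array backing
 * their data segments. Free requests from all buckets are chained on
 * a single pool-wide free list.
 */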
struct blktap_request_handle {
	int                              slot; /* index within the bucket */
	struct blktap_request            request;
	struct blktap_request_bucket    *bucket;
};
struct blktap_request_bucket {
	atomic_t                         reqs_in_use;
	struct blktap_request_handle     handles[BUCKET_SIZE];
	struct page                    **foreign_pages;
};
struct blktap_request_pool {
	spinlock_t                       lock;
	uint8_t                          status;
	struct list_head                 free_list;
	atomic_t                         reqs_in_use;
	wait_queue_head_t                wait_queue;
	struct blktap_request_bucket    *buckets[MAX_BUCKETS];
};
static struct blktap_request_pool pool;
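/*
 * Each request is embedded in its handle, so the handle (and from it
 * the owning bucket) can be recovered with container_of().
 */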
static inline struct blktap_request_handle *
blktap_request_to_handle(struct blktap_request *req)
{
	return container_of(req, struct blktap_request_handle, request);
}
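/*
 * Reset a request to its pristine state: no user index, no pages, and
 * every per-segment grant handle marked invalid.
 */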
static void
blktap_request_pool_init_request(struct blktap_request *request)
{
	int i;

	request->usr_idx  = -1;
	request->nr_pages = 0;
	request->status   = BLKTAP_REQUEST_FREE;
	INIT_LIST_HEAD(&request->free_list);
	for (i = 0; i < ARRAY_SIZE(request->handles); i++) {
		request->handles[i].user   = INVALID_GRANT_HANDLE;
		request->handles[i].kernel = INVALID_GRANT_HANDLE;
	}
}
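/*
 * Allocate one bucket: the bucket itself, its foreign page array, a
 * slot in the pool's bucket table, and BUCKET_SIZE requests threaded
 * onto the free list. On failure everything allocated so far is
 * released.
 */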
static int
blktap_request_pool_allocate_bucket(void)
{
	int i, idx = -1;
	unsigned long flags;
	struct blktap_request *request;
	struct blktap_request_handle *handle;
	struct blktap_request_bucket *bucket;

	bucket = kzalloc(sizeof(struct blktap_request_bucket), GFP_KERNEL);
	if (!bucket)
		goto fail;

	bucket->foreign_pages = alloc_empty_pages_and_pagevec(MMAP_PAGES);
	if (!bucket->foreign_pages)
		goto fail;

	spin_lock_irqsave(&pool.lock, flags);

	/* claim the first empty slot in the bucket table */
	for (i = 0; i < MAX_BUCKETS; i++) {
		if (!pool.buckets[i]) {
			idx = i;
			pool.buckets[idx] = bucket;
			break;
		}
	}
	if (idx == -1) {
		spin_unlock_irqrestore(&pool.lock, flags);
		goto fail;
	}

	/* thread every request in the new bucket onto the free list */
	for (i = 0; i < BUCKET_SIZE; i++) {
		handle  = bucket->handles + i;
		request = &handle->request;

		handle->slot   = i;
		handle->bucket = bucket;

		blktap_request_pool_init_request(request);
		list_add_tail(&request->free_list, &pool.free_list);
	}

	spin_unlock_irqrestore(&pool.lock, flags);
	return 0;

fail:
	if (bucket && bucket->foreign_pages)
		free_empty_pages_and_pagevec(bucket->foreign_pages, MMAP_PAGES);
	kfree(bucket);
	return -ENOMEM;
}
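/* Release a bucket's foreign pages, then the bucket itself. */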
static void
blktap_request_pool_free_bucket(struct blktap_request_bucket *bucket)
{
	if (!bucket)
		return;

	BTDBG("freeing bucket %p\n", bucket);

	free_empty_pages_and_pagevec(bucket->foreign_pages, MMAP_PAGES);
	kfree(bucket);
}
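/*
 * The foreign page array is one fixed stripe of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST pages per request slot, so segment
 * `seg' of `req' lives at slot * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg.
 */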
struct page *
request_to_page(struct blktap_request *req, int seg)
{
	struct blktap_request_handle *handle = blktap_request_to_handle(req);
	int idx = handle->slot * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;

	return handle->bucket->foreign_pages[idx];
}
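/*
 * Free one idle bucket, skipping bucket 0 so the pool never shrinks
 * to nothing; returns -EAGAIN when nothing could be freed.
 */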
int
blktap_request_pool_shrink(void)
{
	int i, err = -EAGAIN;
	unsigned long flags;
	struct blktap_request_bucket *bucket;

	spin_lock_irqsave(&pool.lock, flags);

	/* always keep at least one bucket */
	for (i = 1; i < MAX_BUCKETS; i++) {
		bucket = pool.buckets[i];
		if (!bucket)
			continue;
		if (atomic_read(&bucket->reqs_in_use))
			continue;

		blktap_request_pool_free_bucket(bucket);
		pool.buckets[i] = NULL;
		err = 0;
		break;
	}

	spin_unlock_irqrestore(&pool.lock, flags);
	return err;
}
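/* Add one more bucket's worth of requests to the pool. */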
int
blktap_request_pool_grow(void)
{
	return blktap_request_pool_allocate_bucket();
}
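/*
 * Hand out a free request: reserve a slot in the tap's pending table,
 * pop a request off the pool's free list, and bump the in-use counts
 * of both the pool and the owning bucket. Returns NULL if the pool is
 * closing, the tap's table is full, or the free list is empty.
 */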
struct blktap_request *
blktap_request_allocate(struct blktap *tap)
{
	int i;
	uint16_t usr_idx = -1;
	unsigned long flags;
	struct blktap_request *request = NULL;

	spin_lock_irqsave(&pool.lock, flags);

	if (pool.status == BLKTAP_POOL_CLOSING)
		goto out;

	/* find a free slot in the tap's pending request table */
	for (i = 0; i < ARRAY_SIZE(tap->pending_requests); i++)
		if (!tap->pending_requests[i]) {
			usr_idx = i;
			break;
		}

	if (usr_idx == (uint16_t)-1)
		goto out;

	if (!list_empty(&pool.free_list)) {
		request = list_entry(pool.free_list.next,
				     struct blktap_request, free_list);
		list_del(&request->free_list);
	}

	if (request) {
		struct blktap_request_handle *handle;

		atomic_inc(&pool.reqs_in_use);

		handle = blktap_request_to_handle(request);
		atomic_inc(&handle->bucket->reqs_in_use);

		request->usr_idx = usr_idx;

		tap->pending_requests[usr_idx] = request;
		tap->pending_cnt++;
	}

out:
	spin_unlock_irqrestore(&pool.lock, flags);
	return request;
}
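/*
 * Return a request to the pool, undoing blktap_request_allocate:
 * clear the tap's slot, re-initialize the request, rechain it on the
 * free list, and drop the in-use counts. Wake any waiters once the
 * tap drains completely or the pool goes fully idle.
 */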
void
blktap_request_free(struct blktap *tap, struct blktap_request *request)
{
	int free;
	unsigned long flags;
	struct blktap_request_handle *handle;

	BUG_ON(request->usr_idx >= ARRAY_SIZE(tap->pending_requests));
	handle = blktap_request_to_handle(request);

	spin_lock_irqsave(&pool.lock, flags);

	tap->pending_requests[request->usr_idx] = NULL;
	blktap_request_pool_init_request(request);
	list_add(&request->free_list, &pool.free_list);
	atomic_dec(&handle->bucket->reqs_in_use);
	free = atomic_dec_and_test(&pool.reqs_in_use);

	spin_unlock_irqrestore(&pool.lock, flags);

	if (--tap->pending_cnt == 0)
		wake_up_interruptible(&tap->wq);

	if (free)
		wake_up(&pool.wait_queue);
}
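/*
 * Tear the pool down: mark it closing so no new requests are handed
 * out, wait for outstanding requests to drain, then free every bucket.
 */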
void
blktap_request_pool_free(void)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pool.lock, flags);

	pool.status = BLKTAP_POOL_CLOSING;
	while (atomic_read(&pool.reqs_in_use)) {
		spin_unlock_irqrestore(&pool.lock, flags);
		wait_event(pool.wait_queue, !atomic_read(&pool.reqs_in_use));
		spin_lock_irqsave(&pool.lock, flags);
	}

	for (i = 0; i < MAX_BUCKETS; i++) {
		blktap_request_pool_free_bucket(pool.buckets[i]);
		pool.buckets[i] = NULL;
	}

	spin_unlock_irqrestore(&pool.lock, flags);
}
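/*
 * Set up the pool and pre-allocate two buckets so requests are
 * available before any I/O arrives; later buckets are added through
 * blktap_request_pool_grow().
 */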
int
blktap_request_pool_init(void)
{
	int i, err;

	memset(&pool, 0, sizeof(pool));

	spin_lock_init(&pool.lock);
	INIT_LIST_HEAD(&pool.free_list);
	atomic_set(&pool.reqs_in_use, 0);
	init_waitqueue_head(&pool.wait_queue);

	for (i = 0; i < 2; i++) {
		err = blktap_request_pool_allocate_bucket();
		if (err)
			goto fail;
	}

	return 0;

fail:
	blktap_request_pool_free();
	return err;
}