/******************************************************************************
 * arch/xen/drivers/blkif/backend/main.c
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  arch/xen/drivers/blkif/frontend
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <xen/balloon.h>
#include <xen/evtchn.h>
#include <xen/gnttab.h>
#include <asm/hypervisor.h>
#include "common.h"	/* blkif_t, pending_req helpers, DPRINTK(), vbd ops */
/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int blkif_reqs = 64;
module_param_named(reqs, blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
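
/*
 * Note: each request can carry up to BLKIF_MAX_SEGMENTS_PER_REQUEST
 * segments, so raising 'reqs' also grows the static page pool that
 * blkif_init() below sizes at blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */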
/* Run-time switchable: /sys/module/blkback/parameters/ */
static int log_stats = 0;
static int debug_lvl = 0;
module_param(log_stats, int, 0644);
module_param(debug_lvl, int, 0644);
/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each buffer_head that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
typedef struct {
	blkif_t       *blkif;
	u64            id;
	atomic_t       pendcnt;
	unsigned short nr_pages;
	unsigned short operation;
	struct list_head free_list;
} pending_req_t;
static pending_req_t *pending_reqs;
static struct list_head pending_free;
static DEFINE_SPINLOCK(pending_free_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

#define BLKBACK_INVALID_HANDLE (~0)

static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;
static inline int vaddr_pagenr(pending_req_t *req, int seg)
{
	return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(pending_req_t *req, int seg)
{
	unsigned long pfn = page_to_pfn(pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
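
/*
 * The pending_pages and pending_grant_handles arrays are indexed flat:
 * request slot r, segment s lives at r * BLKIF_MAX_SEGMENTS_PER_REQUEST + s.
 * With the classic blkif ABI's 11 segments per request, segment 3 of
 * slot 1 is entry 1 * 11 + 3 = 14.
 */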
static int do_block_io_op(blkif_t *blkif);
static void dispatch_rw_block_io(blkif_t *blkif,
				 blkif_request_t *req,
				 pending_req_t *pending_req);
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st);
/******************************************************************
 * misc small helpers
 */
static pending_req_t* alloc_req(void)
{
	pending_req_t *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pending_free_lock, flags);
	if (!list_empty(&pending_free)) {
		req = list_entry(pending_free.next, pending_req_t, free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&pending_free_lock, flags);
	return req;
}
static void free_req(pending_req_t *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&pending_free_lock, flags);
	was_empty = list_empty(&pending_free);
	list_add(&req->free_list, &pending_free);
	spin_unlock_irqrestore(&pending_free_lock, flags);
	if (was_empty)
		wake_up(&pending_free_wq);
}
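
/*
 * Only the empty->non-empty transition needs a wakeup: blkif_schedule()
 * sleeps on pending_free_wq waiting for !list_empty(&pending_free), so
 * if the list was already non-empty nobody can be blocked on it.
 */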
static void unplug_queue(blkif_t *blkif)
{
	if (blkif->plug == NULL)
		return;
	kobject_put(&blkif->plug->kobj);
	blkif->plug = NULL;
}

static void plug_queue(blkif_t *blkif, struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q == blkif->plug)
		return;
	unplug_queue(blkif);
	WARN_ON(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags));
	kobject_get(&q->kobj);
	blkif->plug = q;
}
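
/*
 * Holding a reference on the queue's kobject keeps the request_queue
 * alive while this blkif still points at it; the WARN_ON above catches
 * the case of plugging a queue that is already being torn down.
 */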
static void fast_flush_area(pending_req_t *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		blkback_pagemap_clear(pending_page(req, i));
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
}
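
/*
 * All unmap operations for a request are batched into a single
 * GNTTABOP_unmap_grant_ref hypercall rather than one per page, keeping
 * the per-request hypercall count constant.
 */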
/******************************************************************
 * SCHEDULER FUNCTIONS
 */
static void print_stats(blkif_t *blkif)
{
	printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d"
	       " | fl %4d | ds %4d | pk %4d\n",
	       current->comm, blkif->st_oo_req,
	       blkif->st_rd_req, blkif->st_wr_req,
	       blkif->st_br_req, blkif->st_fl_req,
	       blkif->st_ds_req, blkif->st_pk_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_br_req = 0;
	blkif->st_fl_req = 0;
	blkif->st_ds_req = 0;
	blkif->st_pk_req = 0;
}
int blkif_schedule(void *arg)
{
	blkif_t *blkif = arg;
	struct vbd *vbd = &blkif->vbd;

	blkif_get(blkif);

	if (debug_lvl)
		printk(KERN_DEBUG "%s: started\n", current->comm);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_size(vbd)))
			vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;
		unplug_queue(blkif);

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);
	if (debug_lvl)
		printk(KERN_DEBUG "%s: exiting\n", current->comm);

	blkif->xenblkd = NULL;
	blkif_put(blkif);

	return 0;
}
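
/*
 * The per-device kthread sleeps on two conditions in turn: blkif->wq,
 * armed by blkif_notify_work() when the frontend kicks the event
 * channel, and the global pending_free_wq, so it does not spin while
 * every pending_req slot is in use.
 */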
static void drain_io(blkif_t *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/* The initial value is one, and one refcnt taken at the
		 * start of the blkif_schedule thread. */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;

		wait_for_completion_interruptible_timeout(
			&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}
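
/*
 * A refcnt of 2 means only the base reference and the blkif_schedule
 * thread's reference remain, i.e. no request still holds one and all
 * in-flight I/O has completed; __end_block_io_op() signals
 * drain_complete when it observes that state with 'drain' set.
 */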
/******************************************************************
 * COMPLETION CALLBACK -- Called as bh->b_end_io()
 */
static void __end_block_io_op(pending_req_t *pending_req, int error)
{
	blkif_t *blkif = pending_req->blkif;
	int status = BLKIF_RSP_OKAY;

	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
	    (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: write barrier op failed, not supported\n");
		blkback_barrier(XBT_NIL, blkif->be, 0);
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
		   (error == -EOPNOTSUPP)) {
		DPRINTK("blkback: flush diskcache op failed, not supported\n");
		blkback_flush_diskcache(XBT_NIL, blkif->be, 0);
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		DPRINTK("Buffer not up-to-date at end of operation, "
			"error=%d\n", error);
		status = BLKIF_RSP_ERROR;
	}

	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		fast_flush_area(pending_req);
		make_response(blkif, pending_req->id,
			      pending_req->operation, status);
		free_req(pending_req);
		if (atomic_read(&blkif->drain)
		    && atomic_read(&blkif->refcnt) <= 2)
			complete(&blkif->drain_complete);
		blkif_put(blkif);
	}
}
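
/*
 * Besides failing the request, the -EOPNOTSUPP cases above withdraw the
 * corresponding feature advertisement via blkback_barrier() /
 * blkback_flush_diskcache() (xenstore), so the frontend can stop issuing
 * an operation the underlying device cannot support.
 */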
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}
/******************************************************************************
 * NOTIFICATION FROM GUEST OS.
 */
static void blkif_notify_work(blkif_t *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

irqreturn_t blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
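
/*
 * Interrupt path: the event-channel upcall merely flags work and wakes
 * the per-device kthread; all ring processing runs in blkif_schedule()
 * context, never in the interrupt handler itself.
 */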
/******************************************************************
 * DOWNWARD CALLS -- These interface with the block-device layer proper.
 */
static void dispatch_discard(blkif_t *blkif, struct blkif_request_discard *req)
{
	unsigned long secure = (blkif->vbd.discard_secure &&
				(req->flag & BLKIF_DISCARD_SECURE)) ?
			       BLKDEV_DISCARD_SECURE : 0;
	struct phys_req preq;
	int status;

	blkif->st_ds_req++;

	preq.dev           = req->handle;
	preq.sector_number = req->sector_number;
	preq.nr_sects      = req->nr_sectors;

	if (vbd_translate(&preq, blkif, REQ_DISCARD) != 0) {
		DPRINTK("access denied: discard of [%Lu,%Lu) on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
		msleep(1); /* back off a bit */
		return;
	}

	plug_queue(blkif, preq.bdev);

	switch (blkdev_issue_discard(preq.bdev, preq.sector_number,
				     preq.nr_sects, GFP_KERNEL, secure)) {
	case 0:
		status = BLKIF_RSP_OKAY;
		break;
	case -EOPNOTSUPP:
		DPRINTK("discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
		break;
	default:
		status = BLKIF_RSP_ERROR;
		break;
	}

	make_response(blkif, req->id, req->operation, status);
}
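
/*
 * A BLKIF_DISCARD_SECURE request only gets secure-erase semantics when
 * the backing device advertised the capability (vbd.discard_secure);
 * otherwise the flag is dropped and a plain discard is issued.
 */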
static int _do_block_io_op(blkif_t *blkif)
{
	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
	blkif_request_t req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			req = *RING_GET_REQUEST(&blk_rings->native, rc);
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
			return 0; /* make compiler happy */
		}

		++rc;

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
			pending_req = alloc_req();
			if (NULL == pending_req) {
				blkif->st_oo_req++;
				more_to_do = 1;
				break;
			}

			/* before make_response() */
			blk_rings->common.req_cons = rc;

			/* Apply all sanity checks to /private copy/ of request. */
			barrier();

			dispatch_rw_block_io(blkif, &req, pending_req);
			break;
		case BLKIF_OP_DISCARD:
			blk_rings->common.req_cons = rc;
			barrier();
			dispatch_discard(blkif, (void *)&req);
			break;
		case BLKIF_OP_PACKET:
			blk_rings->common.req_cons = rc;
			barrier();
			blkif->st_pk_req++;
			DPRINTK("error: block operation BLKIF_OP_PACKET not implemented\n");
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			break;
		default:
			/* A good sign something is wrong: sleep for a while to
			 * avoid excessive CPU consumption by a bad guest. */
			msleep(1);
			blk_rings->common.req_cons = rc;
			barrier();
			DPRINTK("error: unknown block io operation [%d]\n",
				req.operation);
			make_response(blkif, req.id, req.operation,
				      BLKIF_RSP_ERROR);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}
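
/*
 * Each request is copied off the shared ring (or translated via
 * blkif_get_x86_*_req) before any checks run: the frontend shares the
 * ring pages and could otherwise rewrite fields after validation. The
 * barrier() calls keep the compiler from re-reading the ring copy.
 */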
static int
do_block_io_op(blkif_t *blkif)
{
	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = _do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
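
/*
 * RING_FINAL_CHECK_FOR_REQUESTS re-enables frontend notifications and
 * then re-checks the producer index, closing the race where the
 * frontend posts a request just as we finish draining the ring.
 */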
static void dispatch_rw_block_io(blkif_t *blkif,
				 blkif_request_t *req,
				 pending_req_t *pending_req)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct phys_req preq;
	struct {
		unsigned long buf; unsigned int nsec;
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	uint32_t flags;
	int ret, i;
	int operation;

	switch (req->operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		blkif->st_br_req++;
		operation = WRITE_FLUSH_FUA;
		break;
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_fl_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		BUG();
	}

	/* Check that number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && !(operation & REQ_FLUSH)) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		DPRINTK("Bad number of segments in request (%d)\n", nseg);
		goto fail_response;
	}
	preq.dev           = req->handle;
	preq.sector_number = req->sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->nr_pages  = nseg;

	flags = GNTMAP_host_map;
	if (operation != READ)
		flags |= GNTMAP_readonly;
	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->seg[i].last_sect -
			req->seg[i].first_sect + 1;

		if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->seg[i].last_sect < req->seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->seg[i].gref, blkif->domid);
	}
	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status == GNTST_eagain))
			gnttab_check_GNTST_eagain_do_while(GNTTABOP_map_grant_ref, &map[i])
		if (unlikely(map[i].status != GNTST_okay)) {
			DPRINTK("invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret = 1;
		} else {
			blkback_pagemap_set(vaddr_pagenr(pending_req, i),
					    pending_page(pending_req, i),
					    blkif->domid, req->handle,
					    req->seg[i].gref);
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		set_phys_to_machine(
			page_to_pfn(pending_page(pending_req, i)),
			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
		seg[i].buf = map[i].dev_bus_addr |
			(req->seg[i].first_sect << 9);
	}

	if (ret)
		goto fail_flush;
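
/*
 * After a successful map, set_phys_to_machine() points each pending
 * page's pseudo-physical frame at the frontend's machine frame
 * (FOREIGN_FRAME), so vaddr() on these pages now addresses the guest's
 * I/O buffer directly and no data copy is needed.
 */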
	if (vbd_translate(&preq, blkif, operation) != 0) {
		DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
			operation == READ ? "read" : "write",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_flush;
	}

	/* Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (req->operation == BLKIF_OP_WRITE_BARRIER)
		drain_io(blkif);
	plug_queue(blkif, preq.bdev);
	atomic_set(&pending_req->pendcnt, 1);
	blkif_get(blkif);

	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			DPRINTK("Misaligned I/O request from domain %d",
				blkif->domid);
			goto fail_put_bio;
		}

		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {
			if (bio) {
				atomic_inc(&pending_req->pendcnt);
				submit_bio(operation, bio);
			}

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}
	if (!bio) {
		BUG_ON(!(operation & (REQ_FLUSH|REQ_FUA)));
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio->bi_sector  = -1;
	}

	submit_bio(operation, bio);
	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else
		blkif->st_wr_sect += preq.nr_sects;

	return;

 fail_flush:
	fast_flush_area(pending_req);
 fail_response:
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return;

 fail_put_bio:
	__end_block_io_op(pending_req, -EINVAL);
	if (bio)
		bio_put(bio);
	unplug_queue(blkif);
	msleep(1); /* back off a bit */
	return;
}
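
/*
 * Three exit paths: fail_flush unmaps any grants already mapped,
 * fail_response reports BLKIF_RSP_ERROR for the whole request, and
 * fail_put_bio completes via __end_block_io_op(-EINVAL) once bio
 * construction has started. Each sleeps briefly so a misbehaving
 * frontend cannot busy-spin a host CPU with bad requests.
 */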
/******************************************************************
 * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
 */
static void make_response(blkif_t *blkif, u64 id,
			  unsigned short op, int st)
{
	blkif_response_t resp;
	unsigned long flags;
	blkif_back_rings_t *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}
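
/*
 * Responses can be queued from bio-completion context and from the
 * scheduler thread concurrently, hence blk_ring_lock with IRQs off.
 * RING_PUSH_RESPONSES_AND_CHECK_NOTIFY only sets 'notify' when the
 * frontend asked for one, avoiding needless event-channel kicks.
 */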
static int __init blkif_init(void)
{
	int i, mmap_pages;

	if (!is_running_on_xen())
		return -ENODEV;

	mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	pending_reqs          = kzalloc(sizeof(pending_reqs[0]) *
					blkif_reqs, GFP_KERNEL);
	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);

	if (blkback_pagemap_init(mmap_pages))
		goto out_of_memory;

	if (!pending_reqs || !pending_grant_handles || !pending_pages)
		goto out_of_memory;

	for (i = 0; i < mmap_pages; i++)
		pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;

	blkif_interface_init();

	INIT_LIST_HEAD(&pending_free);

	for (i = 0; i < blkif_reqs; i++)
		list_add_tail(&pending_reqs[i].free_list, &pending_free);

	blkif_xenbus_init();

	return 0;
 out_of_memory:
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
	pr_warning("%s: out of memory\n", __FUNCTION__);
	return -ENOMEM;
}
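
/*
 * All request-tracking resources are allocated once at module load:
 * assuming the classic ABI's 11 segments per request, the default
 * reqs=64 reserves 704 page slots and matching grant-handle entries.
 */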
module_init(blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");