2 * Xen SCSI backend driver
4 * Copyright (c) 2008, FUJITSU Limited
6 * Based on the blkback driver code.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
33 #include <linux/spinlock.h>
34 #include <linux/kthread.h>
35 #include <linux/list.h>
36 #include <linux/delay.h>
37 #include <xen/balloon.h>
38 #include <xen/evtchn.h>
39 #include <xen/gnttab.h>
40 #include <asm/hypervisor.h>
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_host.h>
44 #include <scsi/scsi_device.h>
45 #include <scsi/scsi_dbg.h>
46 #include <scsi/scsi_eh.h>
51 struct list_head pending_free;
52 DEFINE_SPINLOCK(pending_free_lock);
53 DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
55 int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
56 module_param_named(reqs, vscsiif_reqs, int, 0);
57 MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
59 static unsigned int log_print_stat = 0;
60 module_param(log_print_stat, int, 0644);
62 #define SCSIBACK_INVALID_HANDLE (~0)
64 static pending_req_t *pending_reqs;
65 static struct page **pending_pages;
66 static grant_handle_t *pending_grant_handles;
/*
 * Index into the global pending_pages/pending_grant_handles arrays for
 * segment @seg of request @req: each request owns a contiguous run of
 * VSCSIIF_SG_TABLESIZE slots, ordered by its position in pending_reqs.
 */
static int vaddr_pagenr(pending_req_t *req, int seg)
	return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
/*
 * Kernel virtual address of the page backing segment @seg of @req
 * (looked up through the global pending_pages array).
 */
static unsigned long vaddr(pending_req_t *req, int seg)
	unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
	return (unsigned long)pfn_to_kaddr(pfn);
/* Grant-handle slot for segment _seg of request _req. */
#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
/*
 * Unmap every granted data page of @req with one batched
 * GNTTABOP_unmap_grant_ref hypercall and reset each handle slot to
 * SCSIBACK_INVALID_HANDLE.  Called after a command completes (or its
 * setup fails) so the frontend's pages are released.
 */
void scsiback_fast_flush_area(pending_req_t *req)
	struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
	unsigned int i, invcount = 0;
	grant_handle_t handle;

	if (req->nr_segments) {
		for (i = 0; i < req->nr_segments; i++) {
			handle = pending_handle(req, i);
			/* Slot never mapped (or already flushed): skip. */
			if (handle == SCSIBACK_INVALID_HANDLE)
			gnttab_set_unmap_op(&unmap[i], vaddr(req, i),
				GNTMAP_host_map, handle);
			pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
		/* NOTE(review): err's declaration and the invcount
		 * accounting appear elided from this excerpt. */
		err = HYPERVISOR_grant_table_op(
			GNTTABOP_unmap_grant_ref, unmap, invcount);
/*
 * Pop one request descriptor from the shared free pool; returns NULL
 * when the pool is empty (caller backs off and retries, see
 * scsiback_do_cmd_fn()).
 */
static pending_req_t * alloc_req(struct vscsibk_info *info)
	pending_req_t *req = NULL;

	spin_lock_irqsave(&pending_free_lock, flags);
	if (!list_empty(&pending_free)) {
		req = list_entry(pending_free.next, pending_req_t, free_list);
		list_del(&req->free_list);
	spin_unlock_irqrestore(&pending_free_lock, flags);
/*
 * Return @req to the free pool and wake waiters on pending_free_wq
 * (scsiback_schedule() sleeps there until a descriptor is available).
 * NOTE(review): was_empty suggests the wake_up is conditional in the
 * full source; the guard line appears elided here.
 */
static void free_req(pending_req_t *req)
	spin_lock_irqsave(&pending_free_lock, flags);
	was_empty = list_empty(&pending_free);
	list_add(&req->free_list, &pending_free);
	spin_unlock_irqrestore(&pending_free_lock, flags);

	wake_up(&pending_free_wq);
/*
 * Flag that ring work is pending for this backend instance; presumably
 * followed (in elided lines) by a wake-up of its service thread.
 */
static void scsiback_notify_work(struct vscsibk_info *info)
	info->waiting_reqs = 1;
146 void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
147 uint32_t resid, pending_req_t *pending_req)
149 vscsiif_response_t *ring_res;
150 struct vscsibk_info *info = pending_req->info;
153 struct scsi_sense_hdr sshdr;
156 DPRINTK("%s\n",__FUNCTION__);
158 spin_lock_irqsave(&info->ring_lock, flags);
160 ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
161 info->ring.rsp_prod_pvt++;
163 ring_res->rslt = result;
164 ring_res->rqid = pending_req->rqid;
166 if (sense_buffer != NULL) {
167 if (scsi_normalize_sense(sense_buffer,
168 sizeof(sense_buffer), &sshdr)) {
170 int len = 8 + sense_buffer[7];
172 if (len > VSCSIIF_SENSE_BUFFERSIZE)
173 len = VSCSIIF_SENSE_BUFFERSIZE;
175 memcpy(ring_res->sense_buffer, sense_buffer, len);
176 ring_res->sense_len = len;
179 ring_res->sense_len = 0;
182 ring_res->residual_len = resid;
184 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
185 if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
186 RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
187 } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
191 spin_unlock_irqrestore(&info->ring_lock, flags);
194 scsiback_notify_work(info);
197 notify_remote_via_irq(info->irq);
199 free_req(pending_req);
/*
 * Log the SCSI completion status of a command: device address, decoded
 * result bytes, first CDB byte, and — when the status byte indicates
 * CHECK CONDITION — the full sense buffer.
 */
static void scsiback_print_status(char *sense_buffer, int errors,
		pending_req_t *pending_req)
	struct scsi_device *sdev = pending_req->sdev;

	pr_err("scsiback: %d:%d:%d:%d ",
	       sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
	pr_err("status = 0x%02x, message = 0x%02x, host = 0x%02x,"
	       " driver = 0x%02x\n",
	       status_byte(errors), msg_byte(errors),
	       host_byte(errors), driver_byte(errors));

	pr_err("scsiback: cmnd[0]=0x%02X\n", pending_req->cmnd[0]);

	if (CHECK_CONDITION & status_byte(errors))
		__scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
/*
 * Completion callback for commands issued via blk_execute_rq_nowait()
 * (see scsiback_cmd_exec()): extract sense/residual/error state from
 * the struct request, optionally log it, run response emulation for
 * non-host-mode backends, unmap the granted pages, answer the frontend
 * and drop the backend reference plus the block request.
 * NOTE(review): the print is likely gated on errors/log_print_stat in
 * lines elided from this excerpt.
 */
static void scsiback_cmd_done(struct request *req, int uptodate)
	pending_req_t *pending_req = req->end_io_data;
	unsigned char *sense_buffer;

	sense_buffer = req->sense;
	resid = blk_rq_bytes(req);
	errors = req->errors;

	scsiback_print_status(sense_buffer, errors, pending_req);

	/* The Host mode is through as for Emulation. */
	if (pending_req->info->feature != VSCSI_TYPE_HOST)
		scsiback_rsp_emulation(pending_req);

	scsiback_fast_flush_area(pending_req);
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
	scsiback_put(pending_req->info);

	__blk_put_request(req->q, req);
/*
 * Map the frontend-granted data pages for @pending_req and build its
 * scatterlist: one batched GNTTABOP_map_grant_ref hypercall for all
 * segments, per-entry retry on GNTST_eagain, and validation that each
 * segment lies within a single page.  On failure the mappings are torn
 * down via scsiback_fast_flush_area().  Caller has already bounded
 * nr_segments to VSCSIIF_SG_TABLESIZE (prepare_pending_reqs()).
 */
static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
		pending_req_t *pending_req)
	unsigned int data_len = 0;
	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
	struct vscsibk_info *info = pending_req->info;

	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;

	write = (data_dir == DMA_TO_DEVICE);

	struct scatterlist *sg;

	/* free of (sgl) in fast_flush_area()*/
	pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
	if (!pending_req->sgl) {
		pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__);

	sg_init_table(pending_req->sgl, nr_segments);

	/* NOTE(review): the !write guard for GNTMAP_readonly appears
	 * elided from this excerpt. */
	flags = GNTMAP_host_map;
	flags |= GNTMAP_readonly;

	for (i = 0; i < nr_segments; i++)
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
			ring_req->seg[i].gref,

	err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);

	for_each_sg (pending_req->sgl, sg, nr_segments, i) {

		/* Retry maps with GNTST_eagain */
		if (unlikely(map[i].status == GNTST_eagain))
			gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]);
		if (unlikely(map[i].status != GNTST_okay)) {
			pr_err("scsiback: invalid buffer -- could not remap it\n");
			map[i].handle = SCSIBACK_INVALID_HANDLE;

		/* Record the handle so fast_flush_area() can unmap it. */
		pending_handle(pending_req, i) = map[i].handle;

		pg = pending_pages[vaddr_pagenr(pending_req, i)];

		/* Point the local pfn at the foreign machine frame. */
		set_phys_to_machine(page_to_pfn(pg),
			FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));

		sg_set_page(sg, pg, ring_req->seg[i].length,
			ring_req->seg[i].offset);
		data_len += sg->length;

		/* Reject frontend segments that span or overrun a page. */
		if (sg->offset >= PAGE_SIZE ||
			sg->length > PAGE_SIZE ||
			sg->offset + sg->length > PAGE_SIZE)

	pending_req->request_bufflen = data_len;

	scsiback_fast_flush_area(pending_req);
/* quoted scsi_lib.c/scsi_bi_endio */
/* bio completion callback; body elided from this excerpt (presumably
 * just releases the bio — confirm against full source). */
static void scsiback_bi_endio(struct bio *bio, int error)
/* quoted scsi_lib.c/scsi_req_map_sg . */
/*
 * Build a chain of bios covering @pending_req's scatterlist so the
 * command can be submitted through the block layer; returns the head of
 * the chain, unwinding partially built bios on error (tail loop).
 * NOTE(review): many interior lines (len/off setup, error branches,
 * chain bookkeeping) are elided from this excerpt.
 */
static struct bio *request_map_sg(pending_req_t *pending_req)
	struct request_queue *q = pending_req->sdev->request_queue;
	unsigned int nsegs = (unsigned int)pending_req->nr_segments;
	unsigned int i, len, bytes, off, nr_pages, nr_vecs = 0;
	struct scatterlist *sg;

	struct bio *bio = NULL, *bio_first = NULL, *bio_last = NULL;

	for_each_sg (pending_req->sgl, sg, nsegs, i) {

		/* Pages spanned by this segment. */
		nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;

		bytes = min_t(unsigned int, len, PAGE_SIZE - off);

		nr_vecs = min_t(unsigned int, BIO_MAX_PAGES,

		bio = bio_alloc(GFP_KERNEL, nr_vecs);

		bio->bi_end_io = scsiback_bi_endio;

		/* Link onto the chain built so far. */
		bio_last->bi_next = bio;

		if (bio_add_pc_page(q, bio, page, bytes, off) !=

		/* bio full: invalidate cached segment count, set data
		 * direction, and start a fresh bio next iteration. */
		if (bio->bi_vcnt >= nr_vecs) {
			bio->bi_flags &= ~(1 << BIO_SEG_VALID);
			if (pending_req->sc_data_direction == WRITE)
				bio->bi_rw |= REQ_WRITE;

	/* Error unwind: free every bio allocated so far. */
	while ((bio = bio_first) != NULL) {
		bio_first = bio->bi_next;
/*
 * Issue the CDB carried by @pending_req to its translated scsi_device:
 * build a block request (with a bio chain when data segments exist),
 * copy in the CDB, sense buffer and timeout, and fire it asynchronously;
 * scsiback_cmd_done() handles completion.
 */
void scsiback_cmd_exec(pending_req_t *pending_req)
	int cmd_len = (int)pending_req->cmd_len;
	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int timeout;

	DPRINTK("%s\n",__FUNCTION__);

	/* because it doesn't timeout backend earlier than frontend.*/
	if (pending_req->timeout_per_command)
		timeout = pending_req->timeout_per_command * HZ;
	/* NOTE(review): the else for the default below appears elided. */
	timeout = VSCSIIF_TIMEOUT;

	write = (data_dir == DMA_TO_DEVICE);
	if (pending_req->nr_segments) {
		struct bio *bio = request_map_sg(pending_req);

		pr_err("scsiback: SG Request Map Error\n");

		rq = blk_make_request(pending_req->sdev->request_queue, bio,
		pr_err("scsiback: Make Request Error\n");

	/* No data: plain request without a bio chain. */
	rq = blk_get_request(pending_req->sdev->request_queue, write,
	pr_err("scsiback: Get Request Error\n");

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_len = cmd_len;
	memcpy(rq->cmd, pending_req->cmnd, cmd_len);

	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
	rq->sense = pending_req->sense_buffer;

	/* not allowed to retry in backend. */
	rq->timeout = timeout;
	rq->end_io_data = pending_req;

	/* Reference dropped in scsiback_cmd_done(). */
	scsiback_get(pending_req->info);
	blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
/*
 * Handle VSCSIIF_ACT_SCSI_RESET: ask the SCSI midlayer to reset the
 * translated device and report the outcome to the frontend.
 */
static void scsiback_device_reset_exec(pending_req_t *pending_req)
	struct vscsibk_info *info = pending_req->info;

	struct scsi_device *sdev = pending_req->sdev;

	err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);

	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
/*
 * Event-channel interrupt from the frontend: just flag work for the
 * per-instance thread; actual ring processing happens in
 * scsiback_schedule().
 */
irqreturn_t scsiback_intr(int irq, void *dev_id)
	scsiback_notify_work((struct vscsibk_info *)dev_id);
/*
 * Copy and validate one ring request into @pending_req:
 *  - translate (channel, id, lun) to a scsi_device (ENODEV path when
 *    the tuple is unknown),
 *  - range-check data direction, nr_segments and cmd_len (EINVAL paths),
 *  - copy the CDB and per-command timeout, then grant-map data pages.
 * All values come from the untrusted frontend, hence the checks before
 * any of them are used.
 */
static int prepare_pending_reqs(struct vscsibk_info *info,
		vscsiif_request_t *ring_req, pending_req_t *pending_req)
	struct scsi_device *sdev;
	struct ids_tuple vir;

	DPRINTK("%s\n",__FUNCTION__);

	pending_req->rqid = ring_req->rqid;
	pending_req->act = ring_req->act;

	pending_req->info = info;

	pending_req->v_chn = vir.chn = ring_req->channel;
	pending_req->v_tgt = vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	sdev = scsiback_do_translation(info, &vir);
	/* Unknown virtual device: no sdev to attach. */
	pending_req->sdev = NULL;
	DPRINTK("scsiback: doesn't exist.\n");

	pending_req->sdev = sdev;

	/* request range check from frontend */
	pending_req->sc_data_direction = ring_req->sc_data_direction;

	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
		(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
		(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
		(pending_req->sc_data_direction != DMA_NONE)) {
		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
			pending_req->sc_data_direction);

	pending_req->nr_segments = ring_req->nr_segments;

	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
			pending_req->nr_segments);

	pending_req->cmd_len = ring_req->cmd_len;

	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
			pending_req->cmd_len);

	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	pending_req->timeout_per_command = ring_req->timeout_per_command;

	if(scsiback_gnttab_data_map(ring_req, pending_req)) {
		DPRINTK("scsiback: invalid buffer\n");
/*
 * Drain the request ring of @info: for each consumed request take a
 * descriptor from the pool, validate it with prepare_pending_reqs(),
 * then dispatch — CDB execution (direct in host mode, otherwise via
 * the emulation layer), device reset, or an error response for unknown
 * action codes.  Returns non-zero when unconsumed requests remain so
 * the caller re-arms itself.
 */
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
	struct vscsiif_back_ring *ring = &info->ring;
	vscsiif_request_t *ring_req;

	pending_req_t *pending_req;

	int err, more_to_do = 0;

	DPRINTK("%s\n",__FUNCTION__);

	rp = ring->sring->req_prod;

	/* Defend against a frontend publishing a bogus producer index. */
	if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
	pending_req = alloc_req(info);
	if (NULL == pending_req) {

	ring_req = RING_GET_REQUEST(ring, rc);
	ring->req_cons = ++rc;

	err = prepare_pending_reqs(info, ring_req,
	if (err == -EINVAL) {
		scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
	} else if (err == -ENODEV) {
		scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),

	if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {

		/* The Host mode is through as for Emulation. */
		if (info->feature == VSCSI_TYPE_HOST)
			scsiback_cmd_exec(pending_req);
		scsiback_req_emulation_or_cmdexec(pending_req);

	} else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
		scsiback_device_reset_exec(pending_req);
		pr_err("scsiback: invalid parameter for request\n");
		scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),

	if (RING_HAS_UNCONSUMED_REQUESTS(ring))

	/* Yield point for this unbounded loop. */
/*
 * Per-instance kernel thread body: sleep until the frontend signals
 * work (waiting_reqs) and the free pool is non-empty, then drain the
 * ring; re-flag waiting_reqs when scsiback_do_cmd_fn() reports leftover
 * requests.  Exits when the thread is asked to stop.
 */
int scsiback_schedule(void *data)
	struct vscsibk_info *info = (struct vscsibk_info *)data;

	DPRINTK("%s\n",__FUNCTION__);

	while (!kthread_should_stop()) {
		wait_event_interruptible(
			info->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			!list_empty(&pending_free) || kthread_should_stop());

		info->waiting_reqs = 0;

		if (scsiback_do_cmd_fn(info))
			info->waiting_reqs = 1;
/*
 * Module init: allocate the global request pool — vscsiif_reqs
 * descriptors, each backed by VSCSIIF_SG_TABLESIZE ballooned pages and
 * grant-handle slots — build the free list, then register the
 * interface, xenbus and emulation layers.  The trailing lines are the
 * error-unwind path (out-of-memory / partial init).
 * NOTE(review): vscsiif_reqs is user-settable; the size multiplications
 * below are unchecked for overflow — confirm acceptable bounds.
 */
static int __init scsiback_init(void)
	if (!is_running_on_xen())

	mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;

	pending_reqs = kzalloc(sizeof(pending_reqs[0]) *
		vscsiif_reqs, GFP_KERNEL);
	pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
		mmap_pages, GFP_KERNEL);
	pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);

	if (!pending_reqs || !pending_grant_handles || !pending_pages)

	/* No grants mapped yet: mark every slot invalid. */
	for (i = 0; i < mmap_pages; i++)
		pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;

	if (scsiback_interface_init() < 0)

	INIT_LIST_HEAD(&pending_free);

	for (i = 0; i < vscsiif_reqs; i++)
		list_add_tail(&pending_reqs[i].free_list, &pending_free);

	if (scsiback_xenbus_init())

	scsiback_emulation_init();

	/* Error unwind. */
	scsiback_interface_exit();

	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, mmap_pages);
	pr_err("scsiback: %s: out of memory\n", __FUNCTION__);
/*
 * Module exit: unregister from xenbus, tear down the interface layer
 * and release the global pools allocated in scsiback_init().
 */
static void __exit scsiback_exit(void)
	scsiback_xenbus_unregister();
	scsiback_interface_exit();

	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
/* Module entry/exit registration and identification strings. */
module_init(scsiback_init);
module_exit(scsiback_exit);

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vscsi");