2 * Xen SCSI frontend driver
4 * Copyright (c) 2008, FUJITSU Limited
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License version 2
8 * as published by the Free Software Foundation; or, when distributed
9 * separately from the Linux kernel or incorporated into other
10 * software packages, subject to the following license:
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this source file (the "Software"), to deal in the Software without
14 * restriction, including without limitation the rights to use, copy, modify,
15 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
16 * and to permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 #include <linux/version.h>
/*
 * Pop the next free shadow-array slot id for a new request.
 * The free list is threaded through shadow[].next_free and protected by
 * shadow_lock. The popped slot is poisoned (next_free = 0x0fff) so it can
 * no longer be mistaken for a free-list member, and its reset-completion
 * flag is cleared for reuse.
 * NOTE(review): local declarations and the final `return free;` are not
 * visible in this excerpt — presumably returns the popped id; confirm.
 */
35 static int get_id_from_freelist(struct vscsifrnt_info *info)
40 spin_lock_irqsave(&info->shadow_lock, flags);
42 free = info->shadow_free;
/* A free-list id must index within shadow[]; anything larger is corruption. */
43 BUG_ON(free > VSCSIIF_MAX_REQS);
44 info->shadow_free = info->shadow[free].next_free;
/* Poison value marks the slot as in-use (off the free list). */
45 info->shadow[free].next_free = 0x0fff;
/* Fresh slot: no device-reset completion is pending on it. */
47 info->shadow[free].wait_reset = 0;
49 spin_unlock_irqrestore(&info->shadow_lock, flags);
/*
 * Return shadow slot @id to the head of the free list (under shadow_lock).
 * The stale scsi_cmnd back-pointer is cleared so the recycled slot cannot
 * be mistaken for an active request.
 */
54 static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
58 spin_lock_irqsave(&info->shadow_lock, flags);
60 info->shadow[id].next_free = info->shadow_free;
61 info->shadow[id].req_scsi_cmnd = 0;
62 info->shadow_free = id;
64 spin_unlock_irqrestore(&info->shadow_lock, flags);
/*
 * Claim the next request slot on the shared ring and stamp it with a
 * freshly allocated shadow id (the backend echoes rqid in its response,
 * which is how the completion path finds the shadow entry).
 * NOTE(review): the req_prod_pvt increment and the `return ring_req;` are
 * not visible in this excerpt; the caller must already have checked
 * RING_FULL() — there is no capacity check here.
 */
68 struct vscsiif_request * scsifront_pre_request(struct vscsifrnt_info *info)
70 struct vscsiif_front_ring *ring = &(info->ring);
71 vscsiif_request_t *ring_req;
74 ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
/* Reserve a shadow slot; its id travels with the request and comes back
 * in the response. */
78 id = get_id_from_freelist(info); /* use id by response */
79 ring_req->rqid = (uint16_t)id;
/*
 * Flag that responses are pending so the scsifront_schedule() kthread
 * processes them.
 * NOTE(review): the wake_up of the kthread's waitqueue is not visible in
 * this excerpt — presumably follows the flag set; confirm.
 */
85 static void scsifront_notify_work(struct vscsifrnt_info *info)
87 info->waiting_resp = 1;
/*
 * Publish queued requests to the backend: push the private producer index
 * to the shared ring and, only if the ring macro says the backend needs a
 * kick, raise the event-channel irq.
 */
92 static void scsifront_do_request(struct vscsifrnt_info *info)
94 struct vscsiif_front_ring *ring = &(info->ring);
95 unsigned int irq = info->irq;
98 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
100 notify_remote_via_irq(irq);
/*
 * Event-channel interrupt handler: defer response processing to the
 * kthread by setting the waiting_resp flag.
 * NOTE(review): the `return IRQ_HANDLED;` is not visible in this excerpt.
 */
103 irqreturn_t scsifront_intr(int irq, void *dev_id)
105 scsifront_notify_work((struct vscsifrnt_info *)dev_id);
/*
 * Revoke the grant references of a completed request's data segments.
 * No-op for commands that carried no data (DMA_NONE). If the backend
 * still holds a mapping of a grant, that is a protocol violation — log it
 * loudly (the visible code does not show whether the grant is then leaked
 * or force-revoked; the continuation of this branch is elided).
 */
110 static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id)
114 if (s->sc_data_direction == DMA_NONE)
117 if (s->nr_segments) {
118 for (i = 0; i < s->nr_segments; i++) {
/* Backend must have unmapped every grant before responding. */
119 if (unlikely(gnttab_query_foreign_access(
121 pr_alert("scsifront: "
122 "grant still in use by backend\n");
125 gnttab_end_foreign_access(s->gref[i], 0UL);
/*
 * Complete a normal CDB request: recover the scsi_cmnd stashed in the
 * shadow entry, release its grants and shadow id, then propagate result,
 * residual length and (clamped) sense data back to the SCSI midlayer.
 * NOTE(review): the `id = ring_res->rqid` assignment and the final
 * sc->scsi_done(sc) call are not visible in this excerpt — confirm the
 * command is actually handed back to the midlayer.
 */
133 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
134 vscsiif_response_t *ring_res)
136 struct scsi_cmnd *sc;
/* req_scsi_cmnd stores the pointer as an integer; cast it back. */
141 sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd;
146 scsifront_gnttab_done(&info->shadow[id], id);
147 add_id_to_freelist(info, id);
149 sc->result = ring_res->rslt;
150 scsi_set_resid(sc, ring_res->residual_len);
/* Never copy more sense bytes than the ring response can legally carry —
 * sense_len comes from the backend and is untrusted. */
152 if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE)
153 sense_len = VSCSIIF_SENSE_BUFFERSIZE;
155 sense_len = ring_res->sense_len;
158 memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len);
/*
 * Complete a synchronous (reset) request: record the backend's result in
 * the shadow entry, set wait_reset under shadow_lock, and wake the waiter
 * sleeping in scsifront_dev_reset_handler(). The waiter — not this
 * function — frees the shadow id.
 */
166 static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
167 vscsiif_response_t *ring_res)
169 uint16_t id = ring_res->rqid;
172 spin_lock_irqsave(&info->shadow_lock, flags);
173 info->shadow[id].wait_reset = 1;
174 info->shadow[id].rslt_reset = ring_res->rslt;
175 spin_unlock_irqrestore(&info->shadow_lock, flags);
177 wake_up(&(info->shadow[id].wq_reset));
/*
 * Drain the response ring under io_lock, dispatching each response to the
 * CDB or sync (reset) completion path based on the act recorded in the
 * request's shadow entry.
 * NOTE(review): the return value (presumably more_to_do, so the caller
 * can re-arm waiting_resp) and the rmb() after reading rsp_prod are not
 * visible in this excerpt.
 */
181 int scsifront_cmd_done(struct vscsifrnt_info *info)
183 vscsiif_response_t *ring_res;
189 spin_lock_irqsave(&info->io_lock, flags);
191 rp = info->ring.sring->rsp_prod;
193 for (i = info->ring.rsp_cons; i != rp; i++) {
195 ring_res = RING_GET_RESPONSE(&info->ring, i);
/* The shadow entry remembers what kind of request this id was. */
197 if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB)
198 scsifront_cdb_cmd_done(info, ring_res);
200 scsifront_sync_cmd_done(info, ring_res);
203 info->ring.rsp_cons = i;
/* Requests still outstanding: ask for another event and re-check for
 * responses that raced in; otherwise just advance rsp_event. */
205 if (i != info->ring.req_prod_pvt) {
206 RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
208 info->ring.sring->rsp_event = i + 1;
211 spin_unlock_irqrestore(&info->io_lock, flags);
/* Yield point so a long stream of responses cannot hog the CPU. */
214 /* Yield point for this unbounded loop. */
/*
 * Response-processing kthread main loop: sleep until scsifront_intr()
 * sets waiting_resp (or the thread is asked to stop), then drain the
 * ring. If scsifront_cmd_done() reports more work pending, re-arm
 * waiting_resp so the next loop iteration does not sleep.
 * NOTE(review): the `return 0;` at thread exit is not visible here.
 */
223 int scsifront_schedule(void *data)
225 struct vscsifrnt_info *info = (struct vscsifrnt_info *)data;
227 while (!kthread_should_stop()) {
228 wait_event_interruptible(
230 info->waiting_resp || kthread_should_stop());
/* Clear before draining: a new interrupt during the drain re-sets it. */
232 info->waiting_resp = 0;
235 if (scsifront_cmd_done(info))
236 info->waiting_resp = 1;
/*
 * Grant the backend access to every page of the command's scatterlist and
 * fill in the request's seg[] descriptors.
 *
 * Grants are writable for the backend only when the command reads from
 * the device (DMA_FROM_DEVICE), i.e. the backend must write into them;
 * for writes to the device (DMA_TO_DEVICE) they are granted read-only.
 *
 * Returns the number of segments granted (0 for DMA_NONE / empty
 * buffers); the elided error paths presumably return -ENOMEM (grant
 * allocation failure) or an error when the buffer exceeds
 * VSCSIIF_SG_TABLESIZE — confirm against the full source.
 */
244 static int map_data_for_request(struct vscsifrnt_info *info,
245 struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id)
247 grant_ref_t gref_head;
249 int err, ref, ref_cnt = 0;
/* DMA_TO_DEVICE: backend only reads our pages -> read-only grants. */
250 int write = (sc->sc_data_direction == DMA_TO_DEVICE);
251 unsigned int i, nr_pages, off, len, bytes;
252 unsigned long buffer_pfn;
/* No data phase: nothing to grant. */
254 if (sc->sc_data_direction == DMA_NONE)
/* Pre-reserve the worst-case number of grant refs up front so the
 * per-segment claim below cannot fail mid-request. */
257 err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
259 pr_err("scsifront: gnttab_alloc_grant_references() error\n");
263 if (scsi_bufflen(sc)) {
264 /* quoted scsi_lib.c/scsi_req_map_sg . */
265 struct scatterlist *sg, *sgl = scsi_sglist(sc);
266 unsigned int data_len = scsi_bufflen(sc);
/* Pages needed, accounting for the first segment's intra-page offset. */
268 nr_pages = (data_len + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
269 if (nr_pages > VSCSIIF_SG_TABLESIZE) {
270 pr_err("scsifront: Unable to map request_buffer for command!\n");
275 for_each_sg (sgl, sg, scsi_sg_count(sc), i) {
280 buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
/* Walk the sg element one page at a time, but never past data_len. */
282 while (len > 0 && data_len > 0) {
284 * sg sends a scatterlist that is larger than
285 * the data_len it wants transferred for certain
/* Clamp each chunk to the page boundary and to the bytes remaining. */
288 bytes = min_t(unsigned int, len, PAGE_SIZE - off);
289 bytes = min(bytes, data_len);
/* Cannot fail: refs were pre-allocated above. */
291 ref = gnttab_claim_grant_reference(&gref_head);
292 BUG_ON(ref == -ENOSPC);
294 gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
/* Record the grant both in the shadow (for later revocation) and in the
 * ring request (for the backend to map). */
297 info->shadow[id].gref[ref_cnt] = ref;
298 ring_req->seg[ref_cnt].gref = ref;
299 ring_req->seg[ref_cnt].offset = (uint16_t)off;
300 ring_req->seg[ref_cnt].length = (uint16_t)bytes;
/* Return any reserved-but-unclaimed refs to the grant-table pool. */
313 gnttab_free_grant_references(gref_head);
/*
 * SCSI midlayer queuecommand entry point: build a VSCSIIF_ACT_SCSI_CDB
 * ring request for @sc, grant its data pages to the backend, and kick the
 * backend. Runs under shost->host_lock (taken here). Returns 0 on
 * success or SCSI_MLQUEUE_HOST_BUSY when the ring is full / grants are
 * exhausted.
 */
318 static int scsifront_queuecommand(struct Scsi_Host *shost,
319 struct scsi_cmnd *sc)
321 struct vscsifrnt_info *info = shost_priv(shost);
322 vscsiif_request_t *ring_req;
327 /* debug printk to identify more missing scsi commands
328 printk(KERN_INFO "scsicmd: len=%i, 0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x",sc->cmd_len,
329 sc->cmnd[0],sc->cmnd[1],sc->cmnd[2],sc->cmnd[3],sc->cmnd[4],
330 sc->cmnd[5],sc->cmnd[6],sc->cmnd[7],sc->cmnd[8],sc->cmnd[9]);
332 spin_lock_irqsave(shost->host_lock, flags);
333 scsi_cmd_get_serial(shost, sc);
/* Ring full: tell the midlayer to retry the command later. */
334 if (RING_FULL(&info->ring)) {
335 spin_unlock_irqrestore(shost->host_lock, flags);
336 return SCSI_MLQUEUE_HOST_BUSY;
/* Claim a ring slot + shadow id and fill in the CDB request. */
341 ring_req = scsifront_pre_request(info);
342 rqid = ring_req->rqid;
343 ring_req->act = VSCSIIF_ACT_SCSI_CDB;
345 ring_req->id = sc->device->id;
346 ring_req->lun = sc->device->lun;
347 ring_req->channel = sc->device->channel;
348 ring_req->cmd_len = sc->cmd_len;
350 BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
353 memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
355 memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
357 ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
/* Backend expects the per-command timeout in seconds. */
358 ring_req->timeout_per_command = (sc->request->timeout / HZ);
/* Shadow the command so the completion path can find it by rqid. */
360 info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc;
361 info->shadow[rqid].sc_data_direction = sc->sc_data_direction;
362 info->shadow[rqid].act = ring_req->act;
364 ref_cnt = map_data_for_request(info, sc, ring_req, rqid);
/* Mapping failed: release the shadow id before bailing out.
 * NOTE(review): the ring slot claimed by scsifront_pre_request() is not
 * visibly rolled back here (req_prod_pvt handling is elided) — verify the
 * slot is not leaked / pushed with a stale rqid on this path. */
366 add_id_to_freelist(info, rqid);
367 spin_unlock_irqrestore(shost->host_lock, flags);
/* Grant exhaustion is transient: ask the midlayer to retry. */
368 if (ref_cnt == (-ENOMEM))
369 return SCSI_MLQUEUE_HOST_BUSY;
/* Any other mapping failure is fatal for this command. */
370 sc->result = (DID_ERROR << 16);
375 ring_req->nr_segments = (uint8_t)ref_cnt;
376 info->shadow[rqid].nr_segments = ref_cnt;
378 scsifront_do_request(info);
379 spin_unlock_irqrestore(shost->host_lock, flags);
/*
 * Error-handler abort callback. Body is elided in this excerpt;
 * presumably a stub returning FAILED since the vscsiif protocol here only
 * exposes a reset action (see comment above the reset handler) — confirm.
 */
385 static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
390 /* vscsi supports only device_reset, because it is each of LUNs */
/*
 * Error-handler device-reset callback: send a VSCSIIF_ACT_SCSI_RESET
 * request carrying the LUN coordinates, then sleep (with host_lock
 * dropped) until scsifront_sync_cmd_done() sets wait_reset and wakes
 * shadow[rqid].wq_reset. The backend's result is returned to the
 * midlayer via the elided `return err;`.
 * NOTE(review): wait_event_interruptible()'s return value is not checked
 * in the visible code — a signal would make this read rslt_reset before
 * the backend replied; confirm against the full source.
 */
391 static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
393 struct Scsi_Host *host = sc->device->host;
394 struct vscsifrnt_info *info = shost_priv(host);
396 vscsiif_request_t *ring_req;
/* Older kernels called eh handlers with host_lock already held; only
 * take it ourselves on >= 2.6.12 (matching #endif is elided). */
400 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
401 spin_lock_irq(host->host_lock);
404 ring_req = scsifront_pre_request(info);
405 ring_req->act = VSCSIIF_ACT_SCSI_RESET;
407 rqid = ring_req->rqid;
408 info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;
410 ring_req->channel = sc->device->channel;
411 ring_req->id = sc->device->id;
412 ring_req->lun = sc->device->lun;
413 ring_req->cmd_len = sc->cmd_len;
416 memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
418 memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
420 ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
421 ring_req->timeout_per_command = (sc->request->timeout / HZ);
/* Reset requests carry no data segments. */
422 ring_req->nr_segments = 0;
424 scsifront_do_request(info);
/* Drop the lock while sleeping for the backend's reset completion. */
426 spin_unlock_irq(host->host_lock);
427 wait_event_interruptible(info->shadow[rqid].wq_reset,
428 info->shadow[rqid].wait_reset);
429 spin_lock_irq(host->host_lock);
431 err = info->shadow[rqid].rslt_reset;
/* Sync completions leave the id allocated; free it here. */
433 add_id_to_freelist(info, rqid);
435 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
436 spin_unlock_irq(host->host_lock);
/*
 * SCSI host template for the Xen frontend: queue depth and sg limits are
 * bounded by the vscsiif ring (VSCSIIF_MAX_REQS outstanding requests,
 * VSCSIIF_SG_TABLESIZE segments per request); clustering is disabled so
 * segments map 1:1 onto grantable pages.
 */
442 struct scsi_host_template scsifront_sht = {
443 .module = THIS_MODULE,
444 .name = "Xen SCSI frontend driver",
445 .queuecommand = scsifront_queuecommand,
446 .eh_abort_handler = scsifront_eh_abort_handler,
447 .eh_device_reset_handler= scsifront_dev_reset_handler,
448 .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
449 .can_queue = VSCSIIF_MAX_REQS,
451 .sg_tablesize = VSCSIIF_SG_TABLESIZE,
452 .use_clustering = DISABLE_CLUSTERING,
453 .proc_name = "scsifront",
/*
 * Module init: bail out when not running under Xen, otherwise register
 * the xenbus frontend driver (which creates hosts as backends appear).
 */
457 static int __init scsifront_init(void)
461 if (!is_running_on_xen())
464 err = scsifront_xenbus_init();
/* Module exit: unregister the xenbus frontend driver. */
469 static void __exit scsifront_exit(void)
471 scsifront_xenbus_unregister();
/* Module entry/exit hookup and metadata. */
474 module_init(scsifront_init);
475 module_exit(scsifront_exit);
477 MODULE_DESCRIPTION("Xen SCSI frontend driver");
478 MODULE_LICENSE("GPL");