/*
 * Xen SCSI frontend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/version.h>
#include "common.h"

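/*
 * Each in-flight request owns a slot in info->shadow[]; free slots are
 * chained through ->next_free starting at info->shadow_free.  The slot
 * index doubles as the request id (rqid) echoed back by the backend,
 * so a response is matched to its shadow state in O(1).
 */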
static int get_id_from_freelist(struct vscsifrnt_info *info)
{
        unsigned long flags;
        uint32_t free;

        spin_lock_irqsave(&info->shadow_lock, flags);

        free = info->shadow_free;
        BUG_ON(free >= VSCSIIF_MAX_REQS);       /* shadow[] is indexed 0..VSCSIIF_MAX_REQS-1 */
        info->shadow_free = info->shadow[free].next_free;
        info->shadow[free].next_free = 0x0fff;

        info->shadow[free].wait_reset = 0;

        spin_unlock_irqrestore(&info->shadow_lock, flags);

        return free;
}

static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
{
        unsigned long flags;

        spin_lock_irqsave(&info->shadow_lock, flags);

        info->shadow[id].next_free  = info->shadow_free;
        info->shadow[id].req_scsi_cmnd = 0;
        info->shadow_free = id;

        spin_unlock_irqrestore(&info->shadow_lock, flags);
}

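/*
 * Reserve the next slot on the shared ring together with a shadow
 * entry; the caller fills in the remaining request fields before
 * pushing the ring via scsifront_do_request().
 */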
struct vscsiif_request *scsifront_pre_request(struct vscsifrnt_info *info)
{
        struct vscsiif_front_ring *ring = &(info->ring);
        vscsiif_request_t *ring_req;
        uint32_t id;

        ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);

        ring->req_prod_pvt++;

        id = get_id_from_freelist(info);        /* id is echoed back in the response */
        ring_req->rqid = (uint16_t)id;

        return ring_req;
}

static void scsifront_notify_work(struct vscsifrnt_info *info)
{
        info->waiting_resp = 1;
        wake_up(&info->wq);
}

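/*
 * Push queued requests to the backend, kicking its event channel only
 * when the ring macros report that a notification is needed.
 */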
static void scsifront_do_request(struct vscsifrnt_info *info)
{
        struct vscsiif_front_ring *ring = &(info->ring);
        unsigned int irq = info->irq;
        int notify;

        RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
        if (notify)
                notify_remote_via_irq(irq);
}

irqreturn_t scsifront_intr(int irq, void *dev_id)
{
        scsifront_notify_work((struct vscsifrnt_info *)dev_id);
        return IRQ_HANDLED;
}

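/*
 * Revoke the grants used by a completed request.  A grant still held
 * by the backend at this point would let it write into memory that is
 * about to be reused, hence the BUG() rather than a silent leak.
 */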
static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id)
{
        int i;

        if (s->sc_data_direction == DMA_NONE)
                return;

        for (i = 0; i < s->nr_segments; i++) {
                if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
                        pr_alert("scsifront: grant still in use by backend\n");
                        BUG();
                }
                gnttab_end_foreign_access(s->gref[i], 0UL);
        }
}

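/*
 * Completion path for ordinary CDB requests: release the grants and
 * the shadow slot, then hand result, residual and sense data back to
 * the SCSI midlayer.
 */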
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
                       vscsiif_response_t *ring_res)
{
        struct scsi_cmnd *sc;
        uint32_t id;
        uint8_t sense_len;

        id = ring_res->rqid;
        sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd;

        BUG_ON(sc == NULL);

        scsifront_gnttab_done(&info->shadow[id], id);
        add_id_to_freelist(info, id);

        sc->result = ring_res->rslt;
        scsi_set_resid(sc, ring_res->residual_len);

        if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE)
                sense_len = VSCSIIF_SENSE_BUFFERSIZE;
        else
                sense_len = ring_res->sense_len;

        if (sense_len)
                memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len);

        sc->scsi_done(sc);
}

static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
                                vscsiif_response_t *ring_res)
{
        uint16_t id = ring_res->rqid;
        unsigned long flags;

        spin_lock_irqsave(&info->shadow_lock, flags);
        info->shadow[id].wait_reset = 1;
        info->shadow[id].rslt_reset = ring_res->rslt;
        spin_unlock_irqrestore(&info->shadow_lock, flags);

        wake_up(&(info->shadow[id].wq_reset));
}

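/*
 * Drain the response ring.  Returns nonzero if more responses arrived
 * while the ring was being processed, so the caller knows to run again.
 */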
int scsifront_cmd_done(struct vscsifrnt_info *info)
{
        vscsiif_response_t *ring_res;

        RING_IDX i, rp;
        int more_to_do = 0;
        unsigned long flags;

        spin_lock_irqsave(&info->io_lock, flags);

        rp = info->ring.sring->rsp_prod;
        rmb();  /* ensure we see queued responses up to rsp_prod */
        for (i = info->ring.rsp_cons; i != rp; i++) {
                ring_res = RING_GET_RESPONSE(&info->ring, i);

                if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB)
                        scsifront_cdb_cmd_done(info, ring_res);
                else
                        scsifront_sync_cmd_done(info, ring_res);
        }

        info->ring.rsp_cons = i;

        if (i != info->ring.req_prod_pvt) {
                RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
        } else {
                info->ring.sring->rsp_event = i + 1;
        }

        spin_unlock_irqrestore(&info->io_lock, flags);

        /* Yield point for this unbounded loop. */
        cond_resched();

        return more_to_do;
}

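/*
 * Per-adapter kernel thread: sleep until the interrupt handler flags
 * pending responses, then drain the ring.  waiting_resp is re-armed
 * when scsifront_cmd_done() reports leftover work, so responses that
 * land during the drain are not lost before the next sleep.
 */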
int scsifront_schedule(void *data)
{
        struct vscsifrnt_info *info = (struct vscsifrnt_info *)data;

        while (!kthread_should_stop()) {
                wait_event_interruptible(
                        info->wq,
                        info->waiting_resp || kthread_should_stop());

                info->waiting_resp = 0;
                smp_mb();

                if (scsifront_cmd_done(info))
                        info->waiting_resp = 1;
        }

        return 0;
}

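/*
 * Grant the backend access to the command's data buffer, one grant
 * reference per page of the scatterlist.  Returns the number of
 * segments granted, 0 for commands without data, or a negative errno
 * on failure.
 */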
static int map_data_for_request(struct vscsifrnt_info *info,
                struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id)
{
        grant_ref_t gref_head;
        struct page *page;
        int err, ref, ref_cnt = 0;
        int write = (sc->sc_data_direction == DMA_TO_DEVICE);
        unsigned int i, nr_pages, off, len, bytes;
        unsigned long buffer_pfn;

        if (sc->sc_data_direction == DMA_NONE)
                return 0;

        err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
        if (err) {
                pr_err("scsifront: gnttab_alloc_grant_references() error\n");
                return -ENOMEM;
        }

        if (scsi_bufflen(sc)) {
                /* cf. scsi_lib.c:scsi_req_map_sg() */
                struct scatterlist *sg, *sgl = scsi_sglist(sc);
                unsigned int data_len = scsi_bufflen(sc);

                nr_pages = (data_len + sgl->offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
                if (nr_pages > VSCSIIF_SG_TABLESIZE) {
                        pr_err("scsifront: Unable to map request_buffer for command!\n");
                        ref_cnt = -E2BIG;
                        goto big_to_sg;
                }

                for_each_sg (sgl, sg, scsi_sg_count(sc), i) {
                        page = sg_page(sg);
                        off = sg->offset;
                        len = sg->length;

                        buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;

                        while (len > 0 && data_len > 0) {
                                /*
                                 * sg sends a scatterlist that is larger than
                                 * the data_len it wants transferred for certain
                                 * IO sizes.
                                 */
                                bytes = min_t(unsigned int, len, PAGE_SIZE - off);
                                bytes = min(bytes, data_len);

                                ref = gnttab_claim_grant_reference(&gref_head);
                                BUG_ON(ref == -ENOSPC);

                                /*
                                 * For writes the backend only reads the
                                 * buffer, so the grant is made read-only.
                                 */
                                gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
                                        buffer_pfn, write);

                                info->shadow[id].gref[ref_cnt]  = ref;
                                ring_req->seg[ref_cnt].gref     = ref;
                                ring_req->seg[ref_cnt].offset   = (uint16_t)off;
                                ring_req->seg[ref_cnt].length   = (uint16_t)bytes;

                                buffer_pfn++;
                                len -= bytes;
                                data_len -= bytes;
                                off = 0;
                                ref_cnt++;
                        }
                }
        }

big_to_sg:
        gnttab_free_grant_references(gref_head);

        return ref_cnt;
}

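/*
 * Build a vscsiif request from a midlayer command under host_lock:
 * reserve a ring slot, copy the CDB, grant the data pages, then notify
 * the backend.  A full ring or exhausted grant references returns
 * SCSI_MLQUEUE_HOST_BUSY so the midlayer retries; a request too large
 * to map is completed with DID_ERROR.
 */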
static int scsifront_queuecommand(struct Scsi_Host *shost,
                                  struct scsi_cmnd *sc)
{
        struct vscsifrnt_info *info = shost_priv(shost);
        vscsiif_request_t *ring_req;
        unsigned long flags;
        int ref_cnt;
        uint16_t rqid;

#if 0   /* debug: dump the CDB to identify missing SCSI commands */
        printk(KERN_INFO "scsicmd: len=%i, 0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x,0x%x\n",
                sc->cmd_len,
                sc->cmnd[0], sc->cmnd[1], sc->cmnd[2], sc->cmnd[3], sc->cmnd[4],
                sc->cmnd[5], sc->cmnd[6], sc->cmnd[7], sc->cmnd[8], sc->cmnd[9]);
#endif
        spin_lock_irqsave(shost->host_lock, flags);
        scsi_cmd_get_serial(shost, sc);
        if (RING_FULL(&info->ring)) {
                spin_unlock_irqrestore(shost->host_lock, flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        sc->result    = 0;

        ring_req          = scsifront_pre_request(info);
        rqid              = ring_req->rqid;
        ring_req->act     = VSCSIIF_ACT_SCSI_CDB;

        ring_req->id      = sc->device->id;
        ring_req->lun     = sc->device->lun;
        ring_req->channel = sc->device->channel;
        ring_req->cmd_len = sc->cmd_len;

        BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

        if (sc->cmd_len)
                memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
        else
                memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);

        ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
        ring_req->timeout_per_command = (sc->request->timeout / HZ);

        info->shadow[rqid].req_scsi_cmnd     = (unsigned long)sc;
        info->shadow[rqid].sc_data_direction = sc->sc_data_direction;
        info->shadow[rqid].act               = ring_req->act;

        ref_cnt = map_data_for_request(info, sc, ring_req, rqid);
        if (ref_cnt < 0) {
                add_id_to_freelist(info, rqid);
                spin_unlock_irqrestore(shost->host_lock, flags);
                if (ref_cnt == -ENOMEM)
                        return SCSI_MLQUEUE_HOST_BUSY;
                sc->result = (DID_ERROR << 16);
                sc->scsi_done(sc);
                return 0;
        }

        ring_req->nr_segments          = (uint8_t)ref_cnt;
        info->shadow[rqid].nr_segments = ref_cnt;

        scsifront_do_request(info);
        spin_unlock_irqrestore(shost->host_lock, flags);

        return 0;
}

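/*
 * The vscsiif protocol has no abort operation, so command aborts
 * always fail and the midlayer escalates to a device reset.
 */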
static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
{
        return FAILED;
}

/* vscsi supports only device reset, which is performed per LUN */
static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
{
        struct Scsi_Host *host = sc->device->host;
        struct vscsifrnt_info *info = shost_priv(host);

        vscsiif_request_t *ring_req;
        uint16_t rqid;
        int err;

        spin_lock_irq(host->host_lock);

        ring_req      = scsifront_pre_request(info);
        ring_req->act = VSCSIIF_ACT_SCSI_RESET;

        rqid          = ring_req->rqid;
        info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;

        ring_req->channel = sc->device->channel;
        ring_req->id      = sc->device->id;
        ring_req->lun     = sc->device->lun;
        ring_req->cmd_len = sc->cmd_len;

        if (sc->cmd_len)
                memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
        else
                memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);

        ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
        ring_req->timeout_per_command = (sc->request->timeout / HZ);
        ring_req->nr_segments         = 0;

        scsifront_do_request(info);

        /* Drop the lock while the backend processes the reset. */
        spin_unlock_irq(host->host_lock);
        wait_event_interruptible(info->shadow[rqid].wq_reset,
                         info->shadow[rqid].wait_reset);
        spin_lock_irq(host->host_lock);

        err = info->shadow[rqid].rslt_reset;

        add_id_to_freelist(info, rqid);

        spin_unlock_irq(host->host_lock);
        return err;
}

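/* Host template advertised to the SCSI midlayer. */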
struct scsi_host_template scsifront_sht = {
        .module                  = THIS_MODULE,
        .name                    = "Xen SCSI frontend driver",
        .queuecommand            = scsifront_queuecommand,
        .eh_abort_handler        = scsifront_eh_abort_handler,
        .eh_device_reset_handler = scsifront_dev_reset_handler,
        .cmd_per_lun             = VSCSIIF_DEFAULT_CMD_PER_LUN,
        .can_queue               = VSCSIIF_MAX_REQS,
        .this_id                 = -1,
        .sg_tablesize            = VSCSIIF_SG_TABLESIZE,
        .use_clustering          = DISABLE_CLUSTERING,
        .proc_name               = "scsifront",
};

static int __init scsifront_init(void)
{
        if (!is_running_on_xen())
                return -ENODEV;

        return scsifront_xenbus_init();
}

static void __exit scsifront_exit(void)
{
        scsifront_xenbus_unregister();
}

module_init(scsifront_init);
module_exit(scsifront_exit);

MODULE_DESCRIPTION("Xen SCSI frontend driver");
MODULE_LICENSE("GPL");