Update to 3.4-final.
[linux-flexiantxendom0-3.2.10.git] / drivers / xen / scsiback / scsiback.c
1 /*
2  * Xen SCSI backend driver
3  *
4  * Copyright (c) 2008, FUJITSU Limited
5  *
6  * Based on the blkback driver code.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License version 2
10  * as published by the Free Software Foundation; or, when distributed
11  * separately from the Linux kernel or incorporated into other
12  * software packages, subject to the following license:
13  * 
14  * Permission is hereby granted, free of charge, to any person obtaining a copy
15  * of this source file (the "Software"), to deal in the Software without
16  * restriction, including without limitation the rights to use, copy, modify,
17  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18  * and to permit persons to whom the Software is furnished to do so, subject to
19  * the following conditions:
20  * 
21  * The above copyright notice and this permission notice shall be included in
22  * all copies or substantial portions of the Software.
23  * 
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30  * IN THE SOFTWARE.
31  */
32
33 #include <linux/spinlock.h>
34 #include <linux/kthread.h>
35 #include <linux/list.h>
36 #include <linux/delay.h>
37 #include <xen/balloon.h>
38 #include <xen/evtchn.h>
39 #include <xen/gnttab.h>
40 #include <asm/hypervisor.h>
41 #include <scsi/scsi.h>
42 #include <scsi/scsi_cmnd.h>
43 #include <scsi/scsi_host.h>
44 #include <scsi/scsi_device.h>
45 #include <scsi/scsi_dbg.h>
46 #include <scsi/scsi_eh.h>
47
48 #include "common.h"
49
50
/* Free pool of pending_req_t structures, protected by pending_free_lock. */
struct list_head pending_free;
DEFINE_SPINLOCK(pending_free_lock);
/* Woken in free_req() when the pool transitions from empty to non-empty. */
DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);

/* Size of the pending request pool (module parameter "reqs"). */
int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
module_param_named(reqs, vscsiif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");

/* When non-zero, scsiback_cmd_done() logs decoded status for failed
 * commands.  NOTE(review): declared unsigned int but registered with
 * module_param(..., int, ...) -- confirm the types should match. */
static unsigned int log_print_stat = 0;
module_param(log_print_stat, int, 0644);

#define SCSIBACK_INVALID_HANDLE (~0)

/* Arrays allocated in scsiback_init(): vscsiif_reqs requests, each with
 * VSCSIIF_SG_TABLESIZE pre-allocated pages and grant-handle slots. */
static pending_req_t *pending_reqs;
static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;
67
68 static int vaddr_pagenr(pending_req_t *req, int seg)
69 {
70         return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
71 }
72
73 static unsigned long vaddr(pending_req_t *req, int seg)
74 {
75         unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
76         return (unsigned long)pfn_to_kaddr(pfn);
77 }
78
/* Grant-handle slot for segment @_seg of request @_req (lvalue). */
#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
81
82
83 void scsiback_fast_flush_area(pending_req_t *req)
84 {
85         struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
86         unsigned int i, invcount = 0;
87         grant_handle_t handle;
88         int err;
89
90         if (req->nr_segments) {
91                 for (i = 0; i < req->nr_segments; i++) {
92                         handle = pending_handle(req, i);
93                         if (handle == SCSIBACK_INVALID_HANDLE)
94                                 continue;
95                         gnttab_set_unmap_op(&unmap[i], vaddr(req, i),
96                                                 GNTMAP_host_map, handle);
97                         pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
98                         invcount++;
99                 }
100
101                 err = HYPERVISOR_grant_table_op(
102                         GNTTABOP_unmap_grant_ref, unmap, invcount);
103                 BUG_ON(err);
104                 kfree(req->sgl);
105         }
106
107         return;
108 }
109
110
111 static pending_req_t * alloc_req(struct vscsibk_info *info)
112 {
113         pending_req_t *req = NULL;
114         unsigned long flags;
115
116         spin_lock_irqsave(&pending_free_lock, flags);
117         if (!list_empty(&pending_free)) {
118                 req = list_entry(pending_free.next, pending_req_t, free_list);
119                 list_del(&req->free_list);
120         }
121         spin_unlock_irqrestore(&pending_free_lock, flags);
122         return req;
123 }
124
125
126 static void free_req(pending_req_t *req)
127 {
128         unsigned long flags;
129         int was_empty;
130
131         spin_lock_irqsave(&pending_free_lock, flags);
132         was_empty = list_empty(&pending_free);
133         list_add(&req->free_list, &pending_free);
134         spin_unlock_irqrestore(&pending_free_lock, flags);
135         if (was_empty)
136                 wake_up(&pending_free_wq);
137 }
138
139
/* Flag that ring work is pending and kick the per-instance kthread. */
static void scsiback_notify_work(struct vscsibk_info *info)
{
	info->waiting_reqs = 1;
	wake_up(&info->wq);
}
145
146 void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
147                         uint32_t resid, pending_req_t *pending_req)
148 {
149         vscsiif_response_t *ring_res;
150         struct vscsibk_info *info = pending_req->info;
151         int notify;
152         int more_to_do = 1;
153         struct scsi_sense_hdr sshdr;
154         unsigned long flags;
155
156         DPRINTK("%s\n",__FUNCTION__);
157
158         spin_lock_irqsave(&info->ring_lock, flags);
159
160         ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
161         info->ring.rsp_prod_pvt++;
162
163         ring_res->rslt   = result;
164         ring_res->rqid   = pending_req->rqid;
165
166         if (sense_buffer != NULL) {
167                 if (scsi_normalize_sense(sense_buffer,
168                         sizeof(sense_buffer), &sshdr)) {
169
170                         int len = 8 + sense_buffer[7];
171
172                         if (len > VSCSIIF_SENSE_BUFFERSIZE)
173                                 len = VSCSIIF_SENSE_BUFFERSIZE;
174
175                         memcpy(ring_res->sense_buffer, sense_buffer, len);
176                         ring_res->sense_len = len;
177                 }
178         } else {
179                 ring_res->sense_len = 0;
180         }
181
182         ring_res->residual_len = resid;
183
184         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
185         if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
186                 RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
187         } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
188                 more_to_do = 1;
189         }
190         
191         spin_unlock_irqrestore(&info->ring_lock, flags);
192
193         if (more_to_do)
194                 scsiback_notify_work(info);
195
196         if (notify)
197                 notify_remote_via_irq(info->irq);
198
199         free_req(pending_req);
200 }
201
/*
 * Log the decoded SCSI result word of a failed command.  Only called
 * from scsiback_cmd_done() when the log_print_stat module parameter is
 * set.
 */
static void scsiback_print_status(char *sense_buffer, int errors,
					pending_req_t *pending_req)
{
	struct scsi_device *sdev = pending_req->sdev;

	pr_err("scsiback: %d:%d:%d:%d ",
	       sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
	pr_err("status = 0x%02x, message = 0x%02x, host = 0x%02x,"
	       " driver = 0x%02x\n",
	       status_byte(errors), msg_byte(errors),
	       host_byte(errors), driver_byte(errors));

	pr_err("scsiback: cmnd[0]=0x%02X\n", pending_req->cmnd[0]);

	/* Dump sense data when the status byte indicates CHECK_CONDITION. */
	if (CHECK_CONDITION & status_byte(errors))
		__scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
}
219
220
/*
 * Block-layer completion callback for requests issued by
 * scsiback_cmd_exec(): forward the result (and sense data) to the
 * frontend, then release all per-request resources.
 */
static void scsiback_cmd_done(struct request *req, int uptodate)
{
	pending_req_t *pending_req = req->end_io_data;
	unsigned char *sense_buffer;
	unsigned int resid;
	int errors;

	sense_buffer = req->sense;
	/* NOTE(review): blk_rq_bytes() is the request's total byte count;
	 * confirm this is the intended residual value here (vs. the
	 * request's resid_len). */
	resid        = blk_rq_bytes(req);
	errors       = req->errors;

	if (errors != 0) {
		if (log_print_stat)
			scsiback_print_status(sense_buffer, errors, pending_req);
	}

	/* The Host mode is through as for Emulation. */
	if (pending_req->info->feature != VSCSI_TYPE_HOST)
		scsiback_rsp_emulation(pending_req);

	/* Unmap grants before responding so the frontend may immediately
	 * reuse the granted pages. */
	scsiback_fast_flush_area(pending_req);
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
	/* Drop the reference taken in scsiback_cmd_exec(). */
	scsiback_put(pending_req->info);

	__blk_put_request(req->q, req);
}
247
248
/*
 * Map the frontend's grant references for @ring_req into this domain
 * and build @pending_req->sgl describing the mapped data pages.
 *
 * Returns 0 on success or -ENOMEM on allocation/mapping failure; on
 * failure any partial mappings are torn down via
 * scsiback_fast_flush_area().  On success the sgl allocated here is
 * later freed by scsiback_fast_flush_area().
 */
static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
					pending_req_t *pending_req)
{
	u32 flags;
	int write;
	int i, err = 0;
	unsigned int data_len = 0;
	struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
	struct vscsibk_info *info   = pending_req->info;

	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int nr_segments = (unsigned int)pending_req->nr_segments;

	write = (data_dir == DMA_TO_DEVICE);

	if (nr_segments) {
		struct scatterlist *sg;

		/* free of (sgl) in fast_flush_area()*/
		pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
						GFP_KERNEL);
		if (!pending_req->sgl) {
			pr_err("scsiback: %s: kmalloc() error\n", __FUNCTION__);
			return -ENOMEM;
		}

		sg_init_table(pending_req->sgl, nr_segments);

		/* A write to the device only reads guest memory, so the
		 * grants can be mapped read-only. */
		flags = GNTMAP_host_map;
		if (write)
			flags |= GNTMAP_readonly;

		for (i = 0; i < nr_segments; i++)
			gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
						ring_req->seg[i].gref,
						info->domid);

		err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
		BUG_ON(err);

		for_each_sg (pending_req->sgl, sg, nr_segments, i) {
			struct page *pg;

			/* Retry maps with GNTST_eagain */
			if (unlikely(map[i].status == GNTST_eagain))
				gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]);
			if (unlikely(map[i].status != GNTST_okay)) {
				pr_err("scsiback: invalid buffer -- could not remap it\n");
				map[i].handle = SCSIBACK_INVALID_HANDLE;
				err |= 1;
			}

			/* Record the handle even on failure so that
			 * fast_flush_area() knows which slots to skip. */
			pending_handle(pending_req, i) = map[i].handle;

			/* Once any segment failed, only record handles for
			 * later cleanup; skip the page bookkeeping. */
			if (err)
				continue;

			pg = pending_pages[vaddr_pagenr(pending_req, i)];

			set_phys_to_machine(page_to_pfn(pg),
				FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));

			sg_set_page(sg, pg, ring_req->seg[i].length,
				    ring_req->seg[i].offset);
			data_len += sg->length;

			/* Frontend-supplied offset/length are untrusted:
			 * reject any segment extending past its page. */
			barrier();
			if (sg->offset >= PAGE_SIZE ||
			    sg->length > PAGE_SIZE ||
			    sg->offset + sg->length > PAGE_SIZE)
				err |= 1;

		}

		if (err)
			goto fail_flush;
	}

	pending_req->request_bufflen = data_len;

	return 0;

fail_flush:
	scsiback_fast_flush_area(pending_req);
	return -ENOMEM;
}
335
/* quoted scsi_lib.c/scsi_bi_endio */
/* Completion callback for bios built in request_map_sg(): drop our
 * reference; @error is intentionally ignored (the request-level
 * completion reports the result). */
static void scsiback_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}
341
342
343
/* quoted scsi_lib.c/scsi_req_map_sg . */
/*
 * Build a chain of bios covering every page of @pending_req->sgl, ready
 * to be attached to a block request via blk_make_request().
 *
 * Returns the head of the bio chain, or ERR_PTR(-ENOMEM/-EINVAL); on
 * failure every bio allocated so far is released.
 */
static struct bio *request_map_sg(pending_req_t *pending_req)
{
	struct request_queue *q = pending_req->sdev->request_queue;
	unsigned int nsegs = (unsigned int)pending_req->nr_segments;
	unsigned int i, len, bytes, off, nr_pages, nr_vecs = 0;
	struct scatterlist *sg;
	struct page *page;
	struct bio *bio = NULL, *bio_first = NULL, *bio_last = NULL;
	int err;

	for_each_sg (pending_req->sgl, sg, nsegs, i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
		while (len > 0) {
			/* At most one page's worth per bio_add_pc_page(). */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			if (!bio) {
				/* Start a new bio, capped at BIO_MAX_PAGES
				 * vecs, and link it onto the chain. */
				nr_vecs = min_t(unsigned int, BIO_MAX_PAGES,
						nr_pages);
				nr_pages -= nr_vecs;
				bio = bio_alloc(GFP_KERNEL, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsiback_bi_endio;
				if (bio_last)
					bio_last->bi_next = bio;
				else
					bio_first = bio;
				bio_last = bio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
						bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				/* Bio is full: finalize it and force a new
				 * allocation on the next iteration. */
				bio->bi_flags &= ~(1 << BIO_SEG_VALID);
				if (pending_req->sc_data_direction == WRITE)
					bio->bi_rw |= REQ_WRITE;
				bio = NULL;
			}

			page++;
			len -= bytes;
			off = 0;
		}
	}

	return bio_first;

free_bios:
	/* Unwind the chain built so far. */
	while ((bio = bio_first) != NULL) {
		bio_first = bio->bi_next;
		bio_put(bio);
	}

	return ERR_PTR(err);
}
411
412
/*
 * Build and dispatch a block-layer request for the SCSI command carried
 * by @pending_req.  Completion is reported asynchronously through
 * scsiback_cmd_done().
 */
void scsiback_cmd_exec(pending_req_t *pending_req)
{
	int cmd_len  = (int)pending_req->cmd_len;
	int data_dir = (int)pending_req->sc_data_direction;
	unsigned int timeout;
	struct request *rq;
	int write;

	DPRINTK("%s\n",__FUNCTION__);

	/* because it doesn't timeout backend earlier than frontend.*/
	if (pending_req->timeout_per_command)
		timeout = pending_req->timeout_per_command * HZ;
	else
		timeout = VSCSIIF_TIMEOUT;

	write = (data_dir == DMA_TO_DEVICE);
	if (pending_req->nr_segments) {
		struct bio *bio = request_map_sg(pending_req);

		if (IS_ERR(bio)) {
			pr_err("scsiback: SG Request Map Error\n");
			/* NOTE(review): returning here leaks pending_req and
			 * its grant mappings, and the frontend never receives
			 * a response for this rqid -- confirm and add an
			 * error completion path. */
			return;
		}

		rq = blk_make_request(pending_req->sdev->request_queue, bio,
				      GFP_KERNEL);
		if (IS_ERR(rq)) {
			pr_err("scsiback: Make Request Error\n");
			/* NOTE(review): same leak as above; the bio chain
			 * built by request_map_sg() is also not released. */
			return;
		}

		rq->buffer = NULL;
	} else {
		/* No data transfer: a plain empty request suffices. */
		rq = blk_get_request(pending_req->sdev->request_queue, write,
				     GFP_KERNEL);
		if (unlikely(!rq)) {
			pr_err("scsiback: Get Request Error\n");
			return;
		}
	}

	rq->cmd_type = REQ_TYPE_BLOCK_PC;
	rq->cmd_len = cmd_len;
	memcpy(rq->cmd, pending_req->cmnd, cmd_len);

	/* Sense data lands in the per-request buffer; length is filled in
	 * by the SCSI midlayer on completion. */
	memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
	rq->sense       = pending_req->sense_buffer;
	rq->sense_len = 0;

	/* not allowed to retry in backend.                   */
	rq->retries   = 0;
	rq->timeout   = timeout;
	rq->end_io_data = pending_req;

	/* Reference dropped in scsiback_cmd_done(). */
	scsiback_get(pending_req->info);
	blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);

	return ;
}
473
474
475 static void scsiback_device_reset_exec(pending_req_t *pending_req)
476 {
477         struct vscsibk_info *info = pending_req->info;
478         int err;
479         struct scsi_device *sdev = pending_req->sdev;
480
481         scsiback_get(info);
482         err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);
483
484         scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
485         scsiback_put(info);
486
487         return;
488 }
489
490
491 irqreturn_t scsiback_intr(int irq, void *dev_id)
492 {
493         scsiback_notify_work((struct vscsibk_info *)dev_id);
494         return IRQ_HANDLED;
495 }
496
/*
 * Copy one ring request from the frontend into @pending_req, validating
 * every frontend-supplied field.  Each field is copied, then a
 * barrier() prevents re-reading shared ring memory, then the local copy
 * is range-checked -- closing the check-after-use window against a
 * malicious frontend.
 *
 * Returns 0 on success, -ENODEV when the (channel, id, lun) tuple does
 * not translate to a local scsi_device, -EINVAL for any other invalid
 * field or a failed grant mapping.
 */
static int prepare_pending_reqs(struct vscsibk_info *info,
		vscsiif_request_t *ring_req, pending_req_t *pending_req)
{
	struct scsi_device *sdev;
	struct ids_tuple vir;
	int err = -EINVAL;

	DPRINTK("%s\n",__FUNCTION__);

	pending_req->rqid       = ring_req->rqid;
	pending_req->act        = ring_req->act;

	pending_req->info       = info;

	pending_req->v_chn = vir.chn = ring_req->channel;
	pending_req->v_tgt = vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	/* Ensure the id fields are read before translation. */
	rmb();
	sdev = scsiback_do_translation(info, &vir);
	if (!sdev) {
		pending_req->sdev = NULL;
		DPRINTK("scsiback: doesn't exist.\n");
		err = -ENODEV;
		goto invalid_value;
	}
	pending_req->sdev = sdev;

	/* request range check from frontend */
	pending_req->sc_data_direction = ring_req->sc_data_direction;
	barrier();
	if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
		(pending_req->sc_data_direction != DMA_TO_DEVICE) &&
		(pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
		(pending_req->sc_data_direction != DMA_NONE)) {
		DPRINTK("scsiback: invalid parameter data_dir = %d\n",
			pending_req->sc_data_direction);
		err = -EINVAL;
		goto invalid_value;
	}

	pending_req->nr_segments = ring_req->nr_segments;
	barrier();
	if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
		DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
			pending_req->nr_segments);
		err = -EINVAL;
		goto invalid_value;
	}

	pending_req->cmd_len = ring_req->cmd_len;
	barrier();
	if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
			pending_req->cmd_len);
		err = -EINVAL;
		goto invalid_value;
	}
	/* Copy the CDB only after its length has been validated. */
	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	pending_req->timeout_per_command = ring_req->timeout_per_command;

	if(scsiback_gnttab_data_map(ring_req, pending_req)) {
		DPRINTK("scsiback: invalid buffer\n");
		err = -EINVAL;
		goto invalid_value;
	}

	return 0;

invalid_value:
	return err;
}
570
571
/*
 * Drain the request ring for one backend instance: consume requests
 * until the producer index is reached, the ring overflows its sanity
 * check, or the pending request pool runs dry.
 *
 * Returns non-zero when more work remains (caller re-arms
 * info->waiting_reqs).
 */
static int scsiback_do_cmd_fn(struct vscsibk_info *info)
{
	struct vscsiif_back_ring *ring = &info->ring;
	vscsiif_request_t  *ring_req;

	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int err, more_to_do = 0;

	DPRINTK("%s\n",__FUNCTION__);

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	/* Read req_prod before any request contents. */
	rmb();

	while ((rc != rp)) {
		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;
		pending_req = alloc_req(info);
		if (NULL == pending_req) {
			/* Pool exhausted: retry after a request is freed. */
			more_to_do = 1;
			break;
		}

		ring_req = RING_GET_REQUEST(ring, rc);
		ring->req_cons = ++rc;

		err = prepare_pending_reqs(info, ring_req,
						pending_req);
		if (err == -EINVAL) {
			/* Invalid request: respond with a driver error. */
			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
				0, pending_req);
			continue;
		} else if (err == -ENODEV) {
			/* No such device behind this backend. */
			scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
				0, pending_req);
			continue;
		}

		if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {

			/* The Host mode is through as for Emulation. */
			if (info->feature == VSCSI_TYPE_HOST)
				scsiback_cmd_exec(pending_req);
			else
				scsiback_req_emulation_or_cmdexec(pending_req);

		} else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
			scsiback_device_reset_exec(pending_req);
		} else {
			pr_err("scsiback: invalid parameter for request\n");
			scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
				0, pending_req);
			continue;
		}
	}

	if (RING_HAS_UNCONSUMED_REQUESTS(ring))
		more_to_do = 1;

	/* Yield point for this unbounded loop. */
	cond_resched();

	return more_to_do;
}
637
638
/*
 * Per-instance kernel thread: sleep until ring work is signalled and at
 * least one free pending request exists, then drain the ring via
 * scsiback_do_cmd_fn().  Exits when kthread_stop() is called.
 */
int scsiback_schedule(void *data)
{
	struct vscsibk_info *info = (struct vscsibk_info *)data;

	DPRINTK("%s\n",__FUNCTION__);

	while (!kthread_should_stop()) {
		/* Wait for work on this instance ... */
		wait_event_interruptible(
			info->wq,
			info->waiting_reqs || kthread_should_stop());
		/* ... and for a free slot in the global request pool. */
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());

		/* Clear the flag before processing so a concurrent kick
		 * from scsiback_notify_work() is not lost. */
		info->waiting_reqs = 0;
		smp_mb();

		if (scsiback_do_cmd_fn(info))
			info->waiting_reqs = 1;
	}

	return 0;
}
662
663
664 static int __init scsiback_init(void)
665 {
666         int i, mmap_pages;
667
668         if (!is_running_on_xen())
669                 return -ENODEV;
670
671         mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
672
673         pending_reqs          = kzalloc(sizeof(pending_reqs[0]) *
674                                         vscsiif_reqs, GFP_KERNEL);
675         pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
676                                         mmap_pages, GFP_KERNEL);
677         pending_pages         = alloc_empty_pages_and_pagevec(mmap_pages);
678
679         if (!pending_reqs || !pending_grant_handles || !pending_pages)
680                 goto out_of_memory;
681
682         for (i = 0; i < mmap_pages; i++)
683                 pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;
684
685         if (scsiback_interface_init() < 0)
686                 goto out_of_memory;
687
688         INIT_LIST_HEAD(&pending_free);
689
690         for (i = 0; i < vscsiif_reqs; i++)
691                 list_add_tail(&pending_reqs[i].free_list, &pending_free);
692
693         if (scsiback_xenbus_init())
694                 goto out_interface;
695
696         scsiback_emulation_init();
697
698         return 0;
699
700 out_interface:
701         scsiback_interface_exit();
702 out_of_memory:
703         kfree(pending_reqs);
704         kfree(pending_grant_handles);
705         free_empty_pages_and_pagevec(pending_pages, mmap_pages);
706         pr_err("scsiback: %s: out of memory\n", __FUNCTION__);
707         return -ENOMEM;
708 }
709
#if 0
/* NOTE(review): unload path is compiled out (#if 0) and module_exit is
 * never registered, so the module cannot be removed; the reason is not
 * documented here -- confirm before enabling. */
static void __exit scsiback_exit(void)
{
	scsiback_xenbus_unregister();
	scsiback_interface_exit();
	kfree(pending_reqs);
	kfree(pending_grant_handles);
	free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));

}
#endif

module_init(scsiback_init);

#if 0
module_exit(scsiback_exit);
#endif

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vscsi");