/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright IBM Corp. 2001, 2009
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Michael Holzheu <holzheu@de.ibm.com>
 *               Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 *               Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>      // for kernel parameters
#include <linux/kmod.h>      // for requesting modules
#include <linux/spinlock.h>  // for locks
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <asm/types.h>       // for variable types

#define TAPE_DBF_AREA   tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq(struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(unsigned long data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock tape_device_lock below.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
        [TS_UNUSED]   = "UNUSED",
        [TS_IN_USE]   = "IN_USE",
        [TS_BLKUSE]   = "BLKUSE",
        [TS_INIT]     = "INIT  ",
        [TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
        [TO_BLOCK] = "BLK",     [TO_BSB] = "BSB",
        [TO_BSF] = "BSF",       [TO_DSE] = "DSE",
        [TO_FSB] = "FSB",       [TO_FSF] = "FSF",
        [TO_LBL] = "LBL",       [TO_NOP] = "NOP",
        [TO_RBA] = "RBA",       [TO_RBI] = "RBI",
        [TO_RFO] = "RFO",       [TO_REW] = "REW",
        [TO_RUN] = "RUN",       [TO_WRI] = "WRI",
        [TO_WTM] = "WTM",       [TO_MSEN] = "MSN",
        [TO_LOAD] = "LOA",      [TO_READ_CONFIG] = "RCF",
        [TO_READ_ATTMSG] = "RAT",
        [TO_DIS] = "DIS",       [TO_ASSIGN] = "ASS",
        [TO_UNASSIGN] = "UAS",  [TO_CRYPT_ON] = "CON",
        [TO_CRYPT_OFF] = "COF", [TO_KEKL_SET] = "KLS",
        [TO_KEKL_QUERY] = "KLQ", [TO_RDC] = "RDC",
};

static int devid_to_int(struct ccw_dev_id *dev_id)
{
        return dev_id->devno + (dev_id->ssid << 16);
}

/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tape_device *tdev;

        tdev = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tape_device *tdev;

        tdev = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tape_device *tdev;

        tdev = dev_get_drvdata(dev);
        return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
                "OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tape_device *tdev;
        ssize_t rc;

        tdev = dev_get_drvdata(dev);
        if (tdev->first_minor < 0)
                return scnprintf(buf, PAGE_SIZE, "N/A\n");

        spin_lock_irq(get_ccwdev_lock(tdev->cdev));
        if (list_empty(&tdev->req_queue))
                rc = scnprintf(buf, PAGE_SIZE, "---\n");
        else {
                struct tape_request *req;

                req = list_entry(tdev->req_queue.next, struct tape_request,
                        list);
                rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
        }
        spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
        return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct tape_device *tdev;

        tdev = dev_get_drvdata(dev);

        return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
        &dev_attr_medium_state.attr,
        &dev_attr_first_minor.attr,
        &dev_attr_state.attr,
        &dev_attr_operation.attr,
        &dev_attr_blocksize.attr,
        NULL
};

static struct attribute_group tape_attr_group = {
        .attrs = tape_attrs,
};

/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
        const char *str;

        if (device->tape_state == TS_NOT_OPER) {
                DBF_EVENT(3, "ts_set err: not oper\n");
                return;
        }
        DBF_EVENT(4, "ts. dev:  %x\n", device->first_minor);
        DBF_EVENT(4, "old ts:\t\n");
        if (device->tape_state < TS_SIZE && device->tape_state >= 0)
                str = tape_state_verbose[device->tape_state];
        else
                str = "UNKNOWN TS";
        DBF_EVENT(4, "%s\n", str);
        DBF_EVENT(4, "new ts:\t\n");
        if (newstate < TS_SIZE && newstate >= 0)
                str = tape_state_verbose[newstate];
        else
                str = "UNKNOWN TS";
        DBF_EVENT(4, "%s\n", str);
        device->tape_state = newstate;
        wake_up(&device->state_change_wq);
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
        if (device->medium_state == newstate)
                return;
        switch (newstate) {
        case MS_UNLOADED:
                device->tape_generic_status |= GMT_DR_OPEN(~0);
                if (device->medium_state == MS_LOADED)
                        pr_info("%s: The tape cartridge has been successfully "
                                "unloaded\n", dev_name(&device->cdev->dev));
                break;
        case MS_LOADED:
                device->tape_generic_status &= ~GMT_DR_OPEN(~0);
                if (device->medium_state == MS_UNLOADED)
                        pr_info("%s: A tape cartridge has been mounted\n",
                                dev_name(&device->cdev->dev));
                break;
        default:
                // print nothing
                break;
        }
        device->medium_state = newstate;
        wake_up(&device->state_change_wq);
}

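/*
 * Illustrative sketch (an addition for clarity, not original driver code):
 * both state setters above wake up state_change_wq, so a caller that
 * already holds a device reference could wait for a cartridge to be
 * mounted with:
 *
 *	wait_event(device->state_change_wq,
 *		   device->medium_state == MS_LOADED);
 */
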
/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
        int retries;
        int rc;

        /* Check if interrupt has already been processed */
        if (request->callback == NULL)
                return 0;

        rc = 0;
        for (retries = 0; retries < 5; retries++) {
                rc = ccw_device_clear(device->cdev, (long) request);

                switch (rc) {
                        case 0:
                                request->status = TAPE_REQUEST_DONE;
                                return 0;
                        case -EBUSY:
                                request->status = TAPE_REQUEST_CANCEL;
                                schedule_delayed_work(&device->tape_dnr, 0);
                                return 0;
                        case -ENODEV:
                                DBF_EXCEPTION(2, "device gone, retry\n");
                                break;
                        case -EIO:
                                DBF_EXCEPTION(2, "I/O error, retry\n");
                                break;
                        default:
                                BUG();
                }
        }

        return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
        struct tape_device *tmp;
        int minor;

        minor = 0;
        write_lock(&tape_device_lock);
        list_for_each_entry(tmp, &tape_device_list, node) {
                if (minor < tmp->first_minor)
                        break;
                minor += TAPE_MINORS_PER_DEV;
        }
        if (minor >= 256) {
                write_unlock(&tape_device_lock);
                return -ENODEV;
        }
        device->first_minor = minor;
        list_add_tail(&device->node, &tmp->node);
        write_unlock(&tape_device_lock);
        return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
        write_lock(&tape_device_lock);
        list_del_init(&device->node);
        device->first_minor = -1;
        write_unlock(&tape_device_lock);
}

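/*
 * Note on the numbering scheme (added for clarity): each device owns
 * TAPE_MINORS_PER_DEV consecutive minors, so with the 256-minor limit
 * above, the n-th device in the sorted list covers the range
 *
 *	[n * TAPE_MINORS_PER_DEV, (n + 1) * TAPE_MINORS_PER_DEV - 1]
 *
 * and tape_find_device() further below maps a device index back to the
 * device by dividing first_minor by TAPE_MINORS_PER_DEV.
 */
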
/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
                   struct tape_discipline *discipline)
{
        int rc;

        DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

        if (device->tape_state != TS_INIT) {
                DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
                return -EINVAL;
        }

        init_timer(&device->lb_timeout);
        device->lb_timeout.function = tape_long_busy_timeout;

        /* Let the discipline have a go at the device. */
        device->discipline = discipline;
        if (!try_module_get(discipline->owner)) {
                return -EINVAL;
        }

        rc = discipline->setup_device(device);
        if (rc)
                goto out;
        rc = tape_assign_minor(device);
        if (rc)
                goto out_discipline;

        rc = tapechar_setup_device(device);
        if (rc)
                goto out_minor;
        rc = tapeblock_setup_device(device);
        if (rc)
                goto out_char;

        tape_state_set(device, TS_UNUSED);

        DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

        return 0;

out_char:
        tapechar_cleanup_device(device);
out_minor:
        tape_remove_minor(device);
out_discipline:
        device->discipline->cleanup_device(device);
        device->discipline = NULL;
out:
        module_put(discipline->owner);
        return rc;
}

static void
tape_cleanup_device(struct tape_device *device)
{
        tapeblock_cleanup_device(device);
        tapechar_cleanup_device(device);
        device->discipline->cleanup_device(device);
        module_put(device->discipline->owner);
        tape_remove_minor(device);
        tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Suspend device.
 *
 * Called by the common I/O layer if the drive should be suspended on user
 * request. We refuse to suspend if the device is loaded or in use for the
 * following reason:
 * While the Linux guest is suspended, it might be logged off which causes
 * devices to be detached. Tape devices are automatically rewound and unloaded
 * during DETACH processing (unless the tape device was attached with the
 * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
 * resume the original state of the tape device, since we would need to
 * manually re-load the cartridge which was active at suspend time.
 */
int tape_generic_pm_suspend(struct ccw_device *cdev)
{
        struct tape_device *device;

        device = dev_get_drvdata(&cdev->dev);
        if (!device) {
                return -ENODEV;
        }

        DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
                device->cdev_id, device);

        if (device->medium_state != MS_UNLOADED) {
                pr_err("A cartridge is loaded in tape device %s, "
                       "refusing to suspend\n", dev_name(&cdev->dev));
                return -EBUSY;
        }

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        switch (device->tape_state) {
                case TS_INIT:
                case TS_NOT_OPER:
                case TS_UNUSED:
                        spin_unlock_irq(get_ccwdev_lock(device->cdev));
                        break;
                default:
                        pr_err("Tape device %s is busy, refusing to "
                               "suspend\n", dev_name(&cdev->dev));
                        spin_unlock_irq(get_ccwdev_lock(device->cdev));
                        return -EBUSY;
        }

        DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
        return 0;
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
        struct tape_device *device;

        device = dev_get_drvdata(&cdev->dev);
        if (!device) {
                return -ENODEV;
        }

        DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
                device->cdev_id, device);

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        switch (device->tape_state) {
                case TS_INIT:
                case TS_NOT_OPER:
                        spin_unlock_irq(get_ccwdev_lock(device->cdev));
                        break;
                case TS_UNUSED:
                        tape_state_set(device, TS_INIT);
                        spin_unlock_irq(get_ccwdev_lock(device->cdev));
                        tape_cleanup_device(device);
                        break;
                default:
                        DBF_EVENT(3, "(%08x): Set offline failed "
                                "- drive in use.\n",
                                device->cdev_id);
                        spin_unlock_irq(get_ccwdev_lock(device->cdev));
                        return -EBUSY;
        }

        DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
        return 0;
}

/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
        struct tape_device *device;

        device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
        if (device == NULL) {
                DBF_EXCEPTION(2, "ti:no mem\n");
                return ERR_PTR(-ENOMEM);
        }
        device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
        if (device->modeset_byte == NULL) {
                DBF_EXCEPTION(2, "ti:no mem\n");
                kfree(device);
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&device->mutex);
        INIT_LIST_HEAD(&device->req_queue);
        INIT_LIST_HEAD(&device->node);
        init_waitqueue_head(&device->state_change_wq);
        init_waitqueue_head(&device->wait_queue);
        device->tape_state = TS_INIT;
        device->medium_state = MS_UNKNOWN;
        *device->modeset_byte = 0;
        device->first_minor = -1;
        atomic_set(&device->ref_count, 1);
        INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

        return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
        int count;

        count = atomic_inc_return(&device->ref_count);
        DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
        return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero, free the device structure.
 */
void
tape_put_device(struct tape_device *device)
{
        int count;

        count = atomic_dec_return(&device->ref_count);
        DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
        BUG_ON(count < 0);
        if (count == 0) {
                kfree(device->modeset_byte);
                kfree(device);
        }
}

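/*
 * Typical reference-counting pattern (a sketch, not code from this file):
 * every reference handed out, e.g. by tape_find_device() below, must be
 * balanced by a tape_put_device() once the caller is done:
 *
 *	struct tape_device *device;
 *
 *	device = tape_find_device(0);	// takes a reference on success
 *	if (!IS_ERR(device)) {
 *		// ... use the device ...
 *		tape_put_device(device);
 *	}
 */
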
/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_find_device(int devindex)
{
        struct tape_device *device, *tmp;

        device = ERR_PTR(-ENODEV);
        read_lock(&tape_device_lock);
        list_for_each_entry(tmp, &tape_device_list, node) {
                if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
                        device = tape_get_device(tmp);
                        break;
                }
        }
        read_unlock(&tape_device_lock);
        return device;
}

/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
        struct tape_device *device;
        int ret;
        struct ccw_dev_id dev_id;

        device = tape_alloc_device();
        if (IS_ERR(device))
                return -ENODEV;
        ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
                                     CCWDEV_DO_MULTIPATH);
        ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
        if (ret) {
                tape_put_device(device);
                return ret;
        }
        dev_set_drvdata(&cdev->dev, device);
        cdev->handler = __tape_do_irq;
        device->cdev = cdev;
        ccw_device_get_id(cdev, &dev_id);
        device->cdev_id = devid_to_int(&dev_id);
        return ret;
}

static void
__tape_discard_requests(struct tape_device *device)
{
        struct tape_request *   request;
        struct list_head *      l, *n;

        list_for_each_safe(l, n, &device->req_queue) {
                request = list_entry(l, struct tape_request, list);
                if (request->status == TAPE_REQUEST_IN_IO)
                        request->status = TAPE_REQUEST_DONE;
                list_del(&request->list);

                /* Decrease ref_count for removed request. */
                request->device = NULL;
                tape_put_device(device);
                request->rc = -EIO;
                if (request->callback != NULL)
                        request->callback(request, request->callback_data);
        }
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
        struct tape_device *    device;

        device = dev_get_drvdata(&cdev->dev);
        if (!device) {
                return;
        }
        DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        switch (device->tape_state) {
                case TS_INIT:
                        tape_state_set(device, TS_NOT_OPER);
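                        /* fall through */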
                case TS_NOT_OPER:
                        /*
                         * Nothing to do.
                         */
                        spin_unlock_irq(get_ccwdev_lock(device->cdev));
                        break;
                case TS_UNUSED:
                        /*
                         * Need only to release the device.
                         */
                        tape_state_set(device, TS_NOT_OPER);
                        spin_unlock_irq(get_ccwdev_lock(device->cdev));
                        tape_cleanup_device(device);
                        break;
                default:
                        /*
                         * There may be requests on the queue. We will not get
                         * an interrupt for a request that was running. So we
                         * just post them all as I/O errors.
                         */
                        DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
                                device->cdev_id);
                        pr_warning("%s: A tape unit was detached while in "
                                   "use\n", dev_name(&device->cdev->dev));
                        tape_state_set(device, TS_NOT_OPER);
                        __tape_discard_requests(device);
                        spin_unlock_irq(get_ccwdev_lock(device->cdev));
                        tape_cleanup_device(device);
        }

        device = dev_get_drvdata(&cdev->dev);
        if (device) {
                sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
                dev_set_drvdata(&cdev->dev, NULL);
                tape_put_device(device);
        }
}

/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
        struct tape_request *request;

        BUG_ON(datasize > PAGE_SIZE ||
               (cplength * sizeof(struct ccw1)) > PAGE_SIZE);

        DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

        request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
        if (request == NULL) {
                DBF_EXCEPTION(1, "cqra nomem\n");
                return ERR_PTR(-ENOMEM);
        }
        /* allocate channel program */
        if (cplength > 0) {
                request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
                                          GFP_ATOMIC | GFP_DMA);
                if (request->cpaddr == NULL) {
                        DBF_EXCEPTION(1, "cqra nomem\n");
                        kfree(request);
                        return ERR_PTR(-ENOMEM);
                }
        }
        /* alloc small kernel buffer */
        if (datasize > 0) {
                request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
                if (request->cpdata == NULL) {
                        DBF_EXCEPTION(1, "cqra nomem\n");
                        kfree(request->cpaddr);
                        kfree(request);
                        return ERR_PTR(-ENOMEM);
                }
        }
        DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
                request->cpdata);

        return request;
}

/*
 * Free tape ccw request
 */
void
tape_free_request(struct tape_request *request)
{
        DBF_LH(6, "Free request %p\n", request);

        if (request->device)
                tape_put_device(request->device);
        kfree(request->cpdata);
        kfree(request->cpaddr);
        kfree(request);
}

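/*
 * Request lifecycle sketch (illustrative only; the real channel programs
 * are built by the disciplines, and tape_ccw_end()/NOP_CMD are assumed
 * to come from tape_std.h): allocate a request, fill in the channel
 * program, run it synchronously and free it again.
 *
 *	struct tape_request *request;
 *	int rc;
 *
 *	request = tape_alloc_request(1, 0);	// one ccw, no data buffer
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	tape_ccw_end(request->cpaddr, NOP_CMD, 0, NULL);
 *	rc = tape_do_io(device, request);	// device must be open, see
 *						// __tape_start_request()
 *	tape_free_request(request);
 *	return rc;
 */
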
static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
        int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
        if (request->op == TO_BLOCK)
                device->discipline->check_locate(device, request);
#endif
        rc = ccw_device_start(
                device->cdev,
                request->cpaddr,
                (unsigned long) request,
                0x00,
                request->options
        );
        if (rc == 0) {
                request->status = TAPE_REQUEST_IN_IO;
        } else if (rc == -EBUSY) {
                /* The common I/O subsystem is currently busy. Retry later. */
                request->status = TAPE_REQUEST_QUEUED;
                schedule_delayed_work(&device->tape_dnr, 0);
                rc = 0;
        } else {
                /* Start failed. Remove request and indicate failure. */
                DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
        }
        return rc;
}

static void
__tape_start_next_request(struct tape_device *device)
{
        struct list_head *l, *n;
        struct tape_request *request;
        int rc;

        DBF_LH(6, "__tape_start_next_request(%p)\n", device);
        /*
         * Try to start each request on the request queue until one is
         * started successfully.
         */
        list_for_each_safe(l, n, &device->req_queue) {
                request = list_entry(l, struct tape_request, list);

                /*
                 * Avoid race condition if bottom-half was triggered more than
                 * once.
                 */
                if (request->status == TAPE_REQUEST_IN_IO)
                        return;
                /*
                 * Request has already been stopped. We have to wait until
                 * the request is removed from the queue in the interrupt
                 * handling.
                 */
                if (request->status == TAPE_REQUEST_DONE)
                        return;

                /*
                 * We wanted to cancel the request but the common I/O layer
                 * was busy at that time. This can only happen if this
                 * function is called by delayed_next_request.
                 * Otherwise we start the next request on the queue.
                 */
                if (request->status == TAPE_REQUEST_CANCEL) {
                        rc = __tape_cancel_io(device, request);
                } else {
                        rc = __tape_start_io(device, request);
                }
                if (rc == 0)
                        return;

                /* Set ending status. */
                request->rc = rc;
                request->status = TAPE_REQUEST_DONE;

                /* Remove from request queue. */
                list_del(&request->list);

                /* Do callback. */
                if (request->callback != NULL)
                        request->callback(request, request->callback_data);
        }
}

static void
tape_delayed_next_request(struct work_struct *work)
{
        struct tape_device *device =
                container_of(work, struct tape_device, tape_dnr.work);

        DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        __tape_start_next_request(device);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void tape_long_busy_timeout(unsigned long data)
{
        struct tape_request *request;
        struct tape_device *device;

        device = (struct tape_device *) data;
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        request = list_entry(device->req_queue.next, struct tape_request, list);
        BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
        DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
        __tape_start_next_request(device);
        device->lb_timeout.data = 0UL;
        tape_put_device(device);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void
__tape_end_request(
        struct tape_device *    device,
        struct tape_request *   request,
        int                     rc)
{
        DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
        if (request) {
                request->rc = rc;
                request->status = TAPE_REQUEST_DONE;

                /* Remove from request queue. */
                list_del(&request->list);

                /* Do callback. */
                if (request->callback != NULL)
                        request->callback(request, request->callback_data);
        }

        /* Start next request. */
        if (!list_empty(&device->req_queue))
                __tape_start_next_request(device);
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
                    struct irb *irb)
{
        unsigned int *sptr;
        const char *op;

        if (request != NULL)
                op = tape_op_verbose[request->op];
        else
                op = "---";
        DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
                  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
        DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
        sptr = (unsigned int *) irb->ecw;
        DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
        DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
        DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
        DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
        int rc;

        switch (request->op) {
                case TO_MSEN:
                case TO_ASSIGN:
                case TO_UNASSIGN:
                case TO_READ_ATTMSG:
                case TO_RDC:
                        if (device->tape_state == TS_INIT)
                                break;
                        if (device->tape_state == TS_UNUSED)
                                break;
                default:
                        if (device->tape_state == TS_BLKUSE)
                                break;
                        if (device->tape_state != TS_IN_USE)
                                return -ENODEV;
        }

        /* Increase use count of device for the added request. */
        request->device = tape_get_device(device);

        if (list_empty(&device->req_queue)) {
                /* No other requests are on the queue. Start this one. */
                rc = __tape_start_io(device, request);
                if (rc)
                        return rc;

                DBF_LH(5, "Request %p added for execution.\n", request);
                list_add(&request->list, &device->req_queue);
        } else {
                DBF_LH(5, "Request %p add to queue.\n", request);
                request->status = TAPE_REQUEST_QUEUED;
                list_add_tail(&request->list, &device->req_queue);
        }
        return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
        int rc;

        DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Add request to request queue and try to start it. */
        rc = __tape_start_request(device, request);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
962  */
963 static void
964 __tape_wake_up(struct tape_request *request, void *data)
965 {
966         request->callback = NULL;
967         wake_up((wait_queue_head_t *) data);
968 }
969
970 int
971 tape_do_io(struct tape_device *device, struct tape_request *request)
972 {
973         int rc;
974
975         spin_lock_irq(get_ccwdev_lock(device->cdev));
976         /* Setup callback */
977         request->callback = __tape_wake_up;
978         request->callback_data = &device->wait_queue;
979         /* Add request to request queue and try to start it. */
980         rc = __tape_start_request(device, request);
981         spin_unlock_irq(get_ccwdev_lock(device->cdev));
982         if (rc)
983                 return rc;
984         /* Request added to the queue. Wait for its completion. */
985         wait_event(device->wait_queue, (request->callback == NULL));
986         /* Get rc from request */
987         return request->rc;
988 }
989
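/*
 * Completion contract sketch (an illustration, not from the original
 * file): tape_do_io() blocks until __tape_wake_up() clears
 * request->callback, while tape_do_io_async() leaves completion to a
 * caller-supplied callback; my_done and done are hypothetical names:
 *
 *	static void my_done(struct tape_request *request, void *data)
 *	{
 *		// invoked once the request leaves the queue
 *		complete((struct completion *) data);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	request->callback = my_done;
 *	request->callback_data = &done;
 *	rc = tape_do_io_async(device, request);
 *	if (rc == 0)
 *		wait_for_completion(&done);
 */
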
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
        request->callback = NULL;
        wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
                         struct tape_request *request)
{
        int rc;

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        /* Setup callback */
        request->callback = __tape_wake_up_interruptible;
        request->callback_data = &device->wait_queue;
        rc = __tape_start_request(device, request);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        if (rc)
                return rc;
        /* Request added to the queue. Wait for its completion. */
        rc = wait_event_interruptible(device->wait_queue,
                                      (request->callback == NULL));
        if (rc != -ERESTARTSYS)
                /* Request finished normally. */
                return request->rc;

        /* Interrupted by a signal. We have to stop the current request. */
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = __tape_cancel_io(device, request);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        if (rc == 0) {
                /* Wait for the interrupt that acknowledges the halt. */
                do {
                        rc = wait_event_interruptible(
                                device->wait_queue,
                                (request->callback == NULL)
                        );
                } while (rc == -ERESTARTSYS);

                DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
                rc = -ERESTARTSYS;
        }
        return rc;
}

/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
        int rc;

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = __tape_cancel_io(device, request);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
        struct tape_device *device;
        struct tape_request *request;
        int rc;

        device = dev_get_drvdata(&cdev->dev);
        if (device == NULL) {
                return;
        }
        request = (struct tape_request *) intparm;

        DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

        /* On special conditions irb is an error pointer */
        if (IS_ERR(irb)) {
                /* FIXME: What to do with the request? */
                switch (PTR_ERR(irb)) {
                        case -ETIMEDOUT:
                                DBF_LH(1, "(%s): Request timed out\n",
                                        dev_name(&cdev->dev));
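                                /* fall through - a timeout is treated
                                 * like an I/O error */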
                        case -EIO:
                                __tape_end_request(device, request, -EIO);
                                break;
                        default:
                                DBF_LH(1, "(%s): Unexpected i/o error %li\n",
                                        dev_name(&cdev->dev),
                                        PTR_ERR(irb));
                }
                return;
        }

        /*
         * If the condition code is not zero and the start function bit is
         * still set, this is a deferred error and the last start I/O did
         * not succeed. At this point the condition that caused the deferred
         * error might still apply. So we just schedule the request to be
         * started later.
         */
        if (irb->scsw.cmd.cc != 0 &&
            (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
            (request->status == TAPE_REQUEST_IN_IO)) {
                DBF_EVENT(3, "(%08x): deferred cc=%i, fctl=%i. restarting\n",
                        device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl);
                request->status = TAPE_REQUEST_QUEUED;
                schedule_delayed_work(&device->tape_dnr, HZ);
                return;
        }

        /* May be an unsolicited irq */
        if (request != NULL)
                request->rescnt = irb->scsw.cmd.count;
        else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) &&
                 !list_empty(&device->req_queue)) {
                /* Not Ready to Ready after long busy ? */
                struct tape_request *req;
                req = list_entry(device->req_queue.next,
                                 struct tape_request, list);
                if (req->status == TAPE_REQUEST_LONG_BUSY) {
                        DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id);
                        if (del_timer(&device->lb_timeout)) {
                                device->lb_timeout.data = 0UL;
                                tape_put_device(device);
                                __tape_start_next_request(device);
                        }
                        return;
                }
        }
        if (irb->scsw.cmd.dstat != 0x0c) {
                /* Set the 'ONLINE' flag depending on sense byte 1 */
                if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
                        device->tape_generic_status |= GMT_ONLINE(~0);
                else
                        device->tape_generic_status &= ~GMT_ONLINE(~0);

                /*
                 * Any request that does not come back with channel end
                 * and device end is unusual. Log the sense data.
                 */
                DBF_EVENT(3, "-- Tape Interrupthandler --\n");
                tape_dump_sense_dbf(device, request, irb);
        } else {
                /* Upon normal completion the device _is_ online */
                device->tape_generic_status |= GMT_ONLINE(~0);
        }
        if (device->tape_state == TS_NOT_OPER) {
                DBF_EVENT(6, "tape:device is not operational\n");
                return;
        }

        /*
         * Requests that were canceled still come back with an interrupt.
         * To detect these requests the state is set to TAPE_REQUEST_DONE.
         */
        if (request != NULL && request->status == TAPE_REQUEST_DONE) {
                __tape_end_request(device, request, -EIO);
                return;
        }

        rc = device->discipline->irq(device, request, irb);
        /*
         * rc < 0 : request finished unsuccessfully.
         * rc == TAPE_IO_SUCCESS: request finished successfully.
         * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
         * rc == TAPE_IO_RETRY: request finished but needs another go.
         * rc == TAPE_IO_STOP: request needs to get terminated.
         */
        switch (rc) {
                case TAPE_IO_SUCCESS:
                        /* Upon normal completion the device _is_ online */
                        device->tape_generic_status |= GMT_ONLINE(~0);
                        __tape_end_request(device, request, rc);
                        break;
                case TAPE_IO_PENDING:
                        break;
                case TAPE_IO_LONG_BUSY:
                        device->lb_timeout.data =
                                (unsigned long) tape_get_device(device);
                        device->lb_timeout.expires = jiffies +
                                LONG_BUSY_TIMEOUT * HZ;
                        DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id);
                        add_timer(&device->lb_timeout);
                        request->status = TAPE_REQUEST_LONG_BUSY;
                        break;
                case TAPE_IO_RETRY:
                        rc = __tape_start_io(device, request);
                        if (rc)
                                __tape_end_request(device, request, rc);
                        break;
                case TAPE_IO_STOP:
                        rc = __tape_cancel_io(device, request);
                        if (rc)
                                __tape_end_request(device, request, rc);
                        break;
                default:
                        if (rc > 0) {
                                DBF_EVENT(6, "xunknownrc\n");
                                __tape_end_request(device, request, -EIO);
                        } else {
                                __tape_end_request(device, request, rc);
                        }
                        break;
        }
}

/*
 * Tape device open function used by tape_char & tape_block frontends.
 */
int
tape_open(struct tape_device *device)
{
        int rc;

        spin_lock_irq(get_ccwdev_lock(device->cdev));
        if (device->tape_state == TS_NOT_OPER) {
                DBF_EVENT(6, "TAPE:nodev\n");
                rc = -ENODEV;
        } else if (device->tape_state == TS_IN_USE) {
                DBF_EVENT(6, "TAPE:dbusy\n");
                rc = -EBUSY;
        } else if (device->tape_state == TS_BLKUSE) {
                DBF_EVENT(6, "TAPE:dbusy\n");
                rc = -EBUSY;
        } else if (device->discipline != NULL &&
                   !try_module_get(device->discipline->owner)) {
                DBF_EVENT(6, "TAPE:nodisc\n");
                rc = -ENODEV;
        } else {
                tape_state_set(device, TS_IN_USE);
                rc = 0;
        }
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return rc;
}

/*
 * Tape device release function used by tape_char & tape_block frontends.
 */
int
tape_release(struct tape_device *device)
{
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        if (device->tape_state == TS_IN_USE)
                tape_state_set(device, TS_UNUSED);
        module_put(device->discipline->owner);
        spin_unlock_irq(get_ccwdev_lock(device->cdev));
        return 0;
}

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
        tape_mtop_fn fn;
        int rc;

        DBF_EVENT(6, "TAPE:mtio\n");
        DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
        DBF_EVENT(6, "TAPE:arg:  %x\n", mt_count);

        if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
                return -EINVAL;
        fn = device->discipline->mtop_array[mt_op];
        if (fn == NULL)
                return -EINVAL;

        /* We assume that the backends can handle count up to 500. */
        if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
            mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
                rc = 0;
                for (; mt_count > 500; mt_count -= 500)
                        if ((rc = fn(device, 500)) != 0)
                                break;
                if (rc == 0)
                        rc = fn(device, mt_count);
        } else
                rc = fn(device, mt_count);
        return rc;
}

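/*
 * Worked example (added for clarity): MTBSF with mt_count == 1200 is
 * executed above as fn(device, 500), fn(device, 500) and finally
 * fn(device, 200), stopping early if any chunk fails.
 */
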
/*
 * Tape init function.
 */
static int
tape_init(void)
{
        TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
        debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
        debug_set_level(TAPE_DBF_AREA, 6);
#endif
        DBF_EVENT(3, "tape init\n");
        tape_proc_init();
        tapechar_init();
        tapeblock_init();
        return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
        DBF_EVENT(6, "tape exit\n");

        /* Get rid of the frontends */
        tapechar_exit();
        tapeblock_exit();
        tape_proc_cleanup();
        debug_unregister(TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
              "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);