drivers/ide/ide-tcq.c
/*
 * Copyright (C) 2001, 2002 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * Support for the DMA queued protocol, which enables ATA disk drives to
 * use tagged command queueing.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ide.h>

#include <asm/io.h>
#include <asm/delay.h>

/*
 * warning: it will be _very_ verbose if defined
 */
#undef IDE_TCQ_DEBUG

#ifdef IDE_TCQ_DEBUG
#define TCQ_PRINTK printk
#else
#define TCQ_PRINTK(x...)
#endif

/*
 * use nIEN or not
 */
#undef IDE_TCQ_NIEN

/*
 * we are leaving the SERVICE interrupt alone, IBM drives have it
 * on by default and it can't be turned off. Doesn't matter, this
 * is the sane config.
 */
#undef IDE_TCQ_FIDDLE_SI

/*
 * bad drive blacklist, for drives that report tcq capability but don't
 * work reliably with the default config. initially from freebsd table.
 */
struct ide_tcq_blacklist {
	char *model;
	char works;
	unsigned int max_sectors;
};

static struct ide_tcq_blacklist ide_tcq_blacklist[] = {
	{
		.model =	"IBM-DTTA",
		.works =	1,
		.max_sectors =	128,
	},
	{
		.model =	"IBM-DJNA",
		.works =	0,
	},
	{
		.model =	"WDC AC",
		.works =	0,
	},
	{
		.model =	NULL,
	},
};

ide_startstop_t ide_dmaq_intr(ide_drive_t *drive);
ide_startstop_t ide_service(ide_drive_t *drive);

static struct ide_tcq_blacklist *ide_find_drive_blacklist(ide_drive_t *drive)
{
	struct ide_tcq_blacklist *itb;
	int i = 0;

	do {
		itb = &ide_tcq_blacklist[i];

		if (!itb->model)
			break;

		if (!strncmp(drive->id->model, itb->model, strlen(itb->model)))
			return itb;

		i++;
	} while (1);

	return NULL;
}

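/*
 * set (or clear) the nIEN bit -- bit 1 (0x02) of the device control
 * register masks the drive's INTRQ line while a tag is being started
 */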
static inline void drive_ctl_nien(ide_drive_t *drive, int set)
{
#ifdef IDE_TCQ_NIEN
	if (IDE_CONTROL_REG) {
		int mask = set ? 0x02 : 0x00;

		HWIF(drive)->OUTB(drive->ctl | mask, IDE_CONTROL_REG);
	}
#endif
}

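/*
 * completion handler for the NOP (sub-code 0x01) probe issued by
 * ide_tcq_check_autopoll() below
 */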
static ide_startstop_t ide_tcq_nop_handler(ide_drive_t *drive)
{
	ide_task_t *args = HWGROUP(drive)->rq->special;
	ide_hwif_t *hwif = HWIF(drive);
	int auto_poll_check = 0;
	u8 stat, err;

	if (args->tfRegister[IDE_FEATURE_OFFSET] & 0x01)
		auto_poll_check = 1;

	local_irq_enable();

	stat = hwif->INB(IDE_STATUS_REG);
	err = hwif->INB(IDE_ERROR_REG);
	ide_end_drive_cmd(drive, stat, err);

	/*
	 * do taskfile and check ABRT bit -- intelligent adapters will not
	 * pass NOP with sub-code 0x01 to device, so the command will not
	 * fail there
	 */
	if (auto_poll_check) {
		if (!(stat & ERR_STAT) && !(err & ABRT_ERR)) {
			hwif->auto_poll = 1;
			printk(KERN_INFO "%s: NOP Auto-poll enabled\n", hwif->name);
		}
	}

	kfree(args);
	return ide_stopped;
}

/*
 * if we encounter _any_ error doing I/O to one of the tags, we must
 * invalidate the pending queue. clear the software busy queue and requeue
 * on the request queue for restart. issue a WIN_NOP to clear hardware queue
 */
static void ide_tcq_invalidate_queue(ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	request_queue_t *q = drive->queue;
	struct request *rq;
	unsigned long flags;

	printk(KERN_INFO "%s: invalidating tag queue (%d commands)\n",
		drive->name, ata_pending_commands(drive));

	/*
	 * first kill timer and block queue
	 */
	spin_lock_irqsave(&ide_lock, flags);

	del_timer(&hwgroup->timer);

	if (HWIF(drive)->dma)
		HWIF(drive)->ide_dma_end(drive);

	blk_queue_invalidate_tags(q);

	drive->using_tcq = 0;
	drive->queue_depth = 1;
	hwgroup->busy = 0;
	hwgroup->handler = NULL;

	spin_unlock_irqrestore(&ide_lock, flags);

	/*
	 * now kill hardware queue with a NOP
	 */
	rq = &hwgroup->wrq;
	ide_init_drive_cmd(rq);
	rq->buffer = hwgroup->cmd_buf;
	memset(rq->buffer, 0, sizeof(hwgroup->cmd_buf));
	rq->buffer[0] = WIN_NOP;
	ide_do_drive_cmd(drive, rq, ide_preempt);
}

void ide_tcq_intr_timeout(unsigned long data)
{
	ide_drive_t *drive = (ide_drive_t *) data;
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;

	printk(KERN_ERR "ide_tcq_intr_timeout: timeout waiting for %s interrupt\n",
		hwgroup->rq ? "completion" : "service");

	spin_lock_irqsave(&ide_lock, flags);

	if (!hwgroup->busy)
		printk(KERN_ERR "ide_tcq_intr_timeout: hwgroup not busy\n");
	if (hwgroup->handler == NULL)
		printk(KERN_ERR "ide_tcq_intr_timeout: missing isr!\n");

	hwgroup->busy = 1;
	spin_unlock_irqrestore(&ide_lock, flags);

	/*
	 * if pending commands, try service before giving up
	 */
	if (ata_pending_commands(drive)) {
		u8 stat = hwif->INB(IDE_STATUS_REG);

		if ((stat & SRV_STAT) && (ide_service(drive) == ide_started))
			return;
	}

	ide_tcq_invalidate_queue(drive);
}

void __ide_tcq_set_intr(ide_hwgroup_t *hwgroup, ide_handler_t *handler)
{
	/*
	 * always just bump the timer for now, the timeout handling will
	 * have to be changed to be per-command
	 */
	hwgroup->timer.function = ide_tcq_intr_timeout;
	hwgroup->timer.data = (unsigned long) hwgroup->drive;
	mod_timer(&hwgroup->timer, jiffies + 5 * HZ);

	hwgroup->handler = handler;
}

void ide_tcq_set_intr(ide_hwgroup_t *hwgroup, ide_handler_t *handler)
{
	unsigned long flags;

	spin_lock_irqsave(&ide_lock, flags);
	__ide_tcq_set_intr(hwgroup, handler);
	spin_unlock_irqrestore(&ide_lock, flags);
}

/*
 * wait 400ns, then poll for busy_mask to clear from alt status
 */
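/*
 * poll budget: IDE_TCQ_WAIT iterations of udelay(10) below gives a
 * window of roughly 100ms before we give up on the drive
 */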
#define IDE_TCQ_WAIT	(10000)
int ide_tcq_wait_altstat(ide_drive_t *drive, byte *stat, byte busy_mask)
{
	ide_hwif_t *hwif = HWIF(drive);
	int i = 0;

	udelay(1);

	do {
		*stat = hwif->INB(IDE_ALTSTATUS_REG);

		if (!(*stat & busy_mask))
			break;

		if (unlikely(i++ > IDE_TCQ_WAIT))
			return 1;

		udelay(10);
	} while (1);

	return 0;
}

/*
 * issue SERVICE command to drive -- drive must have been selected first,
 * and it must have reported a need for service (status has SRV_STAT set)
 *
 * Also, nIEN must be set, so that we don't need protection against
 * ide_dmaq_intr
 */
ide_startstop_t ide_service(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	struct request *rq;
	byte feat, stat;
	int tag;

	TCQ_PRINTK("%s: started service\n", drive->name);

	/*
	 * could be called with IDE_DMA in-progress from invalidate
	 * handler, refuse to do anything
	 */
	if (hwif->dma)
		return ide_stopped;

	/*
	 * need to select the right drive first...
	 */
	if (drive != HWGROUP(drive)->drive) {
		SELECT_DRIVE(drive);
		udelay(10);
	}

	drive_ctl_nien(drive, 1);

	/*
	 * send SERVICE, wait 400ns, wait for BUSY_STAT to clear
	 */
	hwif->OUTB(WIN_QUEUED_SERVICE, IDE_COMMAND_REG);

	if (ide_tcq_wait_altstat(drive, &stat, BUSY_STAT)) {
		printk(KERN_ERR "ide_service: BUSY clear took too long\n");
		ide_dump_status(drive, "ide_service", stat);
		ide_tcq_invalidate_queue(drive);
		return ide_stopped;
	}

	drive_ctl_nien(drive, 0);

	/*
	 * FIXME, invalidate queue
	 */
	if (stat & ERR_STAT) {
		ide_dump_status(drive, "ide_service", stat);
		ide_tcq_invalidate_queue(drive);
		return ide_stopped;
	}

	/*
	 * should not happen, but a buggy device could introduce a loop
	 */
	feat = hwif->INB(IDE_NSECTOR_REG);
	if (feat & REL) {
		HWGROUP(drive)->rq = NULL;
		printk(KERN_ERR "%s: release in service\n", drive->name);
		return ide_stopped;
	}

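	/*
	 * the SERVICE response carries the tag of the command to resume
	 * in bits 7:3 of the sector count register (bit 2 is REL, which
	 * was checked above)
	 */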
	tag = feat >> 3;

	TCQ_PRINTK("ide_service: stat %x, feat %x\n", stat, feat);

	spin_lock_irqsave(&ide_lock, flags);

	if ((rq = blk_queue_find_tag(drive->queue, tag))) {
		HWGROUP(drive)->rq = rq;

		/*
		 * we'll start a dma read or write, device will trigger
		 * interrupt to indicate end of transfer, release is not
		 * allowed
		 */
		TCQ_PRINTK("ide_service: starting command, stat=%x\n", stat);
		spin_unlock_irqrestore(&ide_lock, flags);
		return HWIF(drive)->ide_dma_queued_start(drive);
	}

	printk(KERN_ERR "ide_service: missing request for tag %d\n", tag);
	spin_unlock_irqrestore(&ide_lock, flags);
	return ide_stopped;
}

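/*
 * see if a queued command is ready for service; if not, re-arm the
 * service interrupt handler and wait for the drive to tell us
 */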
ide_startstop_t ide_check_service(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	byte stat;

	TCQ_PRINTK("%s: ide_check_service\n", drive->name);

	if (!ata_pending_commands(drive))
		return ide_stopped;

	stat = hwif->INB(IDE_STATUS_REG);
	if (stat & SRV_STAT)
		return ide_service(drive);

	/*
	 * we have pending commands, wait for interrupt
	 */
	TCQ_PRINTK("%s: wait for service interrupt\n", drive->name);
	ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
	return ide_started;
}

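/*
 * a tagged command finished: stop the DMA engine, complete the request,
 * and check whether another queued command wants service
 */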
ide_startstop_t ide_dmaq_complete(ide_drive_t *drive, struct request *rq, byte stat)
{
	byte dma_stat;

	/*
	 * transfer was in progress, stop DMA engine
	 */
	dma_stat = HWIF(drive)->ide_dma_end(drive);

	/*
	 * must be end of I/O, check status and complete as necessary
	 */
	if (unlikely(!OK_STAT(stat, READY_STAT, drive->bad_wstat | DRQ_STAT))) {
		printk(KERN_ERR "ide_dmaq_intr: %s: error status %x\n",
			drive->name, stat);
		ide_dump_status(drive, "ide_dmaq_complete", stat);
		ide_tcq_invalidate_queue(drive);
		return ide_stopped;
	}

	if (dma_stat)
		printk(KERN_WARNING "%s: bad DMA status (dma_stat=%x)\n",
			drive->name, dma_stat);

	TCQ_PRINTK("ide_dmaq_complete: ending %p, tag %d\n", rq, rq->tag);
	ide_end_request(drive, 1, rq->nr_sectors);

	/*
	 * we completed this command, check if we can service a new command
	 */
	return ide_check_service(drive);
}

/*
 * intr handler for queued dma operations. this can be entered for two
 * reasons:
 *
 * 1) device has completed dma transfer
 * 2) service request to start a command
 *
 * if the drive has an active tag, we first complete that request before
 * processing any pending SERVICE.
 */
ide_startstop_t ide_dmaq_intr(ide_drive_t *drive)
{
	struct request *rq = HWGROUP(drive)->rq;
	ide_hwif_t *hwif = HWIF(drive);
	byte stat = hwif->INB(IDE_STATUS_REG);

	TCQ_PRINTK("ide_dmaq_intr: stat=%x\n", stat);

	/*
	 * if a command completion interrupt is pending, do that first and
	 * check service afterwards
	 */
	if (rq) {
		TCQ_PRINTK("ide_dmaq_intr: completion\n");
		return ide_dmaq_complete(drive, rq, stat);
	}

	/*
	 * service interrupt
	 */
	if (stat & SRV_STAT) {
		TCQ_PRINTK("ide_dmaq_intr: SERV (stat=%x)\n", stat);
		return ide_service(drive);
	}

	printk(KERN_ERR "ide_dmaq_intr: stat=%x, not expected\n", stat);
	return ide_check_service(drive);
}

/*
 * check if the ata adapter this drive is attached to supports the
 * NOP auto-poll for multiple tcq enabled drives on one channel
 */
static int ide_tcq_check_autopoll(ide_drive_t *drive)
{
	ide_task_t *args;
	int i, drives;

	/*
	 * only need to probe if both drives on a channel support tcq
	 */
	for (i = 0, drives = 0; i < MAX_DRIVES; i++)
		if (HWIF(drive)->drives[i].present &&
		    HWIF(drive)->drives[i].media == ide_disk)
			drives++;

	if (drives <= 1)
		return 0;

	/*
	 * what a mess...
	 */
	args = kmalloc(sizeof(*args), GFP_ATOMIC);
	if (!args)
		return 1;

	memset(args, 0, sizeof(*args));

	args->tfRegister[IDE_FEATURE_OFFSET] = 0x01;
	args->tfRegister[IDE_COMMAND_OFFSET] = WIN_NOP;
	args->command_type = ide_cmd_type_parser(args);
	args->handler = ide_tcq_nop_handler;
	return ide_raw_taskfile(drive, args, NULL);
}

/*
 * configure the drive for tcq
 */
static int ide_tcq_configure(ide_drive_t *drive)
{
	int tcq_mask = (1 << 1) | (1 << 14);
	int tcq_bits = tcq_mask | (1 << 15);
	ide_task_t *args;

	/*
	 * bits 1 and 14 must be set in word 83 of the device id to indicate
	 * support for dma queued protocol, and bit 15 must be cleared
	 */
	if ((drive->id->command_set_2 & tcq_bits) ^ tcq_mask) {
		printk(KERN_INFO "%s: TCQ not supported\n", drive->name);
		return -EIO;
	}

	args = kmalloc(sizeof(*args), GFP_ATOMIC);
	if (!args)
		return -ENOMEM;

	memset(args, 0, sizeof(ide_task_t));
	args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
	args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_EN_WCACHE;
	args->command_type = ide_cmd_type_parser(args);

	if (ide_raw_taskfile(drive, args, NULL)) {
		printk(KERN_WARNING "%s: failed to enable write cache\n",
			drive->name);
		goto err;
	}

	/*
	 * disable RELease interrupt, it's quicker to poll this after
	 * having sent the command opcode
	 */
	memset(args, 0, sizeof(ide_task_t));
	args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
	args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_DIS_RI;
	args->command_type = ide_cmd_type_parser(args);

	if (ide_raw_taskfile(drive, args, NULL)) {
		printk(KERN_ERR "%s: failed to disable release interrupt\n",
			drive->name);
		goto err;
	}

#ifdef IDE_TCQ_FIDDLE_SI
	/*
	 * enable SERVICE interrupt
	 */
	memset(args, 0, sizeof(ide_task_t));
	args->tfRegister[IDE_COMMAND_OFFSET] = WIN_SETFEATURES;
	args->tfRegister[IDE_FEATURE_OFFSET] = SETFEATURES_EN_SI;
	args->command_type = ide_cmd_type_parser(args);

	if (ide_raw_taskfile(drive, args, NULL)) {
		printk(KERN_ERR "%s: failed to enable service interrupt\n",
			drive->name);
		goto err;
	}
#endif

	kfree(args);
	return 0;
err:
	kfree(args);
	return -EIO;
}

/*
 * for now assume that command list is always as big as we need and don't
 * attempt to shrink it on tcq disable
 */
static int ide_enable_queued(ide_drive_t *drive, int on)
{
	struct ide_tcq_blacklist *itb;
	int depth = drive->using_tcq ? drive->queue_depth : 0;

	/*
	 * disable or adjust queue depth
	 */
	if (!on) {
		if (drive->using_tcq)
			printk(KERN_INFO "%s: TCQ disabled\n", drive->name);

		drive->using_tcq = 0;
		return 0;
	}

	if (ide_tcq_configure(drive)) {
		drive->using_tcq = 0;
		return 1;
	}

	/*
	 * some drives need limited transfer size in tcq
	 */
	itb = ide_find_drive_blacklist(drive);
	if (itb && itb->max_sectors) {
		if (itb->max_sectors > HWIF(drive)->rqsize)
			itb->max_sectors = HWIF(drive)->rqsize;

		blk_queue_max_sectors(drive->queue, itb->max_sectors);
	}

	/*
	 * enable block tagging
	 */
	if (!blk_queue_tagged(drive->queue))
		blk_queue_init_tags(drive->queue, IDE_MAX_TAG);

	/*
	 * check auto-poll support
	 */
	ide_tcq_check_autopoll(drive);

	if (depth != drive->queue_depth)
		printk(KERN_INFO "%s: tagged command queueing enabled, command queue depth %d\n",
			drive->name, drive->queue_depth);

	drive->using_tcq = 1;
	return 0;
}

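/*
 * wait for the drive to enter the data phase: BSY must clear first,
 * then DRDY and DRQ must be asserted before we may touch the DMA engine
 */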
int ide_tcq_wait_dataphase(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	byte stat;
	int i;

	do {
		stat = hwif->INB(IDE_STATUS_REG);
		if (!(stat & BUSY_STAT))
			break;

		udelay(10);
	} while (1);

	if (OK_STAT(stat, READY_STAT | DRQ_STAT, drive->bad_wstat))
		return 0;

	i = 0;
	udelay(1);
	do {
		stat = hwif->INB(IDE_STATUS_REG);

		if (OK_STAT(stat, READY_STAT | DRQ_STAT, drive->bad_wstat))
			break;

		++i;
		if (unlikely(i >= IDE_TCQ_WAIT))
			return 1;

		udelay(10);
	} while (1);

	return 0;
}

static int ide_tcq_check_blacklist(ide_drive_t *drive)
{
	struct ide_tcq_blacklist *itb = ide_find_drive_blacklist(drive);

	if (!itb)
		return 0;

	return !itb->works;
}

int __ide_dma_queued_on(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	if (drive->media != ide_disk)
		return 1;
	if (!drive->using_dma)
		return 1;
	if (hwif->chipset == ide_pdc4030)
		return 1;
	if (ide_tcq_check_blacklist(drive)) {
		printk(KERN_WARNING "%s: tcq forbidden by blacklist\n",
					drive->name);
		return 1;
	}
	if (hwif->drives[0].present && hwif->drives[1].present) {
		printk(KERN_WARNING "%s: only one drive on a channel supported"
					" for tcq\n", drive->name);
		return 1;
	}
	if (ata_pending_commands(drive)) {
		printk(KERN_WARNING "ide-tcq: can't toggle tcq feature on "
					"busy drive\n");
		return 1;
	}

	return ide_enable_queued(drive, 1);
}

int __ide_dma_queued_off(ide_drive_t *drive)
{
	if (drive->media != ide_disk)
		return 1;
	if (ata_pending_commands(drive)) {
		printk(KERN_WARNING "ide-tcq: can't toggle tcq feature on "
					"busy drive\n");
		return 1;
	}

	return ide_enable_queued(drive, 0);
}

static ide_startstop_t ide_dma_queued_rw(ide_drive_t *drive, u8 command)
{
	ide_hwif_t *hwif = HWIF(drive);
	unsigned long flags;
	byte stat, feat;

	TCQ_PRINTK("%s: starting tag\n", drive->name);

	/*
	 * set nIEN, tag start operation will enable again when
	 * it is safe
	 */
	drive_ctl_nien(drive, 1);

	TCQ_PRINTK("%s: sending cmd=%x\n", drive->name, command);
	hwif->OUTB(command, IDE_COMMAND_REG);

	if (ide_tcq_wait_altstat(drive, &stat, BUSY_STAT)) {
		printk(KERN_ERR "%s: alt stat timeout\n", drive->name);
		goto err;
	}

	drive_ctl_nien(drive, 0);

	if (stat & ERR_STAT)
		goto err;

	/*
	 * bus not released, start dma
	 */
	feat = hwif->INB(IDE_NSECTOR_REG);
	if (!(feat & REL)) {
		TCQ_PRINTK("IMMED in queued_start, feat=%x\n", feat);
		return hwif->ide_dma_queued_start(drive);
	}

	/*
	 * drive released the bus, clear active request and check for service
	 */
	spin_lock_irqsave(&ide_lock, flags);
	HWGROUP(drive)->rq = NULL;
	__ide_tcq_set_intr(HWGROUP(drive), ide_dmaq_intr);
	spin_unlock_irqrestore(&ide_lock, flags);

	TCQ_PRINTK("REL in queued_start\n");

	stat = hwif->INB(IDE_STATUS_REG);
	if (stat & SRV_STAT)
		return ide_service(drive);

	return ide_released;
err:
	ide_dump_status(drive, "rw_queued", stat);
	ide_tcq_invalidate_queue(drive);
	return ide_stopped;
}

ide_startstop_t __ide_dma_queued_read(ide_drive_t *drive)
{
	u8 command = WIN_READDMA_QUEUED;

	if (drive->addressing == 1)
		command = WIN_READDMA_QUEUED_EXT;

	return ide_dma_queued_rw(drive, command);
}

ide_startstop_t __ide_dma_queued_write(ide_drive_t *drive)
{
	u8 command = WIN_WRITEDMA_QUEUED;

	if (drive->addressing == 1)
		command = WIN_WRITEDMA_QUEUED_EXT;

	return ide_dma_queued_rw(drive, command);
}

ide_startstop_t __ide_dma_queued_start(ide_drive_t *drive)
{
	ide_hwgroup_t *hwgroup = HWGROUP(drive);
	struct request *rq = hwgroup->rq;
	ide_hwif_t *hwif = HWIF(drive);
	unsigned int reading = 0;

	TCQ_PRINTK("ide_dma: setting up queued tag=%d\n", rq->tag);

	if (!hwgroup->busy)
		printk(KERN_ERR "queued_rw: hwgroup not busy\n");

	if (ide_tcq_wait_dataphase(drive)) {
		printk(KERN_WARNING "timeout waiting for data phase\n");
		return ide_stopped;
	}

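	/*
	 * bit 3 is the read/write control bit of the busmaster DMA command
	 * register (set = device-to-memory transfer), so it doubles as the
	 * "reading" flag handed to ide_start_dma() below
	 */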
	if (rq_data_dir(rq) == READ)
		reading = 1 << 3;

	if (ide_start_dma(hwif, drive, reading))
		return ide_stopped;

	ide_tcq_set_intr(hwgroup, ide_dmaq_intr);

	if (!hwif->ide_dma_begin(drive))
		return ide_started;

	return ide_stopped;
}