/* patches.fixes/patch-2.6.11-rc1: 2.6.11-rc1.
 * [linux-flexiantxendom0-3.2.10.git] / drivers / scsi / scsi_transport_spi.c */
1 /* 
2  *  Parallel SCSI (SPI) transport specific attributes exported to sysfs.
3  *
4  *  Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
5  *
6  *  This program is free software; you can redistribute it and/or modify
7  *  it under the terms of the GNU General Public License as published by
8  *  the Free Software Foundation; either version 2 of the License, or
9  *  (at your option) any later version.
10  *
11  *  This program is distributed in the hope that it will be useful,
12  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14  *  GNU General Public License for more details.
15  *
16  *  You should have received a copy of the GNU General Public License
17  *  along with this program; if not, write to the Free Software
18  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19  */
20 #include <linux/module.h>
21 #include <linux/init.h>
22 #include <linux/smp_lock.h>
23 #include <linux/list.h>
24 #include <linux/spinlock.h>
25 #include <linux/mm.h>
26 #include <linux/workqueue.h>
27 #include <asm/scatterlist.h>
28 #include <asm/io.h>
29 #include <scsi/scsi.h>
30 #include "scsi_priv.h"
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_host.h>
33 #include <scsi/scsi_request.h>
34 #include <scsi/scsi_eh.h>
35 #include <scsi/scsi_transport.h>
36 #include <scsi/scsi_transport_spi.h>
37
/* printk helper that prefixes messages with the SPI target's device name */
#define SPI_PRINTK(x, l, f, a...)	dev_printk(l, &(x)->dev, f , ##a)

/* release callbacks; defined below, after the class declarations that use them */
static void transport_class_release(struct class_device *class_dev);
static void host_class_release(struct class_device *class_dev);

#define SPI_NUM_ATTRS 10	/* increase this if you add attributes */
#define SPI_OTHER_ATTRS 1	/* Increase this if you add "always
				 * on" attributes */
#define SPI_HOST_ATTRS	1

/* largest echo buffer we are willing to exercise during DV write tests */
#define SPI_MAX_ECHO_BUFFER_SIZE	4096

/* Private data accessors (keep these out of the header file) */
#define spi_dv_pending(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_pending)
#define spi_dv_sem(x) (((struct spi_transport_attrs *)&(x)->starget_data)->dv_sem)
53
/* Internal glue object: couples the generic transport template with the
 * LLDD-supplied function table and the storage for the sysfs attributes. */
struct spi_internal {
	struct scsi_transport_template t;
	struct spi_function_template *f;
	/* The actual attributes */
	struct class_device_attribute private_attrs[SPI_NUM_ATTRS];
	/* The array of null terminated pointers to attributes 
	 * needed by scsi_sysfs.c */
	struct class_device_attribute *attrs[SPI_NUM_ATTRS + SPI_OTHER_ATTRS + 1];
	struct class_device_attribute private_host_attrs[SPI_HOST_ATTRS];
	struct class_device_attribute *host_attrs[SPI_HOST_ATTRS + 1];
};

/* recover the spi_internal from its embedded transport template */
#define to_spi_internal(tmpl)	container_of(tmpl, struct spi_internal, t)
67
/* Map PPR transfer period factors to their period in ns, as strings
 * because several values are fractional.  Indexed by the PPR factor. */
static const char *const ppr_to_ns[] = {
	/* The PPR values 0-6 are reserved, fill them in when
	 * the committee defines them */
	NULL,			/* 0x00 */
	NULL,			/* 0x01 */
	NULL,			/* 0x02 */
	NULL,			/* 0x03 */
	NULL,			/* 0x04 */
	NULL,			/* 0x05 */
	NULL,			/* 0x06 */
	"3.125",		/* 0x07 */
	"6.25",			/* 0x08 */
	"12.5",			/* 0x09 */
	"25",			/* 0x0a */
	"30.3",			/* 0x0b */
	"50",			/* 0x0c */
};
/* The PPR values at which you calculate the period in ns by multiplying
 * by 4 */
#define SPI_STATIC_PPR	0x0c
88
/* Translation table between enum spi_signal_type and the strings shown
 * and accepted by the host "signalling" sysfs attribute. */
static struct {
	enum spi_signal_type	value;
	char			*name;
} signal_types[] = {
	{ SPI_SIGNAL_UNKNOWN, "unknown" },
	{ SPI_SIGNAL_SE, "SE" },
	{ SPI_SIGNAL_LVD, "LVD" },
	{ SPI_SIGNAL_HVD, "HVD" },
};
98
99 static inline const char *spi_signal_to_string(enum spi_signal_type type)
100 {
101         int i;
102
103         for (i = 0; i < sizeof(signal_types)/sizeof(signal_types[0]); i++) {
104                 if (type == signal_types[i].value)
105                         return signal_types[i].name;
106         }
107         return NULL;
108 }
109 static inline enum spi_signal_type spi_signal_to_value(const char *name)
110 {
111         int i, len;
112
113         for (i = 0; i < sizeof(signal_types)/sizeof(signal_types[0]); i++) {
114                 len =  strlen(signal_types[i].name);
115                 if (strncmp(name, signal_types[i].name, len) == 0 &&
116                     (name[len] == '\n' || name[len] == '\0'))
117                         return signal_types[i].value;
118         }
119         return SPI_SIGNAL_UNKNOWN;
120 }
121
122
/* sysfs class for the per-target SPI transport attributes */
struct class spi_transport_class = {
	.name = "spi_transport",
	.release = transport_class_release,
};

/* sysfs class for the per-host SPI attributes (signalling) */
struct class spi_host_class = {
	.name = "spi_host",
	.release = host_class_release,
};
132
133 static __init int spi_transport_init(void)
134 {
135         int error = class_register(&spi_host_class);
136         if (error)
137                 return error;
138         return class_register(&spi_transport_class);
139 }
140
/* Module exit: unregister the classes in reverse registration order */
static void __exit spi_transport_exit(void)
{
	class_unregister(&spi_transport_class);
	class_unregister(&spi_host_class);
}
146
/* Host setup callback: the bus signalling is unknown until the LLDD
 * (or the user via sysfs) tells us otherwise. */
static int spi_setup_host_attrs(struct Scsi_Host *shost)
{
	spi_signalling(shost) = SPI_SIGNAL_UNKNOWN;

	return 0;
}
153
/* Device configure callback: copy the transfer capabilities the device
 * advertised in its INQUIRY data into the target's transport attributes. */
static int spi_configure_device(struct scsi_device *sdev)
{
	struct scsi_target *starget = sdev->sdev_target;

	/* Populate the target capability fields with the values
	 * gleaned from the device inquiry */

	spi_support_sync(starget) = scsi_device_sync(sdev);
	spi_support_wide(starget) = scsi_device_wide(sdev);
	spi_support_dt(starget) = scsi_device_dt(sdev);
	spi_support_dt_only(starget) = scsi_device_dt_only(sdev);
	spi_support_ius(starget) = scsi_device_ius(sdev);
	spi_support_qas(starget) = scsi_device_qas(sdev);

	return 0;
}
170
/* Target setup callback: start every target at the safest settings
 * (narrow/async, all optional features off) until DV negotiates better. */
static int spi_setup_transport_attrs(struct scsi_target *starget)
{
	spi_period(starget) = -1;	/* illegal value */
	spi_offset(starget) = 0;	/* async */
	spi_width(starget) = 0;	/* narrow */
	spi_iu(starget) = 0;	/* no IU */
	spi_dt(starget) = 0;	/* ST */
	spi_qas(starget) = 0;
	spi_wr_flow(starget) = 0;
	spi_rd_strm(starget) = 0;
	spi_rti(starget) = 0;
	spi_pcomp_en(starget) = 0;
	spi_dv_pending(starget) = 0;	/* no DV queued yet */
	spi_initial_dv(starget) = 0;	/* no DV has completed yet */
	init_MUTEX(&spi_dv_sem(starget));	/* serializes DV runs per target */

	return 0;
}
189
/* class release: drop the target reference taken when the transport
 * class device was registered */
static void transport_class_release(struct class_device *class_dev)
{
	struct scsi_target *starget = transport_class_to_starget(class_dev);
	put_device(&starget->dev);
}
195
/* class release: drop the host reference taken when the host class
 * device was registered */
static void host_class_release(struct class_device *class_dev)
{
	struct Scsi_Host *shost = transport_class_to_shost(class_dev);
	put_device(&shost->shost_gendev);
}
201
/* Generate a sysfs show routine for one transport attribute: ask the
 * LLDD to refresh its cached value (if it provides a getter), then
 * print the field from the target's private transport data. */
#define spi_transport_show_function(field, format_string)		\
									\
static ssize_t								\
show_spi_transport_##field(struct class_device *cdev, char *buf)	\
{									\
	struct scsi_target *starget = transport_class_to_starget(cdev); \
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
	struct spi_transport_attrs *tp;					\
	struct spi_internal *i = to_spi_internal(shost->transportt);	\
	tp = (struct spi_transport_attrs *)&starget->starget_data;	\
	if (i->f->get_##field)						\
		i->f->get_##field(starget);				\
	return snprintf(buf, 20, format_string, tp->field);		\
}

/* Generate the matching store routine: parse the user's integer and
 * hand it straight to the LLDD setter (SETUP_ATTRIBUTE guarantees a
 * store routine is only exposed when set_##field exists). */
#define spi_transport_store_function(field, format_string)		\
static ssize_t								\
store_spi_transport_##field(struct class_device *cdev, const char *buf, \
			    size_t count)				\
{									\
	int val;							\
	struct scsi_target *starget = transport_class_to_starget(cdev); \
	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);	\
	struct spi_internal *i = to_spi_internal(shost->transportt);	\
									\
	val = simple_strtoul(buf, NULL, 0);				\
	i->f->set_##field(starget, val);				\
	return count;							\
}

/* Declare a read/write class attribute built from the two generators */
#define spi_transport_rd_attr(field, format_string)			\
	spi_transport_show_function(field, format_string)		\
	spi_transport_store_function(field, format_string)		\
static CLASS_DEVICE_ATTR(field, S_IRUGO | S_IWUSR,			\
			 show_spi_transport_##field,			\
			 store_spi_transport_##field);
238
/* The Parallel SCSI Transport Attributes: */
spi_transport_rd_attr(offset, "%d\n");
spi_transport_rd_attr(width, "%d\n");
spi_transport_rd_attr(iu, "%d\n");
spi_transport_rd_attr(dt, "%d\n");
spi_transport_rd_attr(qas, "%d\n");
spi_transport_rd_attr(wr_flow, "%d\n");
spi_transport_rd_attr(rd_strm, "%d\n");
spi_transport_rd_attr(rti, "%d\n");
spi_transport_rd_attr(pcomp_en, "%d\n");
249
250 static ssize_t
251 store_spi_revalidate(struct class_device *cdev, const char *buf, size_t count)
252 {
253         struct scsi_target *starget = transport_class_to_starget(cdev);
254
255         /* FIXME: we're relying on an awful lot of device internals
256          * here.  We really need a function to get the first available
257          * child */
258         struct device *dev = container_of(starget->dev.children.next, struct device, node);
259         struct scsi_device *sdev = to_scsi_device(dev);
260         spi_dv_device(sdev);
261         return count;
262 }
263 static CLASS_DEVICE_ATTR(revalidate, S_IWUSR, NULL, store_spi_revalidate);
264
265 /* Translate the period into ns according to the current spec
266  * for SDTR/PPR messages */
267 static ssize_t show_spi_transport_period(struct class_device *cdev, char *buf)
268
269 {
270         struct scsi_target *starget = transport_class_to_starget(cdev);
271         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
272         struct spi_transport_attrs *tp;
273         const char *str;
274         struct spi_internal *i = to_spi_internal(shost->transportt);
275
276         tp = (struct spi_transport_attrs *)&starget->starget_data;
277
278         if (i->f->get_period)
279                 i->f->get_period(starget);
280
281         switch(tp->period) {
282
283         case 0x07 ... SPI_STATIC_PPR:
284                 str = ppr_to_ns[tp->period];
285                 if(!str)
286                         str = "reserved";
287                 break;
288
289
290         case (SPI_STATIC_PPR+1) ... 0xff:
291                 return sprintf(buf, "%d\n", tp->period * 4);
292
293         default:
294                 str = "unknown";
295         }
296         return sprintf(buf, "%s\n", str);
297 }
298
299 static ssize_t
300 store_spi_transport_period(struct class_device *cdev, const char *buf,
301                             size_t count)
302 {
303         struct scsi_target *starget = transport_class_to_starget(cdev);
304         struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
305         struct spi_internal *i = to_spi_internal(shost->transportt);
306         int j, period = -1;
307
308         for (j = 0; j < SPI_STATIC_PPR; j++) {
309                 int len;
310
311                 if(ppr_to_ns[j] == NULL)
312                         continue;
313
314                 len = strlen(ppr_to_ns[j]);
315
316                 if(strncmp(ppr_to_ns[j], buf, len) != 0)
317                         continue;
318
319                 if(buf[len] != '\n')
320                         continue;
321                 
322                 period = j;
323                 break;
324         }
325
326         if (period == -1) {
327                 int val = simple_strtoul(buf, NULL, 0);
328
329
330                 /* Should probably check limits here, but this
331                  * gets reasonably close to OK for most things */
332                 period = val/4;
333         }
334
335         if (period > 0xff)
336                 period = 0xff;
337
338         i->f->set_period(starget, period);
339
340         return count;
341 }
342         
343 static CLASS_DEVICE_ATTR(period, S_IRUGO | S_IWUSR, 
344                          show_spi_transport_period,
345                          store_spi_transport_period);
346
/* show the host's current bus signalling type (SE/LVD/HVD/unknown) */
static ssize_t show_spi_host_signalling(struct class_device *cdev, char *buf)
{
	struct Scsi_Host *shost = transport_class_to_shost(cdev);
	struct spi_internal *i = to_spi_internal(shost->transportt);

	/* let the LLDD refresh its cached signalling value first */
	if (i->f->get_signalling)
		i->f->get_signalling(shost);

	return sprintf(buf, "%s\n", spi_signal_to_string(spi_signalling(shost)));
}
/* set the host's bus signalling; unrecognized input is silently ignored
 * (spi_signal_to_value() returns SPI_SIGNAL_UNKNOWN for it) */
static ssize_t store_spi_host_signalling(struct class_device *cdev,
					 const char *buf, size_t count)
{
	struct Scsi_Host *shost = transport_class_to_shost(cdev);
	struct spi_internal *i = to_spi_internal(shost->transportt);
	enum spi_signal_type type = spi_signal_to_value(buf);

	if (type != SPI_SIGNAL_UNKNOWN)
		i->f->set_signalling(shost, type);

	return count;
}
static CLASS_DEVICE_ATTR(signalling, S_IRUGO | S_IWUSR,
			 show_spi_host_signalling,
			 store_spi_host_signalling);
372
/* Invoke the LLDD setter for parameter x, if one was provided.
 * NOTE: deliberately not wrapped in do { } while (0); only safe to use
 * as a complete statement, which is how all callers below use it. */
#define DV_SET(x, y)			\
	if(i->f->set_##x)		\
		i->f->set_##x(sdev->sdev_target, y)

#define DV_LOOPS	3		/* compare iterations per DV test */
#define DV_TIMEOUT	(10*HZ)		/* per-command timeout */
#define DV_RETRIES	3	/* should only need at most 
				 * two cc/ua clears */

/* outcome of one domain validation compare pass */
enum spi_compare_returns {
	SPI_COMPARE_SUCCESS,
	SPI_COMPARE_FAILURE,
	SPI_COMPARE_SKIP_TEST,	/* device can't run this test at all */
};
387
388
389 /* This is for read/write Domain Validation:  If the device supports
390  * an echo buffer, we do read/write tests to it */
/* This is for read/write Domain Validation:  If the device supports
 * an echo buffer, we do read/write tests to it.
 *
 * buffer holds the generated test pattern; ptr points just past it and
 * receives the read-back data, so (ptr - buffer) is the pattern length.
 * Returns SKIP_TEST if the device rejects WRITE BUFFER with
 * ILLEGAL REQUEST / INVALID FIELD IN CDB (i.e. it lied about having an
 * echo buffer), FAILURE on error or data miscompare, else SUCCESS. */
static enum spi_compare_returns
spi_dv_device_echo_buffer(struct scsi_request *sreq, u8 *buffer,
			  u8 *ptr, const int retries)
{
	struct scsi_device *sdev = sreq->sr_device;
	int len = ptr - buffer;		/* echo buffer / pattern size */
	int j, k, r;
	unsigned int pattern = 0x0000ffff;

	/* WRITE/READ BUFFER CDBs, mode 0x0a = echo buffer; bytes 7-8
	 * carry the big-endian transfer length */
	const char spi_write_buffer[] = {
		WRITE_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
	};
	const char spi_read_buffer[] = {
		READ_BUFFER, 0x0a, 0, 0, 0, 0, 0, len >> 8, len & 0xff, 0
	};

	/* set up the pattern buffer.  Doesn't matter if we spill
	 * slightly beyond since that's where the read buffer is */
	for (j = 0; j < len; ) {

		/* fill the buffer with counting (test a) */
		for ( ; j < min(len, 32); j++)
			buffer[j] = j;
		k = j;
		/* fill the buffer with alternating words of 0x0 and
		 * 0xffff (test b) */
		for ( ; j < min(len, k + 32); j += 2) {
			u16 *word = (u16 *)&buffer[j];
			
			*word = (j & 0x02) ? 0x0000 : 0xffff;
		}
		k = j;
		/* fill with crosstalk (alternating 0x5555 0xaaa)
		 * (test c) */
		for ( ; j < min(len, k + 32); j += 2) {
			u16 *word = (u16 *)&buffer[j];

			*word = (j & 0x02) ? 0x5555 : 0xaaaa;
		}
		k = j;
		/* fill with shifting bits (test d) */
		for ( ; j < min(len, k + 32); j += 4) {
			u32 *word = (unsigned int *)&buffer[j];
			u32 roll = (pattern & 0x80000000) ? 1 : 0;
			
			*word = pattern;
			pattern = (pattern << 1) | roll;
		}
		/* don't bother with random data (test e) */
	}

	/* write the pattern out and read it back `retries` times;
	 * any failure or miscompare ends the test immediately */
	for (r = 0; r < retries; r++) {
		sreq->sr_cmd_len = 0;	/* wait_req to fill in */
		sreq->sr_data_direction = DMA_TO_DEVICE;
		scsi_wait_req(sreq, spi_write_buffer, buffer, len,
			      DV_TIMEOUT, DV_RETRIES);
		if(sreq->sr_result || !scsi_device_online(sdev)) {
			struct scsi_sense_hdr sshdr;

			/* re-quiesce; the failed command may have changed
			 * the device state */
			scsi_device_set_state(sdev, SDEV_QUIESCE);
			if (scsi_request_normalize_sense(sreq, &sshdr)
			    && sshdr.sense_key == ILLEGAL_REQUEST
			    /* INVALID FIELD IN CDB */
			    && sshdr.asc == 0x24 && sshdr.ascq == 0x00)
				/* This would mean that the drive lied
				 * to us about supporting an echo
				 * buffer (unfortunately some Western
				 * Digital drives do precisely this)
				 */
				return SPI_COMPARE_SKIP_TEST;


			SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Write Buffer failure %x\n", sreq->sr_result);
			return SPI_COMPARE_FAILURE;
		}

		memset(ptr, 0, len);
		sreq->sr_cmd_len = 0;	/* wait_req to fill in */
		sreq->sr_data_direction = DMA_FROM_DEVICE;
		scsi_wait_req(sreq, spi_read_buffer, ptr, len,
			      DV_TIMEOUT, DV_RETRIES);
		scsi_device_set_state(sdev, SDEV_QUIESCE);

		/* a mismatch means the transfer was corrupted at the
		 * current negotiated settings */
		if (memcmp(buffer, ptr, len) != 0)
			return SPI_COMPARE_FAILURE;
	}
	return SPI_COMPARE_SUCCESS;
}
479
480 /* This is for the simplest form of Domain Validation: a read test
481  * on the inquiry data from the device */
/* This is for the simplest form of Domain Validation: a read test
 * on the inquiry data from the device.
 *
 * When ptr == buffer, the first read establishes the reference copy
 * (and doesn't count as a retry); subsequent reads land at buffer+len
 * and are compared against it. */
static enum spi_compare_returns
spi_dv_device_compare_inquiry(struct scsi_request *sreq, u8 *buffer,
			      u8 *ptr, const int retries)
{
	int r;
	const int len = sreq->sr_device->inquiry_len;
	struct scsi_device *sdev = sreq->sr_device;
	const char spi_inquiry[] = {
		INQUIRY, 0, 0, 0, len, 0
	};

	for (r = 0; r < retries; r++) {
		sreq->sr_cmd_len = 0;	/* wait_req to fill in */
		sreq->sr_data_direction = DMA_FROM_DEVICE;

		memset(ptr, 0, len);

		scsi_wait_req(sreq, spi_inquiry, ptr, len,
			      DV_TIMEOUT, DV_RETRIES);
		
		if(sreq->sr_result || !scsi_device_online(sdev)) {
			/* restore the quiesced state DV expects before
			 * reporting failure */
			scsi_device_set_state(sdev, SDEV_QUIESCE);
			return SPI_COMPARE_FAILURE;
		}

		/* If we don't have the inquiry data already, the
		 * first read gets it */
		if (ptr == buffer) {
			ptr += len;
			--r;	/* the reference read is not a retry */
			continue;
		}

		if (memcmp(buffer, ptr, len) != 0)
			/* failure */
			return SPI_COMPARE_FAILURE;
	}
	return SPI_COMPARE_SUCCESS;
}
521
/* Run compare_fn at the current settings and, while it fails, step the
 * transfer period factor upwards (slower) and retry.  If we run past
 * 0xff or stop making progress, drop the target back to async and
 * report total failure. */
static enum spi_compare_returns
spi_dv_retrain(struct scsi_request *sreq, u8 *buffer, u8 *ptr,
	       enum spi_compare_returns 
	       (*compare_fn)(struct scsi_request *, u8 *, u8 *, int))
{
	struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt);
	struct scsi_device *sdev = sreq->sr_device;
	int period = 0, prevperiod = 0; 
	enum spi_compare_returns retval;


	for (;;) {
		int newperiod;
		retval = compare_fn(sreq, buffer, ptr, DV_LOOPS);

		if (retval == SPI_COMPARE_SUCCESS
		    || retval == SPI_COMPARE_SKIP_TEST)
			break;

		/* OK, retrain, fallback */
		if (i->f->get_period)
			i->f->get_period(sdev->sdev_target);
		newperiod = spi_period(sdev->sdev_target);
		/* the LLDD may already have fallen back further than us;
		 * continue from whichever period is slower */
		period = newperiod > period ? newperiod : period;
		if (period < 0x0d)
			period++;	/* fine steps within the PPR table range */
		else
			period += period >> 1;	/* otherwise back off by 50% */

		if (unlikely(period > 0xff || period == prevperiod)) {
			/* Total failure; set to async and return */
			SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation Failure, dropping back to Asynchronous\n");
			DV_SET(offset, 0);
			return SPI_COMPARE_FAILURE;
		}
		SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation detected failure, dropping back\n");
		DV_SET(period, period);
		prevperiod = period;
	}
	return retval;
}
563
/* Probe the device for an echo buffer.  Returns the echo buffer size
 * in bytes, or 0 if the device has none or can't be made ready for
 * the write tests. */
static int
spi_dv_device_get_echo_buffer(struct scsi_request *sreq, u8 *buffer)
{
	int l;

	/* first off do a test unit ready.  This can error out 
	 * because of reservations or some other reason.  If it
	 * fails, the device won't let us write to the echo buffer
	 * so just return failure */
	
	const char spi_test_unit_ready[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0
	};

	/* READ BUFFER, mode 0x0b = read the 4-byte echo buffer descriptor */
	const char spi_read_buffer_descriptor[] = {
		READ_BUFFER, 0x0b, 0, 0, 0, 0, 0, 0, 4, 0
	};

	
	sreq->sr_cmd_len = 0;
	sreq->sr_data_direction = DMA_NONE;

	/* We send a set of three TURs to clear any outstanding 
	 * unit attention conditions if they exist (Otherwise the
	 * buffer tests won't be happy).  If the TUR still fails
	 * (reservation conflict, device not ready, etc) just
	 * skip the write tests */
	for (l = 0; ; l++) {
		scsi_wait_req(sreq, spi_test_unit_ready, NULL, 0,
			      DV_TIMEOUT, DV_RETRIES);

		if(sreq->sr_result) {
			/* give up after the fourth consecutive failure */
			if(l >= 3)
				return 0;
		} else {
			/* TUR succeeded */
			break;
		}
	}

	sreq->sr_cmd_len = 0;
	sreq->sr_data_direction = DMA_FROM_DEVICE;

	scsi_wait_req(sreq, spi_read_buffer_descriptor, buffer, 4,
		      DV_TIMEOUT, DV_RETRIES);

	if (sreq->sr_result)
		/* Device has no echo buffer */
		return 0;

	/* buffer capacity from the descriptor; only 13 bits are used */
	return buffer[3] + ((buffer[2] & 0x1f) << 8);
}
616
/* The core DV sequence: establish a narrow/async baseline, test wide
 * transfers, then jump to maximum speed and let spi_dv_retrain() fall
 * back until the bus is stable — using echo-buffer write tests when the
 * device has one, inquiry read tests otherwise. */
static void
spi_dv_device_internal(struct scsi_request *sreq, u8 *buffer)
{
	struct spi_internal *i = to_spi_internal(sreq->sr_host->transportt);
	struct scsi_device *sdev = sreq->sr_device;
	int len = sdev->inquiry_len;
	/* first set us up for narrow async */
	DV_SET(offset, 0);
	DV_SET(width, 0);
	
	if (spi_dv_device_compare_inquiry(sreq, buffer, buffer, DV_LOOPS)
	    != SPI_COMPARE_SUCCESS) {
		SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Domain Validation Initial Inquiry Failed\n");
		/* FIXME: should probably offline the device here? */
		return;
	}

	/* test width */
	if (i->f->set_width && sdev->wdtr) {
		i->f->set_width(sdev->sdev_target, 1);

		/* reread the inquiry data at wide and compare against the
		 * narrow baseline copy at the start of buffer */
		if (spi_dv_device_compare_inquiry(sreq, buffer,
						   buffer + len,
						   DV_LOOPS)
		    != SPI_COMPARE_SUCCESS) {
			SPI_PRINTK(sdev->sdev_target, KERN_ERR, "Wide Transfers Fail\n");
			i->f->set_width(sdev->sdev_target, 0);
		}
	}

	/* without a period setter there is nothing more we can train */
	if (!i->f->set_period)
		return;

	/* device can't handle synchronous */
	if(!sdev->ppr && !sdev->sdtr)
		return;

	/* see if the device has an echo buffer.  If it does we can
	 * do the SPI pattern write tests */

	len = 0;
	if (sdev->ppr)
		len = spi_dv_device_get_echo_buffer(sreq, buffer);

 retry:

	/* now set up to the maximum */
	DV_SET(offset, 255);
	DV_SET(period, 1);

	if (len == 0) {
		/* no echo buffer: fall back to read-only inquiry compares */
		SPI_PRINTK(sdev->sdev_target, KERN_INFO, "Domain Validation skipping write tests\n");
		spi_dv_retrain(sreq, buffer, buffer + len,
			       spi_dv_device_compare_inquiry);
		return;
	}

	if (len > SPI_MAX_ECHO_BUFFER_SIZE) {
		SPI_PRINTK(sdev->sdev_target, KERN_WARNING, "Echo buffer size %d is too big, trimming to %d\n", len, SPI_MAX_ECHO_BUFFER_SIZE);
		len = SPI_MAX_ECHO_BUFFER_SIZE;
	}

	if (spi_dv_retrain(sreq, buffer, buffer + len,
			   spi_dv_device_echo_buffer)
	    == SPI_COMPARE_SKIP_TEST) {
		/* OK, the stupid drive can't do a write echo buffer
		 * test after all, fall back to the read tests */
		len = 0;
		goto retry;
	}
}
688
689
/**	spi_dv_device - Do Domain Validation on the device
 *	@sdev:		scsi device to validate
 *
 *	Performs the domain validation on the given device in the
 *	current execution thread.  Since DV operations may sleep,
 *	the current thread must have user context.  Also no SCSI
 *	related locks that would deadlock I/O issued by the DV may
 *	be held.
 */
void
spi_dv_device(struct scsi_device *sdev)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);
	struct scsi_target *starget = sdev->sdev_target;
	u8 *buffer;
	/* room for the write pattern plus the read-back copy */
	const int len = SPI_MAX_ECHO_BUFFER_SIZE*2;

	if (unlikely(!sreq))
		return;

	/* pin the device so it can't go away while DV runs */
	if (unlikely(scsi_device_get(sdev)))
		goto out_free_req;

	buffer = kmalloc(len, GFP_KERNEL);

	if (unlikely(!buffer))
		goto out_put;

	memset(buffer, 0, len);

	/* We need to verify that the actual device will quiesce; the
	 * later target quiesce is just a nice to have */
	if (unlikely(scsi_device_quiesce(sdev)))
		goto out_free;

	scsi_target_quiesce(starget);

	spi_dv_pending(starget) = 1;
	down(&spi_dv_sem(starget));	/* one DV run per target at a time */

	SPI_PRINTK(starget, KERN_INFO, "Beginning Domain Validation\n");

	spi_dv_device_internal(sreq, buffer);

	SPI_PRINTK(starget, KERN_INFO, "Ending Domain Validation\n");

	up(&spi_dv_sem(starget));
	spi_dv_pending(starget) = 0;

	scsi_target_resume(starget);

	/* record that this target has completed its initial DV */
	spi_initial_dv(starget) = 1;

 out_free:
	kfree(buffer);
 out_put:
	scsi_device_put(sdev);
 out_free_req:
	scsi_release_request(sreq);
}
EXPORT_SYMBOL(spi_dv_device);
751
/* carries the target scsi_device across the workqueue boundary */
struct work_queue_wrapper {
	struct work_struct	work;
	struct scsi_device	*sdev;
};

/* workqueue callback for spi_schedule_dv_device(): run the DV, then
 * release the pending flag and the device reference the scheduler took */
static void
spi_dv_device_work_wrapper(void *data)
{
	struct work_queue_wrapper *wqw = (struct work_queue_wrapper *)data;
	struct scsi_device *sdev = wqw->sdev;

	kfree(wqw);
	spi_dv_device(sdev);
	spi_dv_pending(sdev->sdev_target) = 0;
	scsi_device_put(sdev);
}
768
/**
 *	spi_schedule_dv_device - schedule domain validation to occur on the device
 *	@sdev:	The device to validate
 *
 *	Identical to spi_dv_device() above, except that the DV will be
 *	scheduled to occur in a workqueue later.  All memory allocations
 *	are atomic, so may be called from any context including those holding
 *	SCSI locks.
 */
void
spi_schedule_dv_device(struct scsi_device *sdev)
{
	struct work_queue_wrapper *wqw =
		kmalloc(sizeof(struct work_queue_wrapper), GFP_ATOMIC);

	if (unlikely(!wqw))
		return;

	/* NOTE(review): this check-then-set of dv_pending is not atomic, so
	 * two concurrent callers could both queue work — presumably tolerable
	 * because spi_dv_sem serializes the actual DV runs; confirm. */
	if (unlikely(spi_dv_pending(sdev->sdev_target))) {
		kfree(wqw);
		return;
	}
	/* Set pending early (dv_device doesn't check it, only sets it) */
	spi_dv_pending(sdev->sdev_target) = 1;
	if (unlikely(scsi_device_get(sdev))) {
		kfree(wqw);
		spi_dv_pending(sdev->sdev_target) = 0;
		return;
	}

	INIT_WORK(&wqw->work, spi_dv_device_work_wrapper, wqw);
	wqw->sdev = sdev;

	schedule_work(&wqw->work);
}
EXPORT_SYMBOL(spi_schedule_dv_device);
806
/* Copy the template attribute into the internal array, read-only when
 * the LLDD provides no setter.  count only advances when the LLDD has
 * a getter, so an unsupported attribute is overwritten by the next
 * SETUP_ATTRIBUTE and never appears in sysfs. */
#define SETUP_ATTRIBUTE(field)						\
	i->private_attrs[count] = class_device_attr_##field;		\
	if (!i->f->set_##field) {					\
		i->private_attrs[count].attr.mode = S_IRUGO;		\
		i->private_attrs[count].store = NULL;			\
	}								\
	i->attrs[count] = &i->private_attrs[count];			\
	if (i->f->show_##field)						\
		count++

/* Host attributes are always exposed; only writability is conditional */
#define SETUP_HOST_ATTRIBUTE(field)					\
	i->private_host_attrs[count] = class_device_attr_##field;	\
	if (!i->f->set_##field) {					\
		i->private_host_attrs[count].attr.mode = S_IRUGO;	\
		i->private_host_attrs[count].store = NULL;		\
	}								\
	i->host_attrs[count] = &i->private_host_attrs[count];		\
	count++
825
/* Allocate and populate a transport template for an SPI LLDD from its
 * function table @ft.  Returns NULL on allocation failure.  The result
 * must be freed with spi_release_transport(). */
struct scsi_transport_template *
spi_attach_transport(struct spi_function_template *ft)
{
	struct spi_internal *i = kmalloc(sizeof(struct spi_internal),
					 GFP_KERNEL);
	int count = 0;
	if (unlikely(!i))
		return NULL;

	memset(i, 0, sizeof(struct spi_internal));


	i->t.target_attrs = &i->attrs[0];
	i->t.target_class = &spi_transport_class;
	i->t.target_setup = &spi_setup_transport_attrs;
	i->t.device_configure = &spi_configure_device;
	i->t.target_size = sizeof(struct spi_transport_attrs);
	i->t.host_attrs = &i->host_attrs[0];
	i->t.host_class = &spi_host_class;
	i->t.host_setup = &spi_setup_host_attrs;
	i->t.host_size = sizeof(struct spi_host_attrs);
	i->f = ft;

	SETUP_ATTRIBUTE(period);
	SETUP_ATTRIBUTE(offset);
	SETUP_ATTRIBUTE(width);
	SETUP_ATTRIBUTE(iu);
	SETUP_ATTRIBUTE(dt);
	SETUP_ATTRIBUTE(qas);
	SETUP_ATTRIBUTE(wr_flow);
	SETUP_ATTRIBUTE(rd_strm);
	SETUP_ATTRIBUTE(rti);
	SETUP_ATTRIBUTE(pcomp_en);

	/* if you add an attribute but forget to increase SPI_NUM_ATTRS
	 * this bug will trigger */
	BUG_ON(count > SPI_NUM_ATTRS);

	/* revalidate is always present, independent of the LLDD hooks */
	i->attrs[count++] = &class_device_attr_revalidate;

	i->attrs[count] = NULL;	/* NULL-terminate for scsi_sysfs.c */

	count = 0;
	SETUP_HOST_ATTRIBUTE(signalling);

	BUG_ON(count > SPI_HOST_ATTRS);

	i->host_attrs[count] = NULL;

	return &i->t;
}
EXPORT_SYMBOL(spi_attach_transport);
878
/* Free a template previously returned by spi_attach_transport() */
void spi_release_transport(struct scsi_transport_template *t)
{
	kfree(to_spi_internal(t));
}
EXPORT_SYMBOL(spi_release_transport);
886
887
MODULE_AUTHOR("Martin Hicks");
MODULE_DESCRIPTION("SPI Transport Attributes");
MODULE_LICENSE("GPL");

/* register/unregister the sysfs classes at module load/unload */
module_init(spi_transport_init);
module_exit(spi_transport_exit);