2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
7 * $Id: cfi_cmdset_0001.c,v 1.21 2000/07/13 10:36:14 dwmw2 Exp $
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
15 #include <asm/byteorder.h>
17 #include <linux/errno.h>
18 #include <linux/malloc.h>
19 #include <linux/delay.h>
20 #include <linux/mtd/map.h>
21 #include <linux/mtd/cfi.h>
23 #if LINUX_VERSION_CODE < 0x20300
24 #define set_current_state(x) current->state = (x);
/* MTD interface operations for the 1-chip x 16-bit-bus geometry */
static int cfi_intelext_read_1_by_16 (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_1_by_16(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_1_by_16 (struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

/* Installed as map->fldrv_destroy; tears down what this module set up */
static void cfi_intelext_destroy(struct mtd_info *);

/* Probe entry point, exported via inter_module_register() at init time */
static void cfi_cmdset_0001(struct map_info *, int, unsigned long);

/* Second-stage setup, installed into cfi->cmdset_setup by the probe */
static struct mtd_info *cfi_intelext_setup (struct map_info *);

/* Name under which cfi_cmdset_0001() is registered with inter_module */
static const char im_name[] = "cfi_cmdset_0001";
41 /* This routine is made available to other mtd code via
42 * inter_module_register. It must only be accessed through
43 * inter_module_get which will bump the use count of this module. The
44 * addresses passed back in cfi are valid as long as the use count of
45 * this module is non-zero, i.e. between inter_module_get and
46 * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
/*
 * Probe-time entry point for the Intel/Sharp (ID 0x0001) command set.
 * Switches the chip into CFI Query mode, reads in the vendor-specific
 * Extended Query table at 'adr', sanity-checks its version (1.0-1.2),
 * prints its contents, then installs cfi_intelext_setup() and the table
 * itself into the map's cfi_private so the map driver can finish setup.
 */
static void cfi_cmdset_0001(struct map_info *map, int primary, unsigned long base)
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	/* P_ADR/A_ADR: offset of the primary/alternate vendor-specific
	 * table within the CFI Query structure */
	__u16 adr = primary?cfi->cfiq.P_ADR:cfi->cfiq.A_ADR;

	printk(" Intel/Sharp Extended Query Table at 0x%4.4X\n", adr);

	/* Switch it into Query Mode: 0x98 is the CFI 'Read Query' command,
	 * replicated on every byte lane for the wider bus widths */
	switch(map->buswidth) {
	map->write8(map, 0x98, 0x55);
	map->write16(map, 0x9898, 0xaa);
	map->write32(map, 0x98989898, 0x154);

	extp = kmalloc(sizeof(*extp), GFP_KERNEL);
	/* allocation-failure path */
	printk("Failed to allocate memory\n");

	/* Read in the Extended Query Table, one byte per bus location */
	for (i=0; i<sizeof(*extp); i++) {
		((unsigned char *)extp)[i] =
			map->read8(map, (base+((adr+i)*map->buswidth)));

	/* Only versions 1.0 - 1.2 of the Intel extended table are known;
	 * version bytes are ASCII digits */
	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '2')) {
		printk(" Unknown IntelExt Extended Query version %c.%c.\n",
		       extp->MajorVersion, extp->MinorVersion);

	/* Do some byteswapping if necessary: the table is little-endian */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le32_to_cpu(extp->BlkStatusRegMask);

	/* Tell the user about it in lots of lovely detail */
	printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
	printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	/* Bits 9..31 are reserved / unknown to this driver */
	for (i=9; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);

	printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	/* Bits 1..7 are reserved / unknown to this driver */
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk(" - Unknown Bit %X: supported\n", i);

	printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk(" - Valid Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	/* NOTE(review): loop re-tests bit 1 ('Valid Bit') just printed
	 * above - expected start at i=2 is correct, but bit 1 is printed
	 * twice only if set; harmless. Bits 2..15 reserved. */
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk(" - Unknown Bit %X Active: yes\n",i);

	/* NOTE(review): VccOptimal/VppOptimal are BCD-coded fields; using
	 * ">> 8" for the integer digit of an 8-bit field always yields 0.
	 * ">> 4" looks intended - confirm against struct cfi_pri_intelext. */
	printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 8, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 8, extp->VppOptimal & 0xf);

	/* OK. We like it. Take over the control of it. */

	/* Switch it into Read Mode: 0xFF = 'Read Array', per byte lane */
	switch(map->buswidth) {
	map->write8(map, 0xff, 0x55);
	map->write16(map, 0xffff, 0xaa);
	map->write32(map, 0xffffffff, 0x154);

	/* If there was an old setup function, decrease its use count */
	if (cfi->cmdset_setup)
		inter_module_put(cfi->im_name);
	if (cfi->cmdset_priv)
		kfree(cfi->cmdset_priv);

	/* Initial per-chip timing guesses; the write path adapts
	 * word_write_time up/down as operations complete (see
	 * do_write_1_by_16_oneword) and uses it as a udelay() hint. */
	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 128;
		cfi->chips[i].buffer_write_time = 128;
		cfi->chips[i].erase_time = 1024;

	/* Hand ourselves over: the map driver calls cmdset_setup next */
	cfi->cmdset_setup = cfi_intelext_setup;
	cfi->im_name = im_name;
	cfi->cmdset_priv = extp;
/*
 * Second-stage setup, called by the map driver via cfi->cmdset_setup:
 * allocates and fills in the struct mtd_info for this map, wiring the
 * MTD operations to the 1_by_16 implementations in this file.
 * On allocation failure the cmdset_priv taken by cfi_cmdset_0001() is
 * released (failure return value not visible here - presumably NULL).
 */
static struct mtd_info *cfi_intelext_setup(struct map_info *map)
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	printk("number of CFI chips: %d\n", cfi->numchips);
	/* allocation-failure path */
	printk("Failed to allocate memory for MTD device\n");
	kfree(cfi->cmdset_priv);

	memset(mtd, 0, sizeof(*mtd));

	mtd->type = MTD_NORFLASH;
	mtd->erasesize = 0x20000; /* FIXME */
	/* Also select the correct geometry setup too */
	/* cfiq.DevSize is log2 of one device's size in bytes */
	mtd->size = (1 << cfi->cfiq.DevSize) * cfi->numchips;
	mtd->erase = cfi_intelext_erase_1_by_16;
	mtd->read = cfi_intelext_read_1_by_16;
	mtd->write = cfi_intelext_write_1_by_16;
	mtd->sync = cfi_intelext_sync;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume = cfi_intelext_resume;
	mtd->flags = MTD_CAP_NORFLASH;
	map->fldrv_destroy = cfi_intelext_destroy;
	mtd->name = map->name;
/*
 * Read 'len' bytes at offset 'adr' of one chip into 'buf'.
 * Waits for the chip to become free - first by polling the status
 * register (bit 7 = Write State Machine ready), then by sleeping on
 * chip->wq if the chip is mid-operation - then issues Read Array and
 * does a bulk copy_from. Takes and releases chip->mutex itself.
 */
static inline int do_read_1_by_16_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING or FL_WRITING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {
	/* Suspend the operation, set state to FL_xxx_SUSPENDED */

	/* 0x70 = Read Status Register command */
	map->write16(map, cpu_to_le16(0x0070), adr);
	chip->state = FL_STATUS;

	status = le16_to_cpu(map->read16(map, adr));
	/* Status bit 7: WSM ready */
	if (!(status & (1<<7))) {
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk("waiting for chip to be ready timed out in read");

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		/* dead debug chatter - the '0 &&' keeps it compiled out */
		if ( 0 && !(z % 100 ))
			printk("chip not ready yet before read. looping\n");

	/* Chip is busy with a non-interruptible operation */
	printk("Waiting for chip, status = %d\n", chip->state);

	/* Stick ourselves on a wait queue to be woken when
	   someone changes the status */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&chip->wq, &wait);

	spin_unlock_bh(chip->mutex);
	remove_wait_queue(&chip->wq, &wait);

	/* Interrupted by a signal - bail out (error path elided here) */
	if(signal_pending(current))

	/* Woken up: rearm the timeout and re-check the chip state */
	timeo = jiffies + HZ;

	/* 0xFF = Read Array: put the chip back into normal read mode */
	map->write16(map, cpu_to_le16(0x00ff), adr);
	chip->state = FL_READY;

	map->copy_from(map, buf, adr, len);

	/* Suspend/resume of in-flight operations isn't implemented yet,
	 * so finding a suspended state here means someone else broke in */
	if (chip->state == FL_ERASE_SUSPENDED ||
	    chip->state == FL_WRITE_SUSPENDED) {
		printk("Who in hell suspended the pending operation? I didn't write that code yet!\n");
		/* Restart it and set the state accordingly */

	spin_unlock_bh(chip->mutex);
/*
 * MTD read method: split the request at chip boundaries and hand each
 * piece to do_read_1_by_16_onechip(), advancing buf/ofs/chipnum as it
 * goes (the advance/loop lines are elided in this view).
 */
static int cfi_intelext_read_1_by_16 (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	unsigned long thislen;

	/* Ran off the end of the device */
	if (chipnum >= cfi->numchips)

	/* Clamp this piece so it doesn't cross into the next chip */
	if ((len + ofs -1) >> cfi->chipshift)
		thislen = (1<<cfi->chipshift) - ofs;

	ret = do_read_1_by_16_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
/*
 * Program one 16-bit word 'datum' at offset 'adr' of one chip.
 * Waits for the chip to go idle (status-register poll, then sleep on
 * chip->wq), issues Word Program (0x40) + data, sleeps roughly the
 * expected programming time, then polls status bit 7 for completion.
 * Also adaptively tunes chip->word_write_time for the next write.
 * Takes and releases chip->mutex itself.
 */
static inline int do_write_1_by_16_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, __u16 datum)
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us.
	 * Later, we can actually think about interrupting it
	 * if it's in FL_ERASING state.
	 * Not just yet, though.
	 */
	switch (chip->state) {

	/* 0x70 = Read Status Register command */
	map->write16(map, cpu_to_le16(0x0070), adr);
	chip->state = FL_STATUS;
	timeo = jiffies + HZ;

	status = le16_to_cpu(map->read16(map, adr));
	/* Status bit 7: WSM ready */
	if (!(status & (1<<7))) {
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			/* NOTE(review): message says "in read" but this is
			 * the write path - copy/paste slip */
			printk("waiting for chip to be ready timed out in read");

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		/* dead debug chatter - the '0 &&' keeps it compiled out */
		if ( 0 && !(z % 100 ))
			printk("chip not ready yet before write. looping\n");

	/* Chip is busy with a non-interruptible operation */
	printk("Waiting for chip, status = %d\n", chip->state);

	/* Stick ourselves on a wait queue to be woken when
	   someone changes the status */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&chip->wq, &wait);

	spin_unlock_bh(chip->mutex);
	remove_wait_queue(&chip->wq, &wait);

	/* Interrupted by a signal - bail out (error path elided here) */
	if (signal_pending(current))

	/* Woken up: rearm the timeout and re-check the chip state */
	timeo = jiffies + HZ;

	/* 0x40 = Word Program command, followed by the data word */
	map->write16(map, cpu_to_le16(0x0040), adr);
	map->write16(map, datum, adr);
	chip->state = FL_WRITING;

	timeo = jiffies + (HZ/2);

	/* Sleep the expected programming time before the first poll */
	spin_unlock_bh(chip->mutex);
	udelay(chip->word_write_time);
	spin_lock_bh(chip->mutex);

	/* Poll status bit 7 until the program operation completes */
	while ( !( (status = le16_to_cpu(map->read16(map, adr))) & 0x80 ) ) {

		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			remove_wait_queue(&chip->wq, &wait);

			if (signal_pending(current))

			timeo = jiffies + (HZ / 2); /* FIXME */

			spin_lock_bh(chip->mutex);

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			spin_unlock_bh(chip->mutex);
			/* NOTE(review): again "in read" in the write path */
			printk("waiting for chip to be ready timed out in read");

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		/* dead debug chatter - the '0 &&' keeps it compiled out */
		if ( 0 && !(z % 100 ))
			printk("chip not ready yet after write. looping\n");

		spin_lock_bh(chip->mutex);

	/* Adaptive timing: shrink the udelay estimate when the write
	 * finished within one poll, grow it otherwise (the branch
	 * structure around these lines is elided in this view),
	 * never letting it reach zero. */
	chip->word_write_time--;
	if (!chip->word_write_time)
		chip->word_write_time++;

	chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	spin_unlock_bh(chip->mutex);
	// printk("write ret OK at %lx\n", adr);
/* This version only uses the 'word write' instruction. We should update it
 * to write using 'buffer write' if it's available
 */
/*
 * MTD write method: handles an unaligned leading byte, then whole
 * words, then an unaligned trailing byte, crossing chip boundaries as
 * needed. The 0xFF filler in the unused byte lane programs nothing:
 * NOR flash programming can only clear bits, so an all-ones byte
 * leaves that location untouched.
 */
static int cfi_intelext_write_1_by_16 (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not word-aligned, do the first byte write */
	/* Byte lane for the odd byte depends on host endianness */
#if defined(__LITTLE_ENDIAN)
	ret = do_write_1_by_16_oneword(map, &cfi->chips[chipnum],
				       ofs, 0xFF | (*buf << 8));
#elif defined(__BIG_ENDIAN)
	ret = do_write_1_by_16_oneword(map, &cfi->chips[chipnum],
				       ofs, 0xFF00 | (*buf));
#error define a sensible endianness

	/* Crossed into the next chip: reset the in-chip offset */
	if (ofs >> cfi->chipshift) {
		if (chipnum == cfi->numchips)

	/* Main loop: one full word per iteration */
	ret = do_write_1_by_16_oneword(map, &cfi->chips[chipnum],

	/* Crossed into the next chip: reset the in-chip offset */
	if (ofs >> cfi->chipshift) {
		if (chipnum == cfi->numchips)

	/* Final byte to write */
#if defined(__LITTLE_ENDIAN)
	ret = do_write_1_by_16_oneword(map, &cfi->chips[chipnum],
				       ofs, 0xFF00 | (*buf));
#elif defined(__BIG_ENDIAN)
	ret = do_write_1_by_16_oneword(map, &cfi->chips[chipnum],
				       ofs, 0xFF | (*buf << 8));
#error define a sensible endianness
/*
 * Erase one block at offset 'adr' of one chip.
 * Waits for the chip to go idle (status poll then sleep on chip->wq),
 * issues Block Erase (0x20) + Confirm (0xD0), sleeps about a second,
 * then polls status bit 7 until the erase completes.
 * Takes and releases chip->mutex itself.
 */
static inline int do_erase_1_by_16_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr)
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_bh(chip->mutex);

	/* Check that the chip's ready to talk to us. */
	switch (chip->state) {

	/* 0x70 = Read Status Register command */
	map->write16(map, cpu_to_le16(0x0070), adr);
	chip->state = FL_STATUS;
	timeo = jiffies + HZ;

	status = le16_to_cpu(map->read16(map, adr));
	/* Status bit 7: WSM ready */
	if (!(status & (1<<7))) {
		/* Urgh. Chip not yet ready to talk to us. */
		if (time_after(jiffies, timeo)) {
			spin_unlock_bh(chip->mutex);
			printk("waiting for chip to be ready timed out in erase");

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		/* dead debug chatter - the '0 &&' keeps it compiled out */
		if ( 0 && !(z % 100 ))
			printk("chip not ready yet before erase. looping\n");

	/* Chip is busy with a non-interruptible operation */
	printk("Waiting for chip, status = %d\n", chip->state);

	/* Stick ourselves on a wait queue to be woken when
	   someone changes the status */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&chip->wq, &wait);

	spin_unlock_bh(chip->mutex);
	remove_wait_queue(&chip->wq, &wait);

	/* Interrupted by a signal - bail out (error path elided here) */
	if(signal_pending(current))

	/* Woken up: rearm the timeout and re-check the chip state */
	timeo = jiffies + HZ;

	/* 0x20 = Block Erase, 0xD0 = Confirm (same 'adr' selects the block) */
	map->write16(map, cpu_to_le16(0x0020), adr);
	map->write16(map, cpu_to_le16(0x00D0), adr);

	chip->state = FL_ERASING;

	/* Block erase is slow: give it ~1s asleep before the first poll */
	timeo = jiffies + (HZ*2);
	spin_unlock_bh(chip->mutex);
	schedule_timeout(HZ);
	spin_lock_bh(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	/* Poll status bit 7 until the erase operation completes */
	while ( !( (status = le16_to_cpu(map->read16(map, adr))) & 0x80 ) ) {

		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			spin_unlock_bh(chip->mutex);
			printk("erase suspended. Sleeping\n");

			remove_wait_queue(&chip->wq, &wait);

			if (signal_pending(current))

			timeo = jiffies + (HZ*2); /* FIXME */
			spin_lock_bh(chip->mutex);

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			spin_unlock_bh(chip->mutex);
			printk("waiting for erase to complete timed out.");

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock_bh(chip->mutex);
		/* dead debug chatter - the '0 &&' keeps it compiled out */
		if ( 0 && !(z % 100 ))
			printk("chip not ready yet after erase. looping\n");

		spin_lock_bh(chip->mutex);

	/* Done and happy. */
	chip->state = FL_STATUS;

	spin_unlock_bh(chip->mutex);
	//printk("erase ret OK\n");
/*
 * MTD erase method: validate alignment and bounds, then erase one
 * block at a time via do_erase_1_by_16_oneblock(), crossing chip
 * boundaries as needed. Invokes instr->callback when done, following
 * MTD's asynchronous-completion convention even though the erase
 * itself runs synchronously here.
 */
static int cfi_intelext_erase_1_by_16 (struct mtd_info *mtd, struct erase_info *instr)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long adr, len;
	int chipnum, ret = 0;

	/* Start and length must both be erase-block aligned, and the
	 * region must lie within the device */
	if (instr->addr & (mtd->erasesize - 1))

	if (instr->len & (mtd->erasesize -1))

	if ((instr->len + instr->addr) > mtd->size)

	chipnum = instr->addr >> cfi->chipshift;
	adr = instr->addr - (chipnum << cfi->chipshift);

	ret = do_erase_1_by_16_oneblock(map, &cfi->chips[chipnum], adr);

	adr += mtd->erasesize;
	len -= mtd->erasesize;

	/* Crossed into the next chip: reset the in-chip offset */
	if (adr >> cfi->chipshift) {

		if (chipnum >= cfi->numchips)

	/* NOTE(review): callback invoked without a visible NULL check -
	 * confirm callers always set instr->callback (guard may be in
	 * elided lines). */
	instr->callback(instr);
/*
 * MTD sync method: wait for every chip to go idle, parking each in
 * FL_SYNCING so no new operation can start, then restore each chip's
 * previous state once all are quiesced.
 */
static void cfi_intelext_sync (struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		/* Idle state: claim the chip for syncing */
		chip->oldstate = chip->state;
		chip->state = FL_SYNCING;
		/* No need to wake_up() on this state change -
		 * as the whole point is that nobody can do anything
		 * with the chip now anyway.
		 */
		spin_unlock_bh(chip->mutex);

		/* Not an idle state */
		add_wait_queue(&chip->wq, &wait);

		spin_unlock_bh(chip->mutex);
		remove_wait_queue(&chip->wq, &wait);

	/* Unlock the chips again */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
		spin_unlock_bh(chip->mutex);
/*
 * MTD (power-management) suspend method: park every idle chip in
 * FL_PM_SUSPENDED, remembering its old state for resume. If any chip
 * cannot be suspended the second loop rolls back the ones already
 * parked (mirrors cfi_intelext_sync's structure).
 */
static int cfi_intelext_suspend(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		switch(chip->state) {
		/* Idle state: park the chip for suspend */
		chip->oldstate = chip->state;
		chip->state = FL_PM_SUSPENDED;
		/* No need to wake_up() on this state change -
		 * as the whole point is that nobody can do anything
		 * with the chip now anyway.
		 */
		spin_unlock_bh(chip->mutex);

	/* Unlock the chips again */
	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = chip->oldstate;
		spin_unlock_bh(chip->mutex);
/*
 * MTD (power-management) resume method: restore each chip from
 * FL_PM_SUSPENDED to the state saved by cfi_intelext_suspend().
 * A chip found in any other state is reported but left alone.
 */
static void cfi_intelext_resume(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock_bh(chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = chip->oldstate;

		printk("Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		spin_unlock_bh(chip->mutex);
/*
 * Map-driver destroy hook (map->fldrv_destroy): free the extended
 * query table allocated at probe time and drop the inter_module
 * reference taken when this command set was bound to the map.
 */
static void cfi_intelext_destroy(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	inter_module_put(cfi->im_name);
/* Module init: publish the probe routine under im_name so generic CFI
 * chip-probe code can find it via inter_module_get(). */
static int __init cfi_intelext_init(void)
	inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0001);

/* Module exit: withdraw the registration made above */
static void __exit cfi_intelext_exit(void)
	inter_module_unregister(im_name);

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);