/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.164 2004/11/16 18:29:00 dwmw2 Exp $
 *
 * 10/10/2000	Nicolas Pitre <nico@cam.org>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
#define CMDSET0001_DISABLE_WRITE_SUSPEND

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL	0x0089
#define I82802AB		0x00ad
#define I82802AC		0x00ac
#define MANUFACTURER_ST		0x0020
#define M50LPW080		0x002F
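
/*
 * Quick reference for the Intel command-set opcodes used throughout this
 * file (all issued through CMD(), which replicates the value across the
 * bus for the configured buswidth/interleave):
 *
 *   0xff  read array          0x70  read status register
 *   0x50  clear status        0x90  read ID / query
 *   0x40  word program        0xe8  write to buffer
 *   0x20  block erase         0xd0  confirm / resume
 *   0xb0  program/erase suspend
 *   0x60 followed by 0x01/0xd0  set/clear block lock bit
 */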

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
		     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
			size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

/*
 *  *********** SETUP AND PROBE BITS ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;
	printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	for (i=10; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:           supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
	       "erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
/* The XIP config appears to have problems using write suspend at the moment */
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	if (!mtd->point && map_is_linear(map)) {
		mtd->point   = cfi_intelext_point;
		mtd->unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->write = cfi_intelext_write_buffers;
	}
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
	{ MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It looks like the device IDs are as
	 * well.  This table picks up all the cases where we
	 * know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
	{ 0, 0, NULL, NULL }
};
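
/*
 * For reference, cfi_fixup() (in drivers/mtd/chips/cfi_util.c) walks one
 * of the tables above and does roughly the equivalent of:
 *
 *	for (f = fixups; f->fixup; f++)
 *		if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *		    (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *			f->fixup(mtd, f->param);
 *
 * so each table must end with a NULL-fixup terminator, and the
 * CFI_MFR_ANY/CFI_ID_ANY entries act as catch-alls for every probed chip.
 */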

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_pri_intelext *extp;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
		unsigned int extra_size = 0;
		int nb_parts, i;

		/* Protection Register info */
		extra_size += (extp->NumProtectionFields - 1) * (4 + 6);

		/* Burst Read info */
		extra_size += 6;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			kfree(extp);
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__FUNCTION__);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
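
/*
 * Layout of the version 1.3 extended query data implied by the parsing
 * above (offsets are into extp->extra[]):
 *
 *	(NumProtectionFields - 1) * (4 + 6) bytes  extra protection fields
 *	6 bytes                                    burst read info
 *	1 byte                                     number of partition regions
 *	then, per region: a cfi_intelext_regioninfo header followed by
 *	(NumBlockTypes - 1) further cfi_intelext_blockinfo entries.
 *
 * The table is simply re-read with a larger size until all of it fits.
 */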

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_ERR "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	memset(mtd, 0, sizeof(*mtd));
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_intelext_erase_varsize;
	mtd->read    = cfi_intelext_read;
	mtd->write   = cfi_intelext_write_words;
	mtd->sync    = cfi_intelext_sync;
	mtd->lock    = cfi_intelext_lock;
	mtd->unlock  = cfi_intelext_unlock;
	mtd->suspend = cfi_intelext_suspend;
	mtd->resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure. So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if(extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
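
/*
 * For reference, a minimal sketch of how a map driver ends up here: the
 * physical map runs the generic CFI probe, which dispatches on the primary
 * vendor command set ID (0x0001 for us) and reaches cfi_cmdset_0001() via
 * the inter_module table.  The map driver's names and resources below are
 * hypothetical:
 *
 *	struct map_info mymap = { .name = "board flash", ... };
 *	struct mtd_info *mtd = do_map_probe("cfi_probe", &mymap);
 *	if (mtd)
 *		add_mtd_device(mtd);
 */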

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i,j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
			* mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
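
	/*
	 * EraseRegionInfo encoding, per the arithmetic above: bits 15:0 hold
	 * (number of blocks - 1), bits 31:16 hold the block size in units of
	 * 256 bytes.  E.g. a region of 64 blocks of 128KiB each is encoded
	 * as 0x0200003f: ((0x0200003f >> 8) & ~0xff) = 0x20000 = 128KiB, and
	 * (0x0200003f & 0xffff) + 1 = 64 blocks (before interleave scaling).
	 */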

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions;i++){
		printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

	mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	return mtd;

 setup_err:
	if(mtd) {
		if(mtd->eraseregions)
			kfree(mtd->eraseregions);
		kfree(mtd);
	}
	kfree(cfi->cmdset_priv);
	return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
	    && extp->FeatureSupport & (1 << 9)) {
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int offs, numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		offs = (extp->NumProtectionFields - 1) * (4 + 6);

		/* Burst Read info */
		offs += 6;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);

		if ((1 << partshift) < mtd->erasesize) {
			printk( KERN_ERR
				"%s: bad number of hw partitions (%d)\n",
				__FUNCTION__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			spin_lock_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				spin_lock_init(&chip->_spinlock);
				chip->mutex = &chip->_spinlock;
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
		kfree(cfi);
	}

	return 0;
}
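
/*
 * Worked example of the partitioning math above: a 64MiB chip
 * (chipshift = 26) advertising 4 identical hardware partitions gives
 * partshift = 26 - __ffs(4) = 24, i.e. four 16MiB virtual chips per
 * physical chip, all sharing a single flchip_shared arbitration struct.
 */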

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	unsigned long timeo;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read when its lock is taken.
		 * However any writes to it can only be made when the current
		 * owner's lock is also held.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		spin_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			int ret = spin_trylock(contender->mutex);
			spin_unlock(&shared->lock);
			if (!ret)
				goto retry;
			spin_unlock(chip->mutex);
			ret = get_chip(map, contender, contender->start, mode);
			spin_lock(chip->mutex);
			if (ret) {
				spin_unlock(contender->mutex);
				return ret;
			}
			timeo = jiffies + HZ;
			spin_lock(&shared->lock);
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		if (contender && contender != chip)
			spin_unlock(contender->mutex);
		spin_unlock(&shared->lock);
	}

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
				       status.x[0]);
				return -EIO;
			}
			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), adr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here. */
				map_write(map, CMD(0xd0), adr);
				/* Make sure we're in 'read status' mode if it had finished */
				map_write(map, CMD(0x70), adr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "Chip not ready after erase "
				       "suspended: status = 0x%lx\n", status.x[0]);
				return -EIO;
			}

			spin_unlock(chip->mutex);
			cfi_udelay(1);
			spin_lock(chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		spin_unlock(chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		spin_lock(chip->mutex);
		goto resettime;
	}
}
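
/*
 * Note the locking contract implied above: every caller of get_chip()
 * already holds chip->mutex, and get_chip() may temporarily drop it (to
 * sleep, or to recurse on a contender partition) before returning with it
 * held again.  put_chip() is likewise called with chip->mutex held and
 * wakes any sleepers once the chip goes idle.
 */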

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		spin_lock(&shared->lock);
		if (shared->writing == chip) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				spin_lock(loaner->mutex);
				spin_unlock(&shared->lock);
				spin_unlock(chip->mutex);
				put_chip(map, loaner, loaner->start);
				spin_lock(chip->mutex);
				spin_unlock(loaner->mutex);
				return;
			}
			if (chip->oldstate != FL_ERASING) {
				shared->erasing = NULL;
				if (chip->oldstate != FL_WRITING)
					shared->writing = NULL;
			}
			spin_unlock(&shared->lock);
		} else {
			spin_unlock(&shared->lock);
		}
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), adr);
		map_write(map, CMD(0x70), adr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Also configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 * Note that not only IRQs are disabled but the preemption count is also
 * increased to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
	local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remaining
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate, newstate;

	do {
		cpu_relax();
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			asm volatile (".rep 8; nop; .endr");
			local_irq_enable();
			spin_unlock(chip->mutex);
			asm volatile (".rep 8; nop; .endr");
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			spin_lock(chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				spin_unlock(chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				spin_lock(chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	if (!map->virt || (from + len > mtd->size))
		return -EINVAL;

	*mtdbuf = (void *)map->virt + from;
	*retlen = 0;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
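
/*
 * point/unpoint hand callers a direct pointer into the flash mapping
 * (map->virt + from) instead of a buffer copy, which is only valid for
 * linear maps - hence the map_is_linear() test in fixup_use_point().
 * ref_point_counter lets nested point calls share the FL_POINT state; the
 * chip only returns to FL_READY when the last unpoint drops it to zero.
 */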

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	while (len) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		spin_lock(chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if(chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else
			printk(KERN_ERR "Warning: unpoint called on non pointed region\n"); /* Should this give an error? */

		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
						loff_t from, size_t len,
						size_t *retlen,
						u_char *buf,
						int base_offst, int reg_sz)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	int ofs_factor = cfi->interleave * cfi->device_type;
	int count = len;
	int chip_num, offst;
	int ret;

	chip_num = ((unsigned int)from/reg_sz);
	offst = from - (reg_sz*chip_num)+base_offst;

	while (count) {
		/* Calculate which chip & protection register offset we need */

		if (chip_num >= cfi->numchips)
			goto out;

		chip = &cfi->chips[chip_num];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
		if (ret) {
			spin_unlock(chip->mutex);
			return (len-count)?:ret;
		}

		xip_disable(map, chip, chip->start);

		if (chip->state != FL_JEDEC_QUERY) {
			map_write(map, CMD(0x90), chip->start);
			chip->state = FL_JEDEC_QUERY;
		}

		while (count && ((offst-base_offst) < reg_sz)) {
			*buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
			buf++;
			offst++;
			count--;
		}

		xip_enable(map, chip, chip->start);
		put_chip(map, chip, chip->start);
		spin_unlock(chip->mutex);

		/* Move on to the next chip */
		chip_num++;
		offst = base_offst;
	}

 out:
	*retlen = len - count;
	return 0;
}

static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;

	/* Check that we actually have some protection registers */
	if(!extp || !(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=(1<<extp->FactProtRegSize);
	reg_sz=(1<<extp->UserProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp=cfi->cmdset_priv;
	int base_offst,reg_sz;

	/* Check that we actually have some protection registers */
	if(!extp || !(extp->FeatureSupport&64)){
		printk(KERN_WARNING "%s: This flash device has no protection data to read!\n",map->name);
		return 0;
	}

	base_offst=0;
	reg_sz=(1<<extp->FactProtRegSize);

	return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int z, ret=0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, CMD(0x40), adr);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
	UDELAY(map, chip, adr, chip->word_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		z++;
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->word_write_time--;
		if (!chip->word_write_time)
			chip->word_write_time++;
	}
	if (z > 1)
		chip->word_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, adr);
 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);

	return ret;
}

static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
					       bus_ofs, datum);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while(len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long cmd_adr, timeo;
	int wbufsize, z, ret=0, bytes, words;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	cmd_adr = adr & ~(wbufsize-1);

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS)
		map_write(map, CMD(0x70), cmd_adr);
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;

	z = 0;
	for (;;) {
		map_write(map, CMD(0xe8), cmd_adr);

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		spin_lock(chip->mutex);

		if (++z > 20) {
			/* Argh. Not ready for write to buffer */
			map_word Xstatus;
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, cmd_adr);
			/* Odd. Clear status bits */
			map_write(map, CMD(0x50), cmd_adr);
			map_write(map, CMD(0x70), cmd_adr);
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
			       status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}
	}

	/* Write length of data to come */
	bytes = len & (map_bankwidth(map)-1);
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - !bytes), cmd_adr);
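
	/* The count programmed above is "number of bus-width transfers minus
	   one": a trailing partial word (bytes != 0) costs one extra
	   transfer, so CMD(words - !bytes) covers both the aligned and the
	   padded case. */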

	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);
		map_write(map, datum, adr+z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}

	if (bytes) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, bytes);
		map_write(map, datum, adr+z);
	}

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
	spin_lock(chip->mutex);

	timeo = jiffies + (HZ/2);
	z = 0;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			spin_lock(chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			chip->state = FL_STATUS;
			xip_enable(map, chip, cmd_adr);
			printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, cmd_adr, 1);
		z++;
		spin_lock(chip->mutex);
	}
	if (!z) {
		chip->buffer_write_time--;
		if (!chip->buffer_write_time)
			chip->buffer_write_time++;
	}
	if (z > 1)
		chip->buffer_write_time++;

	/* Done and happy. */
	chip->state = FL_STATUS;

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x02))) {
		/* clear status */
		map_write(map, CMD(0x50), cmd_adr);
		/* put back into read status register mode */
		map_write(map, CMD(0x70), adr);
		ret = -EROFS;
	}

	xip_enable(map, chip, cmd_adr);
 out:	put_chip(map, chip, cmd_adr);
	spin_unlock(chip->mutex);
	return ret;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to  - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_intelext_write_words(mtd, to, local_len,
					       retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}
	return 0;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo;
	int retries = 3;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

 retry:
	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;

	spin_unlock(chip->mutex);
	INVALIDATE_CACHED_RANGE(map, adr, len);
	UDELAY(map, chip, adr, chip->erase_time*1000/2);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			spin_unlock(chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			spin_lock(chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			/* Clear status bits */
			map_write(map, CMD(0x50), adr);
			map_write(map, CMD(0x70), adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
			       adr, status.x[0], Xstatus.x[0]);
			ret = -EIO;
			goto out;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1000000/HZ);
		spin_lock(chip->mutex);
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for lock bit */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned char chipstatus;

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		chipstatus = status.x[0];
		if (!map_word_equal(map, status, CMD(chipstatus))) {
			int i, w;
			for (w=0; w<map_words(map); w++) {
				for (i = 0; i<cfi_interleave(cfi); i++) {
					chipstatus |= status.x[w] >> (cfi->device_type * 8 * i);
				}
			}
			printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
			       status.x[0], chipstatus);
		}
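
		/* Each chip in an interleaved set contributes device_type
		   bytes to the status word, so chip i's status byte sits
		   (cfi->device_type * 8 * i) bits up; the loop above ORs
		   them all into one chipstatus so an error bit set by any
		   chip is decoded below.  (The shift must scale with i,
		   otherwise only one chip's byte is ever examined.) */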

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
			ret = -EIO;
		} else if (chipstatus & 0x20) {
			if (retries--) {
				printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
				timeo = jiffies + HZ;
				put_chip(map, chip, adr);
				spin_unlock(chip->mutex);
				goto retry;
			}
			printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
			ret = -EIO;
		}
	} else {
		xip_enable(map, chip, adr);
	}

 out:	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return ret;
}

int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		spin_unlock(chip->mutex);
	}
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	xip_disable(map, chip, adr+(2*ofs_factor));
	cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, status);
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK;
	unsigned long timeo = jiffies + HZ;
	int ret;

	adr += chip->start;

	/* Let's determine this according to the interleave only once */
	status_OK = CMD(0x80);

	spin_lock(chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		spin_unlock(chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	spin_unlock(chip->mutex);
	UDELAY(map, chip, adr, 1000000/HZ);
	spin_lock(chip->mutex);

	/* FIXME. Use a timer to check this, and return immediately. */
	/* Once the state machine's known to be working I'll do that */

	timeo = jiffies + (HZ*20);
	for (;;) {

		status = map_read(map, adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		/* OK Still waiting */
		if (time_after(jiffies, timeo)) {
			map_word Xstatus;
			map_write(map, CMD(0x70), adr);
			chip->state = FL_STATUS;
			Xstatus = map_read(map, adr);
			xip_enable(map, chip, adr);
			printk(KERN_ERR "waiting for unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
			       status.x[0], Xstatus.x[0]);
			put_chip(map, chip, adr);
			spin_unlock(chip->mutex);
			return -EIO;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		spin_unlock(chip->mutex);
		UDELAY(map, chip, adr, 1);
		spin_lock(chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	spin_unlock(chip->mutex);
	return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __FUNCTION__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __FUNCTION__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, 0);
#endif

	return ret;
}

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->oldstate);
			ret = -EAGAIN;
		case FL_PM_SUSPENDED:
			break;
		}
		spin_unlock(chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			spin_lock(chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			spin_unlock(chip->mutex);
		}
	}

	return ret;
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		spin_lock(chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		spin_unlock(chip->mutex);
	}
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	kfree(mtd->eraseregions);
}

static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";
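
/* P_ID 0x0003 is the Intel Standard command set, which this driver services
   with the same code, hence the second inter_module alias below. */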

static int __init cfi_intelext_init(void)
{
	inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
	inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
	return 0;
}

static void __exit cfi_intelext_exit(void)
{
	inter_module_unregister(im_name_1);
	inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");