/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.164 2004/11/16 18:29:00 dwmw2 Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */

#ifdef CONFIG_MTD_XIP
#define CMDSET0001_DISABLE_WRITE_SUSPEND
#endif

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
//static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"

/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        for (i=10; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Valid Bit Active:     %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n", i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        /* SuspendCmdSupport lives in the Intel extended table, not the AMD one */
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
/* The XIP config appears to have problems using write suspend at the moment */
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        /* Set the block count in region 1 to 0x3e + 1 = 63 */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->write = cfi_intelext_write_buffers;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common.  It is likely that the device IDs are
         * as well.  This table picks up all the cases where
         * we know that is so.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};
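
/*
 * For reference, a minimal sketch -- an assumption for illustration, not
 * the exact generic code -- of how the fixup tables above are consumed:
 * cfi_fixup() walks the table and invokes every entry whose manufacturer
 * and device IDs match, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.
 */
#if 0   /* illustrative only; the real walker lives in the generic CFI code */
static void example_cfi_fixup(struct mtd_info *mtd, struct cfi_fixup *fixups)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_fixup *f;

        for (f = fixups; f->fixup; f++) {
                if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
                    (f->id == CFI_ID_ANY || f->id == cfi->id))
                        f->fixup(mtd, f->param);
        }
}
#endif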
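/*
 * Read the Intel/Sharp extended query table.  For version 1.3 the table
 * has a variable-length tail (protection register info, burst read info
 * and per-partition region info), so we read the fixed part first,
 * compute how much extra data follows, and retry with a larger size
 * ("goto again") until everything fits.
 */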
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion == '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) * (4 + 6);

                /* Burst Read info */
                extra_size += 6;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

/* This routine is made available to other mtd code via
 * inter_module_register.  It must only be accessed through
 * inter_module_get which will bump the use count of this module.  The
 * addresses passed back in cfi are valid as long as the use count of
 * this module is non-zero, i.e. between inter_module_get and
 * inter_module_put.  Keith Owens <kaos@ocs.com.au> 29 Oct 2000.
 */
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kmalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        memset(mtd, 0, sizeof(*mtd));
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

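        /*
         * The CFI query encodes typical operation times as log2 values:
         * 2^WordWriteTimeoutTyp and 2^BufWriteTimeoutTyp microseconds for
         * word and buffer writes, and 2^BlockEraseTimeoutTyp milliseconds
         * for a block erase -- hence the 1 << n expansions below.
         */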
        for (i = 0; i < cfi->numchips; i++) {
                cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
                cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
                cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
                cfi->chips[i].ref_point_counter = 0;
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
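                /*
                 * Worked example: CFI packs each erase region descriptor
                 * with (block size / 256) in the top 16 bits and (number
                 * of blocks - 1) in the bottom 16 bits.  So 0x0200001f
                 * decodes to 0x200 * 256 = 128 KiB blocks and 0x1f + 1 =
                 * 32 blocks, before scaling by the interleave.
                 */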

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i = 0; i < mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "%d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i, mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#if 0
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        return mtd;

 setup_err:
        if (mtd) {
                if (mtd->eraseregions)
                        kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion == '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) * (4 + 6);

                /* Burst Read info */
                offs += 6;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
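                /*
                 * Example of the arithmetic: a 64 MiB chip (chipshift == 26)
                 * advertising 4 identical partitions gives __ffs(4) == 2,
                 * hence partshift == 24, i.e. 16 MiB virtual chips.
                 */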

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)) {
                /*
                 * OK. There is a possibility of contention on the
                 * write/erase operations, which are global to the real
                 * chip and not per partition.  So let's fight it out in
                 * the partition which currently has authority over the
                 * operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read when its lock is taken.
                 * However any writes to it can only be made when the current
                 * owner's lock is also held.
                 */
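                /*
                 * Concretely: if partition A is in the middle of an erase
                 * and a write comes in for partition B on the same physical
                 * chip, the writer below finds shared->writing pointing at
                 * A's flchip, locks it, and calls get_chip() recursively on
                 * A so that A's erase gets suspended (or the writer sleeps)
                 * before B claims ownership.
                 */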
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform the desired operation on
                         * this partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                if (contender && contender != chip)
                        spin_unlock(contender->mutex);
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "Waiting for chip to be ready timed out. Status %lx\n",
                                       status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }
                /* Fall through: the chip is now known to be ready. */

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to the chip we borrowed it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                        } else {
                                if (chip->oldstate != FL_ERASING) {
                                        shared->erasing = NULL;
                                        if (chip->oldstate != FL_WRITING)
                                                shared->writing = NULL;
                                }
                                spin_unlock(&shared->lock);
                        }
                } else {
                        spin_unlock(&shared->lock);
                }
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "put_chip() called with oldstate %d!!\n", chip->oldstate);
        }
        wake_up(&chip->wq);
}
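
/*
 * Typical usage of the get_chip()/put_chip() pair, as in
 * do_read_onechip() below:
 *
 *      spin_lock(chip->mutex);
 *      ret = get_chip(map, chip, cmd_addr, FL_READY);
 *      if (!ret) {
 *              ... access the flash ...
 *              put_chip(map, chip, cmd_addr);
 *      }
 *      spin_unlock(chip->mutex);
 */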

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect the generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions). Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 * Note that not only are IRQs disabled but the preemption count is also
 * increased, to prevent other locking primitives (namely spin_unlock) from
 * decrementing the preempt count to zero and scheduling the CPU away while
 * not in array mode.
 */
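
/*
 * The canonical pattern, as used by do_write_oneword() and friends below,
 * is:
 *
 *      xip_disable(map, chip, adr);
 *      ... issue flash commands, poll with UDELAY() ...
 *      xip_enable(map, chip, adr);
 *
 * so that the flash is guaranteed to be back in array mode before any
 * code that may live in it gets executed again.
 */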

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        preempt_disable();
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
        local_irq_enable();
        preempt_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function polls for both the given timeout and for pending
 * (but still masked) hardware interrupts.  Whenever an interrupt is
 * pending, the flash erase or write operation is suspended, array mode
 * restored and interrupts unmasked.  Task scheduling might also happen at
 * that point.  The CPU eventually returns from the interrupt or the call
 * to schedule() and the suspended flash operation is resumed for the
 * remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
                                unsigned long adr, int usec)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long suspended, start = xip_currtime();
        flstate_t oldstate, newstate;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        usec -= xip_elapsed_since(start);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        preempt_enable();
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        preempt_disable();
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                preempt_enable();
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                preempt_disable();
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
        } while (!map_word_andequal(map, status, OK, OK)
                 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way
 * with an XIP setup, so do it before the actual flash operation in this
 * case.
 */
#undef INVALIDATE_CACHED_RANGE
#define INVALIDATE_CACHED_RANGE(x...)
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
        do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with
 * the same chip state as it was entered with.  This is why no special care
 * is taken over the presence of add_wait_queue() or schedule() calls within
 * a couple of xip_disable()'d areas of code, like in do_erase_oneblock for
 * example: the queueing and scheduling always happen within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*), i.e. with the
 * flash in array mode, therefore never executing many of the cases therein
 * and not causing any problem with XIP.
 */

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)

#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)

#define XIP_INVAL_CACHED_RANGE(x...)

#endif

static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);
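        /*
         * e.g. with 8 MiB chips (chipshift == 23), from == 0x900000 yields
         * chipnum == 1 and ofs == 0x100000 into that second chip.
         */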

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                /* Check the bounds before taking the chip's address */
                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "Warning: unpoint called on non-pointed region\n"); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_addr, FL_READY);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_READY;
        }

        map_copy_from(map, buf, adr, len);

        put_chip(map, chip, cmd_addr);

        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        *retlen = 0;

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs - 1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;
                buf += thislen;

                ofs = 0;
                chipnum++;
        }
        return ret;
}

#if 0
static int __xipram cfi_intelext_read_prot_reg (struct mtd_info *mtd,
                                                loff_t from, size_t len,
                                                size_t *retlen,
                                                u_char *buf,
                                                int base_offst, int reg_sz)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct flchip *chip;
        int ofs_factor = cfi->interleave * cfi->device_type;
        int count = len;
        int chip_num, offst;
        int ret;

        chip_num = ((unsigned int)from/reg_sz);
        offst = from - (reg_sz*chip_num)+base_offst;

        while (count) {
        /* Calculate which chip & protection register offset we need */

                if (chip_num >= cfi->numchips)
                        goto out;

                chip = &cfi->chips[chip_num];

                spin_lock(chip->mutex);
                ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
                if (ret) {
                        spin_unlock(chip->mutex);
                        return (len-count)?:ret;
                }

                xip_disable(map, chip, chip->start);

                if (chip->state != FL_JEDEC_QUERY) {
                        map_write(map, CMD(0x90), chip->start);
                        chip->state = FL_JEDEC_QUERY;
                }

                while (count && ((offst-base_offst) < reg_sz)) {
                        *buf = map_read8(map,(chip->start+((extp->ProtRegAddr+1)*ofs_factor)+offst));
                        buf++;
                        offst++;
                        count--;
                }

                xip_enable(map, chip, chip->start);
                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                /* Move on to the next chip */
                chip_num++;
                offst = base_offst;
        }

 out:
        return len-count;
}

static int cfi_intelext_read_user_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        int base_offst, reg_sz;

        /* Check that we actually have some protection registers */
        if (!extp || !(extp->FeatureSupport&64)) {
                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n", map->name);
                return 0;
        }

        base_offst = (1<<extp->FactProtRegSize);
        reg_sz = (1<<extp->UserProtRegSize);

        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}

static int cfi_intelext_read_fact_prot_reg (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        int base_offst, reg_sz;

        /* Check that we actually have some protection registers */
        if (!extp || !(extp->FeatureSupport&64)) {
                printk(KERN_WARNING "%s: This flash device has no protection data to read!\n", map->name);
                return 0;
        }

        base_offst = 0;
        reg_sz = (1<<extp->FactProtRegSize);

        return cfi_intelext_read_prot_reg(mtd, from, len, retlen, buf, base_offst, reg_sz);
}
#endif

1276 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1277                                      unsigned long adr, map_word datum)
1278 {
1279         struct cfi_private *cfi = map->fldrv_priv;
1280         map_word status, status_OK;
1281         unsigned long timeo;
1282         int z, ret=0;
1283
1284         adr += chip->start;
1285
1286         /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_WRITING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);
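        /* Word/byte program is a two-cycle command: 0x40 (program setup)
           followed by the data itself at the target address. */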
        map_write(map, CMD(0x40), adr);
        map_write(map, datum, adr);
        chip->state = FL_WRITING;

        spin_unlock(chip->mutex);
        INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
        UDELAY(map, chip, adr, chip->word_write_time);
        spin_lock(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        xip_enable(map, chip, adr);
                        printk(KERN_ERR "waiting for chip to be ready timed out in word write\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                z++;
                UDELAY(map, chip, adr, 1);
                spin_lock(chip->mutex);
        }
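        /* Adaptively tune the initial delay: if the write completed without
           any extra polling, shorten it (never letting it reach zero); if it
           needed more than one extra poll, lengthen it. */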
        if (!z) {
                chip->word_write_time--;
                if (!chip->word_write_time)
                        chip->word_write_time++;
        }
        if (z > 1)
                chip->word_write_time++;

        /* Done and happy. */
        chip->state = FL_STATUS;
        /* check for lock bit: SR.1 set means we tried to program a locked block */
        if (map_word_bitsset(map, status, CMD(0x02))) {
                /* clear status */
                map_write(map, CMD(0x50), adr);
                /* put back into read status register mode */
                map_write(map, CMD(0x70), adr);
                ret = -EROFS;
        }

        xip_enable(map, chip, adr);
 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);

        return ret;
}


static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first byte write */
        if (ofs & (map_bankwidth(map)-1)) {
                unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
                int gap = ofs - bus_ofs;
                int n;
                map_word datum;

                n = min_t(int, len, map_bankwidth(map)-gap);
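                /* Pad the partial word with 0xff: NOR programming can only
                   clear bits, so the 0xff filler leaves the neighbouring
                   bytes in the word untouched. */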
                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, gap, n);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                               bus_ofs, datum);
                if (ret)
                        return ret;

                len -= n;
                ofs += n;
                buf += n;
                (*retlen) += n;

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        while (len >= map_bankwidth(map)) {
                map_word datum = map_word_load(map, buf);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                ofs, datum);
                if (ret)
                        return ret;

                ofs += map_bankwidth(map);
                buf += map_bankwidth(map);
                (*retlen) += map_bankwidth(map);
                len -= map_bankwidth(map);

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        if (len & (map_bankwidth(map)-1)) {
                map_word datum;

                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, 0, len);

                ret = do_write_oneword(map, &cfi->chips[chipnum],
                                               ofs, datum);
                if (ret)
                        return ret;

                (*retlen) += len;
        }

        return 0;
}


static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
                                    unsigned long adr, const u_char *buf, int len)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long cmd_adr, timeo;
        int wbufsize, z, ret=0, bytes, words;

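        /* CFI reports the per-chip write buffer as 2^MaxBufWriteSize bytes;
           scale by the interleave to get the buffer size on this bus. */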
        wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        adr += chip->start;
        cmd_adr = adr & ~(wbufsize-1);

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, cmd_adr, FL_WRITING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, cmd_adr);

        /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
           [...], the device will not accept any more Write to Buffer commands".
           So we must check here and reset those bits if they're set. Otherwise
           we're just pissing in the wind */
        if (chip->state != FL_STATUS)
                map_write(map, CMD(0x70), cmd_adr);
        status = map_read(map, cmd_adr);
        if (map_word_bitsset(map, status, CMD(0x30))) {
                xip_enable(map, chip, cmd_adr);
                printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
                xip_disable(map, chip, cmd_adr);
                map_write(map, CMD(0x50), cmd_adr);
                map_write(map, CMD(0x70), cmd_adr);
        }

        chip->state = FL_WRITING_TO_BUFFER;

        z = 0;
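        /* Issue 0xe8 (write to buffer) and read back the extended status
           register: XSR.7 set means a write buffer is actually available. */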
        for (;;) {
                map_write(map, CMD(0xe8), cmd_adr);

                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                spin_unlock(chip->mutex);
                UDELAY(map, chip, cmd_adr, 1);
                spin_lock(chip->mutex);

                if (++z > 20) {
                        /* Argh. Not ready for write to buffer */
                        map_word Xstatus;
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        Xstatus = map_read(map, cmd_adr);
                        /* Odd. Clear status bits */
                        map_write(map, CMD(0x50), cmd_adr);
                        map_write(map, CMD(0x70), cmd_adr);
                        xip_enable(map, chip, cmd_adr);
                        printk(KERN_ERR "Chip not ready for buffer write. status = %lx, Xstatus = %lx\n",
                               status.x[0], Xstatus.x[0]);
                        ret = -EIO;
                        goto out;
                }
        }

        /* Write length of data to come */
        bytes = len & (map_bankwidth(map)-1);
        words = len / map_bankwidth(map);
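        /* The chip wants the cycle count minus one: a trailing partial word
           costs one extra bus cycle, so "words - !bytes" encodes N-1 for both
           the aligned and the unaligned case. */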
        map_write(map, CMD(words - !bytes), cmd_adr);

        /* Write data */
        z = 0;
        while (z < words * map_bankwidth(map)) {
                map_word datum = map_word_load(map, buf);
                map_write(map, datum, adr+z);

                z += map_bankwidth(map);
                buf += map_bankwidth(map);
        }

        if (bytes) {
                map_word datum;

                datum = map_word_ff(map);
                datum = map_word_load_partial(map, datum, buf, 0, bytes);
                map_write(map, datum, adr+z);
        }

        /* GO GO GO */
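        /* 0xd0 confirms the buffer contents and starts the actual programming */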
        map_write(map, CMD(0xd0), cmd_adr);
        chip->state = FL_WRITING;

        spin_unlock(chip->mutex);
        INVALIDATE_CACHED_RANGE(map, adr, len);
        UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
        spin_lock(chip->mutex);

        timeo = jiffies + (HZ/2);
        z = 0;
        for (;;) {
                if (chip->state != FL_WRITING) {
                        /* Someone's suspended the write. Sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        timeo = jiffies + (HZ / 2); /* FIXME */
                        spin_lock(chip->mutex);
                        continue;
                }

                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        chip->state = FL_STATUS;
                        xip_enable(map, chip, cmd_adr);
                        printk(KERN_ERR "waiting for chip to be ready timed out in bufwrite\n");
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                UDELAY(map, chip, cmd_adr, 1);
                z++;
                spin_lock(chip->mutex);
        }
        if (!z) {
                chip->buffer_write_time--;
                if (!chip->buffer_write_time)
                        chip->buffer_write_time++;
        }
        if (z > 1)
                chip->buffer_write_time++;

        /* Done and happy. */
        chip->state = FL_STATUS;

        /* check for lock bit: SR.1 set means the block was locked */
        if (map_word_bitsset(map, status, CMD(0x02))) {
                /* clear status */
                map_write(map, CMD(0x50), cmd_adr);
                /* put back into read status register mode */
                map_write(map, CMD(0x70), cmd_adr);
                ret = -EROFS;
        }

        xip_enable(map, chip, cmd_adr);
 out:   put_chip(map, chip, cmd_adr);
        spin_unlock(chip->mutex);
        return ret;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
                                       size_t len, size_t *retlen, const u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
        int ret = 0;
        int chipnum;
        unsigned long ofs;

        *retlen = 0;
        if (!len)
                return 0;

        chipnum = to >> cfi->chipshift;
        ofs = to - (chipnum << cfi->chipshift);

        /* If it's not bus-aligned, do the first word write */
        if (ofs & (map_bankwidth(map)-1)) {
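                /* (-ofs) & (bankwidth-1) is the byte count up to the next
                   bus-width boundary; hand that head over to the word-write
                   path. */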
                size_t local_len = (-ofs)&(map_bankwidth(map)-1);
                if (local_len > len)
                        local_len = len;
                ret = cfi_intelext_write_words(mtd, to, local_len,
                                               retlen, buf);
                if (ret)
                        return ret;
                ofs += local_len;
                buf += local_len;
                len -= local_len;

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }

        while (len) {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len;
                ret = do_write_buffer(map, &cfi->chips[chipnum],
                                      ofs, buf, size);
                if (ret)
                        return ret;

                ofs += size;
                buf += size;
                (*retlen) += size;
                len -= size;

                if (ofs >> cfi->chipshift) {
                        chipnum++;
                        ofs = 0;
                        if (chipnum == cfi->numchips)
                                return 0;
                }
        }
        return 0;
}

static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
                                      unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo;
        int retries = 3;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

 retry:
        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_ERASING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        XIP_INVAL_CACHED_RANGE(map, adr, len);
        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

        /* Clear the status register first */
        map_write(map, CMD(0x50), adr);

        /* Now erase */
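        /* Block erase is a two-cycle command: 0x20 (erase setup) then
           0xD0 (erase confirm), both written within the block. */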
        map_write(map, CMD(0x20), adr);
        map_write(map, CMD(0xD0), adr);
        chip->state = FL_ERASING;
        chip->erase_suspended = 0;

        spin_unlock(chip->mutex);
        INVALIDATE_CACHED_RANGE(map, adr, len);
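        /* erase_time is the typical block erase time in milliseconds;
           sleep out half of it (converted to µs) before polling. */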
        UDELAY(map, chip, adr, chip->erase_time*1000/2);
        spin_lock(chip->mutex);

        /* FIXME. Use a timer to check this, and return immediately. */
        /* Once the state machine's known to be working I'll do that */

        timeo = jiffies + (HZ*20);
        for (;;) {
                if (chip->state != FL_ERASING) {
                        /* Someone's suspended the erase. Sleep */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                        continue;
                }
                if (chip->erase_suspended) {
                        /* This erase was suspended and resumed.
                           Adjust the timeout */
                        timeo = jiffies + (HZ*20); /* FIXME */
                        chip->erase_suspended = 0;
                }

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        map_word Xstatus;
                        map_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        Xstatus = map_read(map, adr);
                        /* Clear status bits */
                        map_write(map, CMD(0x50), adr);
                        map_write(map, CMD(0x70), adr);
                        xip_enable(map, chip, adr);
                        printk(KERN_ERR "waiting for erase at %08lx to complete timed out. status = %lx, Xstatus = %lx.\n",
                               adr, status.x[0], Xstatus.x[0]);
                        ret = -EIO;
                        goto out;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                UDELAY(map, chip, adr, 1000000/HZ);
                spin_lock(chip->mutex);
        }

        /* We've broken this before. It doesn't hurt to be safe */
        map_write(map, CMD(0x70), adr);
        chip->state = FL_STATUS;
        status = map_read(map, adr);

        /* check for errors: SR.1 (locked), SR.3 (VPP low), SR.4/SR.5 (failure) */
        if (map_word_bitsset(map, status, CMD(0x3a))) {
                unsigned char chipstatus;

                /* Reset the error bits */
                map_write(map, CMD(0x50), adr);
                map_write(map, CMD(0x70), adr);
                xip_enable(map, chip, adr);

                chipstatus = status.x[0];
                if (!map_word_equal(map, status, CMD(chipstatus))) {
                        int i, w;
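                        /* The chips don't agree: fold every interleaved
                           chip's status byte into one value (chip i's status
                           sits i * device_type * 8 bits up in each word). */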
                        for (w=0; w<map_words(map); w++) {
                                for (i = 0; i<cfi_interleave(cfi); i++) {
                                        chipstatus |= status.x[w] >> (cfi->device_type * 8 * i);
                                }
                        }
                        printk(KERN_WARNING "Status is not identical for all chips: 0x%lx. Merging to give 0x%02x\n",
                               status.x[0], chipstatus);
                }

                if ((chipstatus & 0x30) == 0x30) {
                        printk(KERN_NOTICE "Chip reports improper command sequence: status 0x%x\n", chipstatus);
                        ret = -EIO;
                } else if (chipstatus & 0x02) {
                        /* Protection bit set */
                        ret = -EROFS;
                } else if (chipstatus & 0x8) {
                        /* Voltage */
                        printk(KERN_WARNING "Chip reports voltage low on erase: status 0x%x\n", chipstatus);
                        ret = -EIO;
                } else if (chipstatus & 0x20) {
                        if (retries--) {
                                printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x. Retrying...\n", adr, chipstatus);
                                timeo = jiffies + HZ;
                                put_chip(map, chip, adr);
                                spin_unlock(chip->mutex);
                                goto retry;
                        }
                        printk(KERN_DEBUG "Chip erase failed at 0x%08lx: status 0x%x\n", adr, chipstatus);
                        ret = -EIO;
                }
        } else {
                xip_enable(map, chip, adr);
                ret = 0;
        }

 out:   put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return ret;
}

int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
        unsigned long ofs, len;
        int ret;

        ofs = instr->addr;
        len = instr->len;

        ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
        if (ret)
                return ret;

        instr->state = MTD_ERASE_DONE;
        mtd_erase_callback(instr);

        return 0;
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

                spin_lock(chip->mutex);
                ret = get_chip(map, chip, chip->start, FL_SYNCING);

                if (!ret) {
                        chip->oldstate = chip->state;
                        chip->state = FL_SYNCING;
                        /* No need to wake_up() on this state change -
                         * as the whole point is that nobody can do anything
                         * with the chip now anyway.
                         */
                }
                spin_unlock(chip->mutex);
        }

        /* Unlock the chips again */

        for (i--; i >= 0; i--) {
                chip = &cfi->chips[i];

                spin_lock(chip->mutex);

                if (chip->state == FL_SYNCING) {
                        chip->state = chip->oldstate;
                        wake_up(&chip->wq);
                }
                spin_unlock(chip->mutex);
        }
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
                                                struct flchip *chip,
                                                unsigned long adr,
                                                int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        int status, ofs_factor = cfi->interleave * cfi->device_type;

        xip_disable(map, chip, adr+(2*ofs_factor));
        cfi_send_gen_cmd(0x90, 0x55, 0, map, cfi, cfi->device_type, NULL);
        chip->state = FL_JEDEC_QUERY;
        status = cfi_read_query(map, adr+(2*ofs_factor));
        xip_enable(map, chip, 0);
        printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
               adr, status);
        return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
                                       unsigned long adr, int len, void *thunk)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK;
        unsigned long timeo = jiffies + HZ;
        int ret;

        adr += chip->start;

        /* Let's determine this according to the interleave only once */
        status_OK = CMD(0x80);

        spin_lock(chip->mutex);
        ret = get_chip(map, chip, adr, FL_LOCKING);
        if (ret) {
                spin_unlock(chip->mutex);
                return ret;
        }

        ENABLE_VPP(map);
        xip_disable(map, chip, adr);

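        /* 0x60 is the lock setup command; the confirm cycle selects the
           operation: 0x01 sets the block lock bit, 0xD0 clears it. */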
        map_write(map, CMD(0x60), adr);
        if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
                map_write(map, CMD(0x01), adr);
                chip->state = FL_LOCKING;
        } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
                map_write(map, CMD(0xD0), adr);
                chip->state = FL_UNLOCKING;
        } else
                BUG();

        spin_unlock(chip->mutex);
        UDELAY(map, chip, adr, 1000000/HZ);
        spin_lock(chip->mutex);

        /* FIXME. Use a timer to check this, and return immediately. */
        /* Once the state machine's known to be working I'll do that */

        timeo = jiffies + (HZ*20);
        for (;;) {

                status = map_read(map, adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                /* OK Still waiting */
                if (time_after(jiffies, timeo)) {
                        map_word Xstatus;
                        map_write(map, CMD(0x70), adr);
                        chip->state = FL_STATUS;
                        Xstatus = map_read(map, adr);
                        xip_enable(map, chip, adr);
                        printk(KERN_ERR "waiting for lock/unlock to complete timed out. status = %lx, Xstatus = %lx.\n",
                               status.x[0], Xstatus.x[0]);
                        put_chip(map, chip, adr);
                        spin_unlock(chip->mutex);
                        return -EIO;
                }

                /* Latency issues. Drop the lock, wait a while and retry */
                spin_unlock(chip->mutex);
                UDELAY(map, chip, adr, 1);
                spin_lock(chip->mutex);
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        xip_enable(map, chip, adr);
        put_chip(map, chip, adr);
        spin_unlock(chip->mutex);
        return 0;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        int ret;

#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
               __FUNCTION__, ofs, len);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, 0);
#endif

        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
                ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
               __FUNCTION__, ret);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, 0);
#endif

        return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        int ret;

#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08zx\n",
               __FUNCTION__, ofs, len);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, 0);
#endif

        ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
                                        ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
        printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
               __FUNCTION__, ret);
        cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
                ofs, len, 0);
#endif

        return ret;
}

static int cfi_intelext_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;
        int ret = 0;

        for (i=0; !ret && i<cfi->numchips; i++) {
                chip = &cfi->chips[i];

                spin_lock(chip->mutex);

                switch (chip->state) {
                case FL_READY:
                case FL_STATUS:
                case FL_CFI_QUERY:
                case FL_JEDEC_QUERY:
                        if (chip->oldstate == FL_READY) {
                                chip->oldstate = chip->state;
                                chip->state = FL_PM_SUSPENDED;
                                /* No need to wake_up() on this state change -
                                 * as the whole point is that nobody can do anything
                                 * with the chip now anyway.
                                 */
                        } else {
                                /* There seems to be an operation pending. We must wait for it. */
                                printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
                                ret = -EAGAIN;
                        }
                        break;
                default:
                        /* Should we actually wait? Once upon a time these routines weren't
                           allowed to. Or should we return -EAGAIN, because the upper layers
                           ought to have already shut down anything which was using the device
                           anyway? The latter for now. */
                        printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
                        ret = -EAGAIN;
                        /* fall through */
                case FL_PM_SUSPENDED:
                        break;
                }
                spin_unlock(chip->mutex);
        }

        /* Unlock the chips again */

        if (ret) {
                for (i--; i >= 0; i--) {
                        chip = &cfi->chips[i];

                        spin_lock(chip->mutex);

                        if (chip->state == FL_PM_SUSPENDED) {
                                /* No need to force it into a known state here,
                                   because we're returning failure, and it didn't
                                   get power cycled */
                                chip->state = chip->oldstate;
                                chip->oldstate = FL_READY;
                                wake_up(&chip->wq);
                        }
                        spin_unlock(chip->mutex);
                }
        }

        return ret;
}

static void cfi_intelext_resume(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i;
        struct flchip *chip;

        for (i=0; i<cfi->numchips; i++) {

                chip = &cfi->chips[i];

                spin_lock(chip->mutex);

                /* Go to known state. Chip may have been power cycled */
                if (chip->state == FL_PM_SUSPENDED) {
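                        /* 0xFF puts the chip back into read array mode */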
                        map_write(map, CMD(0xFF), cfi->chips[i].start);
                        chip->oldstate = chip->state = FL_READY;
                        wake_up(&chip->wq);
                }

                spin_unlock(chip->mutex);
        }
}

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        kfree(cfi->chips[0].priv);
        kfree(cfi);
        kfree(mtd->eraseregions);
}

static char im_name_1[]="cfi_cmdset_0001";
static char im_name_3[]="cfi_cmdset_0003";

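/* Command set 0x0003 (Intel Standard) is handled by the same code as the
   0x0001 extended set, so both names are registered against it. */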
static int __init cfi_intelext_init(void)
{
        inter_module_register(im_name_1, THIS_MODULE, &cfi_cmdset_0001);
        inter_module_register(im_name_3, THIS_MODULE, &cfi_cmdset_0001);
        return 0;
}

static void __exit cfi_intelext_exit(void)
{
        inter_module_unregister(im_name_1);
        inter_module_unregister(im_name_3);
}

module_init(cfi_intelext_init);
module_exit(cfi_intelext_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");